repo_name
stringlengths
5
114
repo_url
stringlengths
24
133
snapshot_id
stringlengths
40
40
revision_id
stringlengths
40
40
directory_id
stringlengths
40
40
branch_name
stringclasses
209 values
visit_date
timestamp[ns]
revision_date
timestamp[ns]
committer_date
timestamp[ns]
github_id
int64
9.83k
683M
star_events_count
int64
0
22.6k
fork_events_count
int64
0
4.15k
gha_license_id
stringclasses
17 values
gha_created_at
timestamp[ns]
gha_updated_at
timestamp[ns]
gha_pushed_at
timestamp[ns]
gha_language
stringclasses
115 values
files
listlengths
1
13.2k
num_files
int64
1
13.2k
MrAhmedElsayed/python-lover
https://github.com/MrAhmedElsayed/python-lover
c7c60e7cbd55cd9e6f51255041a716fb7a27220c
ad720f80b8ae023109cc196ff525221f7e805571
28006d7806d492d088662d542aeb0e21c15b7aea
refs/heads/master
2020-04-26T12:26:59.392412
2019-10-23T10:51:46
2019-10-23T10:51:46
173,549,625
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.760617733001709, "alphanum_fraction": 0.760617733001709, "avg_line_length": 42.16666793823242, "blob_id": "3c20c32ca2ff0612b79f9f5ab093aa09984f6d84", "content_id": "8df83afca77a0349b74da3e2bee11a65b14a2455", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 259, "license_type": "no_license", "max_line_length": 133, "num_lines": 6, "path": "/README.txt", "repo_name": "MrAhmedElsayed/python-lover", "src_encoding": "UTF-8", "text": "Using IMAP (imapclient) to log in and access to your email (Gmail, Hotmail, Yahoo Mail) and select \"Sender\" then Delete all selected.\n\n#Note:\nyou have to unprotect your mail in email setting by using less protection. \n\nYou have idea Send me an email: ahmedsayed551991@gmail.com\n" }, { "alpha_fraction": 0.3456658124923706, "alphanum_fraction": 0.37351107597351074, "avg_line_length": 52.74787139892578, "blob_id": "2fb853d69103e57bda450dc92be91916572d87dc", "content_id": "1bac5a438f1df3f4e5c8ccfe288e59dc22be2118", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 51463, "license_type": "no_license", "max_line_length": 218, "num_lines": 940, "path": "/stable_version.py", "repo_name": "MrAhmedElsayed/python-lover", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\r\nimport time\r\n\r\nimport requests\r\nfrom imapclient import *\r\nfrom PyQt5 import QtCore, QtGui, QtWidgets\r\n\r\n\r\nclass Ui_Main_Window(object):\r\n def setupUi(self, Main_Window):\r\n Main_Window.setObjectName(\"Main_Window\")\r\n Main_Window.resize(840, 600)\r\n Main_Window.setStyleSheet(\"QToolTip\\n\"\r\n \"{\\n\"\r\n \" border: 1px solid black;\\n\"\r\n \" background-color: #ffa02f;\\n\"\r\n \" padding: 1px;\\n\"\r\n \" border-radius: 3px;\\n\"\r\n \" opacity: 100;\\n\"\r\n \"}\\n\"\r\n \"\\n\"\r\n \"QWidget\\n\"\r\n \"{\\n\"\r\n \" color: #b1b1b1;\\n\"\r\n \" background-color: #323232;\\n\"\r\n \"}\\n\"\r\n \"\\n\"\r\n 
\"QTreeView, QListView\\n\"\r\n \"{\\n\"\r\n \" background-color: silver;\\n\"\r\n \" margin-left: 5px;\\n\"\r\n \"}\\n\"\r\n \"\\n\"\r\n \"QWidget:item:hover\\n\"\r\n \"{\\n\"\r\n \" background-color: QLinearGradient( x1: 0, y1: 0, x2: 0, y2: 1, stop: 0 #ffa02f, stop: 1 #ca0619);\\n\"\r\n \" color: #000000;\\n\"\r\n \"}\\n\"\r\n \"\\n\"\r\n \"QWidget:item:selected\\n\"\r\n \"{\\n\"\r\n \" background-color: QLinearGradient( x1: 0, y1: 0, x2: 0, y2: 1, stop: 0 #ffa02f, stop: 1 #d7801a);\\n\"\r\n \"}\\n\"\r\n \"\\n\"\r\n \"QMenuBar::item\\n\"\r\n \"{\\n\"\r\n \" background: transparent;\\n\"\r\n \"}\\n\"\r\n \"\\n\"\r\n \"QMenuBar::item:selected\\n\"\r\n \"{\\n\"\r\n \" background: transparent;\\n\"\r\n \" border: 1px solid #ffaa00;\\n\"\r\n \"}\\n\"\r\n \"\\n\"\r\n \"QMenuBar::item:pressed\\n\"\r\n \"{\\n\"\r\n \" background: #444;\\n\"\r\n \" border: 1px solid #000;\\n\"\r\n \" background-color: QLinearGradient(\\n\"\r\n \" x1:0, y1:0,\\n\"\r\n \" x2:0, y2:1,\\n\"\r\n \" stop:1 #212121,\\n\"\r\n \" stop:0.4 #343434/*,\\n\"\r\n \" stop:0.2 #343434,\\n\"\r\n \" stop:0.1 #ffaa00*/\\n\"\r\n \" );\\n\"\r\n \" margin-bottom:-1px;\\n\"\r\n \" padding-bottom:1px;\\n\"\r\n \"}\\n\"\r\n \"\\n\"\r\n \"QMenu\\n\"\r\n \"{\\n\"\r\n \" border: 1px solid #000;\\n\"\r\n \"}\\n\"\r\n \"\\n\"\r\n \"QMenu::item\\n\"\r\n \"{\\n\"\r\n \" padding: 2px 20px 2px 20px;\\n\"\r\n \"}\\n\"\r\n \"\\n\"\r\n \"QMenu::item:selected\\n\"\r\n \"{\\n\"\r\n \" color: #000000;\\n\"\r\n \"}\\n\"\r\n \"\\n\"\r\n \"QWidget:disabled\\n\"\r\n \"{\\n\"\r\n \" color: #808080;\\n\"\r\n \" background-color: #323232;\\n\"\r\n \"}\\n\"\r\n \"\\n\"\r\n \"QAbstractItemView\\n\"\r\n \"{\\n\"\r\n \" background-color: QLinearGradient( x1: 0, y1: 0, x2: 0, y2: 1, stop: 0 #4d4d4d, stop: 0.1 #646464, stop: 1 #5d5d5d);\\n\"\r\n \"}\\n\"\r\n \"\\n\"\r\n \"QWidget:focus\\n\"\r\n \"{\\n\"\r\n \" /*border: 1px solid darkgray;*/\\n\"\r\n \"}\\n\"\r\n \"\\n\"\r\n \"QLineEdit\\n\"\r\n \"{\\n\"\r\n \" background-color: 
QLinearGradient( x1: 0, y1: 0, x2: 0, y2: 1, stop: 0 #4d4d4d, stop: 0 #646464, stop: 1 #5d5d5d);\\n\"\r\n \" padding: 1px;\\n\"\r\n \" border-style: solid;\\n\"\r\n \" border: 1px solid #1e1e1e;\\n\"\r\n \" border-radius: 5;\\n\"\r\n \"}\\n\"\r\n \"\\n\"\r\n \"QPushButton\\n\"\r\n \"{\\n\"\r\n \" color: #b1b1b1;\\n\"\r\n \" background-color: QLinearGradient( x1: 0, y1: 0, x2: 0, y2: 1, stop: 0 #565656, stop: 0.1 #525252, stop: 0.5 #4e4e4e, stop: 0.9 #4a4a4a, stop: 1 #464646);\\n\"\r\n \" border-width: 1px;\\n\"\r\n \" border-color: #1e1e1e;\\n\"\r\n \" border-style: solid;\\n\"\r\n \" border-radius: 6;\\n\"\r\n \" padding: 3px;\\n\"\r\n \" font-size: 12px;\\n\"\r\n \" padding-left: 5px;\\n\"\r\n \" padding-right: 5px;\\n\"\r\n \" min-width: 40px;\\n\"\r\n \"}\\n\"\r\n \"\\n\"\r\n \"QPushButton:pressed\\n\"\r\n \"{\\n\"\r\n \" background-color: QLinearGradient( x1: 0, y1: 0, x2: 0, y2: 1, stop: 0 #2d2d2d, stop: 0.1 #2b2b2b, stop: 0.5 #292929, stop: 0.9 #282828, stop: 1 #252525);\\n\"\r\n \"}\\n\"\r\n \"\\n\"\r\n \"QComboBox\\n\"\r\n \"{\\n\"\r\n \" selection-background-color: #ffaa00;\\n\"\r\n \" background-color: QLinearGradient( x1: 0, y1: 0, x2: 0, y2: 1, stop: 0 #565656, stop: 0.1 #525252, stop: 0.5 #4e4e4e, stop: 0.9 #4a4a4a, stop: 1 #464646);\\n\"\r\n \" border-style: solid;\\n\"\r\n \" border: 1px solid #1e1e1e;\\n\"\r\n \" border-radius: 5;\\n\"\r\n \"}\\n\"\r\n \"\\n\"\r\n \"QComboBox:hover,QPushButton:hover\\n\"\r\n \"{\\n\"\r\n \" border: 2px solid QLinearGradient( x1: 0, y1: 0, x2: 0, y2: 1, stop: 0 #ffa02f, stop: 1 #d7801a);\\n\"\r\n \"}\\n\"\r\n \"\\n\"\r\n \"\\n\"\r\n \"QComboBox:on\\n\"\r\n \"{\\n\"\r\n \" padding-top: 3px;\\n\"\r\n \" padding-left: 4px;\\n\"\r\n \" background-color: QLinearGradient( x1: 0, y1: 0, x2: 0, y2: 1, stop: 0 #2d2d2d, stop: 0.1 #2b2b2b, stop: 0.5 #292929, stop: 0.9 #282828, stop: 1 #252525);\\n\"\r\n \" selection-background-color: #ffaa00;\\n\"\r\n \"}\\n\"\r\n \"\\n\"\r\n \"QComboBox QAbstractItemView\\n\"\r\n \"{\\n\"\r\n 
\" border: 2px solid darkgray;\\n\"\r\n \" selection-background-color: QLinearGradient( x1: 0, y1: 0, x2: 0, y2: 1, stop: 0 #ffa02f, stop: 1 #d7801a);\\n\"\r\n \"}\\n\"\r\n \"\\n\"\r\n \"QComboBox::drop-down\\n\"\r\n \"{\\n\"\r\n \" subcontrol-origin: padding;\\n\"\r\n \" subcontrol-position: top right;\\n\"\r\n \" width: 15px;\\n\"\r\n \"\\n\"\r\n \" border-left-width: 0px;\\n\"\r\n \" border-left-color: darkgray;\\n\"\r\n \" border-left-style: solid; /* just a single line */\\n\"\r\n \" border-top-right-radius: 3px; /* same radius as the QComboBox */\\n\"\r\n \" border-bottom-right-radius: 3px;\\n\"\r\n \" }\\n\"\r\n \"\\n\"\r\n \"QComboBox::down-arrow\\n\"\r\n \"{\\n\"\r\n \" image: url(:/dark_orange/img/down_arrow.png);\\n\"\r\n \"}\\n\"\r\n \"\\n\"\r\n \"QGroupBox\\n\"\r\n \"{\\n\"\r\n \" border: 1px solid darkgray;\\n\"\r\n \" margin-top: 10px;\\n\"\r\n \"}\\n\"\r\n \"\\n\"\r\n \"QGroupBox:focus\\n\"\r\n \"{\\n\"\r\n \" border: 1px solid darkgray;\\n\"\r\n \"}\\n\"\r\n \"\\n\"\r\n \"QTextEdit:focus\\n\"\r\n \"{\\n\"\r\n \" border: 1px solid darkgray;\\n\"\r\n \"}\\n\"\r\n \"\\n\"\r\n \"QScrollBar:horizontal {\\n\"\r\n \" border: 1px solid #222222;\\n\"\r\n \" background: QLinearGradient( x1: 0, y1: 0, x2: 0, y2: 1, stop: 0.0 #121212, stop: 0.2 #282828, stop: 1 #484848);\\n\"\r\n \" height: 7px;\\n\"\r\n \" margin: 0px 16px 0 16px;\\n\"\r\n \"}\\n\"\r\n \"\\n\"\r\n \"QScrollBar::handle:horizontal\\n\"\r\n \"{\\n\"\r\n \" background: QLinearGradient( x1: 0, y1: 0, x2: 1, y2: 0, stop: 0 #ffa02f, stop: 0.5 #d7801a, stop: 1 #ffa02f);\\n\"\r\n \" min-height: 20px;\\n\"\r\n \" border-radius: 2px;\\n\"\r\n \"}\\n\"\r\n \"\\n\"\r\n \"QScrollBar::add-line:horizontal {\\n\"\r\n \" border: 1px solid #1b1b19;\\n\"\r\n \" border-radius: 2px;\\n\"\r\n \" background: QLinearGradient( x1: 0, y1: 0, x2: 1, y2: 0, stop: 0 #ffa02f, stop: 1 #d7801a);\\n\"\r\n \" width: 14px;\\n\"\r\n \" subcontrol-position: right;\\n\"\r\n \" subcontrol-origin: margin;\\n\"\r\n \"}\\n\"\r\n 
\"\\n\"\r\n \"QScrollBar::sub-line:horizontal {\\n\"\r\n \" border: 1px solid #1b1b19;\\n\"\r\n \" border-radius: 2px;\\n\"\r\n \" background: QLinearGradient( x1: 0, y1: 0, x2: 1, y2: 0, stop: 0 #ffa02f, stop: 1 #d7801a);\\n\"\r\n \" width: 14px;\\n\"\r\n \" subcontrol-position: left;\\n\"\r\n \" subcontrol-origin: margin;\\n\"\r\n \"}\\n\"\r\n \"\\n\"\r\n \"QScrollBar::right-arrow:horizontal, QScrollBar::left-arrow:horizontal\\n\"\r\n \"{\\n\"\r\n \" border: 1px solid black;\\n\"\r\n \" width: 1px;\\n\"\r\n \" height: 1px;\\n\"\r\n \" background: white;\\n\"\r\n \"}\\n\"\r\n \"\\n\"\r\n \"QScrollBar::add-page:horizontal, QScrollBar::sub-page:horizontal\\n\"\r\n \"{\\n\"\r\n \" background: none;\\n\"\r\n \"}\\n\"\r\n \"\\n\"\r\n \"QScrollBar:vertical\\n\"\r\n \"{\\n\"\r\n \" background: QLinearGradient( x1: 0, y1: 0, x2: 1, y2: 0, stop: 0.0 #121212, stop: 0.2 #282828, stop: 1 #484848);\\n\"\r\n \" width: 7px;\\n\"\r\n \" margin: 16px 0 16px 0;\\n\"\r\n \" border: 1px solid #222222;\\n\"\r\n \"}\\n\"\r\n \"\\n\"\r\n \"QScrollBar::handle:vertical\\n\"\r\n \"{\\n\"\r\n \" background: QLinearGradient( x1: 0, y1: 0, x2: 0, y2: 1, stop: 0 #ffa02f, stop: 0.5 #d7801a, stop: 1 #ffa02f);\\n\"\r\n \" min-height: 20px;\\n\"\r\n \" border-radius: 2px;\\n\"\r\n \"}\\n\"\r\n \"\\n\"\r\n \"QScrollBar::add-line:vertical\\n\"\r\n \"{\\n\"\r\n \" border: 1px solid #1b1b19;\\n\"\r\n \" border-radius: 2px;\\n\"\r\n \" background: QLinearGradient( x1: 0, y1: 0, x2: 0, y2: 1, stop: 0 #ffa02f, stop: 1 #d7801a);\\n\"\r\n \" height: 14px;\\n\"\r\n \" subcontrol-position: bottom;\\n\"\r\n \" subcontrol-origin: margin;\\n\"\r\n \"}\\n\"\r\n \"\\n\"\r\n \"QScrollBar::sub-line:vertical\\n\"\r\n \"{\\n\"\r\n \" border: 1px solid #1b1b19;\\n\"\r\n \" border-radius: 2px;\\n\"\r\n \" background: QLinearGradient( x1: 0, y1: 0, x2: 0, y2: 1, stop: 0 #d7801a, stop: 1 #ffa02f);\\n\"\r\n \" height: 14px;\\n\"\r\n \" subcontrol-position: top;\\n\"\r\n \" subcontrol-origin: margin;\\n\"\r\n \"}\\n\"\r\n 
\"\\n\"\r\n \"QScrollBar::up-arrow:vertical, QScrollBar::down-arrow:vertical\\n\"\r\n \"{\\n\"\r\n \" border: 1px solid black;\\n\"\r\n \" width: 1px;\\n\"\r\n \" height: 1px;\\n\"\r\n \" background: white;\\n\"\r\n \"}\\n\"\r\n \"\\n\"\r\n \"\\n\"\r\n \"QScrollBar::add-page:vertical, QScrollBar::sub-page:vertical\\n\"\r\n \"{\\n\"\r\n \" background: none;\\n\"\r\n \"}\\n\"\r\n \"\\n\"\r\n \"QTextEdit\\n\"\r\n \"{\\n\"\r\n \" background-color: #242424;\\n\"\r\n \"}\\n\"\r\n \"\\n\"\r\n \"QPlainTextEdit\\n\"\r\n \"{\\n\"\r\n \" background-color: #242424;\\n\"\r\n \"}\\n\"\r\n \"\\n\"\r\n \"QHeaderView::section\\n\"\r\n \"{\\n\"\r\n \" background-color: QLinearGradient(x1:0, y1:0, x2:0, y2:1, stop:0 #616161, stop: 0.5 #505050, stop: 0.6 #434343, stop:1 #656565);\\n\"\r\n \" color: white;\\n\"\r\n \" padding-left: 4px;\\n\"\r\n \" border: 1px solid #6c6c6c;\\n\"\r\n \"}\\n\"\r\n \"\\n\"\r\n \"QCheckBox:disabled\\n\"\r\n \"{\\n\"\r\n \"color: #414141;\\n\"\r\n \"}\\n\"\r\n \"\\n\"\r\n \"QDockWidget::title\\n\"\r\n \"{\\n\"\r\n \" text-align: center;\\n\"\r\n \" spacing: 3px; /* spacing between items in the tool bar */\\n\"\r\n \" background-color: QLinearGradient(x1:0, y1:0, x2:0, y2:1, stop:0 #323232, stop: 0.5 #242424, stop:1 #323232);\\n\"\r\n \"}\\n\"\r\n \"\\n\"\r\n \"QDockWidget::close-button, QDockWidget::float-button\\n\"\r\n \"{\\n\"\r\n \" text-align: center;\\n\"\r\n \" spacing: 1px; /* spacing between items in the tool bar */\\n\"\r\n \" background-color: QLinearGradient(x1:0, y1:0, x2:0, y2:1, stop:0 #323232, stop: 0.5 #242424, stop:1 #323232);\\n\"\r\n \"}\\n\"\r\n \"\\n\"\r\n \"QDockWidget::close-button:hover, QDockWidget::float-button:hover\\n\"\r\n \"{\\n\"\r\n \" background: #242424;\\n\"\r\n \"}\\n\"\r\n \"\\n\"\r\n \"QDockWidget::close-button:pressed, QDockWidget::float-button:pressed\\n\"\r\n \"{\\n\"\r\n \" padding: 1px -1px -1px 1px;\\n\"\r\n \"}\\n\"\r\n \"\\n\"\r\n \"QMainWindow::separator\\n\"\r\n \"{\\n\"\r\n \" background-color: 
QLinearGradient(x1:0, y1:0, x2:0, y2:1, stop:0 #161616, stop: 0.5 #151515, stop: 0.6 #212121, stop:1 #343434);\\n\"\r\n \" color: white;\\n\"\r\n \" padding-left: 4px;\\n\"\r\n \" border: 1px solid #4c4c4c;\\n\"\r\n \" spacing: 3px; /* spacing between items in the tool bar */\\n\"\r\n \"}\\n\"\r\n \"\\n\"\r\n \"QMainWindow::separator:hover\\n\"\r\n \"{\\n\"\r\n \"\\n\"\r\n \" background-color: QLinearGradient(x1:0, y1:0, x2:0, y2:1, stop:0 #d7801a, stop:0.5 #b56c17 stop:1 #ffa02f);\\n\"\r\n \" color: white;\\n\"\r\n \" padding-left: 4px;\\n\"\r\n \" border: 1px solid #6c6c6c;\\n\"\r\n \" spacing: 3px; /* spacing between items in the tool bar */\\n\"\r\n \"}\\n\"\r\n \"\\n\"\r\n \"QToolBar::handle\\n\"\r\n \"{\\n\"\r\n \" spacing: 3px; /* spacing between items in the tool bar */\\n\"\r\n \" background: url(:/dark_orange/img/handle.png);\\n\"\r\n \"}\\n\"\r\n \"\\n\"\r\n \"QMenu::separator\\n\"\r\n \"{\\n\"\r\n \" height: 2px;\\n\"\r\n \" background-color: QLinearGradient(x1:0, y1:0, x2:0, y2:1, stop:0 #161616, stop: 0.5 #151515, stop: 0.6 #212121, stop:1 #343434);\\n\"\r\n \" color: white;\\n\"\r\n \" padding-left: 4px;\\n\"\r\n \" margin-left: 10px;\\n\"\r\n \" margin-right: 5px;\\n\"\r\n \"}\\n\"\r\n \"\\n\"\r\n \"QProgressBar\\n\"\r\n \"{\\n\"\r\n \" border: 2px solid grey;\\n\"\r\n \" border-radius: 5px;\\n\"\r\n \" text-align: center;\\n\"\r\n \"}\\n\"\r\n \"\\n\"\r\n \"QProgressBar::chunk\\n\"\r\n \"{\\n\"\r\n \" background-color: #d7801a;\\n\"\r\n \" width: 2.15px;\\n\"\r\n \" margin: 0.5px;\\n\"\r\n \"}\\n\"\r\n \"\\n\"\r\n \"QTabBar::tab {\\n\"\r\n \" color: #b1b1b1;\\n\"\r\n \" border: 1px solid #444;\\n\"\r\n \" border-bottom-style: none;\\n\"\r\n \" background-color: #323232;\\n\"\r\n \" padding-left: 10px;\\n\"\r\n \" padding-right: 10px;\\n\"\r\n \" padding-top: 3px;\\n\"\r\n \" padding-bottom: 2px;\\n\"\r\n \" margin-right: -1px;\\n\"\r\n \"}\\n\"\r\n \"\\n\"\r\n \"QTabWidget::pane {\\n\"\r\n \" border: 1px solid #444;\\n\"\r\n \" top: 1px;\\n\"\r\n 
\"}\\n\"\r\n \"\\n\"\r\n \"QTabBar::tab:last\\n\"\r\n \"{\\n\"\r\n \" margin-right: 0; /* the last selected tab has nothing to overlap with on the right */\\n\"\r\n \" border-top-right-radius: 3px;\\n\"\r\n \"}\\n\"\r\n \"\\n\"\r\n \"QTabBar::tab:first:!selected\\n\"\r\n \"{\\n\"\r\n \" margin-left: 0px; /* the last selected tab has nothing to overlap with on the right */\\n\"\r\n \"\\n\"\r\n \"\\n\"\r\n \" border-top-left-radius: 3px;\\n\"\r\n \"}\\n\"\r\n \"\\n\"\r\n \"QTabBar::tab:!selected\\n\"\r\n \"{\\n\"\r\n \" color: #b1b1b1;\\n\"\r\n \" border-bottom-style: solid;\\n\"\r\n \" margin-top: 3px;\\n\"\r\n \" background-color: QLinearGradient(x1:0, y1:0, x2:0, y2:1, stop:1 #212121, stop:.4 #343434);\\n\"\r\n \"}\\n\"\r\n \"\\n\"\r\n \"QTabBar::tab:selected\\n\"\r\n \"{\\n\"\r\n \" border-top-left-radius: 3px;\\n\"\r\n \" border-top-right-radius: 3px;\\n\"\r\n \" margin-bottom: 0px;\\n\"\r\n \"}\\n\"\r\n \"\\n\"\r\n \"QTabBar::tab:!selected:hover\\n\"\r\n \"{\\n\"\r\n \" /*border-top: 2px solid #ffaa00;\\n\"\r\n \" padding-bottom: 3px;*/\\n\"\r\n \" border-top-left-radius: 3px;\\n\"\r\n \" border-top-right-radius: 3px;\\n\"\r\n \" background-color: QLinearGradient(x1:0, y1:0, x2:0, y2:1, stop:1 #212121, stop:0.4 #343434, stop:0.2 #343434, stop:0.1 #ffaa00);\\n\"\r\n \"}\\n\"\r\n \"\\n\"\r\n \"QRadioButton::indicator:checked, QRadioButton::indicator:unchecked{\\n\"\r\n \" color: #b1b1b1;\\n\"\r\n \" background-color: #323232;\\n\"\r\n \" border: 1px solid #b1b1b1;\\n\"\r\n \" border-radius: 6px;\\n\"\r\n \"}\\n\"\r\n \"\\n\"\r\n \"QRadioButton::indicator:checked\\n\"\r\n \"{\\n\"\r\n \" background-color: qradialgradient(\\n\"\r\n \" cx: 0.5, cy: 0.5,\\n\"\r\n \" fx: 0.5, fy: 0.5,\\n\"\r\n \" radius: 1.0,\\n\"\r\n \" stop: 0.25 #ffaa00,\\n\"\r\n \" stop: 0.3 #323232\\n\"\r\n \" );\\n\"\r\n \"}\\n\"\r\n \"\\n\"\r\n \"QCheckBox::indicator{\\n\"\r\n \" color: #b1b1b1;\\n\"\r\n \" background-color: #323232;\\n\"\r\n \" border: 1px solid #b1b1b1;\\n\"\r\n \" width: 
9px;\\n\"\r\n \" height: 9px;\\n\"\r\n \"}\\n\"\r\n \"\\n\"\r\n \"QRadioButton::indicator\\n\"\r\n \"{\\n\"\r\n \" border-radius: 6px;\\n\"\r\n \"}\\n\"\r\n \"\\n\"\r\n \"QRadioButton::indicator:hover, QCheckBox::indicator:hover\\n\"\r\n \"{\\n\"\r\n \" border: 1px solid #ffaa00;\\n\"\r\n \"}\\n\"\r\n \"\\n\"\r\n \"QCheckBox::indicator:checked\\n\"\r\n \"{\\n\"\r\n \" image:url(:/dark_orange/img/checkbox.png);\\n\"\r\n \"}\\n\"\r\n \"\\n\"\r\n \"QCheckBox::indicator:disabled, QRadioButton::indicator:disabled\\n\"\r\n \"{\\n\"\r\n \" border: 1px solid #444;\\n\"\r\n \"}\\n\"\r\n \"\\n\"\r\n \"\\n\"\r\n \"QSlider::groove:horizontal {\\n\"\r\n \" border: 1px solid #3A3939;\\n\"\r\n \" height: 8px;\\n\"\r\n \" background: #201F1F;\\n\"\r\n \" margin: 2px 0;\\n\"\r\n \" border-radius: 2px;\\n\"\r\n \"}\\n\"\r\n \"\\n\"\r\n \"QSlider::handle:horizontal {\\n\"\r\n \" background: QLinearGradient( x1: 0, y1: 0, x2: 0, y2: 1,\\n\"\r\n \" stop: 0.0 silver, stop: 0.2 #a8a8a8, stop: 1 #727272);\\n\"\r\n \" border: 1px solid #3A3939;\\n\"\r\n \" width: 14px;\\n\"\r\n \" height: 14px;\\n\"\r\n \" margin: -4px 0;\\n\"\r\n \" border-radius: 2px;\\n\"\r\n \"}\\n\"\r\n \"\\n\"\r\n \"QSlider::groove:vertical {\\n\"\r\n \" border: 1px solid #3A3939;\\n\"\r\n \" width: 8px;\\n\"\r\n \" background: #201F1F;\\n\"\r\n \" margin: 0 0px;\\n\"\r\n \" border-radius: 2px;\\n\"\r\n \"}\\n\"\r\n \"\\n\"\r\n \"QSlider::handle:vertical {\\n\"\r\n \" background: QLinearGradient( x1: 0, y1: 0, x2: 0, y2: 1, stop: 0.0 silver,\\n\"\r\n \" stop: 0.2 #a8a8a8, stop: 1 #727272);\\n\"\r\n \" border: 1px solid #3A3939;\\n\"\r\n \" width: 14px;\\n\"\r\n \" height: 14px;\\n\"\r\n \" margin: 0 -4px;\\n\"\r\n \" border-radius: 2px;\\n\"\r\n \"}\\n\"\r\n \"\\n\"\r\n \"QAbstractSpinBox {\\n\"\r\n \" padding-top: 2px;\\n\"\r\n \" padding-bottom: 2px;\\n\"\r\n \" border: 1px solid darkgray;\\n\"\r\n \"\\n\"\r\n \" border-radius: 2px;\\n\"\r\n \" min-width: 50px;\\n\"\r\n \"}\")\r\n\r\n self.centralwidget = 
QtWidgets.QWidget(Main_Window)\r\n self.centralwidget.setObjectName(\"centralwidget\")\r\n\r\n self.frame = QtWidgets.QFrame(self.centralwidget)\r\n self.frame.setGeometry(QtCore.QRect(10, 0, 821, 591))\r\n self.frame.setFrameShape(QtWidgets.QFrame.StyledPanel)\r\n self.frame.setFrameShadow(QtWidgets.QFrame.Raised)\r\n self.frame.setObjectName(\"frame\")\r\n\r\n # ============================================= LABELS ===================================================\r\n self.label_email = QtWidgets.QLabel(self.frame)\r\n self.label_email.setGeometry(QtCore.QRect(50, 20, 61, 21))\r\n font = QtGui.QFont()\r\n font.setPointSize(10)\r\n font.setBold(True)\r\n font.setWeight(75)\r\n self.label_email.setFont(font)\r\n self.label_email.setObjectName(\"label_email\")\r\n\r\n self.label_password = QtWidgets.QLabel(self.frame)\r\n self.label_password.setGeometry(QtCore.QRect(340, 20, 101, 21))\r\n font = QtGui.QFont()\r\n font.setPointSize(9)\r\n font.setBold(True)\r\n font.setWeight(75)\r\n self.label_password.setFont(font)\r\n self.label_password.setObjectName(\"label_password\")\r\n\r\n self.label_mailer = QtWidgets.QLabel(self.frame)\r\n self.label_mailer.setGeometry(QtCore.QRect(50, 90, 91, 31))\r\n font = QtGui.QFont()\r\n font.setPointSize(12)\r\n self.label_mailer.setFont(font)\r\n self.label_mailer.setObjectName(\"label_mailer\")\r\n\r\n self.label_emailslistname = QtWidgets.QLabel(self.frame)\r\n self.label_emailslistname.setGeometry(QtCore.QRect(60, 170, 171, 31))\r\n font = QtGui.QFont()\r\n font.setPointSize(12)\r\n self.label_emailslistname.setFont(font)\r\n self.label_emailslistname.setObjectName(\"label_emailslistname\")\r\n\r\n self.label_mailinfo = QtWidgets.QLabel(self.frame)\r\n self.label_mailinfo.setGeometry(QtCore.QRect(640, 200, 141, 221))\r\n font = QtGui.QFont()\r\n font.setFamily(\"Times New Roman\")\r\n font.setPointSize(10)\r\n font.setBold(True)\r\n font.setWeight(75)\r\n self.label_mailinfo.setFont(font)\r\n 
self.label_mailinfo.setAutoFillBackground(False)\r\n self.label_mailinfo.setFrameShape(QtWidgets.QFrame.NoFrame)\r\n self.label_mailinfo.setScaledContents(False)\r\n self.label_mailinfo.setAlignment(QtCore.Qt.AlignLeading | QtCore.Qt.AlignLeft | QtCore.Qt.AlignTop)\r\n self.label_mailinfo.setWordWrap(True)\r\n self.label_mailinfo.setObjectName(\"label_mailinfo\")\r\n\r\n self.label_emailconfirmed = QtWidgets.QLabel(self.frame)\r\n self.label_emailconfirmed.setGeometry(QtCore.QRect(640, 40, 121, 31))\r\n font = QtGui.QFont()\r\n font.setPointSize(9)\r\n font.setBold(True)\r\n font.setWeight(75)\r\n self.label_emailconfirmed.setFont(font)\r\n self.label_emailconfirmed.setObjectName(\"label_emailconfirmed\")\r\n\r\n self.label_progress = QtWidgets.QLabel(self.frame)\r\n self.label_progress.setGeometry(QtCore.QRect(60, 460, 71, 21))\r\n font = QtGui.QFont()\r\n font.setPointSize(12)\r\n self.label_progress.setFont(font)\r\n self.label_progress.setObjectName(\"label_progress\")\r\n\r\n # ============================================= LINE EDIT ===================================================\r\n self.lineedit_mailer = QtWidgets.QLineEdit(self.frame)\r\n self.lineedit_mailer.setGeometry(QtCore.QRect(50, 120, 441, 31))\r\n font = QtGui.QFont()\r\n font.setFamily(\"Times New Roman\")\r\n font.setPointSize(10)\r\n font.setBold(True)\r\n font.setWeight(75)\r\n self.lineedit_mailer.setFont(font)\r\n self.lineedit_mailer.setObjectName(\"lineedit_mailer\")\r\n\r\n self.lineedit_mailer.setText(\"noreply@medium.com\")\r\n # self.lineedit_mailer.setPlaceholderText(\"Enter email you wish delete it's messages\")\r\n self.lineedit_mailer.setDisabled(True)\r\n\r\n self.lineedit_password = QtWidgets.QLineEdit(self.frame)\r\n self.lineedit_password.setGeometry(QtCore.QRect(340, 40, 201, 31))\r\n font = QtGui.QFont()\r\n font.setFamily(\"Times New Roman\")\r\n font.setPointSize(11)\r\n font.setBold(True)\r\n font.setItalic(False)\r\n font.setWeight(75)\r\n 
self.lineedit_password.setFont(font)\r\n self.lineedit_password.setObjectName(\"lineedit_password\")\r\n self.lineedit_password.setPlaceholderText(\"Enter your mail password\")\r\n\r\n self.lineedit_yourmail = QtWidgets.QLineEdit(self.frame)\r\n self.lineedit_yourmail.setGeometry(QtCore.QRect(50, 40, 271, 31))\r\n font = QtGui.QFont()\r\n font.setFamily(\"Times New Roman\")\r\n font.setPointSize(11)\r\n font.setBold(True)\r\n font.setItalic(False)\r\n font.setWeight(75)\r\n self.lineedit_yourmail.setFont(font)\r\n self.lineedit_yourmail.setObjectName(\"lineedit_yourmail\")\r\n self.lineedit_yourmail.setPlaceholderText(\"Enter your email\")\r\n\r\n # ============================================= BUTTON ===================================================\r\n self.button_addmail = QtWidgets.QPushButton(self.frame)\r\n self.button_addmail.setGeometry(QtCore.QRect(510, 120, 101, 31))\r\n self.button_addmail.setObjectName(\"button_addmail\")\r\n self.button_addmail.clicked.connect(self.add_mail)\r\n self.button_addmail.setDisabled(True)\r\n\r\n self.button_exit = QtWidgets.QPushButton(self.frame)\r\n self.button_exit.setGeometry(QtCore.QRect(290, 540, 101, 31))\r\n self.button_exit.setObjectName(\"button_exit\")\r\n self.button_exit.clicked.connect(self.close_app_exit)\r\n\r\n self.button_delete = QtWidgets.QPushButton(self.frame)\r\n self.button_delete.setGeometry(QtCore.QRect(640, 430, 141, 81))\r\n font = QtGui.QFont()\r\n font.setPointSize(-1)\r\n font.setBold(True)\r\n font.setWeight(75)\r\n self.button_delete.setFont(font)\r\n self.button_delete.setObjectName(\"button_delete\")\r\n self.button_delete.clicked.connect(self.delete_messages)\r\n self.button_delete.setDisabled(True)\r\n\r\n self.button_login = QtWidgets.QPushButton(self.frame)\r\n self.button_login.setGeometry(QtCore.QRect(550, 40, 61, 31))\r\n self.button_login.setObjectName(\"button_login\")\r\n self.button_login.clicked.connect(self.login)\r\n\r\n # ========================================== PROGRESS 
BAR =================================================\r\n self.progressbar = QtWidgets.QProgressBar(self.frame)\r\n self.progressbar.setGeometry(QtCore.QRect(60, 490, 561, 23))\r\n self.progressbar.setMinimum(1)\r\n self.progressbar.setProperty(\"value\", 0)\r\n self.progressbar.setObjectName(\"progressbar\")\r\n\r\n # ============================================= TEXT BROWSER =================================================\r\n self.textBrowser = QtWidgets.QTextBrowser(self.frame)\r\n self.textBrowser.setGeometry(QtCore.QRect(55, 201, 561, 221))\r\n font = QtGui.QFont()\r\n font.setFamily(\"Times New Roman\")\r\n font.setPointSize(11)\r\n font.setBold(True)\r\n font.setWeight(75)\r\n self.textBrowser.setFont(font)\r\n self.textBrowser.setObjectName(\"textBrowser\")\r\n\r\n Main_Window.setCentralWidget(self.centralwidget)\r\n\r\n self.retranslateUi(Main_Window)\r\n QtCore.QMetaObject.connectSlotsByName(Main_Window)\r\n\r\n self.deleted_mailes = []\r\n print(\"deleted_mailes list\", self.deleted_mailes)\r\n\r\n imaplib._MAXLINE = 1000000\r\n\r\n # ============================================= FUNCTIONS =================================================\r\n # TODO: ADD CLEAR TEXT BROWSE BUTTON & BUTTONS ICONS\r\n\r\n # --------------------New--------------------\r\n def auto_detect_mail(self):\r\n # auto detect mail type (yahoo, Gmail, hotmail)\r\n # while loop\r\n if '@yahoo.com' in self.lineedit_yourmail.text():\r\n # print(\"auto_detect_mail(self): yahoo mail\")\r\n return 'yahoo'\r\n\r\n elif '@gmail.com' in self.lineedit_yourmail.text():\r\n print(\"auto_detect_mail(self): Gmail\")\r\n return 'gmail'\r\n\r\n elif '@hotmail.com' in self.lineedit_yourmail.text():\r\n # print(\"auto_detect_mail(self): hotmail\")\r\n return \"hotmail\"\r\n else:\r\n return False\r\n\r\n # --------------------New--------------------\r\n def internet_connection(self):\r\n # check internet connection\r\n url = 'http://www.google.com/'\r\n timeout = 5\r\n\r\n try:\r\n requests.get(url, 
timeout=timeout)\r\n return True\r\n\r\n except requests.ConnectionError:\r\n self.label_emailconfirmed.setText(\"No internet\")\r\n return False\r\n\r\n # --------------------New--------------------\r\n def login(self):\r\n try:\r\n self.lineedit_mailer.setDisabled(True)\r\n self.button_addmail.setDisabled(True)\r\n self.button_delete.setDisabled(True)\r\n\r\n # login email using address and password\r\n # yahoo mail\r\n if self.internet_connection() and self.auto_detect_mail() is 'yahoo':\r\n self.label_mailinfo.setText(\"please unprotect your email goto link yahoo imap activate\")\r\n self.yahoo_e_mail = self.lineedit_yourmail.text()\r\n self.yahoo_password = self.lineedit_password.text()\r\n self.yahoo_imap_Object = imapclient.IMAPClient('imap.mail.yahoo.com', ssl=True, port=\"993\")\r\n self.yahoo_imap_Object.login(self.yahoo_e_mail, self.yahoo_password)\r\n self.label_emailconfirmed.setText(\"yahoo connected\")\r\n print(\"you have internet connection and yahoo register\")\r\n\r\n self.lineedit_mailer.setDisabled(False)\r\n self.button_addmail.setDisabled(False)\r\n self.button_delete.setDisabled(False)\r\n return True\r\n\r\n # Gmail\r\n elif self.internet_connection() and self.auto_detect_mail() is 'gmail':\r\n self.label_mailinfo.setText(\"please unprotect your email goto link gmail imap activate\")\r\n self.gmail_e_mail = self.lineedit_yourmail.text()\r\n self.gmail_password = self.lineedit_password.text()\r\n self.gmail_imap_Object = imapclient.IMAPClient('imap.gmail.com', ssl=True)\r\n self.gmail_imap_Object.login(self.gmail_e_mail, self.gmail_password)\r\n self.label_emailconfirmed.setText(\"Gmail connected\")\r\n print(\"you have internet connection and 'gmail' register\")\r\n\r\n self.lineedit_mailer.setDisabled(False)\r\n self.button_addmail.setDisabled(False)\r\n self.button_delete.setDisabled(False)\r\n return True\r\n\r\n # Hotmail\r\n elif self.internet_connection() and self.auto_detect_mail() is 'hotmail':\r\n 
self.label_mailinfo.setText(\"please unprotect your email goto link hotmail imap activate\")\r\n self.hotmail = self.lineedit_yourmail.text()\r\n self.hotmail_password = self.lineedit_password.text()\r\n\r\n Server_name = \"outlook.office365.com\"\r\n self.hotmail_imap_Object = imapclient.IMAPClient(Server_name, ssl=True, port=993)\r\n self.hotmail_imap_Object.login(self.hotmail, self.hotmail_password)\r\n self.label_emailconfirmed.setText(\"hotmail connected\")\r\n print(\"you have internet connection and hotmail register\")\r\n\r\n self.lineedit_mailer.setDisabled(False)\r\n self.button_addmail.setDisabled(False)\r\n self.button_delete.setDisabled(False)\r\n return True\r\n\r\n else:\r\n if self.internet_connection():\r\n print(\"Unknown email Or wrong email\")\r\n return False\r\n\r\n except:\r\n self.label_mailinfo.setText(\"Wrong mail or password\")\r\n self.label_emailconfirmed.setText(\"Error\")\r\n print(\"Unknown email\")\r\n\r\n # --------------------New--------------------\r\n def add_mail(self): # so complicated\r\n # check if the mail have record messages if it haven't don't add\r\n\r\n # take the all messages from sender and store it in dictionary {key(sender): value(count)}.\r\n if self.lineedit_mailer.text() not in self.deleted_mailes and \"@\" in self.lineedit_mailer.text():\r\n self.deleted_mailes.append(self.lineedit_mailer.text())\r\n self.lineedit_mailer.clear()\r\n else:\r\n self.label_mailinfo.setText(\"Repeated Mail or wrong mailer\")\r\n return False\r\n try:\r\n # login email using address and password\r\n # yahoo mail\r\n if self.internet_connection() and self.auto_detect_mail() is 'yahoo':\r\n\r\n self.yahoo_imap_Object.select_folder('INBOX', readonly=False)\r\n for link in self.deleted_mailes:\r\n search_address = f\"FROM {link}\"\r\n self.yahoo_UIDs = self.yahoo_imap_Object.search(search_address)\r\n print(self.yahoo_UIDs)\r\n index = len(self.deleted_mailes) - (len(self.deleted_mailes) + 1)\r\n\r\n for i in self.yahoo_UIDs:\r\n 
self.content = f\"mail from: {self.deleted_mailes[index]}, messages count = {len(self.yahoo_UIDs)}\"\r\n print(self.content)\r\n\r\n self.textBrowser.append(str(self.content))\r\n\r\n # Gmail\r\n elif self.internet_connection() and self.auto_detect_mail() is 'gmail': # CONFIRMED FUNCTION\r\n self.gmail_imap_Object.select_folder('INBOX', readonly=False)\r\n for link in self.deleted_mailes:\r\n search = \"FROM {}\".format(link)\r\n self.gmail_UIDs = self.gmail_imap_Object.search(search)\r\n for i in self.gmail_UIDs:\r\n self.content = f\"FROM: {self.deleted_mailes[-1]}, MESSAGES = {len(self.gmail_UIDs)}\"\r\n\r\n self.textBrowser.append(str(self.content))\r\n\r\n # Hotmail\r\n elif self.internet_connection() and self.auto_detect_mail() is 'hotmail':\r\n\r\n self.hotmail_imap_Object.select_folder('INBOX', readonly=False)\r\n for link in self.deleted_mailes:\r\n search = \"FROM {}\".format(link)\r\n self.hotmail_UIDs = self.hotmail_imap_Object.search(search)\r\n print(self.hotmail_UIDs)\r\n\r\n for i in self.hotmail_UIDs:\r\n self.content = f\"FROM: {self.deleted_mailes[-1]}, MESSAGES = {len(self.hotmail_UIDs)}\"\r\n\r\n print(self.deleted_mailes)\r\n self.textBrowser.append(str(self.content))\r\n\r\n else:\r\n if self.internet_connection():\r\n self.label_mailinfo.setText(\"Write Valid Mail\")\r\n print(\"Unknown email Or wrong email\")\r\n\r\n except:\r\n # self.label_mailinfo.setText(\"please select\\nyour email type\")\r\n # self.label_emailconfirmed.setText(\"choose mail\")\r\n print(\"Unknown email\")\r\n return False\r\n\r\n # --------------------New--------------------\r\n def delete_messages(self): # take mails in the list and delete it if it isn't empty\r\n # connect progressbar to delete messages and delete all messages stored in textBrowser\r\n if len(self.deleted_mailes) > 0:\r\n if self.auto_detect_mail() is 'gmail':\r\n for link in self.deleted_mailes:\r\n from_search = \"FROM {}\".format(link)\r\n self.after_gmail_UIDs = 
self.gmail_imap_Object.search(from_search)\r\n for i in self.after_gmail_UIDs:\r\n self.gmail_imap_Object.delete_messages(i)\r\n self.label_mailinfo.setText(\"Process Finished\")\r\n return True\r\n\r\n elif self.auto_detect_mail() is 'yahoo':\r\n for link in self.deleted_mailes:\r\n from_search = \"FROM {}\".format(link)\r\n self.after_yahoo_UIDs = self.yahoo_imap_Object.search(from_search)\r\n for i in self.after_yahoo_UIDs:\r\n self.yahoo_imap_Object.delete_messages(i)\r\n self.label_mailinfo.setText(\"Process Finished\")\r\n return True\r\n\r\n elif self.auto_detect_mail() is 'hotmail':\r\n for link in self.deleted_mailes:\r\n from_search = \"FROM {}\".format(link)\r\n self.after_hotmail_UIDs = self.hotmail_imap_Object.search(from_search)\r\n for i in self.after_hotmail_UIDs:\r\n self.hotmail_imap_Object.delete_messages(i)\r\n self.label_mailinfo.setText(\"Process Finished\")\r\n return True\r\n\r\n else:\r\n # NOTHING TO DELETE\r\n return False\r\n\r\n # TODO: LOGOUT BEFORE YOY CLOSE THE APP\r\n def close_app_exit(self):\r\n # try to logout Before EXIT\r\n # ---------------\r\n\r\n # EXIT\r\n self.button_exit.clicked.connect(sys.exit(0))\r\n\r\n # TODO: TRY TO ADD ARABIC EDITION , SELECT LANGUAGE\r\n def retranslateUi(self, Main_Window):\r\n _translate = QtCore.QCoreApplication.translate\r\n Main_Window.setWindowTitle(_translate(\"Main_Window\", \"Mail Cleaner\"))\r\n self.label_mailer.setText(_translate(\"Main_Window\", \"E-mail From\"))\r\n self.button_addmail.setText(_translate(\"Main_Window\", \"Add mail to list\"))\r\n self.label_emailslistname.setText(_translate(\"Main_Window\", \"Emails You will delete\"))\r\n self.label_progress.setText(_translate(\"Main_Window\", \"Progress\"))\r\n self.button_exit.setText(_translate(\"Main_Window\", \"EXIT\"))\r\n self.button_delete.setText(_translate(\"Main_Window\", \"Delete\"))\r\n self.label_email.setText(_translate(\"Main_Window\", \"Your Mail\"))\r\n self.label_password.setText(_translate(\"Main_Window\", 
\"Your PassWord\"))\r\n self.label_mailinfo.setText(_translate(\"Main_Window\", \"information about activate mail\"))\r\n self.label_emailconfirmed.setText(_translate(\"Main_Window\", \"Please Login\"))\r\n self.button_login.setText(_translate(\"Main_Window\", \"Login\"))\r\n self.textBrowser.setHtml(_translate(\"Main_Window\",\r\n \"<!DOCTYPE HTML PUBLIC \\\"-//W3C//DTD HTML 4.0//EN\\\" \\\"http://www.w3.org/TR/REC-html40/strict.dtd\\\">\\n\"\r\n \"<html><head><meta name=\\\"qrichtext\\\" content=\\\"1\\\" /><style type=\\\"text/css\\\">\\n\"\r\n \"p, li { white-space: pre-wrap; }\\n\"\r\n \"</style></head><body style=\\\" font-family:\\'Times New Roman\\'; font-size:11pt; font-weight:600; font-style:normal;\\\">\\n\"\r\n \"<p style=\\\"-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\\\"><br /></p></body></html>\"))\r\n\r\n\r\nif __name__ == \"__main__\":\r\n import sys\r\n\r\n app = QtWidgets.QApplication(sys.argv)\r\n Main_Window = QtWidgets.QMainWindow()\r\n ui = Ui_Main_Window()\r\n ui.setupUi(Main_Window)\r\n Main_Window.show()\r\n sys.exit(app.exec_())\r\n" } ]
2
QCH-top/-
https://github.com/QCH-top/-
04ca8d6e539dd536d5a4f4ff0b1677d9619050d0
bc5311db993e1bc84bc42e6cb6b401038cb42fb3
abd770690b2382908d30dbdd92f6eca1b8b43569
refs/heads/main
2023-02-20T08:17:46.487404
2021-01-20T15:14:26
2021-01-20T15:14:26
331,338,123
9
3
null
2021-01-20T14:52:26
2021-01-20T14:52:31
2021-01-20T14:56:14
null
[ { "alpha_fraction": 0.45264846086502075, "alphanum_fraction": 0.5013375878334045, "avg_line_length": 23.216217041015625, "blob_id": "f168c0f5aea1318ff57dd7eaf4061e4dbd715877", "content_id": "9d14f620670fa477a464d643da32dc1e739bbf79", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4212, "license_type": "no_license", "max_line_length": 96, "num_lines": 148, "path": "/sonar/k-means-sonar.py", "repo_name": "QCH-top/-", "src_encoding": "UTF-8", "text": "import pandas as pd\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom sklearn import datasets, decomposition, manifold\r\n\r\nsonar = pd.read_csv(r'sonar.all-data', header=None, sep=',')\r\nsonar1 = sonar.iloc[0:208, 0:61]#取208行,前61列(数据)\r\ndata = np.mat(sonar1)\r\n#print(data)\r\n\r\nfor i in range(208):\r\n if sonar1.iloc[i, 60] == \"R\":\r\n sonar1.iloc[i, 60] = 0\r\n else:\r\n sonar1.iloc[i, 60] = 1#换成0,1来判断类别\r\n\r\ndata = np.mat(sonar1.iloc[:, 0:61])\r\ndata1 = np.mat(sonar1.iloc[:, 0:60])\r\n#print(data)\r\n#print(data1)\r\nk = 2 # k为聚类的类别数\r\nn = 208 # n为样本总个数\r\nd = 60 # d为数据集的特征数\r\n\r\n\r\n# k-means算法\r\ndef k_means():\r\n # 随机选k个初始聚类中心,聚类中心为每一类的均值向量\r\n m = np.zeros((k, d))#2行 60列零矩阵\r\n for i in range(k):\r\n m[i] = data1[np.random.randint(0, n)]\r\n \r\n # k_means聚类\r\n m_new = m.copy()\r\n #print(\"m_new = \",m_new)\r\n #print(m_new == m)\r\n t = 0\r\n while (1):\r\n # 更新聚类中心\r\n m[0] = m_new[0]\r\n m[1] = m_new[1]\r\n w1 = np.zeros((1, d+1))\r\n w2 = np.zeros((1, d+1))\r\n # 将每一个样本按照欧式距离聚类\r\n for i in range(n):#每一个样本跟两个聚类中心求欧氏距离\r\n distance = np.zeros(k)\r\n sample = data[i]#第i个样本\r\n for j in range(k): # 将每一个样本与聚类中心比较\r\n distance[j] = np.linalg.norm(sample[:, 0:60] - m[j])#二范数\r\n category = distance.argmin()#种类 距离小的下标是0则是第一类\r\n if category == 0:\r\n w1 = np.row_stack((w1, sample))# 第一类的数据\r\n if category == 1:\r\n w2 = np.row_stack((w2, sample))\r\n\r\n # 新的聚类中心\r\n w1 = np.delete(w1, 0, axis=0)#数组合并后会多出一个0删除0\r\n w2 = 
np.delete(w2, 0, axis=0)\r\n m_new[0] = np.mean(w1[:, 0:60], axis=0)\r\n m_new[1] = np.mean(w2[:, 0:60], axis=0)\r\n\r\n # 聚类中心不再改变时,聚类停止\r\n if (m[0] == m_new[0]).all() and (m[1] == m_new[1]).all():\r\n break\r\n\r\n print(t)\r\n t += 1\r\n\r\n w = np.vstack((w1, w2))\r\n label1 = np.zeros((len(w1), 1))\r\n label2 = np.ones((len(w2), 1))\r\n label = np.vstack((label1, label2))\r\n label = np.ravel(label)#合并为一维矩阵\r\n # print(label)\r\n test_PCA(w, label)#pac降维\r\n plot_PCA(w, label)\r\n return w1, w2\r\n\r\n\r\ndef test_PCA(*data):\r\n X, Y = data\r\n pca = decomposition.PCA(n_components=None)#自动选择降维维度\r\n pca.fit(X)#训练模型\r\n\r\n\r\n# print(\"explained variance ratio:%s\"%str(pca.explained_variance_ratio_))\r\n\r\ndef plot_PCA(*data):\r\n X, Y = data\r\n pca = decomposition.PCA(n_components=2)#降维2个维度\r\n pca.fit(X)\r\n X_r = pca.transform(X)#执行降维\r\n # print(X_r)\r\n\r\n fig = plt.figure()\r\n ax = fig.add_subplot(1, 1, 1)\r\n colors = ((1, 0, 0), (0.33, 0.33, 0.33),)\r\n for label, color in zip(np.unique(Y), colors):\r\n position = Y == label\r\n ax.scatter(X_r[position, 0], X_r[position, 1], label=\"category=%d\" % label, color=color)\r\n ax.set_xlabel(\"X[0]\")\r\n ax.set_ylabel(\"Y[0]\")\r\n ax.legend(loc=\"best\")\r\n ax.set_title(\"PCA\")\r\n # plt.show()\r\n\r\n\r\nif __name__ == '__main__':\r\n w1, w2 = k_means()\r\n accuracy = 0\r\n\r\n l1 = []\r\n l2 = []\r\n for i in range(w1.shape[0]):\r\n l1.append(w1[i, 4])\r\n l2.append(l1.count(0))\r\n l2.append(l1.count(1))\r\n\r\n l3 = np.mat(l2)\r\n count = l3.argmax()\r\n for i in range(w1.shape[0]):\r\n if w1[i, 60] == count:\r\n accuracy += 1\r\n\r\n count2 = 0\r\n if count == 0:\r\n count2=1\r\n else:\r\n count2=0\r\n\r\n for i in range(w2.shape[0]):\r\n if w2[i, 60] == count2:\r\n accuracy += 1\r\n\r\n\r\n # print(w1)\r\n\r\n accuracy /= 150\r\n print(\"准确度为:\")\r\n print(\"%.2f\" % accuracy)\r\n\r\n print(\"第一类的聚类样本数为:\")\r\n print(w1.shape[0])\r\n # print(w2)\r\n print(\"第二类的聚类样本数为:\")\r\n 
print(w2.shape[0])\r\n\r\n plt.show()\r\n \r\n" }, { "alpha_fraction": 0.5968406796455383, "alphanum_fraction": 0.6146978139877319, "avg_line_length": 29.214284896850586, "blob_id": "bf5d6da74877b3cd49515ed058e3bfd8bd922f9b", "content_id": "5a0a7af077a704090d07aee80cb358cb1acacb57", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4752, "license_type": "no_license", "max_line_length": 136, "num_lines": 140, "path": "/yale/Bagging_yale.py", "repo_name": "QCH-top/-", "src_encoding": "UTF-8", "text": "import numpy as np\r\nimport os\r\nimport skimage.io\r\nimport skimage.color\r\nfrom sklearn import svm,datasets\r\nimport matplotlib.pyplot as plt\r\nimport skimage.io as io\r\nimport matplotlib.pyplot as plt\r\nfrom sklearn import datasets,decomposition,manifold\r\nfrom sklearn.model_selection import GridSearchCV\r\nfrom sklearn.svm import SVC\r\nfrom sklearn.decomposition import PCA\r\nfrom sklearn.model_selection import KFold\r\nfrom sklearn.ensemble import RandomForestClassifier\r\n'''\r\ndata_dir = 'dataset/'#文件地址/名称\r\nclasses = os.listdir(data_dir)\r\ndata = []\r\nall_image = np.zeros(100*100)\r\n#all_image = np.array(all_image)\r\nall_label = []\r\nfor cls in classes:\r\n files = os.listdir(data_dir+cls)\r\n for f in files: \r\n img = skimage.io.imread(data_dir+cls+\"/\"+f)\r\n img = skimage.color.rgb2gray(img)#将图片转为灰度图\r\n img = img.reshape(1,100*100)\r\n #print(img)\r\n all_image = np.vstack((all_image,img))\r\n all_label.append(cls)\r\n \r\n \r\n\r\nall_image = np.array(all_image)\r\nall_label = np.array(all_label)\r\n\r\nall_label = all_label.reshape(len(all_label),1)\r\n\r\nall_image = np.delete(all_image,0,0) #删除第一行\r\nall_data = np.hstack((all_label,all_image))\r\n\r\nprint(type(all_image[0,0]))\r\nprint(all_data)\r\n'''\r\n\r\ndata_dir = 'dataset/'#文件地址/名称\r\nclasses = os.listdir(data_dir)\r\ndata = []\r\nall_image = np.zeros(100*100)\r\n#all_image = np.array(all_image)\r\nall_label = []\r\nfor cls 
in classes:\r\n files = os.listdir(data_dir+cls)\r\n for f in files: \r\n img = skimage.io.imread(data_dir+cls+\"/\"+f)\r\n img = skimage.color.rgb2gray(img)#将图片转为灰度图\r\n img = img.reshape(1,100*100)\r\n #print(img)\r\n all_image = np.vstack((all_image,img))\r\n all_label.append(int(cls))\r\n \r\n \r\n\r\nall_image = np.array(all_image)\r\nall_label = np.array(all_label)\r\n\r\nall_label = all_label.reshape(len(all_label),1)\r\n\r\nall_image = np.delete(all_image,0,0) #删除第一行\r\nall_image.dtype = 'float32'\r\n\r\npca = decomposition.PCA(n_components=10, svd_solver='auto',\r\n whiten=True).fit(all_image)\r\n# PCA降维后的总数据集\r\nall_image = pca.transform(all_image)\r\nall_data = np.hstack((all_label,all_image))\r\n\r\n#print(all_image)\r\n#print(all_label) \r\n#print(all_data)\r\n\r\nall_data_set = [] # 原始总数据集,二维矩阵n*m,n个样例,m个属性\r\nall_data_label = [] # 总数据对应的类标签\r\nall_data_set = all_data[ : ,1: ]\r\nall_data_label = all_data[:,0]\r\n\r\nX = np.array(all_data_set)\r\ny = np.array(all_data_label)\r\n\r\nn_estimators = [10,11]\r\ncriterion_test_names = [\"gini\", \"entropy\"]#测试 系数与熵\r\n #分类树: 基尼系数 最小的准则 在sklearn中可以选择划分的默认原则 \r\n #决策树:criterion:默认是’gini’系数,也可以选择信息增益的熵’entropy\r\n\r\n\r\n\r\ndef RandomForest(n_estimators,criterion):\r\n # 十折交叉验证计算出平均准确率\r\n # 交叉验证,随机取\r\n kf = KFold(n_splits=10, shuffle=True)\r\n precision_average = 0.0\r\n # param_grid = {'C': [1e3, 5e3, 1e4, 5e4, 1e5]} # 自动穷举出最优的C参数\r\n # clf = GridSearchCV(SVC(kernel=kernel_name, class_weight='balanced', gamma=param),\r\n # param_grid)s\r\n\r\n parameter_space = {\r\n \"min_samples_leaf\": [2, 4, 6], }#参数空间 随机森林RandomForest使用了CART决策树作为弱学习器\r\n clf = GridSearchCV(RandomForestClassifier(random_state=14,n_estimators = n_estimators,criterion = criterion), parameter_space, cv=5)\r\n for train, test in kf.split(X):\r\n clf = clf.fit(X[train], y[train].astype('int'))\r\n # print(clf.best_estimator_)\r\n test_pred = clf.predict(X[test])\r\n # print classification_report(y[test], test_pred)\r\n # 计算平均准确率\r\n 
precision = 0\r\n for i in range(0, len(y[test])):\r\n if (y[test][i] == test_pred[i]):\r\n precision = precision + 1\r\n precision_average = precision_average + float(precision) / len(y[test])\r\n precision_average = precision_average / 10\r\n print (u\"准确率为\" + str(precision_average))\r\n return precision_average\r\n\r\n\r\nfor criterion in criterion_test_names:\r\n print(\"criterion\")\r\n print(criterion)\r\n x_label = []\r\n y_label = []\r\n for i in range(10,15):\r\n print(i)\r\n y_label.append(RandomForest(i,criterion))\r\n x_label.append(i)\r\n plt.plot(x_label, y_label, label=criterion)\r\n# print(\"done in %0.3fs\" % (time() - t0))\r\nplt.xlabel(\"criterion\")\r\nplt.ylabel(\"Precision\")\r\nplt.title('Different Contrust')\r\nplt.legend()\r\nplt.show()" }, { "alpha_fraction": 0.428725928068161, "alphanum_fraction": 0.4795587360858917, "avg_line_length": 25.205883026123047, "blob_id": "02f63335e27829683759be464380ea8361032b68", "content_id": "9c18e1d21a8d288bee671eee4376f339b83278ba", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4985, "license_type": "no_license", "max_line_length": 96, "num_lines": 170, "path": "/iris/k-means-iris.py", "repo_name": "QCH-top/-", "src_encoding": "UTF-8", "text": "import pandas as pd\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom sklearn import datasets, decomposition, manifold\r\n\r\niris = pd.read_csv(r'iris.data', header=None, sep=',')\r\niris1 = iris.iloc[0:150, 0:5]\r\nfor i in range(150):\r\n if iris1.iloc[i, 4] == \"Iris-setosa\":\r\n iris1.iloc[i, 4] = 0\r\n elif iris1.iloc[i, 4] == \"Iris-versicolor\":\r\n iris1.iloc[i, 4] = 1\r\n else:\r\n iris1.iloc[i, 4] = 2\r\ndata = np.mat(iris1.iloc[:, 0:5])\r\ndata1 = np.mat(iris1.iloc[:, 0:4])\r\nk = 3 # k为聚类的类别数\r\nn = 150 # n为样本总个数\r\nd = 4 # t为数据集的特征数\r\n\r\n\r\n# k-means算法\r\ndef k_means():\r\n # 随机选k个初始聚类中心,聚类中心为每一类的均值向量\r\n m = np.zeros((k, d)) # m = (3, 4)\r\n for i in range(k):\r\n m[i] = 
data1[np.random.randint(0, 10)]\r\n # k_means聚类\r\n m_new = m.copy()\r\n\r\n t = 0\r\n while (1):\r\n # 更新聚类中心\r\n m[0] = m_new[0]\r\n m[1] = m_new[1]\r\n m[2] = m_new[2]\r\n\r\n w1 = np.zeros((1, d+1)) # 引进标签\r\n w2 = np.zeros((1, d+1))\r\n w3 = np.zeros((1, d+1))\r\n\r\n for i in range(n):\r\n distance = np.zeros(3)\r\n sample = data[i]\r\n for j in range(k): # 将每一个样本与聚类中心比较\r\n distance[j] = np.linalg.norm(sample[:, 0:4] - m[j]) # 求范数,默认计算欧式距离\r\n category = distance.argmin()\r\n if category == 0:\r\n w1 = np.row_stack((w1, sample)) # 行添加\r\n if category == 1:\r\n w2 = np.row_stack((w2, sample))\r\n if category == 2:\r\n w3 = np.row_stack((w3, sample))\r\n\r\n # 新的聚类中心\r\n w1 = np.delete(w1, 0, axis=0)\r\n w2 = np.delete(w2, 0, axis=0)\r\n w3 = np.delete(w3, 0, axis=0)\r\n m_new[0] = np.mean(w1[:, 0:4], axis=0)\r\n m_new[1] = np.mean(w2[:, 0:4], axis=0)\r\n m_new[2] = np.mean(w3[:, 0:4], axis=0)\r\n\r\n # 聚类中心不再改变时,聚类停止\r\n if (m[0] == m_new[0]).all() and (m[1] == m_new[1]).all() and (m[2] == m_new[2]).all():\r\n break\r\n # print(t)\r\n t += 1\r\n w = np.vstack((w1, w2))\r\n w = np.vstack((w, w3))\r\n '''\r\n # 画出每一次迭代的聚类效果图\r\n \r\n label1 = np.zeros((len(w1), 1))\r\n label2 = np.ones((len(w2), 1))\r\n label3 = np.zeros((len(w3), 1))\r\n for i in range(len(w3)):\r\n label3[i, 0] = 2\r\n label = np.vstack((label1, label2))\r\n label = np.vstack((label, label3))\r\n label = np.ravel(label)\r\n test_PCA(w, label)\r\n plot_PCA(w, label)\r\n'''\r\n return w1, w2, w3, t\r\n\r\n\r\ndef test_PCA(*data):\r\n X, Y = data\r\n pca = decomposition.PCA(n_components=None)\r\n pca.fit(X)\r\n\r\n\r\n# print(\"explained variance ratio:%s\"%str(pca.explained_variance_ratio_))\r\n\r\ndef plot_PCA(*data):\r\n X, Y = data\r\n pca = decomposition.PCA(n_components=50)\r\n pca.fit(X)\r\n X_r = pca.transform(X)\r\n # print(X_r)\r\n\r\n fig = plt.figure()\r\n ax = fig.add_subplot(1, 1, 1)\r\n colors = ((1, 0, 0), (0.3, 0.3, 0.3), (0, 0.3, 0.7),)\r\n for label, color in zip(np.unique(Y), 
colors):\r\n position = Y == label\r\n # print(position)\r\n ax.scatter(X_r[position, 0], X_r[position, 1], label=\"category=%d\" % label, color=color)\r\n ax.set_xlabel(\"X[0]\")\r\n ax.set_ylabel(\"Y[0]\")\r\n ax.legend(loc=\"best\")\r\n ax.set_title(\"PCA\")\r\n # plt.show()\r\n\r\n\r\nif __name__ == '__main__':\r\n w1, w2, w3, t = k_means()\r\n accuracy = 0\r\n\r\n l1 = []\r\n l2 = []\r\n for i in range(w1.shape[0]): # 标签投票,得票多数判断为该类\r\n l1.append(w1[i, 4])\r\n l2.append(l1.count(0))\r\n l2.append(l1.count(1))\r\n l2.append(l1.count(2))\r\n l3 = np.mat(l2)\r\n count = l3.argmax()\r\n for i in range(w1.shape[0]):\r\n if w1[i, 4] == count:\r\n accuracy += 1\r\n\r\n l1 = []\r\n l2 = []\r\n for i in range(w2.shape[0]):\r\n l1.append(w2[i, 4])\r\n l2.append(l1.count(0))\r\n l2.append(l1.count(1))\r\n l2.append(l1.count(2))\r\n l3 = np.mat(l2)\r\n count = l3.argmax()\r\n for i in range(w2.shape[0]):\r\n if w2[i, 4] == count:\r\n accuracy += 1\r\n\r\n l1 = []\r\n l2 = []\r\n for i in range(w3.shape[0]):\r\n l1.append(w3[i, 4])\r\n l2.append(l1.count(0))\r\n l2.append(l1.count(1))\r\n l2.append(l1.count(2))\r\n l3 = np.mat(l2)\r\n count = l3.argmax()\r\n for i in range(w3.shape[0]):\r\n if w3[i, 4] == count:\r\n accuracy += 1\r\n print(accuracy)\r\n accuracy /= 150 # 纯度计算\r\n print(\"准确度为:\")\r\n print(\"%.2f\"%accuracy)\r\n print(\"迭代次数为:\")\r\n print(t)\r\n print(\"第一类的聚类样本数为:\")\r\n print(w1.shape[0])\r\n print(\"第二类的聚类样本数为:\")\r\n print(w2.shape[0])\r\n print(\"第三类的聚类样本数为:\")\r\n print(w3.shape[0])\r\n plt.show()" }, { "alpha_fraction": 0.5078148245811462, "alphanum_fraction": 0.5199056267738342, "avg_line_length": 27.504348754882812, "blob_id": "012f44dee5d8c9a79a368cca9ff031056adc735d", "content_id": "bf490a948b0a98b560b723caf57a59388a2291e0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4009, "license_type": "no_license", "max_line_length": 104, "num_lines": 115, "path": "/sonar/FCM-sonar.py", 
"repo_name": "QCH-top/-", "src_encoding": "UTF-8", "text": "import numpy as np\r\nimport pandas as pd\r\n\r\n\r\ndef loadData(datapath):\r\n data = pd.read_csv(r'sonar.all-data', sep=',', header=None)\r\n data = data.sample(frac=1.0) # 打乱数据顺序\r\n dataX = data.iloc[:, :-1].values # 特征\r\n labels = data.iloc[:, -1].values # 标签\r\n # 将标签类别用 0, 1表示\r\n labels[np.where(labels == \"R\")] = 0\r\n labels[np.where(labels == \"M\")] = 1\r\n\r\n\r\n return dataX, labels\r\n\r\n#==========================隶属度矩阵=============================#\r\ndef initialize_U(samples, classes):\r\n U = np.random.rand(samples, classes) # 先生成随机矩阵\r\n sumU = 1 / np.sum(U, axis=1) # 求每行的和\r\n U = np.multiply(U.T, sumU) # 使隶属度矩阵每一行和为1\r\n\r\n return U.T\r\n\r\n\r\n# =====================样本和簇中心的距离,欧氏距离==================#\r\ndef distance(X, centroid):\r\n return np.sqrt(np.sum((X - centroid) ** 2, axis=1))\r\n\r\n\r\n#=======================更新隶属度矩阵Uij = 1/sum(dij/dki)**2/(a - 1)================# \r\ndef computeU(X, centroids, m=2):\r\n sampleNumber = X.shape[0] # 样本数(行数)\r\n classes = len(centroids)\r\n U = np.zeros((sampleNumber, classes))\r\n # 更新隶属度矩阵\r\n for i in range(classes):\r\n for k in range(classes):\r\n U[:, i] += (distance(X, centroids[i]) / distance(X, centroids[k])) ** (2 / (m - 1))\r\n U = 1 / U\r\n\r\n return U\r\n\r\n#===============================真实类别=========================#\r\ndef adjustCentroid(centroids, U, labels): # 调整使中心的标签代表类标签\r\n newCentroids = [[], []]\r\n curr = np.argmax(U, axis=1) # 行方向搜索最大值,当前中心顺序得到的标签\r\n for i in range(len(centroids)):\r\n index = np.where(curr == i) # 建立中心和类别的映射\r\n trueLabel = list(labels[index]) # 获取labels[index]出现次数最多的元素,就是真实类别\r\n trueLabel = max(set(trueLabel), key=trueLabel.count)\r\n newCentroids[trueLabel] = centroids[i]\r\n return newCentroids\r\n \r\n#=========================更新聚类中心=====================#\r\ndef cluster(data, labels, m, classes, EPS):\r\n \"\"\"\r\n :param data: 数据集\r\n :param m: 模糊系数(fuzziness coefficient)\r\n :param 
classes: 类别数\r\n :return: 聚类中心\r\n \"\"\"\r\n sampleNumber = data.shape[0] # 样本数\r\n\r\n U = initialize_U(sampleNumber, classes) # 初始化隶属度矩阵\r\n\r\n t = 0\r\n while True:\r\n centroids = []\r\n # 更新簇中心\r\n for i in range(classes):\r\n centroid = np.dot(U[:, i] ** m, data) / (np.sum(U[:, i] ** m))#sum(Uij**a)*x / sum(Uij**a)\r\n centroids.append(centroid)\r\n\r\n U_old = U.copy()\r\n U = computeU(data, centroids, m) # 计算新的隶属度矩阵\r\n t += 1 \r\n if np.max(np.abs(U - U_old)) < EPS: # abs绝对值\r\n # 这里的类别和数据标签并不是一一对应的, 调整使得第i个中心表示第i类\r\n centroids = adjustCentroid(centroids, U, labels)\r\n return centroids, U, t\r\n\r\n\r\n# 预测所属的类别\r\ndef predict(X, centroids):\r\n labels = np.zeros(X.shape[0])\r\n U = computeU(X, centroids) # 计算隶属度矩阵\r\n labels = np.argmax(U, axis=1) # 找到隶属度矩阵中每行的最大值,即该样本最大可能所属类别\r\n\r\n return labels\r\n\r\n\r\ndef main():\r\n\r\n dataX, labels = loadData('sonar (2).csv') # 读取数据\r\n\r\n EPS = 1e-6 # 停止误差条件\r\n m = 2 # 模糊因子\r\n classes = 2 # 类别数\r\n # 得到各类别的中心\r\n centroids, U, t = cluster(dataX, labels, m, classes, EPS)\r\n\r\n trainLabels_prediction = predict(dataX, centroids)\r\n\r\n accuracy = 0\r\n for i in range(208):\r\n if trainLabels_prediction[i] == labels[i]:\r\n accuracy += 1\r\n accuracy /= 208\r\n print(\"准确度为:%.2f\"%accuracy)\r\n print(\"迭代次数为:\", t)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n main()" }, { "alpha_fraction": 0.5143943428993225, "alphanum_fraction": 0.5399239659309387, "avg_line_length": 25.507463455200195, "blob_id": "40e8c544053078b2c09cfb30b7075bfb80667489", "content_id": "fcf8f7e5fe7dcda5e4dee875a12de95e3dd6691d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2156, "license_type": "no_license", "max_line_length": 72, "num_lines": 67, "path": "/iris/k_means.py", "repo_name": "QCH-top/-", "src_encoding": "UTF-8", "text": "import matplotlib.pyplot as plt\r\nfrom random import sample\r\nimport numpy as np\r\nfrom sklearn.cluster import KMeans\r\n#from sklearn import 
datasets\r\nfrom sklearn.datasets import load_iris\r\n#导入鸢尾花数据集\r\n#以二维数据为例 假设k=2,X为鸢尾花数据集前两维\r\niris = load_iris()\r\nX = iris.data[:,0:2] ##表示我们只取特征空间中的前两个维度 X类型是np.array\r\nprint(len(X))\r\n#绘制数据分布图 显示前两维数据\r\nplt.scatter(X[:, 0], X[:, 1], c = \"red\", marker='o', label='data')\r\nplt.xlabel('petal length')\r\nplt.ylabel('petal width')\r\nplt.legend(loc=2)\r\nplt.show()\r\n \r\n#从X中随机选择k个样本作为初始“簇中心”向量: μ(1),μ(2),...,,μ(k)\r\n#随机获得两个数据\r\nn = 3 #表示n个聚类\r\nu = sample(X.tolist(),n) #随机选择n个X中的向量作为聚类中心\r\nmax_iter = 0 #记录迭代次数\r\nu_before = u\r\n \r\nwhile max_iter<5:\r\n #簇分配过程\r\n c = []\r\n print(u_before,u)\r\n for i in range(len(X)):\r\n min = 1000\r\n index = 0\r\n for j in range(n):\r\n vec1 = X[i]\r\n vec2 = u[j]\r\n dist = np.sqrt(np.sum(np.square(vec1 - vec2)))\r\n if dist<min:\r\n min = dist\r\n index =j\r\n c.append(index)\r\n # print(i,\"------\",c[i])\r\n #移动聚类中心\r\n for j in range(n):\r\n sum = np.zeros(2) # 定义n为零向量 此处n为2\r\n count = 0 # 统计不同类别的个数\r\n for i in range(len(X)):\r\n if c[i]==j:\r\n sum = sum+X[i]\r\n count = count+1\r\n u[j] = sum/count\r\n \r\n print(max_iter,\"------------\",u)\r\n #设置迭代次数\r\n max_iter = max_iter + 1\r\nprint(np.array(c))\r\nlabel_pred = np.array(c)\r\n#绘制k-means结果\r\nx0 = X[label_pred == 0]\r\nx1 = X[label_pred == 1]\r\nx2 = X[label_pred == 2]\r\nplt.scatter(x0[:, 0], x0[:, 1], c = \"red\", marker='o', label='label0')\r\nplt.scatter(x1[:, 0], x1[:, 1], c = \"green\", marker='*', label='label1')\r\nplt.scatter(x2[:, 0], x2[:, 1], c = \"blue\", marker='+', label='label2')\r\nplt.xlabel('petal length')\r\nplt.ylabel('petal width')\r\nplt.legend(loc=2)\r\nplt.show()" }, { "alpha_fraction": 0.6285097002983093, "alphanum_fraction": 0.6491823792457581, "avg_line_length": 24.0930233001709, "blob_id": "c1c850fa7ae267e4c8a14bf70fdd423143403bff", "content_id": "e4ddd41757faa1b9455b3b3350a804b3df5664bd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3925, "license_type": 
"no_license", "max_line_length": 136, "num_lines": 129, "path": "/mnist/Bagging_minst.py", "repo_name": "QCH-top/-", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# coding: utf-8\n\n# In[8]:\n\n\nimport os\nimport gzip\nimport numpy as np\nimport tensorflow as tf\nimport pandas as pd\nfrom sklearn.svm import SVC\nfrom matplotlib import pyplot as plt\nfrom sklearn import datasets, decomposition, manifold\nfrom sklearn.model_selection import KFold\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.ensemble import RandomForestClassifier\n\n\n# In[9]:\n\n\ndata = pd.read_csv(r'mnist_train.csv',header = None)\ndata1 = pd.read_csv(r'mnist_test.csv',header = None)\ndata = np.array(data)\ndata1 = np.array(data1)\ntrain_label = data[:,0] #训练集标签\ntrain_images = data[:,1:784] #训练集图片\ntest_label = data1[:,0] #训练集标签\ntest_images = data1[:,1:784] #训练集图片\n\n\n# In[10]:\n\n\nall_data_set = [] # 原始总数据集,二维矩阵n*m,n个样例,m个属性\nall_data_label = [] # 总数据对应的类标签\nall_data_set = train_images\nall_data_label = train_label\n\nn_components = 16 #降为16维度\npca = decomposition.PCA(n_components=n_components, svd_solver='auto',\n whiten=True).fit(all_data_set)\n# PCA降维后的总数据集\nall_data_pca = pca.transform(all_data_set)\n# X为降维后的数据,y是对应类标签\nX = np.array(all_data_pca)\ny = np.array(all_data_label)\n\nn_estimators = [10,11]\ncriterion_test_names = [\"gini\", \"entropy\"]#测试 系数与熵\n #分类树: 基尼系数 最小的准则 在sklearn中可以选择划分的默认原则 \n #决策树:criterion:默认是’gini’系数,也可以选择信息增益的熵’entropy’\n\n\n# In[11]:\n\n\n#RF使用了CART决策树作为弱学习器,\n#在使用决策树的基础上,\n#RF对决策树的建立做了改进\n#RF通过随机选择节点上的一部分样本特征,\n#这个数字小于n\n#假设为nsub,\n#然后在这些随机选择的nsub个样本特征中,\n#选择一个最优的特征来做决策树的左右子树划分。\n#这样进一步增强了模型的泛化能力。\n#总的来说,随机森林是在将bagging方法的基学习器确定为决策树,并且在决策树的训练过程中引入了随机属性选择\n\n\n# In[12]:\n\n\ndef RandomForest(n_estimators,criterion):\n # 十折交叉验证计算出平均准确率\n # 交叉验证,随机取\n kf = KFold(n_splits=10, shuffle=True)\n precision_average = 0.0\n # param_grid = {'C': [1e3, 5e3, 1e4, 5e4, 1e5]} # 自动穷举出最优的C参数\n # clf = GridSearchCV(SVC(kernel=kernel_name, 
class_weight='balanced', gamma=param),\n # param_grid)\n\n parameter_space = {\n \"min_samples_leaf\": [2, 4, 6], }#参数空间 \n clf = GridSearchCV(RandomForestClassifier(random_state=14,n_estimators = n_estimators,criterion = criterion), parameter_space, cv=5)\n for train, test in kf.split(X):\n clf = clf.fit(X[train], y[train])\n # print(clf.best_estimator_)\n test_pred = clf.predict(X[test])\n # print classification_report(y[test], test_pred)\n # 计算平均准确率\n precision = 0\n for i in range(0, len(y[test])):\n if (y[test][i] == test_pred[i]):\n precision = precision + 1\n precision_average = precision_average + float(precision) / len(y[test])\n precision_average = precision_average / 10\n print (u\"准确率为\" + str(precision_average))\n return precision_average\n\n\n# In[13]:\n\n\nprint(n_estimators)\n\n\n# In[ ]:\n\n\nfor criterion in criterion_test_names:\n print(\"criterion\",criterion)\n x_label = []\n y_label = []\n for i in range(10,15):\n print(i)\n y_label.append(RandomForest(i,criterion))\n x_label.append(i)\n plt.plot(x_label, y_label, label=criterion)\n time += 1\n# print(\"done in %0.3fs\" % (time() - t0))\nplt.xlabel(\"criterion\")\nplt.ylabel(\"Precision\")\nplt.title('Different Contrust')\nplt.legend()\nplt.show()\n\n\n# In[ ]:\n\n\n\n\n" }, { "alpha_fraction": 0.8133333325386047, "alphanum_fraction": 0.8133333325386047, "avg_line_length": 36.5, "blob_id": "dfafcfd7a7655c2542cb725d5fbff40c91b23407", "content_id": "60c3aba771d86bb8cb8d47d4454785b698253313", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 151, "license_type": "no_license", "max_line_length": 70, "num_lines": 2, "path": "/README.md", "repo_name": "QCH-top/-", "src_encoding": "UTF-8", "text": "# -\n针对iris,sonar数据集的K_means(k均值聚类),FCM(模糊均值聚类)算法,以及minst,yale人脸数据集的SVM与包分类\n" } ]
7
Jimbomaniak/13_cinemas
https://github.com/Jimbomaniak/13_cinemas
5395e6c3a6046d72018b065339a3e2d760b91960
52aa65321c4d28ff23022de96fc43f0bfa48f217
1feab6b2429f2f3a3079e335d952ee4a7452092c
refs/heads/master
2021-01-10T23:37:58.200422
2016-10-17T12:30:40
2016-10-17T12:30:40
70,414,486
0
0
null
2016-10-09T16:06:18
2016-10-09T16:06:19
2016-10-01T09:10:39
Python
[ { "alpha_fraction": 0.7240143418312073, "alphanum_fraction": 0.7419354915618896, "avg_line_length": 26.899999618530273, "blob_id": "5700e0932e6b73d010d26423cbcb3b23bd32c952", "content_id": "2c73a8dc0153406509a61c291d73cf27ceb12f49", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 279, "license_type": "no_license", "max_line_length": 78, "num_lines": 10, "path": "/README.md", "repo_name": "Jimbomaniak/13_cinemas", "src_encoding": "UTF-8", "text": "# 13_cinemas\n\n###Prerequisites:\nRun in console pip install `-r requirements.txt` to install 3rd party modules.\n\n###This script can help you to find most popular movies in cinema now a day.\n\n###How to use:\n\nRun script - get resault top 10 movies by rating or cinema count number.\n" }, { "alpha_fraction": 0.6338475346565247, "alphanum_fraction": 0.6397458910942078, "avg_line_length": 34.54838562011719, "blob_id": "8cce929ea32306639ed9bc2a3dd289da43688d49", "content_id": "fad3e04ed3639acbe3a0255472c303fb21c70603", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2204, "license_type": "no_license", "max_line_length": 86, "num_lines": 62, "path": "/cinemas.py", "repo_name": "Jimbomaniak/13_cinemas", "src_encoding": "UTF-8", "text": "import requests\nimport re\nfrom bs4 import BeautifulSoup as BS\n\n\nAFISHA_URL = 'http://www.afisha.ru/msk/schedule_cinema/'\nKINOPOISK_URL = 'https://www.kinopoisk.ru/get'\nNUMBER_MOVIES_TO_SHOW = 10\n\n\ndef fetch_afisha_page():\n response = requests.get(AFISHA_URL)\n return response.content\n\n\ndef parse_afisha_list(html):\n soup = BS(html, 'html.parser')\n movies_info = soup.find('div', {\n 'class': 'b-theme-schedule m-schedule-with-collapse',\n 'id': 'schedule'})\n movies = []\n for movie in movies_info.find_all(class_='object s-votes-hover-area collapsed'):\n one_movie = fetch_movie_info(movie.h3.a.text)\n one_movie['title'] = movie.h3.a.text\n 
one_movie['cinema_number'] = len(movie.find_all('td', {'class': 'b-td-item'}))\n movies.append(one_movie)\n return movies\n\n\ndef fetch_movie_info(movie_title):\n payload = {'kp_query': movie_title, 'first': 'yes'}\n response = requests.get(KINOPOISK_URL, params=payload)\n soup = BS(response.content, 'html.parser')\n try:\n rate = float(soup.find('span', class_='rating_ball').text)\n rating_count_site = soup.find('span', class_='ratingCount').text\n rating_count_digits = re.search(r'\\d+', rating_count_site)\n rating_count = int(rating_count_digits.group())\n except AttributeError:\n rate, rating_count = 0, 0\n return {'rate': rate, 'ratingCount': rating_count}\n\n\ndef sort_movies(movies, how_sort_movies):\n sort_by = how_sort_movies and 'cinema_number' or 'rate'\n movies.sort(key=lambda item: item[sort_by], reverse=True)\n return movies\n\n\ndef output_movies_to_console(movies, number_movies_to_show):\n for num, movie in enumerate(movies[:number_movies_to_show]):\n print('{0}. {title}, movie rate - {rate},'\n 'you can watch in {cinema_number} cinema'.format(num+1, **movie))\n print('------')\n\n\nif __name__ == '__main__':\n movies = parse_afisha_list(fetch_afisha_page())\n how_sort_movies = int(input('0 - movies by rating\\n'\n '1 - movies by cinema numbers\\nEnter 1 or 0: '))\n sort_movies = sort_movies(movies, how_sort_movies)\n output_movies_to_console(sort_movies, NUMBER_MOVIES_TO_SHOW)\n" } ]
2
ajlangley/ml-training-progress-bar
https://github.com/ajlangley/ml-training-progress-bar
715f324a935114e00c1f1658b658e26a0c53a990
e3cb2a8b730636cd153c741572a8837f239c77cb
3e285a710911e5e1afa0636e252e7e8015c4f7e7
refs/heads/master
2018-07-02T04:00:59.763964
2018-06-30T16:46:42
2018-06-30T16:46:42
127,028,694
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7982456088066101, "alphanum_fraction": 0.7982456088066101, "avg_line_length": 56, "blob_id": "784ac6efa7ab66a9f73cc3ac194ae502d13e425c", "content_id": "c435579cde13676c42897efde781c1c4ef1b171c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 114, "license_type": "no_license", "max_line_length": 89, "num_lines": 2, "path": "/README.md", "repo_name": "ajlangley/ml-training-progress-bar", "src_encoding": "UTF-8", "text": "# Training Progress Bar\nAdd as a submodule to your ml projects to render a training progress bar in the terminal.\n" }, { "alpha_fraction": 0.5357961058616638, "alphanum_fraction": 0.55756014585495, "avg_line_length": 32.25714111328125, "blob_id": "a96313f7f8328f16206c5ad88a6323c48c556dad", "content_id": "2547884f28c704aed97ea895e530f46c5b5d49d6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3492, "license_type": "no_license", "max_line_length": 109, "num_lines": 105, "path": "/training_progress_bar.py", "repo_name": "ajlangley/ml-training-progress-bar", "src_encoding": "UTF-8", "text": "from colorama import Fore\nfrom colorama import Style\nimport sys\nimport os\n\n\ncolors = {'RED': Fore.RED, 'YELLOW':Fore.YELLOW, 'GREEN': Fore.GREEN, 'BLUE': Fore.BLUE, 'BLACK': Fore.BLACK,\n 'WHITE': Fore.WHITE, 'CYAN': Fore.CYAN}\n\n\nclass TrainingProgressBar:\n def __init__(self, n_examples, bar_color=None, text_color=None, show_epoch_on_newline=True):\n self.epoch_num = 1\n self.n_examples = n_examples\n self.total_examples_seen = 0\n self.epoch_examples_seen = 0\n self.total_loss = 0\n self.epoch_loss = 0\n self.bar_color = None\n self.text_color = None\n self.show_epoch_on_newline = show_epoch_on_newline\n\n # Appearance configurations\n if bar_color:\n self.bar_color = bar_color.upper()\n if text_color:\n self.text_color = text_color.upper()\n\n def update(self, examples_seen, loss):\n self.epoch_examples_seen += 
examples_seen\n self.total_examples_seen += examples_seen\n self.epoch_loss += loss * examples_seen\n self.total_loss += loss * examples_seen\n\n def show(self):\n percent_complete = self.epoch_examples_seen / self.n_examples * 100\n\n avg_epoch_loss = self.epoch_loss / self.epoch_examples_seen if self.epoch_examples_seen else 0\n avg_total_loss = self.total_loss / self.total_examples_seen if self.total_examples_seen else 0\n\n sys.stdout.flush()\n print('\\r', end='')\n\n self.set_text_color()\n print(f'Epoch {self.epoch_num}: ', end='')\n\n self.print_bar(percent_complete)\n\n self.set_text_color()\n print((50 - int(percent_complete / 2)) * ' ', end='')\n print('|{0:.1f}%'.format(percent_complete), end='')\n print('\\t [Avg Error for Epoch]: {0:.4f}'.format(avg_epoch_loss), end='')\n print('\\t [Total Training Loss]: {0:.4f}'.format(avg_total_loss), end='')\n print(f'{Style.RESET_ALL}', end='')\n\n self._update_epoch()\n\n def print_bar(self, percent_complete):\n self.set_bar_color()\n\n # Print n / 2 blocks, followed by a fraction of a block\n # for added precision\n for i in range(int(percent_complete / 2)):\n print(u'\\u2589', end='')\n self.print_fraction(percent_complete / 2 - int(percent_complete / 2))\n\n def set_bar_color(self):\n if self.bar_color:\n print(f'{colors[self.bar_color]}', end='')\n else:\n print(f'{Style.RESET_ALL}', end='')\n\n def set_text_color(self):\n if self.text_color:\n print(f'{colors[self.text_color]}', end='')\n else:\n print(f'{Style.RESET_ALL}', end='')\n\n def _update_epoch(self):\n if self.epoch_examples_seen >= self.n_examples:\n self.epoch_num += 1\n self.epoch_examples_seen = 0\n self.epoch_loss = 0\n\n if self.show_epoch_on_newline:\n print('\\n')\n\n @staticmethod\n def print_fraction(fraction):\n if fraction >= 0.875:\n print(u'\\u2589', end='')\n elif fraction >= 0.75:\n print(u'\\u258A', end='')\n elif fraction >= 0.625:\n print(u'\\u258B', end='')\n elif fraction >= 0.5:\n print(u'\\u258C', end='')\n elif fraction >= 
0.375:\n print(u'\\u258D', end='')\n elif fraction >= 0.25:\n print(u'\\u258E', end='')\n elif fraction >= 0.125:\n print(u'\\u258F', end='')\n else:\n print(' ', end='')\n" } ]
2
aashutosh0012/Youtube-Video-Downloader
https://github.com/aashutosh0012/Youtube-Video-Downloader
8c0da0477dedcc0681d01df611e757f9cf31ce27
e553f2d356de4f2cf469d0b3323a67a24f319c8a
8f8c63e4b84d240331da9e2ca3815e0a01b670a3
refs/heads/main
2023-05-06T13:38:59.959080
2021-05-30T10:33:45
2021-05-30T10:33:45
372,182,914
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.6385002732276917, "alphanum_fraction": 0.6650811433792114, "avg_line_length": 43.846153259277344, "blob_id": "54d9c028ae2a9bfe0f2e780f5c96b13b325dedd3", "content_id": "45336142b33999d7496f3f816e8b5845738467bc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3574, "license_type": "no_license", "max_line_length": 173, "num_lines": 78, "path": "/youtube_downloader.py", "repo_name": "aashutosh0012/Youtube-Video-Downloader", "src_encoding": "UTF-8", "text": "from tkinter import *\r\nfrom tkinter import ttk\r\nfrom pytube import YouTube\r\nimport os,requests\r\nfrom PIL import ImageTk,Image\r\n\r\n\r\n#Get Current Path or Script Path where Video will be Downloaded\r\ntry:\r\n PATH = os.path.dirname(os.path.abspath(__file__))\r\nexcept NameError:\r\n PATH = os.path.dirname(os.path.dirname(os.path.abspath(os.getcwd())))\r\n\r\nwindow = Tk()\r\nwindow.title('Youtube Downloader')\r\nwindow.geometry('400x600+100+100')\r\nurl = StringVar()\r\ndef get_data():\r\n global yt,res,title,video_url,url,resolutions\r\n video_url = url.get() \r\n yt = YouTube(video_url)\r\n title = yt.title\r\n\r\n #Download Thumbnail\r\n v_id = video_url.split('=')[1]+'.jpg'\r\n thumbnail_file = os.path.join(PATH,v_id)\r\n thumbnail = requests.get(yt.thumbnail_url)\r\n with open(thumbnail_file,'wb') as file:\r\n file.write(thumbnail.content)\r\n\r\n #Display Thumbnail\r\n img = Image.open(thumbnail_file)\r\n size = 300, 300\r\n img.thumbnail(size, Image.ANTIALIAS)\r\n img = ImageTk.PhotoImage(img) \r\n thumbnail_label = Label(window, image = img,bd = 5,relief = 'solid')\r\n thumbnail_label.grid(row=3,column=0,columnspan=2)\r\n thumbnail_label.image=img\r\n\r\n #Display Title of Video in Window\r\n title_message = Message(window, text=title, font=(\"Arial Bold\",10),width=300,bd=10).grid(row=4,columnspan=2)\r\n available_res = [stream.resolution for stream in 
yt.streams.filter(progressive=True,file_extension='mp4').order_by(\"resolution\")]\r\n \r\n #Select Avaialable Resolution of Video to Download\r\n choose_res = Label(window,text='Select Resolution').grid(row=5,column=0)\r\n resolutions = ttk.Combobox(window)\r\n resolutions['values'] = available_res\r\n resolutions.current(0)\r\n resolutions.grid(row = 5,column = 1)\r\n res = resolutions.get()\r\n #print(f\"resolution = {res} title = {title}\") \r\n \r\n #Display Download Button\r\n download_btn = Button(window,text=\"Download Video\",bd=5, relief='ridge', bg = 'violet', fg='Black', command = download).grid(row = 6, column = 0,columnspan = 2,pady = 3)\r\n\r\ndef download():\r\n download_label = Label(window,text = 'Downloading...',bd = 5).grid(row = 6,column = 0, columnspan=2,pady = 3)\r\n global yt,res,title,resolutions\r\n res = resolutions.get()\r\n #print(res)\r\n\r\n #Download Video in mp4 Format and Selected Resolution\r\n video = yt.streams.filter(file_extension = 'mp4',progressive = True,res = res)[0]\r\n video.download()\r\n \r\n #Display Video Downlaoded Messaged along with Path\r\n download_label = Label(window,text = 'Download Finished',bd = 5).grid(row = 6,column = 0, columnspan = 2,ipady = 3)\r\n success_message = Text(window,height = 5,width = 50,wrap = CHAR, bd = 0)\r\n success_message.insert(1.0,f'Video Downloaded \\n {os.path.join(PATH,title)}')\r\n success_message.grid(row = 7, column = 0,columnspan = 2)\r\n success_message.configure(state = \"disabled\")\r\n\r\nL1 = Label(window,text='Enter Youtube Video URL', font = ('Arial',15)).grid(row = 0,column = 0, columnspan = 2)\r\nurl_Entry = Entry(window,textvariable = url, width = 60,bd = 10,relief = 'ridge').grid(row = 1, column = 0, pady = 5, ipady = 5,columnspan = 2)\r\nfind_video = Button(window,text = 'Find Video',bd = 5, relief = 'ridge',command = get_data).grid(row = 2, column = 0, columnspan = 2)\r\nquit = Button(window, text = 'Close',bd = 5, relief = 'ridge',command = 
window.destroy).grid(row = 8, column = 0, columnspan = 2,rowspan = 2)\r\ninfo = Label(window, text = 'Made by Aashutosh', font = (\"Courier New\",10)).grid(row = 15, column = 0, columnspan = 2)\r\nwindow.mainloop()" }, { "alpha_fraction": 0.6483103632926941, "alphanum_fraction": 0.7947434186935425, "avg_line_length": 48.8125, "blob_id": "a416f5a204439d18b544b2c2914fe30c5c3f1745", "content_id": "6887927d8d7139587c55675b415b8443afb69e17", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 799, "license_type": "no_license", "max_line_length": 155, "num_lines": 16, "path": "/README.md", "repo_name": "aashutosh0012/Youtube-Video-Downloader", "src_encoding": "UTF-8", "text": "# Youtube-Video-Downloader\nDownload Videos from Youtube to your local system.\n\nThis Program uses **[Pytube](https://pytube.io/en/latest/index.html)** Library to download videos from Youtube and **tkinter** to diplay Graphical Window..\n\n\n![image](https://user-images.githubusercontent.com/21958711/120100778-38450280-c160-11eb-9945-983bee408cfb.png)\n\nEnter URL of the Youtube Video & hit Find.\n\n![image](https://user-images.githubusercontent.com/21958711/120100788-4d219600-c160-11eb-948d-a210f4ca2e7f.png)\n\nApp Searches for the Video and provides a Dropdown button to Select available Resolution and a Download Button.\nAfter Video gets downloaded, it displays the File Path and Name.\n\n![image](https://user-images.githubusercontent.com/21958711/120100824-704c4580-c160-11eb-861c-1cf249f600de.png)\n\n\n" } ]
2
portelaoliveira/CSV_e_JSON
https://github.com/portelaoliveira/CSV_e_JSON
69dad48dd02ec7cd2c6feac72ae7b0ebe6e6bf89
0c8c5b93f7a2ed4433d3133c93d004687aa4c497
58f936e6c296576d1a5b3c787977a3595b0c0347
refs/heads/master
2022-07-29T17:38:53.876534
2020-05-16T19:58:26
2020-05-16T19:58:26
264,503,010
2
0
null
null
null
null
null
[ { "alpha_fraction": 0.7212121486663818, "alphanum_fraction": 0.7212121486663818, "avg_line_length": 56.07692337036133, "blob_id": "13bad409350942bd4db232096ae4de3e069f7ebd", "content_id": "bb76a1cbe361bc5fa385a5e91aaef420f9285bcd", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1498, "license_type": "permissive", "max_line_length": 160, "num_lines": 26, "path": "/APIS/python_repos.py", "repo_name": "portelaoliveira/CSV_e_JSON", "src_encoding": "UTF-8", "text": "import requests\n\n# Faz uma chamada de API e armazena a resposta.\nurl = 'https://api.github.com/search/repositories?q=language:python&sort=stars' # Armazenamos o URL da chamada da API.\nr = requests.get(url) # Usamos requests para fazer a chamada.\nprint('Status Code:', r.status_code)\n''' O objeto com a resposta tem um atributo chamado status_code, que nos informa se a resposta\n foi bem sucedida. '''\n\n# Armazena a resposta da API em uma variável.\nrespose_dict = r.json()\nprint('Total repositories:', respose_dict['total_count']) # Total de repositórios python no GitHub.\n\n# Explora informaçôes sobre os repositórios.\nrepo_dicts = respose_dict['items']\nprint('Repositories returned:', len(repo_dicts))\n\nprint(\"\\nSelected information about each repository:\")\nfor repo_dict in repo_dicts:\n print(f\"Name: {repo_dict['name']}\") # Nome do projeto.\n print(f\"Owner: {repo_dict['owner']['login']}\") # Acessa o dicionário que o representa com owner e então usamos a chave login para obter o seu nome de login.\n print(f\"Stars: {repo_dict['stargazers_count']}\") # Exibe a quantidade de estrelas que o projeto recebeu.\n print(f\"Repository: {repo_dict['html_url']}\") # Extraí o URL do repositório no GitHub.\n print(f\"Created: {repo_dict['created_at']}\") # Mostra a data em que o projeto foi criado.\n print(f\"Updated: {repo_dict['updated_at']}\") # Quando foi atualizado pela última vez.\n print(f\"Description: {repo_dict['description']}\") 
# Exibe a descrição do repositório.\n\n" }, { "alpha_fraction": 0.719462513923645, "alphanum_fraction": 0.729234516620636, "avg_line_length": 47.156864166259766, "blob_id": "c02cda914534ad56cbb4c02da3d24b8a979fe33c", "content_id": "6bd3f57500716b5f4957c9fe2fcffb72e8028399", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2480, "license_type": "permissive", "max_line_length": 125, "num_lines": 51, "path": "/APIS/bar_descriptions_visula.py", "repo_name": "portelaoliveira/CSV_e_JSON", "src_encoding": "UTF-8", "text": "import requests\nimport pygal\nfrom pygal.style import LightColorizedStyle as LCS, LightenStyle as LS\n\n# Faz uma chamada de API e armazena a resposta.\nurl = 'https://api.github.com/search/repositories?q=language:python&sort=stars' # Armazenamos o URL da chamada da API.\nr = requests.get(url) # Usamos requests para fazer a chamada.\nprint('Status Code:', r.status_code)\n''' O objeto com a resposta tem um atributo chamado status_code, que nos informa se a resposta\n foi bem sucedida. '''\n\n# Armazena a resposta da API em uma variável.\nrespose_dict = r.json()\nprint('Total repositories:', respose_dict['total_count']) # Total de repositórios python no GitHub.\n\n# Explora informaçôes sobre os repositórios.\nrepo_dicts = respose_dict['items']\nprint('Repositories returned:', len(repo_dicts))\n\nnames, plot_dicts = [], []\n''' As listas vazias vão armazenar o nome de cada projeto para rotular as barras e o número de estrelas\n para determinar a altura delas. 
'''\nfor repo_dict in repo_dicts:\n names.append(repo_dict['name'])\n\n plot_dict = {\n 'value': repo_dict['stargazers_count'],\n 'label': repo_dict['description'],\n 'xlink': repo_dict['html_url'], # O pygal usa o URL associado a 'xlink' para transformar cada barra em um link ativo.\n }\n plot_dicts.append(plot_dict)\n\n# Cria a vizualização.\nmy_style = LS('#333366', base_style = LCS)\n\nmy_config = pygal.Config() # Cria uma instância da classse Config().\nmy_config.x_label_rotation = 45 # x_label_rotation = 45 define a rotação dos nomes ao longo do eixo X.\nmy_config.show_legend = False # Não mostra as legendas.\nmy_config.title_font_size = 24 # Definimos o tamaho da fonte para o título do gráfico.\nmy_config.label_font_size = 14 # Definimos o tamaho da fonte para o rótulos menor (nomes dos projetos ao longo do eixox).\nmy_config.major_label_font_size = 18 # Definimos o tamaho da fonte para o rótulos maior (número no eixo y).\nmy_config.truncate_label = 15 # Reduz os nomes dos projetos mais longos a 15 caracteres.\nmy_config.show_y_guides = False # Oculta as linhas horizontais do gráfico.\nmy_config.width = 1000 # Definimos uma largura personalizada para que o gráfico use mais do espaço disponível no navegador.\n\nchart = pygal.Bar(my_config, style = my_style) # O método Bar() cria um gráfico de barras.\nchart.title = 'Most-Starred Python Projects on GitHub' \nchart.x_labels = names\n\nchart.add('', plot_dicts)\nchart.render_to_file('Bar_python_repos_visual.svg')\n" }, { "alpha_fraction": 0.7292817831039429, "alphanum_fraction": 0.7624309659004211, "avg_line_length": 44, "blob_id": "5c0110af2f8b4bc5b52d74b053f468b1b047784d", "content_id": "40800f0df5f0035d3b6770c11f602ed48c36ac4f", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 182, "license_type": "permissive", "max_line_length": 90, "num_lines": 4, "path": "/mapping_global_data_sets/countries.py", "repo_name": "portelaoliveira/CSV_e_JSON", 
"src_encoding": "UTF-8", "text": "from pygal_maps_world import i18n\n\nfor country_code in sorted(i18n.COUNTRIES.keys()): # Coloca as chaves em ordem alfabética.\n print(country_code, i18n.COUNTRIES[country_code])\n\n" }, { "alpha_fraction": 0.7072654962539673, "alphanum_fraction": 0.7162483334541321, "avg_line_length": 51.56944274902344, "blob_id": "2a9843995aa8a5d31b14fec89f8c576ec19a6b85", "content_id": "392ec90a44b37e28d36bba117c799feb499baae7", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3823, "license_type": "permissive", "max_line_length": 164, "num_lines": 72, "path": "/APIS/hn_submissions_visual.py", "repo_name": "portelaoliveira/CSV_e_JSON", "src_encoding": "UTF-8", "text": "import requests\nfrom operator import itemgetter\nimport pygal\nfrom pygal.style import LightColorizedStyle as LCS, LightenStyle as LS\n\n# Faz uma chamada de API e armazena a resposta.\nurl = 'https://hacker-news.firebaseio.com/v0/topstories.json'\n''' Essa chamada de API devolve uma lista contendo os IDs dos 500 artigos mais populares do Hacker News \n no momento em que a chamada é feita. 
'''\nr = requests.get(url)\nprint('Status code:', r.status_code)\n\n# Processa informações sobre cada artigo submetido.\nsubmission_ids = r.json() # Covertemos o texto em uma lista Python.\nsubmission_dicts = [] # Usaremos esse IDs para criar um conjunto de diconários em que cada um aramzena informaçôes sobre um dos artigos submetidos.\nfor submission_id in submission_ids[:10]: # Percorre os IDs dos 10 principais artigos submetidos.\n # Cria uma chamada de API separada para cada artigo submetido.\n url = f\"https://hacker-news.firebaseio.com/v0/item/{submission_id}.json\" # Faz uma nova chamada de API para cada artigo gerando um URL que inclui o valor atual.\n submission_r = requests.get(url)\n print(f\"id: {submission_id}\\tstatus: {submission_r.status_code}\") # Ver se foi bem sucedido\n response_dict = submission_r.json()\n\n submission_dict = {\n 'title': response_dict['title'], # Título do artigo.\n 'link': f\"http://news.ycombinator.com/item?id={submission_id}\", # Link para página de discurssão desse item.\n 'comments': response_dict.get('descendants', 0), # Número de comentários no dicionário.\n }\n submission_dicts.append(submission_dict)\n\nsubmission_dicts = sorted(submission_dicts, \n key = itemgetter('comments'), \n reverse = True)\n''' A função itemgetter('comments') ordena a lita de dicionário de acordo com o número de comentários. \n A função sorted() então utiliza esse valor como base para ordenar a lista. Ordenamos a lista na \n ordem inversa para colocar as histórias mais comentadas antes. 
'''\n\nfor submission_dict in submission_dicts:\n print(f\"\\nTitle: {submission_dict['title']}\")\n print(f\"Discussion link: {submission_dict['link']}\")\n print(f\"Comments: {submission_dict['comments']}\")\n\ntitles, plot_dicts = [], []\nfor submission_dict in submission_dicts:\n titles.append(submission_dict['title'])\n\n plot_dict = {\n 'value': submission_dict['comments'],\n 'label': submission_dict['title'],\n 'xlink': submission_dict['link'], # O pygal usa o URL associado a 'xlink' para transformar cada barra em um link ativo.\n }\n plot_dicts.append(plot_dict)\n\n# Cria a vizualização.\nmy_style = LS('#333366', base_style = LCS)\n\nmy_config = pygal.Config() # Cria uma instância da classse Config().\nmy_config.x_label_rotation = 45 # x_label_rotation = 45 define a rotação dos nomes ao longo do eixo X.\nmy_config.show_legend = False # Não mostra as legendas.\nmy_config.title_font_size = 24 # Definimos o tamaho da fonte para o título do gráfico.\nmy_config.label_font_size = 14 # Definimos o tamaho da fonte para o rótulos menor (nomes dos projetos ao longo do eixox).\nmy_config.major_label_font_size = 18 # Definimos o tamaho da fonte para o rótulos maior (número no eixo y).\nmy_config.truncate_label = 15 # Reduz os nomes dos projetos mais longos a 15 caracteres.\nmy_config.show_y_guides = False # Oculta as linhas horizontais do gráfico.\nmy_config.width = 1000 # Definimos uma largura personalizada para que o gráfico use mais do espaço disponível no navegador.\nmy_config.y_title = 'Number of Comments'\n\nchart = pygal.Bar(my_config, style = my_style) # O método Bar() cria um gráfico de barras.\nchart.title = 'Most-Active Discussions on Hacker News' \nchart.x_labels = titles\n\nchart.add('', plot_dicts)\nchart.render_to_file('hn_submission_visual.svg')\n" }, { "alpha_fraction": 0.6553191542625427, "alphanum_fraction": 0.6813238859176636, "avg_line_length": 39.653846740722656, "blob_id": "d83a7c0223512e48595e170586c7809e5cdc8255", "content_id": 
"dc7e38f4ede2c5483a3586a6d1fd2e575672c905", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2148, "license_type": "permissive", "max_line_length": 122, "num_lines": 52, "path": "/mapping_global_data_sets/world_population.py", "repo_name": "portelaoliveira/CSV_e_JSON", "src_encoding": "UTF-8", "text": "import os\nimport platform\nimport json\nfrom lib.country_codes import get_country_code\nfrom pygal_maps_world.maps import World\nfrom pygal.style import RotateStyle\n\npath = os.path.abspath(os.path.dirname(__file__))\nif platform.system() == 'Linux':\n path += '/data/population_data_json.json'\nelse:\n path += '\\\\data\\\\population_data_json.json'\n\n# Carregar dados de uma lista.\nwith open(path) as f:\n pop_data = json.load(f) # Armazenando os dados do objeto arquivo (f) na variável.\n # A função json.load() converte os dados em uma lista.\n\n# Constrói um dicionário com dados das populações e os códigos dos países.\ncc_populations = {}\nfor pop_dict in pop_data: # Percorre cada item da lista.\n if pop_dict[\"Year\"] == 2010: # Procuramos 2010 na chave Year de cada dicionário.\n country_name = pop_dict[\"Country Name\"]\n population = int(float(pop_dict[\"Value\"])) # Armazenadas em um formato numérico.\n code_country = get_country_code(country_name)\n if code_country:\n cc_populations[code_country] = population\n ''' O dicionário armazena o código do país como chave e a população\n como valor sempre que o código é devolvido. 
'''\n\n# Agrupa os países em três níveis populacionais.\ncc_pops_1, cc_pops_2, cc_pops_3 = {}, {}, {}\nfor cc, pop in cc_populations.items():\n if pop < 10000000:\n cc_pops_1[cc] = pop\n elif pop < 1000000000:\n cc_pops_2[cc] = pop\n else:\n cc_pops_3[cc] = pop\n\n# Vê quantos píses estão em cada nível.\nprint(len(cc_pops_1), len(cc_pops_2), len(cc_pops_3))\nwm_style = RotateStyle('#336699')\nwm = World(style = wm_style) # Criamos uma instância da classe World().\nwm.title = 'World Population in 2010, by country' # Definimos o atributo title() do mapa.\n# usamos o método add() que aceita um rótulo (primiro argumento) e um dicionário de códigos de países (segundo argumento).\nwm.add('0-10m', cc_pops_1)\nwm.add('10m-1bn', cc_pops_2)\nwm.add('>1bn', cc_pops_3)\n\n# O método render_to_file() cria um arquivo svg contendo o mapa, que poderá ser aberto no navegador.\nwm.render_to_file('World_population.svg')\n\n" }, { "alpha_fraction": 0.6859903335571289, "alphanum_fraction": 0.7020934224128723, "avg_line_length": 35.588233947753906, "blob_id": "eb491d3ed10cb7cface9832897a783eb779ebb19", "content_id": "1066bd181e97ccf41998ba84a0d54e0b35f4f5b4", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 630, "license_type": "permissive", "max_line_length": 94, "num_lines": 17, "path": "/APIS/hn_article.py", "repo_name": "portelaoliveira/CSV_e_JSON", "src_encoding": "UTF-8", "text": "import requests\nimport json\n\n# Faz uma chamada de API e armazene a resposta.\nurl = 'https://hacker-news.firebaseio.com/v0/item/19155826.json'\nr = requests.get(url)\nprint(f\"Status code: {r.status_code}\")\n\n# Explora a estrutura dos dados.\nresponse_dict = r.json()\nreadable_file = 'data/hn_topstories.json'\nwith open(readable_file, 'w') as f:\n json.dump(response_dict, f, indent=4)\n\n''' Um argumento indent é usado para imprimir JSON de maneira bonita e torná-lo mais legível. \n O padrão é . 
Para obter a representação JSON mais compacta, você deve usar para eliminar \n o espaço em branco.(', ', ': ')(',', ':') '''" }, { "alpha_fraction": 0.7942857146263123, "alphanum_fraction": 0.7963265180587769, "avg_line_length": 78.03225708007812, "blob_id": "98743ce61c9a0cc3b53a823a7eb37f2487ab8b14", "content_id": "c64a6e63a3c0af000f25fc47a6a0c5c148bc2cc3", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2450, "license_type": "permissive", "max_line_length": 487, "num_lines": 31, "path": "/the_csv_file_format/README.md", "repo_name": "portelaoliveira/CSV_e_JSON", "src_encoding": "UTF-8", "text": "# DOWNLOADING DATA\n\nDownloading datasets from online sources and creating functional visualizations of that data.\nWe access and view data stored in two common formats: CSV and JSON. We use the Python csv module to process meteorological data stored in CSV format (Comma-Separated Values, or Comma Separated Values) and analyze the maximum and minimum temperatures over time in two different locations. Then we will use matplotlib to generate a graph based on the downloaded data and display the temperature variations in two very different environments: Sitka in Alaska and Death Valley in California.\n\nA simple way to store data in a text file is to write it as a series of comma-separated values. The resulting files are called CSV files.\nThey are weather data for January 5, 2014 for Sitka in Alaska. They include the maximum and minimum temperatures as well as several other measurements from that day. CSV files can be complicated for humans to read, but they are easy for programs to process and extract values, which streamlines the data analysis operation.\n\n# Parsing CSV file headers\n\nThe csv module of the standard Python library parses the lines of a CSV file and allows you to quickly extract the values we are interested in. 
We start by analyzing the first line of the file, which contains a series of headers for the data.\nThen it was done:\n\n* Displaying headers and their positions;\n* Extracting and reading data;\n* Plotting data on a temperature graph;\n* Datetime module;\n>>> We've added dates to our chart to make it more useful;\n* Plotting Dates;\n>>> Extracting the daily maximum dates and passing the maximum dates and temperatures to plot;\n* Plotting a longer period of time;\n* Plotting a second data series;\n>>> Including minimum temperatures;\n\n### Shading an area of the chart\n\nAfter adding two series of data, we can now analyze the temperature variation for each day. We will add a final touch to the graph using shading to show the variation between minimum and maximum temperatures each day. For this we will use the fill_between method, which accepts a series of x values and two series of y values, and fills the space between the two series of y values.\n\n### Error checking\n\nsome weather stations occasionally malfunction and fail to collect some of the data they are supposed to obtain - or all of them. 
The absence of data can result in exceptions that can cause our programs to fail if we do not handle them properly.\n" }, { "alpha_fraction": 0.6664659976959229, "alphanum_fraction": 0.7224563360214233, "avg_line_length": 30.94230842590332, "blob_id": "a890fd54b99c608fd5eec5253d5e2ceecbc538f0", "content_id": "4fca7dd4c5c8243456befb72481d3412f1e6d187", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1672, "license_type": "permissive", "max_line_length": 77, "num_lines": 52, "path": "/the_csv_file_format/sitka_dv_comparacao.py", "repo_name": "portelaoliveira/CSV_e_JSON", "src_encoding": "UTF-8", "text": "import matplotlib.pyplot as plt\nimport matplotlib.dates as mdates\n\nfrom obter_dados_csv import obter_datas_temperaturas\n\n\n# Nome dos arquivos.\nfilename_1 = 'death_valley_2014.csv'\nfilename_2 = 'sitka_weather_2014.csv'\n\n# Obtém os dados de ambos os arquivos.\ndados_1 = obter_datas_temperaturas(filename_1)\ndados_2 = obter_datas_temperaturas(filename_2)\n\n# Obtém as datas, temperaturas mínimas e máximas para Death Valley.\ndatas_1 = dados_1[0]\nminimas_1 = dados_1[1]\nmaximas_1 = dados_1[2]\n\n# Obtém as datas, temperaturas mínimas e máximas para Sitka.\ndatas_2 = dados_2[0]\nminimas_2 = dados_2[1]\nmaximas_2 = dados_2[2]\n\n# Faz a plotagem dos dados.\nfig = plt.figure(dpi=128, figsize=(10, 6))\n\n# Formata xaxis com 1 mês de intervalo e o formato da data.\nplt.gca().xaxis.set_major_locator(mdates.MonthLocator(interval=1))\nplt.gca().xaxis.set_major_formatter(mdates.DateFormatter('%b %Y'))\n\n# Plota o gráfico para o ano de 2014, Death Valley.\nplt.plot(datas_1, maximas_1, c='red', alpha=0.3)\nplt.plot(datas_1, minimas_1, c='blue', alpha=0.3)\nplt.fill_between(datas_1, maximas_1, minimas_1, facecolor='blue', alpha=0.05)\n\n# Plota o gráfico para o ano de 2014, Sitka.\nplt.plot(datas_2, maximas_2, c='red', alpha=0.6)\nplt.plot(datas_2, minimas_2, c='blue', alpha=0.6)\nplt.fill_between(datas_2, 
maximas_2, minimas_2, facecolor='blue', alpha=0.15)\n\n# Formata o gráfico.\ntitle = 'Daily high and low temperatures - 2014'\ntitle += '\\nDeath Valley, CA and Sitka, AK'\nplt.title(title, fontsize=20)\nplt.xlabel('', fontsize=14)\nfig.autofmt_xdate()\nplt.ylabel('Temperature (F)', fontsize=14)\nplt.tick_params(axis='both', which='major', labelsize=14)\nplt.ylim(10, 120)\n\nplt.show()\n" }, { "alpha_fraction": 0.6357868313789368, "alphanum_fraction": 0.6357868313789368, "avg_line_length": 27, "blob_id": "9a3ddf9caaeb7a7fedbdb80a2fed1226758d25e2", "content_id": "c36c852f9ae7954079dec615ccb6ed7700b77c31", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 790, "license_type": "permissive", "max_line_length": 63, "num_lines": 28, "path": "/mapping_global_data_sets/lib/teste_country_codes.py", "repo_name": "portelaoliveira/CSV_e_JSON", "src_encoding": "UTF-8", "text": "import unittest\n\nfrom country_codes import get_country_code\n\n\nclass CountryCodesTestCase(unittest.TestCase):\n \"\"\"Tests for country_codes.py.\"\"\"\n\n def test_get_country_code(self):\n \"\"\"Testa a função get_country_code().\"\"\"\n country_code = get_country_code('Andorra')\n self.assertEqual(country_code, 'ad')\n\n country_code = get_country_code('United Arab Emirates')\n self.assertEqual(country_code, 'ae')\n\n country_code = get_country_code('Tanzania')\n self.assertEqual(country_code, 'tz')\n\n country_code = get_country_code('Brazil')\n self.assertEqual(country_code, 'br')\n\n country_code = get_country_code('Afghanistan')\n self.assertEqual(country_code, 'af')\n\n\nif __name__ == '__main__':\n unittest.main()\n " }, { "alpha_fraction": 0.5277101993560791, "alphanum_fraction": 0.5325763821601868, "avg_line_length": 32.62727355957031, "blob_id": "f60d71755d2d5235d0e0fef80f7b9427d33394da", "content_id": "f9c8025c76e1897374fcc4823c57b21f7ebce666", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, 
"language": "Python", "length_bytes": 3722, "license_type": "permissive", "max_line_length": 79, "num_lines": 110, "path": "/the_csv_file_format/obter_dados_csv.py", "repo_name": "portelaoliveira/CSV_e_JSON", "src_encoding": "UTF-8", "text": "import csv\nfrom datetime import datetime\n\n\ndef obter_datas_temperaturas(arquivo: str) -> str or tuple:\n \"\"\"\n -> Obtém as datas, temperaturas mínimas e temperaturas máximas\n de um arquivo csv.\n\n :param arquivo: Nome do arquivo csv.\n :return: Retorna uma tupla com 3 listas:\n as datas, temperaturas máximas e mínimas.\n \"\"\"\n\n try:\n with open(arquivo) as obj_arq:\n conteudo = csv.reader(obj_arq)\n # Pula a linha que contém os headers.\n next(conteudo)\n\n # Listas das datas, temp. máximas e mínimas.\n datas, maximas, minimas = [], [], []\n for linha in conteudo:\n try:\n data_atual = datetime.strptime(linha[0], '%Y-%m-%d').date()\n\n maxima = int(linha[1])\n minima = int(linha[3])\n if minima == 0:\n minima = 50\n except ValueError:\n print(data_atual, 'Faltando dados.')\n else:\n datas.append(data_atual)\n maximas.append(maxima)\n minimas.append(minima)\n except FileNotFoundError:\n return f\"O arquivo '{arquivo}' não existe.\"\n else:\n return datas, maximas, minimas\n\n\ndef obter_datas_indices_pluvio(arquivo: str) -> str or tuple:\n \"\"\"\n -> Obtém as datas e o indices pluviométrico de um arquivo\n csv.\n\n :param arquivo: Nome do arquivo csv.\n :return: Retorna uma tupla com 2 listas:\n as datas e os indices pluviométricos.\n \"\"\"\n\n try:\n with open(arquivo) as obj_arq:\n conteudo = csv.reader(obj_arq)\n # Pula a linha que contém os headers.\n next(conteudo)\n\n # Listas das datas e indices pluviometricos.\n datas, indices_pluvio = [], []\n for linha in conteudo:\n try:\n data_atual = datetime.strptime(linha[0], '%Y-%m-%d').date()\n indice_pluvio = float(linha[16])\n except ValueError:\n print(data_atual, 'Faltando dados.')\n else:\n datas.append(data_atual)\n indices_pluvio.append(indice_pluvio)\n 
except FileNotFoundError:\n return f\"O arquivo '{arquivo}' não existe.\"\n else:\n return datas, indices_pluvio\n\n\ndef obter_datas_umidades(arquivo: str) -> str or tuple:\n \"\"\"\n -> Obtém as datas, umidade máximas e mínimas\n de um arquivo csv.\n\n :param arquivo: Nome do arquivo csv.\n :return: Retorna uma tupla com 3 listas:\n as datas, umidades máximas e mínimas.\n \"\"\"\n try:\n with open(arquivo) as obj_arq:\n conteudo = csv.reader(obj_arq)\n # Pula a linha que contém os headers.\n next(conteudo)\n\n # Listas das datas, umidades máximas e mínimas.\n datas, umidades_maximas, umidades_minimas = [], [], []\n for linha in conteudo:\n try:\n data_atual = datetime.strptime(linha[0], '%Y-%m-%d').date()\n\n umidade_maxima = int(linha[7])\n umidade_minima = int(linha[9])\n if umidade_minima == 0:\n umidade_minima = 55\n except ValueError:\n print(data_atual, 'Faltando dados.')\n else:\n datas.append(data_atual)\n umidades_maximas.append(umidade_maxima)\n umidades_minimas.append(umidade_minima)\n except FileNotFoundError:\n return f\"O arquivo '{arquivo}' não existe.\"\n else:\n return datas, umidades_maximas, umidades_minimas\n" }, { "alpha_fraction": 0.7381404042243958, "alphanum_fraction": 0.7381404042243958, "avg_line_length": 36.64285659790039, "blob_id": "b4bea2ccfa5878de9512508c1f0d6a963586f113", "content_id": "c67d325b28d6d14000aa4465ebb5780b3600fdcd", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 528, "license_type": "permissive", "max_line_length": 118, "num_lines": 14, "path": "/APIS/repos.py", "repo_name": "portelaoliveira/CSV_e_JSON", "src_encoding": "UTF-8", "text": "import requests\n\n# Faz uma chamada de API e armazena a resposta.\nurl = 'https://api.github.com/search/repositories?q=language:python&sort=stars' # Armazenamos o URL da chamada da API.\nr = requests.get(url) # Usamos requests para fazer a chamada.\nprint('Status Code:', r.status_code)\n''' O objeto com a resposta tem 
um atributo chamado status_code, que nos informa se a resposta\n foi bem sucedida. '''\n\n# Armazena a resposta da API em uma variável.\nrespose_dict = r.json()\n\n# Processa o resultado.\nprint(respose_dict.keys())\n" }, { "alpha_fraction": 0.7927801012992859, "alphanum_fraction": 0.8007501363754272, "avg_line_length": 111.2631607055664, "blob_id": "f432a0686257bad2abdb978fc214f271876db0e5", "content_id": "02d081611ab53d88d4ca2a6a2b3fb3658390d420", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2135, "license_type": "permissive", "max_line_length": 432, "num_lines": 19, "path": "/mapping_global_data_sets/README.md", "repo_name": "portelaoliveira/CSV_e_JSON", "src_encoding": "UTF-8", "text": "# Mapping global data sets: JSON format\n\nData from different countries was downloaded in JSON format and we worked with that data using the json module. Using Pygal's beginner-friendly mapping tool for country-based data, we create visualizations of that data to explore global patterns that concern the distribution of the world's population across different countries.\nThis was done:\n\n* Extraction of relevant data;\n The file basically consists of a long Python list. Each item is a dictionary with four keys: the name of a country, its code, a year and a value that represents the population. We only analyzed the name of each country and the population in 2010;\n* Convert strings to numeric values;\n All keys and values in population_data.json are stored as strings. To work with population data, we must convert strings with populations to numeric values. We do this using the int function;\n* Obtaining two-letter country codes;\n>>> Country codes in Pygal are stored in a module called i18n, which is an abbreviation for internationalization. The COUNTRIES dictionary contains two-letter country codes as keys and country names as values. 
To view these codes, import the i18n module dictionary and display its keys and values;\n* Building a world map;\n With the country codes we have, creating a world map is quick and easy. Pygal includes a type of map called Worldmap to help map global data sets;\n* Plotando dados numéricos em um mapa-múndi;\n We put the numerical data of the world population on a map, and with that we create a map that shows the populations of the countries;\n* Grouping countries according to their population;\n As China and India are much more populous than other countries, the map shows little contrast. China and India each have more than one billion people, while the next most populous country is the United States, with approximately 300 million people. Rather than plotting all countries as a group, we are going to separate them into three population levels: less than 10 million, between 10 million and 1 billion and above 1 billion;\n* Styling world maps with Pygal;\n* Lightening the theme color;\n" }, { "alpha_fraction": 0.7214699983596802, "alphanum_fraction": 0.7258220314979553, "avg_line_length": 56.41666793823242, "blob_id": "7b9e371c0c64f98d999472ed90901826a83a7a55", "content_id": "ea072951b9b00bb00588746d36c54cfb17c50c9b", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2089, "license_type": "permissive", "max_line_length": 164, "num_lines": 36, "path": "/APIS/hn_submissions.py", "repo_name": "portelaoliveira/CSV_e_JSON", "src_encoding": "UTF-8", "text": "import requests\nfrom operator import itemgetter\n\n# Faz uma chamada de API e armazena a resposta.\nurl = 'https://hacker-news.firebaseio.com/v0/topstories.json'\n''' Essa chamada de API devolve uma lista contendo os IDs dos 500 artigos mais populares do Hacker News \n no momento em que a chamada é feita. 
'''\nr = requests.get(url)\nprint('Status code:', r.status_code)\n\n# Processa informações sobre cada artigo submetido.\nsubmission_ids = r.json() # Covertemos o texto em uma lista Python.\nsubmission_dicts = [] # Usaremos esse IDs para criar um conjunto de diconários em que cada um aramzena informaçôes sobre um dos artigos submetidos.\nfor submission_id in submission_ids[:10]: # Percorre os IDs dos 10 principais artigos submetidos.\n # Cria uma chamada de API separada para cada artigo submetido.\n url = f\"https://hacker-news.firebaseio.com/v0/item/{submission_id}.json\" # Faz uma nova chamada de API para cada artigo gerando um URL que inclui o valor atual.\n submission_r = requests.get(url)\n print(f\"id: {submission_id}\\tstatus: {submission_r.status_code}\") # Ver se foi bem sucedido\n response_dict = submission_r.json()\n\n submission_dict = {\n 'title': response_dict['title'], # Título do artigo.\n 'hn_link': f\"http://news.ycombinator.com/item?id={submission_id}\", # Link para página de discurssão desse item.\n 'comments': response_dict['descendants'], # Número de comentários no dicionário.\n }\n submission_dicts.append(submission_dict)\n\nsubmission_dicts = sorted(submission_dicts, key = itemgetter('comments'), reverse = True)\n''' A função itemgetter('comments') ordena a lita de dicionário de acordo com o número de comentários. \n A função sorted() então utiliza esse valor como base para ordenar a lista. Ordenamos a lista na \n ordem inversa para colocar as histórias mais comentadas antes. 
'''\n\nfor submission_dict in submission_dicts:\n print(f\"\\nTitle: {submission_dict['title']}\")\n print(f\"Discussion link: {submission_dict['hn_link']}\")\n print(f\"Comments: {submission_dict['comments']}\")\n\n" }, { "alpha_fraction": 0.6691939234733582, "alphanum_fraction": 0.6815642714500427, "avg_line_length": 47.19230651855469, "blob_id": "c12e580b5e429ac0db89b8006bf4328c45d38033", "content_id": "f88b19feb34f26350d30d6c84d836f733b275e99", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2527, "license_type": "permissive", "max_line_length": 124, "num_lines": 52, "path": "/the_csv_file_format/death_valley_highs_lows.py", "repo_name": "portelaoliveira/CSV_e_JSON", "src_encoding": "UTF-8", "text": "import os\nimport platform\nimport csv\nfrom matplotlib import pyplot as plt\nfrom datetime import datetime\n\n# Obtém as datas e as temperaturas máximas e mínimas do arquivo.\npath = os.path.abspath(os.path.dirname(__file__))\nif platform.system() == 'Linux':\n path += '/data/death_valley_2018_simple.csv'\nelse:\n path += '\\\\data\\\\death_valley_2018_simple.csv'\n\nwith open(path) as f:\n reader = csv.reader(f) # Passamos o objeto arquivo (f) como argumento a fim de criar um objeto reader\n # associado a esse arquivo.\n header_row = next(reader) # A função next() devolve a próxima linha do arquivo quando recebe \n # o objeto reader.\n\n dates, highs, lows = [], [], [] # Lista para armazenar as datas e temperaturas máximas e mínimas em cada dia do arquivo.\n for row in reader: # Percorrendo as linhas anteriores do arquivo.\n ''' Sempre que analisamos uma linha, tentamos extrair a data e as temperaturas máximas e mínima.\n Se houver algum dado faltando, Python levantará um ValueError e o trataremos exibindo uma menssagem \n de errro que inclua a data do dado ausente. 
'''\n try:\n current_date = datetime.strptime(row[2], \"%Y-%m-%d\") # converte as informações de datas em um objeto datetime.\n high = int(row[4]) # Converte de string para inteiro.\n low = int(row[5]) # Converte de string para inteiro.\n except ValueError:\n print(current_date, 'missing data')\n else:\n dates.append(current_date) # Concatenamos.\n highs.append(high) # Concatenamos.\n lows.append(low) # Concatenamos.\n\n# Faz a plotagem dos dados.\nplt.style.use('seaborn')\nfig, ax = plt.subplots()\nax.plot(dates, highs, c = 'red', alpha = 0.5) # O argumento alpha controla a transparência de uma cor.\nax.plot(dates, lows, c = 'blue', alpha = 0.5) # 0 é totalmente transparente e 1 é opaco.\nplt.fill_between(dates, highs, lows, facecolor = 'blue', alpha = 0.1)\n''' Para fill_between() passamos a lista dates para os valores de x e então as duas séries \n com valores de y, highs e lows. O argumento facecolor determina a cor da região sombreada. '''\n\n# Formatar o gráfico.\nplt.title('Daily high and low temperatures - 2018\\nDeath Valley, CA', fontsize = 20)\nplt.xlabel('', fontsize = 16)\nfig.autofmt_xdate() # Desenha os rótulos com as datas na diagonal para evitar que se sobreponha.\nplt.ylabel('Temperature (F)', fontsize = 16)\nplt.tick_params(axis = 'both', which = 'major', labelsize = 16)\n\nplt.show()\n" }, { "alpha_fraction": 0.4991430938243866, "alphanum_fraction": 0.5, "avg_line_length": 33.8283576965332, "blob_id": "11bb248bfc4152df71af42094d96b9f02860f2cc", "content_id": "1f8d1159b8210fd1795543d23fce0d5010489d76", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4679, "license_type": "permissive", "max_line_length": 102, "num_lines": 134, "path": "/mapping_global_data_sets/lib/country_codes.py", "repo_name": "portelaoliveira/CSV_e_JSON", "src_encoding": "UTF-8", "text": "from pygal_maps_world import i18n\n\ndef get_country_code(country_name): # Passa o nome do país e armazena no parâmetro 
country_name.\n ''' Devolve o código de duas letras do pygal para um país, dado o seu nome. '''\n for code, name in i18n.COUNTRIES.items(): # Percorre os pares de código-nome do país em COUNTRIES.\n if name == country_name: # Se o nome for encontrado, o código desse país é retornado.\n return code\n elif country_name == 'Yemen, Rep.':\n return 'ye'\n elif country_name == 'Zimbabwe':\n return 'zw'\n elif country_name == 'Zambia':\n return 'zm'\n elif country_name == 'Moldova':\n return 'md'\n elif country_name == 'Virgin Islands (U.S.)':\n return 'do'\n elif country_name == 'Vietnam':\n return 'vn'\n elif country_name == 'Venezuela, RB':\n return 've'\n elif country_name == 'Vanuatu':\n return 'pg'\n elif country_name == 'Uzbekistan':\n return 'uz'\n elif country_name == 'Uruguay':\n return 'uy'\n elif country_name == 'United States':\n return 'us'\n elif country_name == 'United Kingdom':\n return 'gb'\n elif country_name == 'United Arab Emirates':\n return 'ae'\n elif country_name == 'Ukraine':\n return 'ua'\n elif country_name == 'Uganda':\n return 'ug'\n elif country_name == 'Turkmenistan':\n return 'tm'\n elif country_name == 'Turkey':\n return 'tr'\n elif country_name == 'Tunisia':\n return 'tn'\n elif country_name == 'Togo':\n return 'tg'\n elif country_name == 'Timor-Leste':\n return 'tl'\n elif country_name == 'Thailand':\n return 'th'\n elif country_name == 'Tanzania':\n return 'tz'\n elif country_name == 'Tajikistan':\n return 'tj'\n elif country_name == 'Syrian Arab Republic':\n return 'sy'\n elif country_name == 'Swaziland':\n return 'sz'\n elif country_name == 'Suriname':\n return 'sr'\n elif country_name == 'Sweden':\n return 'se'\n elif country_name == 'Sudan':\n return 'sd'\n elif country_name == 'Spain':\n return 'es'\n elif country_name == 'South Africa':\n return 'za'\n elif country_name == 'Somalia':\n return 'so'\n elif country_name == 'Slovenia':\n return 'si'\n elif country_name == 'Slovak Republic':\n return 'sk'\n elif country_name == 
'-Singapore':\n return 'sg'\n elif country_name == 'Sri Lanka':\n return 'lk'\n elif country_name == 'Sierra Leone':\n return 'sl'\n elif country_name == 'Bolivia':\n return 'bo'\n elif country_name == 'Brazil':\n return 'br'\n elif country_name == 'Chile':\n return 'cl'\n elif country_name == 'China':\n return 'cn'\n elif country_name == 'Colombia':\n return 'co'\n elif country_name == 'Costa Rica':\n return 'cr'\n elif country_name == 'Cuba':\n return 'cu'\n elif country_name == 'Cyprus':\n return 'cy'\n elif country_name == 'Czech Republic':\n return 'cz'\n elif country_name == 'Denmark':\n return 'dk'\n elif country_name == '-Djibouti':\n return 'dj'\n elif country_name == 'Madagascar':\n return 'mg'\n elif country_name == 'India':\n return 'in'\n elif country_name == 'Macedonia, FYR':\n return 'mk'\n elif country_name == 'Macao SAR, China':\n return 'mo'\n elif country_name == 'Libya':\n return 'ly'\n elif country_name == 'Lao PDR':\n return 'la'\n elif country_name == 'Kyrgyz Republic':\n return 'kd'\n elif country_name == 'Korea, Rep.':\n return 'kr'\n elif country_name == 'Korea, Dem. Rep.':\n return 'kp'\n elif country_name == 'Iran, Islamic Rep.':\n return 'ir'\n elif country_name == 'Hong Kong SAR, China':\n return 'hk'\n elif country_name == 'Gambia, The':\n return 'gm'\n elif country_name == 'Egypt, Arab Rep.':\n return 'eg'\n elif country_name == 'Congo, Rep.':\n return 'cg'\n elif country_name == 'Congo, Dem. 
Rep.':\n return 'cd'\n \n # Se o país não foi encontrado, devolve None\n return None\n\n" }, { "alpha_fraction": 0.6338823437690735, "alphanum_fraction": 0.6752941012382507, "avg_line_length": 40.66666793823242, "blob_id": "e68619a91749e63e493cf645888a57127c410bbd", "content_id": "010a4d2308b8b55b443e7282d44dc38730ddf723", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2158, "license_type": "permissive", "max_line_length": 122, "num_lines": 51, "path": "/mapping_global_data_sets/gross_domestic_product.py", "repo_name": "portelaoliveira/CSV_e_JSON", "src_encoding": "UTF-8", "text": "import os\nimport platform\nimport json\nfrom lib.country_codes import get_country_code\nfrom pygal_maps_world.maps import World\nfrom pygal.style import RotateStyle\n\npath = os.path.abspath(os.path.dirname(__file__))\nif platform.system() == 'Linux':\n path += '/data/gdp_json.json'\nelse:\n path += '\\\\data\\\\gdp_json.json'\n\n# Carregar dados de uma lista.\nwith open(path) as f:\n gdp_data = json.load(f) # Armazenando os dados do objeto arquivo (f) na variável.\n # A função json.load() converte os dados em uma lista.\n\n# Constrói um dicionário com dados das populações e os códigos dos países.\ncc_gdps = {}\nfor gdp_dict in gdp_data: # Percorre cada item da lista.\n if gdp_dict[\"Year\"] == 2016: # Procuramos 2010 na chave Year de cada dicionário.\n country_name = gdp_dict[\"Country Name\"]\n gdp = int(float(gdp_dict[\"Value\"])) # Armazenadas em um formato numérico.\n code_country = get_country_code(country_name)\n if code_country:\n cc_gdps[code_country] = gdp\n ''' O dicionário armazena o código do país como chave e a população\n como valor sempre que o código é devolvido. 
'''\n# Agrupa os países em três níveis populacionais.\ncc_gdps_1, cc_gdps_2, cc_gdps_3 = {}, {}, {}\nfor cc, gdp in cc_gdps.items():\n if gdp < 5000000000:\n cc_gdps_1[cc] = round(gdp / 1000000000)\n elif gdp < 50000000000:\n cc_gdps_2[cc] = round(gdp / 1000000000)\n else:\n cc_gdps_3[cc] = round(gdp / 1000000000)\n\n# Vê quantos píses estão em cada nível.\nprint(len(cc_gdps_1), len(cc_gdps_2), len(cc_gdps_3))\nwm_style = RotateStyle('#336699')\nwm = World(style = wm_style) # Criamos uma instância da classe World().\nwm.title = 'Global GDP in 2016, by Country (in billions USD)' # Definimos o atributo title() do mapa.\n# usamos o método add() que aceita um rótulo (primiro argumento) e um dicionário de códigos de países (segundo argumento).\nwm.add('0-5bn', cc_gdps_1)\nwm.add('5bn-50bn', cc_gdps_2)\nwm.add('>50bn', cc_gdps_3)\n\n# O método render_to_file() cria um arquivo svg contendo o mapa, que poderá ser aberto no navegador.\nwm.render_to_file('Global_gdp.svg')\n" }, { "alpha_fraction": 0.6401944756507874, "alphanum_fraction": 0.6401944756507874, "avg_line_length": 55.09090805053711, "blob_id": "b3b39a7d71860d0338437ae841f02ecd56774c96", "content_id": "5c4039db263e5668c7c094c2d2ebe9eff86f6df6", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 624, "license_type": "permissive", "max_line_length": 103, "num_lines": 11, "path": "/mapping_global_data_sets/americas.py", "repo_name": "portelaoliveira/CSV_e_JSON", "src_encoding": "UTF-8", "text": "from pygal_maps_world.maps import World\nwm = World() # Criamos uma instância da classe World().\nwm.title = 'North, Central, and South America' # Definimos o atributo title() do mapa.\n\n# usamos o método add() que aceita um rótulo e uma lista de códigos de países.\nwm.add('North America', ['ca', 'mx', 'us'])\nwm.add('Central America', ['bz', 'cr', 'gt', 'hn', 'ni', 'pa', 'sv'])\nwm.add('South America', ['ar', 'bo', 'br', 'cl', 'co', 'ec', 'gf', 'gy', 'pe', 'py', 
'sr', 'uy', 've'])\n\n# O método render_to_file() cria um arquivo svg contendo o mapa, que poderá ser aberto no navegador.\nwm.render_to_file('Americas.svg')\n" }, { "alpha_fraction": 0.7983519434928894, "alphanum_fraction": 0.7983519434928894, "avg_line_length": 78.34615325927734, "blob_id": "276d8b22e56e39816714e9805034ee1a6671c93c", "content_id": "7323a49ed0a546f67762dd092a20b4cfa5e5dc0e", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2063, "license_type": "permissive", "max_line_length": 473, "num_lines": 26, "path": "/APIS/README.md", "repo_name": "portelaoliveira/CSV_e_JSON", "src_encoding": "UTF-8", "text": "# WORKING WITH APIS\n\nWe wrote a self-contained program to generate a visualization based on data retrieved by the program. The program uses a web API (Application Programming Interface) to request specific information from a website automatically, instead of asking for whole pages. Then this information will be used to generate a visualization. Since programs written in this way will always use current data to generate a visualization, even if that data changes quickly, they will be.\n\n## Using a web API\n\nA web API is a part of a website designed to interact with programs that use very specific URLs in order to request certain information. This type of request is known as an API call. The requested data will be returned in an easily processed format, for example, JSON or CSV. Most applications that rely on external data sources, such as those that integrate with social media sites, rely on API calls.\n\n## Git and GitHub\n\nOur visualization will be based on information from GitHub: a website that allows programmers to collaborate on projects. 
We will use the GitHub API to request information from the site about Python projects and then we will generate an interactive view of the relative popularity of these projects in Pygal.\nWith that we can do:\n\n* Processing an API response;\n* Working with the response dictionary;\n* Summary of the main repositories;\n* Monitoring API usage rate limits;\n* Viewing repositories using Pygal;\n* Perfecting Pygal's graphics;\n* Adding custom context hints;\n* Plotting the data;\n* Adding clickable links to our chart;\n\n## The Hacker News API\n\nTo explore the use of API calls on other sites, we'll take a look at Hacker News [Hacker News](http://news.ycombinator.com/). At Hacker News, people share articles about programming and technology, and engage in enthusiastic discussions about those articles. The Hacker News API provides access to data on all submitted articles and website comments, available without the need to register to obtain a key. And with that we followed the same steps that were done on GitHub.\n" }, { "alpha_fraction": 0.7394438982009888, "alphanum_fraction": 0.740473747253418, "avg_line_length": 43.1363639831543, "blob_id": "368dd34c900680d9758fae5ce029af70e4935790", "content_id": "9efd9400d74cfe2e4b65fd9d79a6b58c7dd990ba", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 984, "license_type": "permissive", "max_line_length": 130, "num_lines": 22, "path": "/APIS/repos_items_total_count.py", "repo_name": "portelaoliveira/CSV_e_JSON", "src_encoding": "UTF-8", "text": "import requests\n\n# Faz uma chamada de API e armazena a resposta.\nurl = 'https://api.github.com/search/repositories?q=language:python&sort=stars' # Armazenamos o URL da chamada da API.\nr = requests.get(url) # Usamos requests para fazer a chamada.\nprint('Status Code:', r.status_code)\n''' O objeto com a resposta tem um atributo chamado status_code, que nos informa se a resposta\n foi bem sucedida. 
'''\n\n# Armazena a resposta da API em uma variável.\nrespose_dict = r.json()\nprint('Total repositories:', respose_dict['total_count']) # Número total de repositórios Python no GitHub.\n\n# Explora informações sobre os repositórios.\nrepo_dicts = respose_dict['items'] # 'items' é uma lista que contém vários dicionários, cada um contém dados sobre um repositório.\nprint('Repositories returned:', len(repo_dicts))\n\n# Analisa o primeiro repositório.\nrepo_dict = repo_dicts[0] # Primeiro item.\nprint('\\nKeys:', len(repo_dict))\nfor key in sorted(repo_dict.keys()):\n print(key)\n" }, { "alpha_fraction": 0.6936089992523193, "alphanum_fraction": 0.7424812316894531, "avg_line_length": 58, "blob_id": "15de5282262a1d56e8d5aa5fd4ce002ad40d21f7", "content_id": "d7fc29de48fe26f5483c5a0ba6e7139b556ba99c", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 540, "license_type": "permissive", "max_line_length": 122, "num_lines": 9, "path": "/mapping_global_data_sets/na_populations.py", "repo_name": "portelaoliveira/CSV_e_JSON", "src_encoding": "UTF-8", "text": "from pygal_maps_world.maps import World\nwm = World() # Criamos uma instância da classe World().\nwm.title = 'Populations of Countries in North America' # Definimos o atributo title() do mapa.\n\n# usamos o método add() que aceita um rótulo (primiro argumento) e um dicionário de códigos de países (segundo argumento).\nwm.add('North America ', {'ca':34126000, 'us':309349000, 'mx':113423000})\n\n# O método render_to_file() cria um arquivo svg contendo o mapa, que poderá ser aberto no navegador.\nwm.render_to_file('na_populations.svg')\n\n" } ]
20
cryuso/ToLP_Django_labs
https://github.com/cryuso/ToLP_Django_labs
9e10dcedb7a74f1ff4af155bcfbcd7f64b0860e0
7f0a9359ef8a25452319f6f6af158078f5838f8d
50eda0ca41888779f0c1ffa7483c82ff345f1146
refs/heads/master
2020-05-19T13:24:52.353996
2019-05-26T11:27:15
2019-05-26T11:27:15
185,038,657
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.542885959148407, "alphanum_fraction": 0.5459132194519043, "avg_line_length": 38.643836975097656, "blob_id": "9f5ab3a9ed05e5a349ad59d6dc70b891da218aa7", "content_id": "2dbd3fbdd83c94f611ebe640024ecacddbeb2fa5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3243, "license_type": "no_license", "max_line_length": 94, "num_lines": 73, "path": "/Lab6/blog/articles/views.py", "repo_name": "cryuso/ToLP_Django_labs", "src_encoding": "UTF-8", "text": "from django.shortcuts import render, redirect\r\nfrom .models import Article, LoginForm, RegisterForm\r\nfrom django.http import HttpResponse, Http404, HttpResponseRedirect\r\nfrom django.contrib.auth import authenticate, login\r\n\r\n\r\ndef archive(request):\r\n return render(request, 'archive.html', {\"posts\":Article.objects.all()})\r\n\r\ndef get_article(request, article_id):\r\n try:\r\n post = Article.objects.get(id=article_id)\r\n return render(request, 'article.html', {\"post\": post})\r\n except Article.DoesNotExist:\r\n raise Http404\r\n\r\ndef create_post(request):\r\n if request.user.is_authenticated:\r\n if request.method == \"POST\":\r\n # обработать данные формы, если метод POST\r\n form = { 'text': request.POST[\"text\"],\r\n 'title': request.POST[\"title\"]\r\n }\r\n # в словаре form будет храниться информация, введенная пользователем\r\n if form[\"text\"] and form[\"title\"]:\r\n # если поля заполнены без ошибок\r\n title=form[\"title\"]\r\n # проверка на уникальность\r\n if not Article.objects.filter(title=title).exists():\r\n article = Article.objects.create(text=form[\"text\"],\r\n title=form[\"title\"],\r\n author=request.user)\r\n return redirect('get_article', article_id = article.id)\r\n else:\r\n form [\"errors\"] = (u\"Статья с таким именем уже существует\")\r\n return render(request, 'create_post.html', {\"form\": form})\r\n # перейти на страницу поста\r\n else:\r\n # если введенные данные некорректны\r\n form 
[\"errors\"] = (u\"Не все поля заполнены\")\r\n return render(request, 'create_post.html', {\"form\": form})\r\n else:\r\n # просто вернуть страницу с формой, если метод GET\r\n return render(request, 'create_post.html', {}) \r\n else:\r\n raise Http404\r\n\r\n\r\ndef register(request):\r\n if request.method == \"POST\":\r\n form = RegisterForm(request.POST)\r\n if form.is_valid():\r\n user = form.save()\r\n conpassword = form.cleaned_data.get('password')\r\n user = authenticate(username=user.username, emai=user.email, password=conpassword)\r\n login(request, user)\r\n return redirect('register')\r\n else:\r\n form = RegisterForm()\r\n return render(request, 'register.html', {'form': form}) \r\n\r\n \r\n\r\ndef log_in(request):\r\n if request.method == \"POST\":\r\n form = LoginForm(request.POST)\r\n if form.is_valid():\r\n if form.get_user():\r\n login(request, form.get_user())\r\n return HttpResponseRedirect('../../archive/')\r\n else:\r\n form = LoginForm()\r\n return render(request, 'user.html', {'form':form})\r\n \r\n" }, { "alpha_fraction": 0.4479704797267914, "alphanum_fraction": 0.4590405821800232, "avg_line_length": 46.42856979370117, "blob_id": "f0e1dcce9c229e67ad23867261de9f96e8d93c91", "content_id": "f01293dbc064f7ebf8d77d27324d54584b98e573", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "HTML", "length_bytes": 1355, "license_type": "no_license", "max_line_length": 153, "num_lines": 28, "path": "/Lab5/blog/articles/templates/create_post.html", "repo_name": "cryuso/ToLP_Django_labs", "src_encoding": "UTF-8", "text": "<!DOCTYPE html>\r\n{% load static %}\r\n<html> \r\n <head> \r\n <title>Django Public Blog - create article</title> \r\n {% load static from staticfiles %}\r\n <link rel=\"stylesheet\" href=\"{% static \"/css/article.css\"%}\"> \r\n </head> \r\n<body>\r\n <div class=\"create_post\"> \r\n <div class=\"header\">\r\n {% load static from staticfiles %}\r\n <img src=\"{% static \"im/logo.png\"%}\" alt=\"Hello 
^_^\" height=\"80\" width=\"40\"/> \r\n </div> \r\n <div class=\"content\"> \r\n <p align=\"center\"><h1><center>Write your article here</h1></p> \r\n <form method=\"POST\">{% csrf_token %}\r\n <p align=\"center\"><h2><center>Article name</h2></p> \r\n <p align=\"center\"><input type=\"text\" size=\"40\" name=\"title\" placeholder=\"Type article name\" value=\"{{ form.title }}\"></p>\r\n <p align=\"center\"><h2><center>Article text</h2></p> \r\n <p align=\"center\"><textarea name=\"text\" cols=\"40\" rows=\"3\" placeholder=\"Type article text\">{{ form.text }}</textarea></p> \r\n <p align=\"center\"><input type=\"submit\" value=\"Save\"></p> \r\n </form>\r\n {{ form.errors }} \r\n </div>\r\n </div> \r\n</body> \r\n</html> " }, { "alpha_fraction": 0.595703125, "alphanum_fraction": 0.6041666865348816, "avg_line_length": 31.55555534362793, "blob_id": "895264092f90391bc4ffb5807f0cdd6baf610e19", "content_id": "8b81c4352968fbf382c4a974a7e400d1a503f2d9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1572, "license_type": "no_license", "max_line_length": 101, "num_lines": 45, "path": "/Lab6/blog/articles/models.py", "repo_name": "cryuso/ToLP_Django_labs", "src_encoding": "UTF-8", "text": "from django.db import models\r\nfrom django.contrib.auth.models import User\r\nfrom django.contrib.auth.forms import UserCreationForm, forms\r\nfrom django.contrib.auth import authenticate, login\r\n\r\n# Create your models here.\r\n\r\nclass Article(models.Model):\r\n title = models.CharField(max_length=200)\r\n author = models.ForeignKey(User,\r\n on_delete = models.CASCADE,\r\n )\r\n text = models.TextField()\r\n created_date = models.DateField(auto_now_add=True)\r\n \r\n \r\n def __str__(self):\r\n return \"%s: %s\" % (self.author.username, self.title)\r\n\r\n def get_excerpt(self):\r\n return self.text[:140] + \"...\" if len(self.text) > 140 else self.text\r\n\r\n def get_content(self):\r\n return self.text\r\n\r\nclass 
RegisterForm(UserCreationForm):\r\n class Meta:\r\n model = User\r\n fields = ('username', 'email')\r\n\r\nclass LoginForm(forms.Form):\r\n username = forms.CharField(max_length=40)\r\n password = forms.CharField(max_length=40, widget=forms.PasswordInput)\r\n\r\n def get_user(self):\r\n return self.user or None\r\n\r\n def clean(self):\r\n cleaned_data = super(LoginForm, self).clean()\r\n if not self.errors:\r\n user = authenticate(username=cleaned_data['username'], password=cleaned_data['password'])\r\n if user is None:\r\n raise forms.ValidationError(u'Неправильное имя пользователя или пароль')\r\n self.user = user\r\n return cleaned_data\r\n \r\n \r\n \r\n\r\n\r\n\r\n\r\n" } ]
3
akoaysigod/raytracing
https://github.com/akoaysigod/raytracing
46fc88a1fdadfb3bd70c940ba3f1ea0ec75a32e9
d02ba6f1223bfced1914ccbcfb8020abc26d398a
c11f473e0eae070d7b67e4406cbf74514926b138
refs/heads/master
2020-09-17T05:42:11.888919
2016-12-05T05:06:57
2016-12-05T05:06:57
67,459,747
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6734693646430969, "alphanum_fraction": 0.6734693646430969, "avg_line_length": 15.333333015441895, "blob_id": "217f502b982f759a4f6cf4dd82431a348063053f", "content_id": "17b40b0e868083a31e512a10572ee9e4af3bc995", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 49, "license_type": "no_license", "max_line_length": 35, "num_lines": 3, "path": "/run.sh", "repo_name": "akoaysigod/raytracing", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\n./.build/debug/RayTracer > test.ppm\n" }, { "alpha_fraction": 0.608832836151123, "alphanum_fraction": 0.624605655670166, "avg_line_length": 18.8125, "blob_id": "32cda886767be72bf3267e1e501032f240200dd7", "content_id": "2beb4b657efdba2a6ade3e091912ec0e6486a2de", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Swift", "length_bytes": 317, "license_type": "no_license", "max_line_length": 63, "num_lines": 16, "path": "/Sources/NoiseTexture.swift", "repo_name": "akoaysigod/raytracing", "src_encoding": "UTF-8", "text": "#if os(macOS)\nimport simd\n#endif\n\nfinal class NoiseTexture: Texture {\n private let perlin = Perlin()\n private let scale: Double\n\n init(scale: Double = 0.1) {\n self.scale = scale\n }\n\n func value(_ s: Double, _ t: Double, _ p: Vector) -> Vector {\n return perlin.noise(vec: scale * p) * Vector(1, 1, 1)\n }\n}\n" }, { "alpha_fraction": 0.6477987170219421, "alphanum_fraction": 0.6477987170219421, "avg_line_length": 17.705883026123047, "blob_id": "4377fa8a54e2250862e6bebe32f8fcaf9382a4ef", "content_id": "a528d5c05b3b90f25d517de312ba4299dd547efd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Swift", "length_bytes": 318, "license_type": "no_license", "max_line_length": 65, "num_lines": 17, "path": "/Sources/Ray.swift", "repo_name": "akoaysigod/raytracing", "src_encoding": "UTF-8", "text": "#if !os(Linux)\nimport simd\n#endif\n\nfinal class Ray {\n let origin: Vector\n let 
direction: Vector\n\n init(origin: Vector = Vector(), direction: Vector = Vector()) {\n self.origin = origin\n self.direction = direction \n }\n\n func pointAtParameter(_ t: Double) -> Vector {\n return origin + (t * direction)\n }\n}\n" }, { "alpha_fraction": 0.5289855003356934, "alphanum_fraction": 0.533816397190094, "avg_line_length": 24.875, "blob_id": "ee2c53f1fc9e0c627e82bdcd8be6c4b40f02800d", "content_id": "74774beb7898cf6cc6dd564689be459b20352eeb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 414, "license_type": "no_license", "max_line_length": 56, "num_lines": 16, "path": "/src-python/hitable_list.py", "repo_name": "akoaysigod/raytracing", "src_encoding": "UTF-8", "text": "from hitable import Hitable\n\nclass HitableList(Hitable):\n def __init__(self, arr):\n self.arr = arr\n\n def hit(self, ray, tMin = 0.0, tMax = float(\"inf\")):\n retRecord = None\n closest = tMax\n\n for item in self.arr:\n record = item.hit(ray, tMin, closest)\n if record:\n closest = record.t\n retRecord = record\n return retRecord\n" }, { "alpha_fraction": 0.5451327562332153, "alphanum_fraction": 0.5663716793060303, "avg_line_length": 34.3125, "blob_id": "dc56d26e2fd2bea6327f6ebb63bc225d5cf9aea8", "content_id": "1256a3e0d79b5febb040b6c83679293d30a6c04a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 565, "license_type": "no_license", "max_line_length": 113, "num_lines": 16, "path": "/src-python/camera.py", "repo_name": "akoaysigod/raytracing", "src_encoding": "UTF-8", "text": "from ray import Ray\nfrom vector import Vector\n\nclass Camera:\n def __init__(self,\n lowerLeftCorner = Vector(-2, -1, -1),\n horizontal = Vector(4, 0, 0),\n vertical = Vector(0, 2, 0),\n origin = Vector(0, 0, 0)):\n self.lowerLeftCorner = lowerLeftCorner\n self.horizontal = horizontal\n self.vertical = vertical\n self.origin = origin\n\n def get_ray(self, u, v):\n return Ray(self.origin, 
self.lowerLeftCorner + (u * self.horizontal) + (v * self.vertical) - self.origin)\n" }, { "alpha_fraction": 0.6890756487846375, "alphanum_fraction": 0.6932772994041443, "avg_line_length": 25.44444465637207, "blob_id": "587fa3b0c8cc4cf6f51fbd8b40217e60b687d51f", "content_id": "007e5289abb9786e318a765926a880ec7f4c2ca1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Swift", "length_bytes": 476, "license_type": "no_license", "max_line_length": 71, "num_lines": 18, "path": "/Sources/Lambertian.swift", "repo_name": "akoaysigod/raytracing", "src_encoding": "UTF-8", "text": "#if !os(Linux)\nimport simd\n#endif\n\nfinal class Lambertian: Material {\n private let albedo: Texture\n\n init(albedo: Texture) {\n self.albedo = albedo\n }\n\n func scatter(ray: Ray, record: HitRecord) -> Scatter? {\n let target = record.p + record.normal + randomInUnitSphere()\n let scattered = Ray(origin: record.p, direction: target - record.p)\n let attenuation = albedo.value(0, 0, record.p)\n return Scatter(attenuation: attenuation, scattered: scattered)\n }\n}\n" }, { "alpha_fraction": 0.5659898519515991, "alphanum_fraction": 0.5837563276290894, "avg_line_length": 20.88888931274414, "blob_id": "0fce757a63e3f12b57051225e166c9359f52f44a", "content_id": "6ac4936f2fbb1be205106836ae27f2e47ae2bf1c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Swift", "length_bytes": 394, "license_type": "no_license", "max_line_length": 63, "num_lines": 18, "path": "/Sources/CheckerTexture.swift", "repo_name": "akoaysigod/raytracing", "src_encoding": "UTF-8", "text": "import Foundation\n\nfinal class CheckerTexture: Texture {\n private let even: Texture\n private let odd: Texture\n\n init(even: Texture, odd: Texture) {\n self.even = even\n self.odd = odd\n }\n\n func value(_ s: Double, _ t: Double, _ p: Vector) -> Vector {\n if sin(10 * p.x) * sin(10 * p.y) * sin(10 * p.z) < 0 {\n return odd.value(s, t, p)\n }\n return even.value(s, t, p)\n 
}\n}\n" }, { "alpha_fraction": 0.6472355723381042, "alphanum_fraction": 0.6658653616905212, "avg_line_length": 29.254545211791992, "blob_id": "5a5eaa015221ea8fe0a1cfe04844ee4742860c38", "content_id": "3ba6e7d018918b55a8db2252e1b5e58b0350808a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Swift", "length_bytes": 1664, "license_type": "no_license", "max_line_length": 106, "num_lines": 55, "path": "/Sources/ColorDeterminer.swift", "repo_name": "akoaysigod/raytracing", "src_encoding": "UTF-8", "text": "import Foundation\n#if !os(Linux)\nimport simd\n#endif\n\ntypealias ColorFunc = (Ray, HitableList, Int) -> Vector\n\nfinal class ColorDeterminer {\n private let world: HitableList\n private let camera: Camera\n\n init(world: HitableList, camera: Camera) {\n self.world = world\n self.camera = camera\n }\n\n fileprivate func background(direction: Double) -> Vector {\n let t = 0.5 * (1.0 + direction)\n return (1.0 - t) * Vector(1, 1, 1) + t * Vector(0.5, 0.7, 1.0)\n }\n}\n\n//normal colors\nextension ColorDeterminer {\n func normalColor(ray: Ray, world: HitableList, depth: Int = 0) -> Vector {\n if let record = world.hit(ray: ray) {\n return 0.5 * Vector(1.0 + record.normal.x, 1.0 + record.normal.y, 1.0 + record.normal.z)\n }\n return background(direction: ray.direction.unit.y)\n }\n}\n\n//diffuse chapter 7\nextension ColorDeterminer {\n func diffuseColor(ray: Ray, world: HitableList, depth: Int = 0) -> Vector {\n if let record = world.hit(ray: ray) {\n let t = record.p + record.normal + record.material.randomInUnitSphere()\n return 0.5 * diffuseColor(ray: Ray(origin: record.p, direction: t - record.p), world: world)\n }\n return background(direction: ray.direction.unit.y)\n }\n}\n\n//materials\nextension ColorDeterminer {\n func materialColor(ray: Ray, world: HitableList, depth: Int) -> Vector {\n if let record = world.hit(ray: ray) {\n if let scatter = record.material.scatter(ray: ray, record: record), depth < 50 {\n return 
scatter.attenuation * materialColor(ray: scatter.scattered, world: world, depth: depth + 1)\n }\n return Vector()\n }\n return self.background(direction: ray.direction.unit.y)\n }\n}\n" }, { "alpha_fraction": 0.8017241358757019, "alphanum_fraction": 0.8017241358757019, "avg_line_length": 18.33333396911621, "blob_id": "dfff8417d4b158d16fc17795c2bc4e4de40cf271", "content_id": "32c0bcc08e62e9d9b1178f20e7953c31f7a46087", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Swift", "length_bytes": 116, "license_type": "no_license", "max_line_length": 47, "num_lines": 6, "path": "/Tests/LinuxMain.swift", "repo_name": "akoaysigod/raytracing", "src_encoding": "UTF-8", "text": "import XCTest\n@testable import RayTracingWeekendTests\n\nXCTMain([\n testCase(RayTracingWeekendTests.allTests),\n])\n" }, { "alpha_fraction": 0.6650406718254089, "alphanum_fraction": 0.6723577380180359, "avg_line_length": 34.14285659790039, "blob_id": "ac5bb133ff3b319a29ef9ac320ff90cc359f272c", "content_id": "38b3877363119cc12b3b2412af026674ff4ab8dd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Swift", "length_bytes": 1230, "license_type": "no_license", "max_line_length": 98, "num_lines": 35, "path": "/Sources/Dielectric.swift", "repo_name": "akoaysigod/raytracing", "src_encoding": "UTF-8", "text": "import Foundation\n#if !os(Linux)\nimport simd\n#endif\n\nfinal class Dielectric: Material {\n private let refractiveIndex: Double\n\n init(refractiveIndex: Double) {\n self.refractiveIndex = refractiveIndex\n }\n\n func scatter(ray: Ray, record: HitRecord) -> Scatter? 
{\n let (outwardNormal, nint, cosine) = { _ -> (Vector, Double, Double) in\n if ray.direction.dotp(record.normal) > 0 {\n let cosine = refractiveIndex * (ray.direction.dotp(record.normal) / ray.direction.lengthp)\n return (-1 * record.normal, refractiveIndex, cosine)\n }\n let cosine = -ray.direction.dotp(record.normal) / ray.direction.lengthp\n return (record.normal, 1.0 / refractiveIndex, cosine)\n }()\n\n let refracted = refract(v: ray.direction, n: outwardNormal, nint: nint)\n let probability = schlick(cosine: cosine, refIdx: refractiveIndex)\n let scattered: Ray = { _ -> Ray in\n if let refracted = refracted, drand48() > probability {\n return Ray(origin: record.p, direction: refracted)\n }\n let reflected = reflect(v: ray.direction, n: record.normal)\n return Ray(origin: record.p, direction: reflected)\n }()\n\n return Scatter(attenuation: Vector(1, 1, 1), scattered: scattered)\n }\n}\n" }, { "alpha_fraction": 0.5984588861465454, "alphanum_fraction": 0.6035959124565125, "avg_line_length": 25.545454025268555, "blob_id": "8caa0fe111d647233698236bcdd21123fa5839a5", "content_id": "01362234f6938d9f37781089f9cb61cc07ea3f48", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Swift", "length_bytes": 1168, "license_type": "no_license", "max_line_length": 88, "num_lines": 44, "path": "/Sources/Sphere.swift", "repo_name": "akoaysigod/raytracing", "src_encoding": "UTF-8", "text": "import Foundation\n#if !os(Linux)\nimport simd\n#endif\n\nfinal class Sphere: Hitable {\n private let center: Vector\n private let radius: Double\n private let material: Material\n\n init(center: Vector = Vector(), radius: Double = 0.0, material: Material) {\n self.center = center\n self.radius = radius\n self.material = material\n }\n\n private func hitRecord(_ t: Double, ray: Ray) -> HitRecord {\n let p = ray.pointAtParameter(t)\n let normal = (p - center) / radius\n return HitRecord(t: t, p: p, normal: normal, material: material)\n }\n\n func hit(ray: Ray, 
tMin: Double = 0.0, tMax: Double = Double.infinity) -> HitRecord? {\n let oc = ray.origin - center\n let a = ray.direction.dotp(ray.direction)\n let b = oc.dotp(ray.direction)\n let c = oc.dotp(oc) - (radius * radius)\n let discrim = (b * b) - (a * c)\n\n if discrim > 0.0 {\n let sqr = sqrt(discrim)\n let tmpOne = (-b - sqr) / a\n if tmpOne < tMax && tmpOne > tMin {\n return hitRecord(tmpOne, ray: ray)\n }\n\n let tmpTwo = (-b + sqr) / a\n if tmpTwo < tMax && tmpTwo > tMin {\n return hitRecord(tmpTwo, ray: ray)\n }\n }\n return nil\n }\n}\n" }, { "alpha_fraction": 0.6133333444595337, "alphanum_fraction": 0.6287719011306763, "avg_line_length": 25.38888931274414, "blob_id": "16abb5735a2c8371b8e2d4b14031cc90773bee13", "content_id": "0eec99a39d7c3fb70cf000f32f2f0fa245e73cae", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Swift", "length_bytes": 1425, "license_type": "no_license", "max_line_length": 113, "num_lines": 54, "path": "/Sources/Camera.swift", "repo_name": "akoaysigod/raytracing", "src_encoding": "UTF-8", "text": "import Foundation\n#if !os(Linux)\nimport simd\n#endif\n\nfinal class Camera {\n private let lowerLeftCorner: Vector\n private let horizontal: Vector\n private let vertical: Vector\n private let origin: Vector\n private let w: Vector\n private let u: Vector\n private let v: Vector\n private let lensRadius: Double\n\n init(origin: Vector,\n lookAt: Vector,\n vup: Vector,\n fov: Double,\n aspect: Double,\n aperture: Double,\n focusDistance: Double) {\n let theta = fov * (M_PI / 180.0)\n let halfHeight = tan(theta / 2.0)\n let halfWidth = aspect * halfHeight\n\n self.origin = origin\n lensRadius = aperture / 2.0\n\n w = (origin - lookAt).unit\n u = vup.crossp(w).unit\n v = w.crossp(u)\n\n lowerLeftCorner = origin - halfWidth * focusDistance * u - halfHeight * focusDistance * v - focusDistance * w\n\n horizontal = 2 * halfWidth * focusDistance * u\n vertical = 2 * halfHeight * focusDistance * v\n }\n\n private func 
randomInUnitDisk() -> Vector {\n let vec = 2.0 * Vector(drand48(), drand48(), 0) - Vector(1, 1, 0)\n if vec.dotp(vec) >= 1.0 {\n return randomInUnitDisk()\n }\n return vec\n }\n\n func getRay(s: Double, t: Double) -> Ray {\n let rd = lensRadius * randomInUnitDisk()\n let offset = rd.x * u + rd.y * v\n return Ray(origin: origin + offset,\n direction: lowerLeftCorner + (s * horizontal) + (t * vertical) - origin - offset)\n }\n}\n" }, { "alpha_fraction": 0.6545454263687134, "alphanum_fraction": 0.6677685976028442, "avg_line_length": 25.30434799194336, "blob_id": "0733da022ee42d2e1439924eec2206d9fbf82a54", "content_id": "28e5aa6661def1897d070fd2d2b3a01dc0655ac4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Swift", "length_bytes": 605, "license_type": "no_license", "max_line_length": 93, "num_lines": 23, "path": "/Sources/Metal.swift", "repo_name": "akoaysigod/raytracing", "src_encoding": "UTF-8", "text": "#if !os(Linux)\nimport simd\n#endif\n\nfinal class Metal: Material {\n private let albedo: Vector\n private let fuzz: Double\n\n init(albedo: Vector, fuzz: Double = 1.0) {\n self.albedo = albedo\n self.fuzz = fuzz < 1.0 ? fuzz : 1.0\n }\n\n func scatter(ray: Ray, record: HitRecord) -> Scatter? 
{\n let reflected = reflect(v: ray.direction.unit, n: record.normal)\n let scattered = Ray(origin: record.p, direction: reflected + fuzz * randomInUnitSphere())\n\n if scattered.direction.dotp(record.normal) > 0.0 {\n return Scatter(attenuation: albedo, scattered: scattered)\n }\n return nil\n }\n}\n" }, { "alpha_fraction": 0.5240641832351685, "alphanum_fraction": 0.5240641832351685, "avg_line_length": 19.77777862548828, "blob_id": "dbc0fa7a70f0c590ab7e2ac0c57f98f23f28a340", "content_id": "27b6a093a644fcec782839c5c14cb7eaee20b0c6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 187, "license_type": "no_license", "max_line_length": 37, "num_lines": 9, "path": "/src-python/hitable.py", "repo_name": "akoaysigod/raytracing", "src_encoding": "UTF-8", "text": "class HitRecord:\n def __init__(self, t, p, normal):\n self.t = t\n self.p = p\n self.normal = normal\n\nclass Hitable:\n def hit(self, ray, tMin, tMax):\n pass\n" }, { "alpha_fraction": 0.5675971508026123, "alphanum_fraction": 0.6215106844902039, "avg_line_length": 37.0625, "blob_id": "5e5482e41a59075396e6c0a03341677db235ae80", "content_id": "a4041685bba3d82d49c126195f0fa464450d0673", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Swift", "length_bytes": 3654, "license_type": "no_license", "max_line_length": 148, "num_lines": 96, "path": "/Sources/main.swift", "repo_name": "akoaysigod/raytracing", "src_encoding": "UTF-8", "text": "import Foundation\n#if !os(Linux)\nimport simd\n#endif\n\nfunc generateRandomScene() -> HitableList {\n let even = ConstantTexture(color: Vector(0.2, 0.3, 0.1))\n let odd = ConstantTexture(color: Vector(0.9, 0.9, 0.9))\n let staticSpheres = [\n Sphere(center: Vector(0, -1000, 0), radius: 1000, material: Lambertian(albedo: CheckerTexture(even: even, odd: odd))),\n Sphere(center: Vector(0, 1, 0), radius: 1, material: Dielectric(refractiveIndex: 1.5)),\n Sphere(center: Vector(-4, 1, 0), radius: 1, 
material: Lambertian(albedo: ConstantTexture(color: Vector(0.4, 0.2, 0.1)))),\n Sphere(center: Vector(4, 1, 0), radius: 1, material: Metal(albedo: Vector(0.7, 0.6, 0.5), fuzz: 0.0))\n ]\n\n let spheres = (-11..<11).reduce(staticSpheres) { (spheres, a) -> [Sphere] in\n let randomSpheres = (-11..<11).map { b -> Sphere? in\n let chooseMat = drand48()\n let center = Vector(Double(a) + 0.9 * drand48(), 0.2, Double(b) + 0.9 * drand48())\n if (center - Vector(4, 0.2, 0)).lengthp > 0.9 {\n if chooseMat < 0.8 {\n return Sphere(center: center, radius: 0.2, material: Lambertian(albedo: ConstantTexture(color: Vector(drand48(), drand48(), drand48()))))\n }\n else if chooseMat < 0.95 {\n return Sphere(center: center,\n radius: 0.2,\n material: Metal(albedo: Vector(0.5 * (1 + drand48()), 0.5 * (1 + drand48()), 0.5 * (1 + drand48())), fuzz: 0.5 * drand48()))\n }\n return Sphere(center: center, radius: 0.2, material: Dielectric(refractiveIndex: 1.5))\n }\n return nil\n }\n return spheres + randomSpheres.flatMap { $0 }\n }\n return HitableList(list: spheres)\n}\n\nfunc checkeredTest() -> HitableList {\n let even = ConstantTexture(color: Vector(0.2, 0.3, 0.1))\n let odd = ConstantTexture(color: Vector(0.9, 0.9, 0.9))\n let checkerTexture = CheckerTexture(even: even, odd: odd)\n let sphere0 = Sphere(center: Vector(0, -10, 0), radius: 10, material: Lambertian(albedo: checkerTexture))\n let sphere1 = Sphere(center: Vector(0, 10, 0), radius: 10, material: Lambertian(albedo: checkerTexture))\n return HitableList(list: [sphere0, sphere1])\n}\n\nfunc perlinTest() -> HitableList {\n let texture = NoiseTexture()\n let sphere1 = Sphere(center: Vector(0, -1000, 0), radius: 1000, material: Lambertian(albedo: texture))\n let sphere2 = Sphere(center: Vector(0, 2, 0), radius: 2, material: Lambertian(albedo: texture))\n return HitableList(list: [sphere1, sphere2])\n}\n\nfunc makeCamera(nx: Int, ny: Int) -> Camera {\n let origin = Vector(13, 2, 3)\n let lookAt = Vector(0, 0, 0)\n let fov = 20.0\n let 
distToFocus = 500.0\n let aperture = 0.001\n\n return Camera(origin: origin,\n lookAt: lookAt,\n vup: Vector(0, 1, 0),\n fov: fov,\n aspect: Double(nx) / Double(ny),\n aperture: aperture,\n focusDistance: distToFocus)\n}\n\nfunc makeImage(nx: Int, ny: Int, ns: Int, world: HitableList, camera: Camera) {\n let colorFunc = ColorDeterminer(world: world, camera: camera).materialColor\n let images = ImageGenerator(nx: nx, ny: ny, ns: ns, world: world, camera: camera)\n images.generate(colorFunc) { (colors: [Color]) in\n print(\"P3\\n\\(nx) \\(ny)\\n255\")\n colors.forEach { c in\n print(\"\\(c.r) \\(c.g) \\(c.b)\")\n }\n }\n}\n\nfunc main() {\n// srand48(Int(time(nil)))\n\n let nx = 1000 //width\n let ny = 500 //height\n let ns = 10 //anti aliasing\n\n let world = perlinTest()\n //let world = checkeredTest()\n //let world = generateRandomScene()\n let camera = makeCamera(nx: nx, ny: ny)\n\n makeImage(nx: nx, ny: ny, ns: ns, world: world, camera: camera)\n}\n\nmain()\n" }, { "alpha_fraction": 0.44783833622932434, "alphanum_fraction": 0.50845867395401, "avg_line_length": 29.399999618530273, "blob_id": "7673495d98dde73f8290c9aa7f4f3e2979d80a0c", "content_id": "0fe0640bf94369aef17c85ebcbac58413145add5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2128, "license_type": "no_license", "max_line_length": 96, "num_lines": 70, "path": "/src-python/main.py", "repo_name": "akoaysigod/raytracing", "src_encoding": "UTF-8", "text": "from camera import Camera\nfrom hitable_list import HitableList\nfrom random import random\nfrom ray import Ray\nfrom sphere import Sphere\nfrom vector import Vector\n\ndef gen_file(nx, ny, ns):\n with open('test.ppm', 'w+') as f:\n f.write('P3\\n%s %s\\n255\\n' % (nx, ny))\n\n for c in colors(nx, ny, ns):\n f.write('%s %s %s\\n' % (c[0], c[1], c[2]))\n\ndef hit_sphere(center, radius, ray):\n oc = ray.origin - center\n a = ray.direction.dotp(ray.direction)\n b = 2.0 * oc.dotp(ray.direction)\n c = 
oc.dotp(oc) - radius * radius\n discrim = b * b - 4 * a * c\n\n if discrim < 0:\n return -1.0\n return (-b - discrim ** 0.5) / (2.0 * a)\n\ndef color_gradient(ray):\n t = hit_sphere(Vector(0, 0, -1), 0.5, ray)\n if t > 0.0:\n n = (ray.point_at_parameter(t) - Vector(0, 0, -1)).unit\n return 0.5 * Vector(1.0 + n.x, 1.0 + n.y, 1.0 + n.z)\n unit = ray.direction.unit\n t = 0.5 * (1.0 + unit.y)\n return (1.0 - t) * Vector(1.0, 1.0, 1.0) + t * Vector(0.5, 0.7, 1.0)\n\ndef color(ray, world):\n record = world.hit(ray)\n if record:\n return 0.5 * Vector(1.0 + record.normal.x, 1.0 + record.normal.y, 1.0 + record.normal.z)\n unit = ray.direction.unit\n t = 0.5 * (1.0 + unit.y)\n return (1.0 - t) * Vector(1, 1, 1) + t * Vector(0.5, 0.7, 1.0)\n\ndef colors(nx, ny, ns):\n llc = Vector(-2.0, -1.0, -1.0)\n hor = Vector(4.0, 0.0, 0.0)\n ver = Vector(0.0, 2.0, 0.0)\n org = Vector()\n\n spheres = [Sphere(Vector(0, 0, -1), 0.5), Sphere(Vector(0, -100.5, -1), 100)]\n world = HitableList(spheres)\n camera = Camera()\n\n for j in xrange(ny - 1, 0, -1):\n for i in xrange(nx):\n vec = Vector()\n for s in xrange(ns):\n u = (i + random()) / float(nx)\n v = (j + random()) / float(ny)\n ray = camera.get_ray(u, v)\n vec = vec + color(ray, world)\n\n vec = vec / Vector(ns, ns, ns)\n ir = int(255 * vec.x)\n ig = int(255 * vec.y)\n ib = int(255 * vec.z)\n\n yield (ir, ig, ib)\n\nif __name__ == '__main__':\n gen_file(200, 100, 100)\n" }, { "alpha_fraction": 0.6144578456878662, "alphanum_fraction": 0.6144578456878662, "avg_line_length": 26.66666603088379, "blob_id": "58e8013aafb014234c383340fca8e009720215e9", "content_id": "42f322ec341d73a625e7cd8cdf1240f13821a447", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Swift", "length_bytes": 83, "license_type": "no_license", "max_line_length": 61, "num_lines": 3, "path": "/Sources/Texture.swift", "repo_name": "akoaysigod/raytracing", "src_encoding": "UTF-8", "text": "protocol Texture {\n func value(_ s: Double, _ t: 
Double, _ p: Vector) -> Vector\n}\n" }, { "alpha_fraction": 0.5103569626808167, "alphanum_fraction": 0.5381225347518921, "avg_line_length": 28.467533111572266, "blob_id": "718f2877612ed7f0913bf0d56a724b3f15de328a", "content_id": "5eea336c47639dd1b7b3601c9c718fae6a3ae161", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Swift", "length_bytes": 2269, "license_type": "no_license", "max_line_length": 101, "num_lines": 77, "path": "/Sources/Perlin.swift", "repo_name": "akoaysigod/raytracing", "src_encoding": "UTF-8", "text": "import Foundation\n\nfinal class Perlin {\n private let randomVectors: [Vector]\n private let xPerms: [Int]\n private let yPerms: [Int]\n private let zPerms: [Int]\n\n init() {\n randomVectors = Perlin.generateRandomVectors()\n xPerms = Perlin.generatePermutations()\n yPerms = Perlin.generatePermutations()\n zPerms = Perlin.generatePermutations()\n }\n\n func trilinearInterpolation(randVectors: [[[Vector]]], u: Double, v: Double, w: Double) -> Double {\n func hermiteCubic(_ x: Double) -> Double {\n return x * x * (3.0 - 2.0 * x)\n }\n\n let uu = hermiteCubic(u)\n let vv = hermiteCubic(v)\n let ww = hermiteCubic(w)\n\n return (0..<2).reduce(0.0) { (ret, i) in\n return (0..<2).reduce(ret) { (ret1, j) in\n return (0..<2).reduce(ret1) { (ret2, k) in\n let weight = Vector(u - Double(i), v - Double(j), w - Double(k))\n let a = Double(i) * uu + Double(1 - i) * (1.0 - uu)\n let b = Double(j) * vv + Double(1 - j) * (1.0 - vv)\n let c = Double(k) * ww + Double(1 - k) * (1.0 - ww)\n return ret2 + a * b * c * randVectors[i][j][k].dotp(weight)\n }\n }\n }\n }\n\n func noise(vec: Vector) -> Double {\n let i = Int(vec.x)\n let j = Int(vec.y)\n let k = Int(vec.z)\n\n let randVectors = (0..<2).map { (di) -> [[Vector]] in\n return (0..<2).map { dj in\n return (0..<2).map { dk in\n let x = xPerms[(i + di) & 255]\n let y = yPerms[(j + dj) & 255]\n let z = zPerms[(k + dk) & 255]\n return randomVectors[x ^ y ^ z]\n }\n }\n }\n\n let u = 
vec.x - floor(vec.x)\n let v = vec.y - floor(vec.y)\n let w = vec.z - floor(vec.z)\n return trilinearInterpolation(randVectors: randVectors, u: u, v: v, w: w)\n }\n\n private static func generateRandomVectors() -> [Vector] {\n return (0..<256).map { _ in\n let arr = (0..<3).map { _ in -1.0 + 2.0 * drand48() }\n return Vector(array: arr).unit\n }\n }\n\n private static func generatePermutations() -> [Int] {\n var table = (0..<256).map { $0 }\n for i in stride(from: table.count - 1, through: 0, by: -1) {\n let target = Int(drand48() * Double(i + 1))\n let tmp = table[i]\n table[i] = table[target]\n table[target] = tmp\n }\n return table\n }\n}\n" }, { "alpha_fraction": 0.5705309510231018, "alphanum_fraction": 0.5830976963043213, "avg_line_length": 26.678260803222656, "blob_id": "0951230ac37cc565b3807e4810f196a60bbe978f", "content_id": "25748be3f40928f162463cec661a7cb0d83e62bd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Swift", "length_bytes": 3183, "license_type": "no_license", "max_line_length": 105, "num_lines": 115, "path": "/Sources/ImageGenerator.swift", "repo_name": "akoaysigod/raytracing", "src_encoding": "UTF-8", "text": "import Foundation\n#if !os(Linux)\nimport simd\n#else\nimport Dispatch\n#endif\n\nfinal class ImageGenerator {\n private let nx: Int\n private let ny: Int\n private let ns: Int\n private let world: HitableList\n private let camera: Camera\n private var colors: [Color]\n private let queue = DispatchQueue(label: \"asdasdaSD\", attributes: .concurrent)\n\n init(nx: Int, ny: Int, ns: Int, world: HitableList, camera: Camera) {\n self.nx = nx\n self.ny = ny\n self.ns = ns\n self.world = world\n self.camera = camera\n\n colors = Array(repeating: Color(r: 0, g: 0, b: 0), count: nx * ny)\n }\n\n func generate(_ colorFunc: @escaping ColorFunc, _ completion: ([Color]) -> ()) {\n for j in stride(from: ny - 1, through: 0, by: -1) {\n for i in (0..<nx) {\n queue.sync {\n let vector = (0..<self.ns).reduce(Vector()) { 
(vector, _) -> Vector in\n let u = (Double(i) + drand48()) / Double(self.nx)\n let v = (Double(j) + drand48()) / Double(self.ny)\n let ray = self.camera.getRay(s: u, t: v)\n return vector + colorFunc(ray, self.world, 0)\n }\n\n let avg = vector / Double(self.ns)\n let gammaAvg = avg.vec3.map { x -> Double in\n if x <= 0.0 {\n return sqrt(-1.0 * x)\n }\n return sqrt(x)\n }\n let gammaCorrection = Vector(array: gammaAvg)\n let color = 255.0 * gammaCorrection\n self.colors[(self.ny - j - 1) * self.nx + i] = color.color\n }\n }\n }\n\n completion(colors)\n }\n}\n\n//single threaded\n//final class ImageGenerator: Sequence {\n// fileprivate let nx: Int\n// fileprivate let ny: Int\n// fileprivate let ns: Int\n// fileprivate let world: HitableList\n// fileprivate let camera: Camera\n// fileprivate let colorFunc: ColorFunc\n//\n// init(nx: Int, ny: Int, ns: Int, world: HitableList, camera: Camera, colorFunc: @escaping ColorFunc) {\n// self.nx = nx\n// self.ny = ny\n// self.ns = ns\n// self.world = world\n// self.camera = camera\n// self.colorFunc = colorFunc\n// }\n//\n// func makeIterator() -> ColorIterator {\n// return ColorIterator(imageGenerator: self)\n// }\n//}\n//\n//struct ColorIterator: IteratorProtocol {\n// let imageGenerator: ImageGenerator\n//\n// private var i: Int\n// private var j: Int\n//\n// init(imageGenerator: ImageGenerator) {\n// self.imageGenerator = imageGenerator\n//\n// i = 0\n// j = imageGenerator.ny\n// }\n//\n// mutating func next() -> Color? 
{\n// guard j > 0 else { return nil }\n//\n// if i >= imageGenerator.nx {\n// i = 0\n// j -= 1\n// }\n//\n// let vector = (0..<imageGenerator.ns).reduce(Vector()) { (vector, _) -> Vector in\n// let u = (Double(i) + drand48()) / Double(imageGenerator.nx)\n// let v = (Double(j) + drand48()) / Double(imageGenerator.ny)\n// let ray = imageGenerator.camera.getRay(s: u, t: v)\n// return vector + imageGenerator.colorFunc(ray, imageGenerator.world, 0)\n// }\n//\n// let avg = vector / Double(imageGenerator.ns)\n// let gammaCorrection = Vector(array: avg.vec3.map { sqrt($0) })\n// let color = 255.0 * gammaCorrection\n//\n// i += 1\n//\n// return color.color\n// }\n//}\n" }, { "alpha_fraction": 0.6785714030265808, "alphanum_fraction": 0.6785714030265808, "avg_line_length": 27, "blob_id": "01de6afe34fedb42ae2c54c72b2b5c3ef73e3104", "content_id": "4f7c87953a1f5b5e52e24d2c875c43b6fb3b6c47", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Swift", "length_bytes": 84, "license_type": "no_license", "max_line_length": 62, "num_lines": 3, "path": "/Sources/Hitable.swift", "repo_name": "akoaysigod/raytracing", "src_encoding": "UTF-8", "text": "protocol Hitable {\n func hit(ray: Ray, tMin: Double, tMax: Double) -> HitRecord?\n}\n" }, { "alpha_fraction": 0.6969696879386902, "alphanum_fraction": 0.6969696879386902, "avg_line_length": 15.5, "blob_id": "f07c815594d48e91e3b85bde8d6ee12c2ac9b044", "content_id": "d3cd299342ffb90414d43ae59d65f1a6d8065f53", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Swift", "length_bytes": 99, "license_type": "no_license", "max_line_length": 24, "num_lines": 6, "path": "/Sources/HitRecord.swift", "repo_name": "akoaysigod/raytracing", "src_encoding": "UTF-8", "text": "struct HitRecord {\n let t: Double\n let p: Vector\n let normal: Vector\n let material: Material\n}\n" }, { "alpha_fraction": 0.6847290396690369, "alphanum_fraction": 0.6847290396690369, "avg_line_length": 10.277777671813965, 
"blob_id": "bec96db0cd19ac62a4dfe29a494dc765f51bc289", "content_id": "c8bb7b0f51e1bc945abc323fbeced83a733f87b4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Makefile", "length_bytes": 203, "license_type": "no_license", "max_line_length": 28, "num_lines": 18, "path": "/Makefile", "repo_name": "akoaysigod/raytracing", "src_encoding": "UTF-8", "text": "NAME=RayTracer\nOUTFILE=test.ppm\n\nbuild:\n\tswift build\n\nrelease:\n\tswift build -c release\n\nclean:\n\trm -rf ./.build\n\tswift build --clean\n\nrunDebug:\n\tlldb ./.build/debug/$(NAME)\n\nrun:\n\t./.build/debug/$(NAME)\n" }, { "alpha_fraction": 0.5991285443305969, "alphanum_fraction": 0.6078431606292725, "avg_line_length": 21.950000762939453, "blob_id": "943471edd12df59c89e7915722fd54b1c08ad9c8", "content_id": "116ba2c0dbb811fe902353d1fee4e8245ba80298", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Swift", "length_bytes": 459, "license_type": "no_license", "max_line_length": 90, "num_lines": 20, "path": "/Sources/HitableList.swift", "repo_name": "akoaysigod/raytracing", "src_encoding": "UTF-8", "text": "final class HitableList: Hitable {\n private let list: [Hitable]\n\n init(list: [Hitable]) {\n self.list = list\n }\n\n func hit(ray: Ray, tMin: Double = 0.001, tMax: Double = Double.infinity) -> HitRecord? 
{\n var retRecord: HitRecord?\n var closest = tMax\n\n list.forEach { object in\n if let record = object.hit(ray: ray, tMin: tMin, tMax: closest) {\n closest = record.t\n retRecord = record\n }\n }\n return retRecord\n }\n}\n" }, { "alpha_fraction": 0.496049165725708, "alphanum_fraction": 0.5048288106918335, "avg_line_length": 35.74193572998047, "blob_id": "c1e592616872450bd38eed12f2949fe23717e3cc", "content_id": "d83abe22d45a4409caafbb5928d5cf63b698d493", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1139, "license_type": "no_license", "max_line_length": 90, "num_lines": 31, "path": "/src-python/sphere.py", "repo_name": "akoaysigod/raytracing", "src_encoding": "UTF-8", "text": "from hitable import Hitable\nfrom hitable import HitRecord\nfrom vector import Vector\n\nclass Sphere(Hitable):\n def __init__(self, center = Vector(), radius = 0.0):\n self.center = center\n self.radius = float(radius)\n\n def hit(self, ray, tMin = 0.0, tMax = float(\"inf\")):\n oc = ray.origin - self.center\n a = ray.direction.dotp(ray.direction)\n b = oc.dotp(ray.direction)\n c = oc.dotp(oc) - (self.radius * self.radius)\n discrim = (b * b) - (a * c)\n\n if discrim > 0.0:\n temp = (-b - (b * b - a * c) ** 0.5) / a\n if tMin < temp < tMax:\n t = temp\n p = ray.point_at_parameter(t)\n normal = (p - self.center) / Vector(self.radius, self.radius, self.radius)\n return HitRecord(t, p, normal)\n\n temp = (-b + (b * b - a * c) ** 0.5) / a\n if tMin < temp < tMax:\n t = temp\n p = ray.point_at_parameter(t)\n normal = (p - self.center) / Vector(self.radius, self.radius, self.radius)\n return HitRecord(t, p, normal)\n return None\n" }, { "alpha_fraction": 0.5465116500854492, "alphanum_fraction": 0.5824524164199829, "avg_line_length": 21.5238094329834, "blob_id": "0bc44518775b4fa0d168cf326a833be64dab0ebb", "content_id": "2aa1f141c7684f879f5faf69c93f023eab6456e6", "detected_licenses": [], "is_generated": false, "is_vendor": false, 
"language": "Swift", "length_bytes": 946, "license_type": "no_license", "max_line_length": 77, "num_lines": 42, "path": "/Sources/Material.swift", "repo_name": "akoaysigod/raytracing", "src_encoding": "UTF-8", "text": "import Foundation\n#if !os(Linux)\nimport simd\n#endif\n\nstruct Scatter {\n let attenuation: Vector\n let scattered: Ray\n}\n\nprotocol Material {\n func scatter(ray: Ray, record: HitRecord) -> Scatter?\n}\n\nextension Material {\n func randomInUnitSphere() -> Vector {\n let vec = 2.0 * Vector(drand48(), drand48(), drand48()) - Vector(1, 1, 1)\n if vec.dotp(vec) >= 1.0 {\n return randomInUnitSphere()\n }\n return vec\n }\n\n func reflect(v: Vector, n: Vector) -> Vector {\n return v - 2 * v.dotp(n) * n\n }\n\n func refract(v: Vector, n: Vector, nint: Double) -> Vector? {\n let dt = v.unit.dotp(n)\n let discrim = 1.0 - nint * nint * (1.0 - dt * dt)\n if discrim > 0.0 {\n return nint * (v.unit - dt * n) - (sqrt(discrim) * n)\n }\n return nil\n }\n\n func schlick(cosine: Double, refIdx: Double) -> Double {\n let r0 = (1 - refIdx) / (1 + refIdx)\n let r02 = r0 * r0\n return r02 + (1 - r02) * pow(1 - cosine, 5)\n }\n}\n" }, { "alpha_fraction": 0.5714285969734192, "alphanum_fraction": 0.5714285969734192, "avg_line_length": 10.199999809265137, "blob_id": "bd0db61c90c4a5db5d78030902c84af6fec5ddfd", "content_id": "4648bcd85312d74ba7ecaabe34b4685dcb03dcfd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Swift", "length_bytes": 56, "license_type": "no_license", "max_line_length": 14, "num_lines": 5, "path": "/Sources/Color.swift", "repo_name": "akoaysigod/raytracing", "src_encoding": "UTF-8", "text": "struct Color {\n let r: Int\n let g: Int\n let b: Int\n}\n" }, { "alpha_fraction": 0.4562545120716095, "alphanum_fraction": 0.46854662895202637, "avg_line_length": 25.596153259277344, "blob_id": "86cef19c49106b50675ddfaebdf5586775803d8c", "content_id": "de23b334d86445a0102664ad1219afa8545c8bb2", "detected_licenses": [], 
"is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1383, "license_type": "no_license", "max_line_length": 69, "num_lines": 52, "path": "/src-python/vector.py", "repo_name": "akoaysigod/raytracing", "src_encoding": "UTF-8", "text": "class Vector:\n def __init__(self, x = 0.0, y = 0.0, z = 0.0):\n self.x = float(x)\n self.y = float(y)\n self.z = float(z)\n\n @property\n def vec3(self):\n return [self.x, self.y, self.z]\n\n @property\n def unit(self):\n l = self.lengthp()\n return Vector(\n x = self.x / l,\n y = self.y / l,\n z = self.z / l\n )\n\n def __add__(self, vec):\n return Vector(self.x + vec.x, self.y + vec.y, self.z + vec.z)\n\n def __sub__(self, vec):\n return Vector(self.x - vec.x, self.y - vec.y, self.z - vec.z)\n\n def __mul__(self, vec):\n return Vector(self.x * vec.x, self.y * vec.y, self.z * vec.z)\n\n def __rmul__(self, s):\n vec = [s * x for x in self.vec3]\n return Vector(vec[0], vec[1], vec[2])\n\n def __div__(self, vec):\n return self.__truediv__(vec)\n\n def __truediv__(self, vec):\n return Vector(self.x / vec.x, self.y / vec.y, self.z / vec.z)\n\n def dotp(self, vec):\n z = zip(self.vec3, vec.vec3)\n r = (x * y for x, y in z)\n return sum(r)\n\n def crossp(self, vec):\n return Vector(\n x = self.y * vec.z - self.z * vec.y,\n y = -(self.x * vec.z - self.z * vec.x),\n z = self.x * vec.y - self.y * vec.x\n )\n\n def lengthp(self):\n return sum(x ** 2 for x in self.vec3) ** 0.5\n" }, { "alpha_fraction": 0.7142857313156128, "alphanum_fraction": 0.7642857432365417, "avg_line_length": 45.66666793823242, "blob_id": "92a8368b926619491a130af7df5fac357434379d", "content_id": "7f9e3f25a12116b3aa040c3e6332214a426a7fc7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 140, "license_type": "no_license", "max_line_length": 105, "num_lines": 3, "path": "/README.md", "repo_name": "akoaysigod/raytracing", "src_encoding": "UTF-8", "text": "based on the [Ray Tracing 
Series](http://in1weekend.blogspot.com/2016/01/ray-tracing-in-one-weekend.html)\n\n![example](example.png?raw=true)\n" }, { "alpha_fraction": 0.620192289352417, "alphanum_fraction": 0.620192289352417, "avg_line_length": 17.909090042114258, "blob_id": "bd465e4746e637f717bc79c820caf625af392e04", "content_id": "a83175f0a0a7fa73ed7398f65cee1495334f0e26", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Swift", "length_bytes": 208, "license_type": "no_license", "max_line_length": 63, "num_lines": 11, "path": "/Sources/ConstantTexture.swift", "repo_name": "akoaysigod/raytracing", "src_encoding": "UTF-8", "text": "final class ConstantTexture: Texture {\n private let color: Vector \n\n init(color: Vector) {\n self.color = color\n }\n\n func value(_ s: Double, _ t: Double, _ p: Vector) -> Vector {\n return color\n }\n}\n" }, { "alpha_fraction": 0.5493606328964233, "alphanum_fraction": 0.5560102462768555, "avg_line_length": 18.747474670410156, "blob_id": "608650ae581b29e4e304500270acf6f2da6a5d5c", "content_id": "cd8269e7be4a800808f5c51ee823c1b33f4a887d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Swift", "length_bytes": 1955, "license_type": "no_license", "max_line_length": 65, "num_lines": 99, "path": "/Sources/Vector.swift", "repo_name": "akoaysigod/raytracing", "src_encoding": "UTF-8", "text": "#if !os(Linux)\nimport simd\n\ntypealias Vector = double3\n\nextension Vector {\n var lengthp: Double {\n return length(self)\n }\n\n func dotp(_ v: Vector) -> Double {\n return dot(self, v)\n }\n\n func crossp(_ v: Vector) -> Vector {\n return cross(self, v)\n }\n}\n#endif\n\nfunc ==(lhs: Vector, rhs: Vector) -> Bool {\n return lhs.x == rhs.x && lhs.y == rhs.y && lhs.z == rhs.z\n}\n\nextension Vector {\n init(array: [Double]) {\n assert(array.count == 3, \"Requires an array of size 3\")\n self.init(array[0], array[1], array[2])\n }\n\n var color: Color {\n return Color(r: Int(x), g: Int(y), b: Int(z))\n }\n\n 
var vec3: [Double] {\n return [x, y, z]\n }\n\n var unit: Vector {\n let l = lengthp\n return Vector(x / l, y / l, z / l)\n }\n}\n\nfunc /(lhs: Vector, rhs: Double) -> Vector {\n return Vector(lhs.x / rhs, lhs.y / rhs, lhs.z / rhs)\n}\n\n#if os(Linux)\n\nimport Foundation\n\nfunc +(lhs: Vector, rhs: Vector) -> Vector {\n return Vector(lhs.x + rhs.x, lhs.y + rhs.y, lhs.z + rhs.z)\n}\n\nfunc -(lhs: Vector, rhs: Vector) -> Vector {\n return Vector(lhs.x - rhs.x, lhs.y - rhs.y, lhs.z - rhs.z)\n}\n\nfunc *(lhs: Vector, rhs: Vector) -> Vector {\n return Vector(lhs.x * rhs.x, lhs.y * rhs.y, lhs.z * rhs.z)\n}\n\nfunc *(lhs: Double, rhs: Vector) -> Vector {\n return Vector(lhs * rhs.x, lhs * rhs.y, lhs * rhs.z)\n}\n\nfunc /(lhs: Vector, rhs: Vector) -> Vector {\n return Vector(lhs.x / rhs.x, lhs.y / rhs.y, lhs.z / rhs.z)\n}\n\nstruct Vector {\n let x: Double\n let y: Double\n let z: Double\n\n var lengthp: Double {\n return sqrt(x * x + y * y + z * z)\n }\n\n init(_ x: Double = 0.0, _ y: Double = 0.0, _ z: Double = 0.0) {\n self.x = x\n self.y = y\n self.z = z\n }\n\n func dotp(_ vec: Vector) -> Double {\n return x * vec.x + y * vec.y + z * vec.z\n }\n\n func crossp(_ vec: Vector) -> Vector {\n return Vector(\n self.y * vec.z - self.z * vec.y,\n -(self.x * vec.z - self.z * vec.x),\n self.x * vec.y - self.y * vec.x)\n }\n}\n#endif\n" } ]
30
broliang/HRGAT
https://github.com/broliang/HRGAT
2e0985e38020fc242933347d73420ecbbd4c61ff
6521085cfd55e9f1b21ced573a99e2d6ce4b2795
da9ad959b5099cf30a40833ba14b3f6b6949c6cd
refs/heads/main
2023-06-19T12:20:20.692439
2021-07-21T09:20:24
2021-07-21T09:20:24
388,004,424
5
1
null
null
null
null
null
[ { "alpha_fraction": 0.57022625207901, "alphanum_fraction": 0.5824136734008789, "avg_line_length": 48.6863899230957, "blob_id": "3324bfe888255ef94fc18d49f4b402e6cb3905cd", "content_id": "8a3318f5f623b29ec4fb6200c26cc957fd2d1ae0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 25190, "license_type": "no_license", "max_line_length": 160, "num_lines": 507, "path": "/model/model.py", "repo_name": "broliang/HRGAT", "src_encoding": "UTF-8", "text": "import torch\nfrom torch import nn\nimport dgl\nfrom dgl.nn.pytorch import RelGraphConv\nfrom model.layer import RGATConv, RBF, RelGCNCov\nimport torch.nn.functional as F\nfrom torch.nn.init import xavier_normal_\nfrom torch.autograd import Variable\nfrom torch.nn import Parameter\nimport numpy as np\nimport time\n\nclass RGAT(nn.Module):\n def __init__(self, num_ent, num_rel, num_base, init_dim, gcn_dim, embed_dim, n_layer, edge_type, edge_norm,\n ent2textvector, rel2textvector, ent2attr, ent2attrlabel, attr2vector, ent2imgvector, device,\n use_text = True, use_img = False, use_attr = False,\n conv_bias=True, gcn_drop=0., opn='mult'):\n super(RGAT, self).__init__()\n self.act = torch.tanh\n self.loss = nn.BCELoss()\n self.num_ent, self.num_rel, self.num_base = num_ent, num_rel, num_base\n self.init_dim, self.gcn_dim, self.embed_dim = init_dim, gcn_dim, embed_dim\n self.conv_bias = conv_bias\n self.gcn_drop = gcn_drop\n self.opn = opn\n self.edge_type = edge_type # [E]\n self.edge_norm = edge_norm # [E]\n self.n_layer = n_layer\n self.init_embed = self.get_param([self.num_ent, self.init_dim]) # initial embedding for entities\n self.device = device\n self.dtype = self.init_embed.dtype\n self.use_text = use_text\n self.use_img = use_img\n self.use_attr = use_attr\n\n self.rank = 16\n self.fusion_weights = self.get_param([1, self.rank])\n self.fusion_bias = Parameter(torch.Tensor(1, self.init_dim))\n self.fusion_bias.data.fill_(0)\n if self.use_text: \n 
self.ent2textvector = torch.tensor(ent2textvector, dtype = self.dtype).to(device) #entity text vector dict (768,)\n self.text_feature_embed = torch.nn.Linear(768, self.init_dim)\n self.text_facotr = self.get_param([self.rank, self.init_dim+1, self.init_dim])\n else:\n self.ent2textvector = None\n if self.use_img == True:\n self.ent2imgvector = torch.tensor(ent2imgvector, dtype = self.dtype).to(device)\n self.ent2imgvector = self.ent2imgvector.squeeze()\n self.img_feature_embed = torch.nn.Linear(self.ent2imgvector.shape[-1], self.init_dim)\n self.img_facotr = self.get_param([self.rank, self.init_dim+1, self.init_dim])\n else:\n self.ent2imgvector = None\n if self.use_attr:\n self.attr2vector = torch.tensor(attr2vector, dtype=self.dtype).to(device) # attr text vector (116,768)\n # self.ent2attr = torch.tensor(ent2attr, dtype=self.dtype).to(device) # entity attr value dict (116,)\n # self.ent2attrlabel = torch.tensor(ent2attrlabel, dtype=self.dtype).to(device) # entity attr label (116,)\n self.attr_featrue_embed = torch.nn.Linear(768, self.init_dim)\n self.attr_facotr = self.get_param([self.rank, self.init_dim+1, self.init_dim])\n else:\n self.attr2vector = None\n\n\n if self.num_base > 0:\n # linear combination of a set of basis vectors\n self.init_rel = self.get_param([self.num_base, self.init_dim])\n self.init_rel_t = self.get_param([self.num_base, self.init_dim])\n self.init_rel_i = self.get_param([self.num_base, self.init_dim])\n self.init_rel_attr = self.get_param([self.num_base, self.init_dim])\n else:\n # independently defining an embedding for each relation\n self.init_rel = self.get_param([self.num_rel * 2, self.init_dim])\n self.init_rel_t = self.get_param([self.num_rel * 2, self.init_dim])\n self.init_rel_i = self.get_param([self.num_rel * 2, self.init_dim])\n self.init_rel_attr = self.get_param([self.num_rel * 2, self.init_dim])\n\n self.conv1 = RGATConv(self.init_dim, self.gcn_dim, self.act, conv_bias, gcn_drop, opn, self.use_text, self.use_img, 
self.use_attr,\n num_base=self.num_base,\n num_rel=self.num_rel)\n self.conv2 = RGATConv(self.gcn_dim, self.embed_dim, self.act, conv_bias, gcn_drop, self.use_text, self.use_img, self.use_attr,\n opn) if n_layer == 2 else None\n\n self.rgcnv1 = RelGCNCov(self.init_dim, self.gcn_dim, self.act, conv_bias, gcn_drop, opn, self.use_text, self.use_img, self.use_attr,\n num_base=self.num_base,\n num_rel=self.num_rel)\n self.rgcnv2 = RelGCNCov(self.gcn_dim, self.embed_dim, self.act, conv_bias, gcn_drop, self.use_text, self.use_img, self.use_attr,\n opn) if n_layer == 2 else None\n self.bias = nn.Parameter(torch.zeros(self.num_ent))\n\n def get_param(self, shape):\n param = nn.Parameter(torch.Tensor(*shape))\n nn.init.xavier_normal_(param, gain=nn.init.calculate_gain('relu'))\n return param\n\n def calc_loss(self, pred, label):\n return self.loss(pred, label)\n def select(self,data, item):\n temp = []\n for i in item:\n temp.append(data[i])\n return torch.tensor(temp).to(self.init_embed.device)\n\n def forward_base(self, g, subj, rel, drop1, drop2):\n \"\"\"\n :param g: graph\n :param sub: subjects in a batch [batch]\n :param rel: relations in a batch [batch]\n :param drop1: dropout rate in first layer\n :param drop2: dropout rate in second layer\n :return: sub_emb: [batch, D]\n rel_emb: [num_rel*2, D]\n x: [num_ent, D]\n \"\"\"\n x, r = self.init_embed, self.init_rel # embedding of relations\n fusion_zy = 1.0\n if self.use_text:\n x_t = self.text_feature_embed(self.ent2textvector)\n x_t = torch.cat(\n (Variable(torch.ones(x_t.shape[0], 1).type(self.dtype).to(self.device), requires_grad=False), x_t),\n dim=1)\n x_t = torch.matmul(x_t, self.text_facotr)\n fusion_zy = fusion_zy * x_t\n else:\n x_t = None\n if self.use_img:\n x_i = self.img_feature_embed(self.ent2imgvector)\n x_i = torch.cat(\n (Variable(torch.ones(x_i.shape[0], 1).type(self.dtype).to(self.device), requires_grad=False), x_i),\n dim=1)\n x_i = torch.matmul(x_i, self.img_facotr)\n\n fusion_zy = fusion_zy * x_i\n 
else:\n x_i = None\n if self.use_attr:\n x_attr = self.attr2vector\n x_attr = self.attr_featrue_embed(x_attr)\n x_attr = torch.cat(\n (Variable(torch.ones(x_attr.shape[0], 1).type(self.dtype).to(self.device), requires_grad=False), x_attr),\n dim=1)\n x_attr = torch.matmul(x_attr, self.attr_facotr)\n fusion_zy = fusion_zy * x_attr\n\n else:\n x_attr = None\n\n fused = torch.matmul(self.fusion_weights, fusion_zy.permute(1,0,2)).squeeze() + self.fusion_bias\n x = x*fused\n\n x, r = self.conv1(g, x, x_t, x_i, x_attr, r, self.edge_type, self.edge_norm)\n x = drop1(x) # embeddings of entities [num_ent, dim]\n x, r = self.conv2(g, x,x_t, x_i, x_attr, r, self.edge_type, self.edge_norm) if self.n_layer == 2 else (x, r)\n x = drop2(x) if self.n_layer == 2 else x\n sub_emb = torch.index_select(x, 0, subj) # filter out embeddings of subjects in this batch\n rel_emb = torch.index_select(r, 0, rel) # filter out embeddings of relations in this batch\n return sub_emb, rel_emb, x\n\n def forward_base_rgcn(self, g, subj, rel, drop1, drop2):\n \"\"\"\n :param g: graph\n :param sub: subjects in a batch [batch]\n :param rel: relations in a batch [batch]\n :param drop1: dropout rate in first layer\n :param drop2: dropout rate in second layer\n :return: sub_emb: [batch, D]\n rel_emb: [num_rel*2, D]\n x: [num_ent, D]\n \"\"\"\n x, r = self.init_embed, self.init_rel # embedding of relations\n x, r = self.rgcnv1(g, x, r, self.edge_type, self.edge_norm)\n x = drop1(x) # embeddings of entities [num_ent, dim]\n x, r = self.rgcnv2(g, x, r, self.edge_type, self.edge_norm) if self.n_layer == 2 else (x, r)\n x = drop2(x) if self.n_layer == 2 else x\n sub_emb = torch.index_select(x, 0, subj) # filter out embeddings of subjects in this batch\n rel_emb = torch.index_select(r, 0, rel) # filter out embeddings of relations in this batch\n return sub_emb, rel_emb, x\n\n def forward_bass_embeding(self, g, drop1, drop2):\n \"\"\"\n :param g: graph\n :param sub: subjects in a batch [batch]\n :param rel: 
relations in a batch [batch]\n :param drop1: dropout rate in first layer\n :param drop2: dropout rate in second layer\n :return: sub_emb: [batch, D]\n rel_emb: [num_rel*2, D]\n x: [num_ent, D]\n \"\"\"\n x, r = self.init_embed, self.init_rel # embedding of relations\n x, r = self.conv1(g, x, r, self.edge_type, self.edge_norm)\n x = drop1(x) # embeddings of entities [num_ent, dim]\n x, r = self.conv2(g, x, r, self.edge_type, self.edge_norm) if self.n_layer == 2 else (x, r)\n x = drop2(x) if self.n_layer == 2 else x\n return x, r\n\n\nclass RGAT_DistMult(RGAT):\n def __init__(self, num_ent, num_rel, num_base, init_dim, gcn_dim, embed_dim, n_layer, edge_type, edge_norm,\n ent2textvector, rel2textvector, ent2attr, ent2attrlabel, attr2vector, ent2imgvector, device,\n use_text=True, use_img=False, use_attr=False,\n bias=True, gcn_drop=0., opn='mult', hid_drop=0.):\n super(RGAT_DistMult, self).__init__(num_ent, num_rel, num_base, init_dim, gcn_dim, embed_dim, n_layer,\n edge_type, edge_norm,ent2textvector, rel2textvector, ent2attr, ent2attrlabel, attr2vector, ent2imgvector, device,\n use_text, use_img, use_attr,\n bias, gcn_drop, opn)\n self.drop = nn.Dropout(hid_drop)\n\n\n def forward(self, g, subj, rel):\n \"\"\"\n :param g: dgl graph\n :param sub: subject in batch [batch_size]\n :param rel: relation in batch [batch_size]\n :return: score: [batch_size, ent_num], the prob in link-prediction\n \"\"\"\n\n sub_emb, rel_emb, all_ent = self.forward_base(g, subj, rel, self.drop, self.drop)\n obj_emb = sub_emb * rel_emb # [batch_size, emb_dim]\n x = torch.mm(obj_emb, all_ent.transpose(1, 0)) # [batch_size, ent_num]\n x += self.bias.expand_as(x)\n score = torch.sigmoid(x)\n return score\n\n\n\n\nclass RGAT_ConvE(RGAT):\n def __init__(self, num_ent, num_rel, num_base, init_dim, gcn_dim, embed_dim, n_layer, edge_type, edge_norm,\n ent2textvector, rel2textvector, ent2attr, ent2attrlabel, attr2vector, ent2imgvector, device,\n use_text=True, use_img=False, use_attr=False,\n 
bias=True, gcn_drop=0., opn='mult', hid_drop=0., input_drop=0., conve_hid_drop=0., feat_drop=0.,\n num_filt=None, ker_sz=None, k_h=None, k_w=None):\n \"\"\"\n :param num_ent: number of entities\n :param num_rel: number of different relations\n :param num_base: number of bases to use\n :param init_dim: initial dimension\n :param gcn_dim: dimension after first layer\n :param embed_dim: dimension after second layer\n :param n_layer: number of layer\n :param edge_type: relation type of each edge, [E]\n :param bias: weather to add bias\n :param gcn_drop: dropout rate in RGATcov\n :param opn: combination operator\n :param hid_drop: gcn output (embedding of each entity) dropout\n :param input_drop: dropout in conve input\n :param conve_hid_drop: dropout in conve hidden layer\n :param feat_drop: feature dropout in conve\n :param num_filt: number of filters in conv2d\n :param ker_sz: kernel size in conv2d\n :param k_h: height of 2D reshape\n :param k_w: width of 2D reshape\n \"\"\"\n super(RGAT_ConvE, self).__init__(num_ent, num_rel, num_base, init_dim, gcn_dim, embed_dim, n_layer,\n edge_type, edge_norm, ent2textvector, rel2textvector, ent2attr, ent2attrlabel, attr2vector, ent2imgvector, device,\n use_text, use_img, use_attr,\n bias, gcn_drop, opn)\n self.hid_drop, self.input_drop, self.conve_hid_drop, self.feat_drop = hid_drop, input_drop, conve_hid_drop, feat_drop\n self.num_filt = num_filt\n self.ker_sz, self.k_w, self.k_h = ker_sz, k_w, k_h\n\n self.bn0 = torch.nn.BatchNorm2d(1) # one channel, do bn on initial embedding\n self.bn1 = torch.nn.BatchNorm2d(self.num_filt) # do bn on output of conv\n self.bn2 = torch.nn.BatchNorm1d(self.embed_dim)\n\n self.drop = torch.nn.Dropout(self.hid_drop) # gcn output dropout\n self.input_drop = torch.nn.Dropout(self.input_drop) # stacked input dropout\n self.feature_drop = torch.nn.Dropout(self.feat_drop) # feature map dropout\n self.hidden_drop = torch.nn.Dropout(self.conve_hid_drop) # hidden layer dropout\n\n\n\n self.conv2d = 
torch.nn.Conv2d(in_channels=1, out_channels=self.num_filt,\n kernel_size=(self.ker_sz, self.ker_sz), stride=1, padding=0, bias=bias)\n\n flat_sz_h = int(2 * self.k_h) - self.ker_sz + 1 # height after conv\n flat_sz_w = self.k_w - self.ker_sz + 1 # width after conv\n self.flat_sz = flat_sz_h * flat_sz_w * self.num_filt\n self.fc = torch.nn.Linear(self.flat_sz, self.embed_dim) # fully connected projection\n\n def concat(self, ent_embed, rel_embed):\n \"\"\"\n :param ent_embed: [batch_size, embed_dim]\n :param rel_embed: [batch_size, embed_dim]\n :return: stack_input: [B, C, H, W]\n \"\"\"\n ent_embed = ent_embed.view(-1, 1, self.embed_dim)\n rel_embed = rel_embed.view(-1, 1, self.embed_dim)\n stack_input = torch.cat([ent_embed, rel_embed], 1) # [batch_size, 2, embed_dim]\n assert self.embed_dim == self.k_h * self.k_w\n stack_input = stack_input.reshape(-1, 1, 2 * self.k_h, self.k_w) # reshape to 2D [batch, 1, 2*k_h, k_w]\n return stack_input\n\n\n\n def forward(self, g, subj, rel, get_embedding = False):\n \"\"\"\n :param g: dgl graph\n :param sub: subject in batch [batch_size]\n :param rel: relation in batch [batch_size]\n :return: score: [batch_size, ent_num], the prob in link-prediction\n \"\"\"\n sub_emb, rel_emb, all_ent = self.forward_base(g, subj, rel, self.drop, self.input_drop)\n stack_input = self.concat(sub_emb, rel_emb) # [batch_size, 1, 2*k_h, k_w]\n x = self.bn0(stack_input)\n x = self.conv2d(x) # [batch_size, num_filt, flat_sz_h, flat_sz_w]\n x = self.bn1(x)\n x = F.relu(x)\n x = self.feature_drop(x)\n x = x.view(-1, self.flat_sz) # [batch_size, flat_sz]\n x = self.fc(x) # [batch_size, embed_dim]\n x = self.hidden_drop(x)\n x = self.bn2(x)\n x = F.relu(x)\n x = torch.mm(x, all_ent.transpose(1, 0)) # [batch_size, ent_num]\n x += self.bias.expand_as(x)\n score = torch.sigmoid(x)\n return score\n\n\n\nclass RGCN_ConvE(RGAT):\n def __init__(self, num_ent, num_rel, num_base, init_dim, gcn_dim, embed_dim, n_layer, edge_type, edge_norm,\n ent2textvector, 
rel2textvector, ent2attr, ent2attrlabel, attr2vector, ent2imgvector, device,\n use_text=True, use_img=False, use_attr=False,\n bias=True, gcn_drop=0., opn='mult', hid_drop=0., input_drop=0., conve_hid_drop=0., feat_drop=0.,\n num_filt=None, ker_sz=None, k_h=None, k_w=None):\n \"\"\"\n :param num_ent: number of entities\n :param num_rel: number of different relations\n :param num_base: number of bases to use\n :param init_dim: initial dimension\n :param gcn_dim: dimension after first layer\n :param embed_dim: dimension after second layer\n :param n_layer: number of layer\n :param edge_type: relation type of each edge, [E]\n :param bias: weather to add bias\n :param gcn_drop: dropout rate in RGATcov\n :param opn: combination operator\n :param hid_drop: gcn output (embedding of each entity) dropout\n :param input_drop: dropout in conve input\n :param conve_hid_drop: dropout in conve hidden layer\n :param feat_drop: feature dropout in conve\n :param num_filt: number of filters in conv2d\n :param ker_sz: kernel size in conv2d\n :param k_h: height of 2D reshape\n :param k_w: width of 2D reshape\n \"\"\"\n super(RGCN_ConvE, self).__init__(num_ent, num_rel, num_base, init_dim, gcn_dim, embed_dim, n_layer,\n edge_type, edge_norm, ent2textvector, rel2textvector, ent2attr, ent2attrlabel, attr2vector, ent2imgvector, device,\n use_text, use_img, use_attr,\n bias, gcn_drop, opn)\n self.hid_drop, self.input_drop, self.conve_hid_drop, self.feat_drop = hid_drop, input_drop, conve_hid_drop, feat_drop\n self.num_filt = num_filt\n self.ker_sz, self.k_w, self.k_h = ker_sz, k_w, k_h\n\n self.bn0 = torch.nn.BatchNorm2d(1) # one channel, do bn on initial embedding\n self.bn1 = torch.nn.BatchNorm2d(self.num_filt) # do bn on output of conv\n self.bn2 = torch.nn.BatchNorm1d(self.embed_dim)\n\n self.drop = torch.nn.Dropout(self.hid_drop) # gcn output dropout\n self.input_drop = torch.nn.Dropout(self.input_drop) # stacked input dropout\n self.feature_drop = torch.nn.Dropout(self.feat_drop) # 
feature map dropout\n self.hidden_drop = torch.nn.Dropout(self.conve_hid_drop) # hidden layer dropout\n\n\n\n self.conv2d = torch.nn.Conv2d(in_channels=1, out_channels=self.num_filt,\n kernel_size=(self.ker_sz, self.ker_sz), stride=1, padding=0, bias=bias)\n\n flat_sz_h = int(2 * self.k_h) - self.ker_sz + 1 # height after conv\n flat_sz_w = self.k_w - self.ker_sz + 1 # width after conv\n self.flat_sz = flat_sz_h * flat_sz_w * self.num_filt\n self.fc = torch.nn.Linear(self.flat_sz, self.embed_dim) # fully connected projection\n\n def concat(self, ent_embed, rel_embed):\n \"\"\"\n :param ent_embed: [batch_size, embed_dim]\n :param rel_embed: [batch_size, embed_dim]\n :return: stack_input: [B, C, H, W]\n \"\"\"\n ent_embed = ent_embed.view(-1, 1, self.embed_dim)\n rel_embed = rel_embed.view(-1, 1, self.embed_dim)\n stack_input = torch.cat([ent_embed, rel_embed], 1) # [batch_size, 2, embed_dim]\n assert self.embed_dim == self.k_h * self.k_w\n stack_input = stack_input.reshape(-1, 1, 2 * self.k_h, self.k_w) # reshape to 2D [batch, 1, 2*k_h, k_w]\n return stack_input\n\n def forward(self, g, subj, rel):\n \"\"\"\n :param g: dgl graph\n :param sub: subject in batch [batch_size]\n :param rel: relation in batch [batch_size]\n :return: score: [batch_size, ent_num], the prob in link-prediction\n \"\"\"\n sub_emb, rel_emb, all_ent = self.forward_base_rgcn(g, subj, rel, self.drop, self.input_drop)\n stack_input = self.concat(sub_emb, rel_emb) # [batch_size, 1, 2*k_h, k_w]\n x = self.bn0(stack_input)\n x = self.conv2d(x) # [batch_size, num_filt, flat_sz_h, flat_sz_w]\n x = self.bn1(x)\n x = F.relu(x)\n x = self.feature_drop(x)\n x = x.view(-1, self.flat_sz) # [batch_size, flat_sz]\n x = self.fc(x) # [batch_size, embed_dim]\n x = self.hidden_drop(x)\n x = self.bn2(x)\n x = F.relu(x)\n x = torch.mm(x, all_ent.transpose(1, 0)) # [batch_size, ent_num]\n x += self.bias.expand_as(x)\n score = torch.sigmoid(x)\n return score\n\n\n\n\nclass ConvE(RGAT):\n def __init__(self, num_ent, 
num_rel, num_base, init_dim, gcn_dim, embed_dim, n_layer, edge_type, edge_norm,\n ent2textvector, rel2textvector, ent2attr, ent2attrlabel, attr2vector, ent2imgvector, device,\n use_text=True, use_img=False, use_attr=False,\n bias=True, gcn_drop=0., opn='mult', hid_drop=0., input_drop=0., conve_hid_drop=0., feat_drop=0.,\n num_filt=None, ker_sz=None, k_h=None, k_w=None):\n \"\"\"\n :param num_ent: number of entities\n :param num_rel: number of different relations\n :param num_base: number of bases to use\n :param init_dim: initial dimension\n :param gcn_dim: dimension after first layer\n :param embed_dim: dimension after second layer\n :param n_layer: number of layer\n :param edge_type: relation type of each edge, [E]\n :param bias: weather to add bias\n :param gcn_drop: dropout rate in RGATcov\n :param opn: combination operator\n :param hid_drop: gcn output (embedding of each entity) dropout\n :param input_drop: dropout in conve input\n :param conve_hid_drop: dropout in conve hidden layer\n :param feat_drop: feature dropout in conve\n :param num_filt: number of filters in conv2d\n :param ker_sz: kernel size in conv2d\n :param k_h: height of 2D reshape\n :param k_w: width of 2D reshape\n \"\"\"\n super(ConvE, self).__init__(num_ent, num_rel, num_base, init_dim, gcn_dim, embed_dim, n_layer,\n edge_type, edge_norm, ent2textvector, rel2textvector, ent2attr, ent2attrlabel, attr2vector, ent2imgvector, device,\n use_text, use_img, use_attr,\n bias, gcn_drop, opn)\n self.hid_drop, self.input_drop, self.conve_hid_drop, self.feat_drop = hid_drop, input_drop, conve_hid_drop, feat_drop\n self.num_filt = num_filt\n self.ker_sz, self.k_w, self.k_h = ker_sz, k_w, k_h\n\n self.bn0 = torch.nn.BatchNorm2d(1) # one channel, do bn on initial embedding\n self.bn1 = torch.nn.BatchNorm2d(self.num_filt) # do bn on output of conv\n self.bn2 = torch.nn.BatchNorm1d(self.embed_dim)\n\n self.drop = torch.nn.Dropout(self.hid_drop) # gcn output dropout\n self.input_drop = 
torch.nn.Dropout(self.input_drop) # stacked input dropout\n self.feature_drop = torch.nn.Dropout(self.feat_drop) # feature map dropout\n self.hidden_drop = torch.nn.Dropout(self.conve_hid_drop) # hidden layer dropout\n\n\n\n self.conv2d = torch.nn.Conv2d(in_channels=1, out_channels=self.num_filt,\n kernel_size=(self.ker_sz, self.ker_sz), stride=1, padding=0, bias=bias)\n\n flat_sz_h = int(2 * self.k_h) - self.ker_sz + 1 # height after conv\n flat_sz_w = self.k_w - self.ker_sz + 1 # width after conv\n self.flat_sz = flat_sz_h * flat_sz_w * self.num_filt\n self.fc = torch.nn.Linear(self.flat_sz, self.embed_dim) # fully connected projection\n\n def concat(self, ent_embed, rel_embed):\n \"\"\"\n :param ent_embed: [batch_size, embed_dim]\n :param rel_embed: [batch_size, embed_dim]\n :return: stack_input: [B, C, H, W]\n \"\"\"\n ent_embed = ent_embed.view(-1, 1, self.embed_dim)\n rel_embed = rel_embed.view(-1, 1, self.embed_dim)\n stack_input = torch.cat([ent_embed, rel_embed], 1) # [batch_size, 2, embed_dim]\n assert self.embed_dim == self.k_h * self.k_w\n stack_input = stack_input.reshape(-1, 1, 2 * self.k_h, self.k_w) # reshape to 2D [batch, 1, 2*k_h, k_w]\n return stack_input\n\n def forward(self, g, subj, rel):\n \"\"\"\n :param g: dgl graph\n :param sub: subject in batch [batch_size]\n :param rel: relation in batch [batch_size]\n :return: score: [batch_size, ent_num], the prob in link-prediction\n \"\"\"\n x, r = self.init_embed, self.init_rel\n all_ent = self.init_embed\n sub_emb = torch.index_select(x, 0, subj) # filter out embeddings of subjects in this batch\n rel_emb = torch.index_select(r, 0, rel) # filter out embeddings of relations in this batch\n stack_input = self.concat(sub_emb, rel_emb) # [batch_size, 1, 2*k_h, k_w]\n x = self.bn0(stack_input)\n x = self.conv2d(x) # [batch_size, num_filt, flat_sz_h, flat_sz_w]\n x = self.bn1(x)\n x = F.relu(x)\n x = self.feature_drop(x)\n x = x.view(-1, self.flat_sz) # [batch_size, flat_sz]\n x = self.fc(x) # 
[batch_size, embed_dim]\n x = self.hidden_drop(x)\n x = self.bn2(x)\n x = F.relu(x)\n x = torch.mm(x, all_ent.transpose(1, 0)) # [batch_size, ent_num]\n x += self.bias.expand_as(x)\n score = torch.sigmoid(x)\n return score" }, { "alpha_fraction": 0.5956334471702576, "alphanum_fraction": 0.6140836477279663, "avg_line_length": 41.24675369262695, "blob_id": "dbd9361fa93bb18512cf0e0f7e3eb7119861062b", "content_id": "ba8bee22b465549ea0ee26aaebb2c60089aa6301", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3252, "license_type": "no_license", "max_line_length": 127, "num_lines": 77, "path": "/utils/data_set.py", "repo_name": "broliang/HRGAT", "src_encoding": "UTF-8", "text": "from torch.utils.data import Dataset\nimport numpy as np\nimport torch\n\nclass TrainDataset(Dataset):\n def __init__(self, triplets, num_ent, params):\n # , ent2textvector, rel2textvector, ent2attr, ent2attrlabel\n super(TrainDataset, self).__init__()\n self.p = params\n self.triplets = triplets\n self.label_smooth = params.lbl_smooth\n self.num_ent = num_ent\n # self.ent2textvector = ent2textvector\n # self.rel2textvector = rel2textvector\n # self.ent2attr = ent2attr\n # self.ent2attrlabel = ent2attrlabel\n def __len__(self):\n return len(self.triplets)\n\n def __getitem__(self, item):\n ele = self.triplets[item]\n triple, label = torch.tensor(ele['triple'], dtype=torch.long), np.int32(ele['label'])\n # triple_reverse, label_reverse = torch.tensor(ele['triple_reverse'], dtype=torch.long), np.int32(ele['label_reverse'])\n head, rel, tail = triple\n # head_reverse, rel_reverse, tail_reverse = triple_reverse\n\n label = self.get_label(label)\n # label_reverse = self.get_label(label_reverse)\n if self.label_smooth != 0.0:\n label = (1.0 - self.label_smooth) * label + (1.0 / self.num_ent)\n # label_reverse = (1.0 - self.label_smooth) * label_reverse + (1.0 / self.num_ent)\n return triple, label\n # , triple_reverse, label_reverse\n # , 
(self.ent2textvector[head],self.ent2textvector[rel],self.ent2textvector[tail]), \\\n # (self.ent2attr[head], self.ent2attr[tail]), (self.ent2attrlabel[head],self.ent2attrlabel[tail])\n\n def get_label(self, label):\n \"\"\"\n get label corresponding to a (sub, rel) pair\n :param label: a list containing indices of objects corresponding to a (sub, rel) pair\n :return: a tensor of shape [nun_ent]\n \"\"\"\n y = np.zeros([self.num_ent], dtype=np.float32)\n y[label] = 1\n return torch.tensor(y, dtype=torch.float32)\n\n\nclass TestDataset(Dataset):\n def __init__(self, triplets, num_ent, params):\n super(TestDataset, self).__init__()\n self.triplets = triplets\n self.num_ent = num_ent\n self.num_ent = num_ent\n # self.ent2textvector = ent2textvector\n # self.rel2textvector = rel2textvector\n # self.ent2attr = ent2attr\n # self.ent2attrlabel = ent2attrlabel\n def __len__(self):\n return len(self.triplets)\n\n def __getitem__(self, item):\n ele = self.triplets[item]\n triple, label = torch.tensor(ele['triple'], dtype=torch.long), np.int32(ele['label'])\n label = self.get_label(label)\n return triple, label\n # , (self.ent2textvector[head],self.ent2textvector[rel],self.ent2textvector[tail]), \\\n # (self.ent2attr[head], self.ent2attr[tail]), (self.ent2attrlabel[head],self.ent2attrlabel[tail])\n\n def get_label(self, label):\n \"\"\"\n get label corresponding to a (sub, rel) pair\n :param label: a list containing indices of objects corresponding to a (sub, rel) pair\n :return: a tensor of shape [nun_ent]\n \"\"\"\n y = np.zeros([self.num_ent], dtype=np.float32)\n y[label] = 1\n return torch.tensor(y, dtype=torch.float32)" }, { "alpha_fraction": 0.5443311333656311, "alphanum_fraction": 0.556748628616333, "avg_line_length": 38.8803825378418, "blob_id": "b73f33e3d8c9d9af46310db6923577c04e2d61c3", "content_id": "a3704b74eb362d57d7cc36878a2b8710188faa4b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 16670, 
"license_type": "no_license", "max_line_length": 142, "num_lines": 418, "path": "/model/layer.py", "repo_name": "broliang/HRGAT", "src_encoding": "UTF-8", "text": "import torch\nfrom torch import nn\nimport dgl\nimport dgl.function as fn\nimport numpy as np\nfrom torch.autograd import Variable\nfrom torch.nn.parameter import Parameter\nimport torch.nn.functional as F\n\nclass RGATConv(nn.Module):\n def __init__(self, in_channels, out_channels, act=lambda x: x, bias=True, drop_rate=0., opn='corr',\n use_text = True, use_img = False, use_attr = False,\n num_base=-1,\n num_rel=None):\n super(RGATConv, self).__init__()\n self.in_channels = in_channels\n self.out_channels = out_channels\n self.act = act # activation function\n self.device = None\n self.rel = None\n self.rank = 8\n self.opn = opn\n self.use_text = use_text\n self.use_img = use_img\n self.use_attr = use_attr\n print(self.use_text,self.use_img,self.use_attr)\n self.fuse_embed = out_channels\n # if self.use_text:\n # self.in_w_t = self.get_param([in_channels, out_channels])\n # self.out_w_t = self.get_param([in_channels, out_channels])\n #\n # self.text_facotr = self.get_param([self.rank, self.out_channels+1, self.out_channels])\n # # self.fuse_embed += out_channels\n # if self.use_img:\n # self.in_w_i = self.get_param([in_channels, out_channels])\n # self.out_w_i = self.get_param([in_channels, out_channels])\n # # self.loop_w_i = self.get_param([in_channels, out_channels])\n # self.img_facotr = self.get_param([self.rank, self.out_channels+1, self.out_channels])\n # # self.fuse_embed += out_channels\n # if self.use_attr:\n # self.in_w_n = self.get_param([in_channels, out_channels])\n # self.out_w_n = self.get_param([in_channels, out_channels])\n # # self.loop_w_n = self.get_param([in_channels, out_channels])\n # self.attr_factor = self.get_param([self.rank, self.out_channels+1, self.out_channels])\n # # self.fuse_embed += out_channels\n\n\n # relation-type specific parameter\n self.in_w = 
self.get_param([in_channels, out_channels])\n self.out_w = self.get_param([in_channels, out_channels])\n self.loop_w = self.get_param([in_channels, out_channels])\n self.w_rel = self.get_param([in_channels, out_channels]) # transform embedding of relations to next layer\n self.loop_rel = self.get_param([1, in_channels]) # self-loop embedding\n\n self.attn_fc = nn.Linear(3 * in_channels, 1, bias= False)\n\n self.drop = nn.Dropout(drop_rate)\n self.bn = torch.nn.BatchNorm1d(out_channels)\n self.bias = nn.Parameter(torch.zeros(out_channels)) if bias else None\n # self.fusion_weights = self.get_param([1, self.rank])\n # self.fusion_bias = Parameter(torch.Tensor(1, self.out_channels))\n # self.fusion_bias.data.fill_(0)\n if self.w_rel.is_cuda:\n self.DTYPE = torch.cuda.FloatTensor\n else:\n self.DTYPE = torch.FloatTensor\n if num_base > 0:\n self.rel_wt = self.get_param([num_rel * 2, num_base])\n else:\n self.rel_wt = None\n\n def get_param(self, shape):\n param = nn.Parameter(torch.Tensor(*shape))\n nn.init.xavier_normal_(param, gain=nn.init.calculate_gain('relu'))\n return param\n\n def message_func(self, edges: dgl.EdgeBatch):\n edge_type = edges.data['type'] # [E, 1]\n edge_num = edge_type.shape[0]\n edge_data_h = self.comp(edges.src['h'], self.rel[edge_type]) # [E, in_channel]\n msg = torch.cat([torch.matmul(edge_data_h[:edge_num // 2, :], self.in_w),\n torch.matmul(edge_data_h[edge_num // 2:, :], self.out_w)])\n # if self.use_text:\n # edge_data_t = self.comp(edges.src['t'], self.rel[edge_type]) # [E, in_channel]\n # msg_t = torch.cat([torch.matmul(edge_data_t[:edge_num // 2, :], self.in_w_t),\n # torch.matmul(edge_data_t[edge_num // 2:, :], self.out_w_t)])\n # msg_t = torch.cat((Variable(torch.ones(msg_t.shape[0], 1).type(self.DTYPE).to(self.device), requires_grad=False), msg_t), dim=1)\n # fused_msg_t = torch.matmul(msg_t, self.text_facotr)\n #\n # if self.use_img:\n # edge_data_i = self.comp(edges.src['i'], self.rel[edge_type]) # [E, in_channel]\n # msg_i = 
torch.cat([torch.matmul(edge_data_i[:edge_num // 2, :], self.in_w_i),\n # torch.matmul(edge_data_i[edge_num // 2:, :], self.out_w_i)])\n # msg_i = torch.cat((Variable(torch.ones(msg_i.shape[0], 1).type(self.DTYPE).to(self.device), requires_grad=False), msg_i), dim=1)\n # fused_msg_i = torch.matmul(msg_i, self.img_facotr)\n #\n # # msg = torch.cat([msg, msg_i], dim=1)\n # if self.use_attr:\n # edge_data_n = self.comp(edges.src['n'], self.rel[edge_type]) # [E, in_channel]\n # msg_n = torch.cat([torch.matmul(edge_data_n[:edge_num // 2, :], self.in_w_n),\n # torch.matmul(edge_data_n[edge_num // 2:, :], self.out_w_n)])\n # msg_n = torch.cat((Variable(torch.ones(msg_n.shape[0], 1).type(self.DTYPE).to(self.device), requires_grad=False), msg_n), dim=1)\n # fused_msg_n = torch.matmul(msg_n, self.attr_factor)\n #\n # # msg = torch.cat([msg, msg_n], dim=1)\n # fusion_zy = fused_msg_t * fused_msg_i * fused_msg_n\n # fused = torch.matmul(self.fusion_weights, fusion_zy.permute(1,0,2)).squeeze() + self.fusion_bias\n # msg = msg*fused\n msg = torch.nn.functional.softmax(msg)\n msg = msg * edges.data['norm'].reshape(-1, 1) # [E, D] * [E, 1]\n return {'msg': msg, 'e': edges.data['e']}\n\n def reduce_func(self, nodes: dgl.NodeBatch):\n alpha = F.softmax(nodes.mailbox['e'], dim=1)\n h = self.drop(torch.sum(alpha * nodes.mailbox['msg'], dim=1)) / 3\n return {'h': h}\n\n def comp(self, h, edge_data):\n def com_mult(a, b):\n r1, i1 = a[..., 0], a[..., 1]\n r2, i2 = b[..., 0], b[..., 1]\n return torch.stack([r1 * r2 - i1 * i2, r1 * i2 + i1 * r2], dim=-1)\n\n def conj(a):\n a[..., 1] = -a[..., 1]\n return a\n\n def ccorr(a, b):\n return torch.irfft(com_mult(conj(torch.rfft(a, 1)), torch.rfft(b, 1)), 1, signal_sizes=(a.shape[-1],))\n\n if self.opn == 'mult':\n return h * edge_data\n elif self.opn == 'sub':\n return h - edge_data\n elif self.opn == 'corr':\n return ccorr(h, edge_data.expand_as(h))\n else:\n raise KeyError(f'composition operator {self.opn} not recognized.')\n def 
edge_attention(self,edges):\n h2 = torch.cat([edges.src['h'], self.rel[edges.data['type']], edges.dst['h']], dim=1)\n a = self.attn_fc(h2)\n return {'e': F.leaky_relu(a)}\n\n def forward(self, g: dgl.DGLGraph, x, x_t, x_i, x_n, rel_repr, edge_type, edge_norm):\n \"\"\"\n :param g: dgl Graph, a graph without self-loop\n :param x: input node features, [V, in_channel]\n :param rel_repr: input relation features: 1. not using bases: [num_rel*2, in_channel]\n 2. using bases: [num_base, in_channel]\n :param edge_type: edge type, [E]\n :param edge_norm: edge normalization, [E]\n :return: x: output node features: [V, out_channel]\n rel: output relation features: [num_rel*2, out_channel]\n \"\"\"\n self.device = x.device\n g = g.local_var()\n g.ndata['h'] = x\n # if self.use_text:\n # g.ndata['t'] = x_t\n # if self.use_img:\n # g.ndata['i'] = x_i\n # if self.use_attr:\n # g.ndata['n'] = x_n\n g.edata['type'] = edge_type\n g.edata['norm'] = edge_norm\n if self.rel_wt is None:\n self.rel = rel_repr\n else:\n self.rel = torch.mm(self.rel_wt, rel_repr) # [num_rel*2, num_base] @ [num_base, in_c]\n g.apply_edges(self.edge_attention)\n g.update_all(self.message_func, self.reduce_func)\n x = g.ndata.pop('h') + torch.mm(self.comp(x, self.loop_rel), self.loop_w) / 3\n if self.bias is not None:\n x = x + self.bias\n x = self.bn(x)\n\n return self.act(x), torch.matmul(self.rel, self.w_rel)\n\n# RBF Layer\n\nclass RBF(nn.Module):\n \"\"\"\n Transforms incoming data using a given radial basis function:\n u_{i} = rbf(||x - c_{i}|| / s_{i})\n Arguments:\n in_features: size of each input sample\n out_features: size of each output sample\n Shape:\n - Input: (N, in_features) where N is an arbitrary batch size\n - Output: (N, out_features) where N is an arbitrary batch size\n Attributes:\n centres: the learnable centres of shape (out_features, in_features).\n The values are initialised from a standard normal distribution.\n Normalising inputs to have mean 0 and standard deviation 1 is\n 
recommended.\n\n sigmas: the learnable scaling factors of shape (out_features).\n The values are initialised as ones.\n\n basis_func: the radial basis function used to transform the scaled\n distances.\n \"\"\"\n\n def __init__(self, in_features, out_features):\n super(RBF, self).__init__()\n self.in_features = in_features\n self.out_features = out_features\n self.centres = nn.Parameter(torch.Tensor(out_features, in_features))\n self.sigmas = nn.Parameter(torch.Tensor(out_features))\n # self.basis_func = self.gaussian()\n self.reset_parameters()\n\n def gaussian(alpha):\n phi = torch.exp(-1 * alpha.pow(2))\n return phi\n\n def reset_parameters(self):\n nn.init.normal_(self.centres, 0, 1)\n nn.init.constant_(self.sigmas, 1)\n\n def forward(self, input):\n size = (input.shape[0], self.out_features, self.in_features)\n x = input.unsqueeze(1).expand(size)\n c = self.centres.unsqueeze(0).expand(size)\n distances = (x - c).pow(2).sum(-1).pow(0.5) * self.sigmas.unsqueeze(0)\n return self.gaussian(distances)\n\n\n# RBFs\n\ndef gaussian(alpha):\n phi = torch.exp(-1 * alpha.pow(2))\n return phi\n\n\ndef linear(alpha):\n phi = alpha\n return phi\n\n\ndef quadratic(alpha):\n phi = alpha.pow(2)\n return phi\n\n\ndef inverse_quadratic(alpha):\n phi = torch.ones_like(alpha) / (torch.ones_like(alpha) + alpha.pow(2))\n return phi\n\n\ndef multiquadric(alpha):\n phi = (torch.ones_like(alpha) + alpha.pow(2)).pow(0.5)\n return phi\n\n\ndef inverse_multiquadric(alpha):\n phi = torch.ones_like(alpha) / (torch.ones_like(alpha) + alpha.pow(2)).pow(0.5)\n return phi\n\n\ndef spline(alpha):\n phi = (alpha.pow(2) * torch.log(alpha + torch.ones_like(alpha)))\n return phi\n\n\ndef poisson_one(alpha):\n phi = (alpha - torch.ones_like(alpha)) * torch.exp(-alpha)\n return phi\n\n\ndef poisson_two(alpha):\n phi = ((alpha - 2 * torch.ones_like(alpha)) / 2 * torch.ones_like(alpha)) \\\n * alpha * torch.exp(-alpha)\n return phi\n\n\ndef matern32(alpha):\n phi = (torch.ones_like(alpha) + 3 ** 0.5 * 
alpha) * torch.exp(-3 ** 0.5 * alpha)\n return phi\n\n\ndef matern52(alpha):\n phi = (torch.ones_like(alpha) + 5 ** 0.5 * alpha + (5 / 3) \\\n * alpha.pow(2)) * torch.exp(-5 ** 0.5 * alpha)\n return phi\n\n\n\nclass RelGCNCov(nn.Module):\n def __init__(self, in_channels, out_channels, act=lambda x: x, bias=True, drop_rate=0., opn='corr',\n use_text = True, use_img = False, use_attr = False,\n num_base=-1,\n num_rel=None):\n super(RelGCNCov, self).__init__()\n self.in_channels = in_channels\n self.out_channels = out_channels\n self.act = act # activation function\n self.device = None\n self.rel = None\n self.rank = 8\n self.opn = opn\n self.use_text = use_text\n self.use_img = use_img\n self.use_attr = use_attr\n self.fuse_embed = out_channels\n\n self.in_w = self.get_param([in_channels, out_channels])\n self.out_w = self.get_param([in_channels, out_channels])\n self.loop_w = self.get_param([in_channels, out_channels])\n self.w_rel = self.get_param([in_channels, out_channels]) # transform embedding of relations to next layer\n self.loop_rel = self.get_param([1, in_channels]) # self-loop embedding\n\n\n\n self.drop = nn.Dropout(drop_rate)\n self.bn = torch.nn.BatchNorm1d(out_channels)\n self.bias = nn.Parameter(torch.zeros(out_channels)) if bias else None\n # self.fusion_weights = self.get_param([1, self.rank])\n # self.fusion_bias = Parameter(torch.Tensor(1, self.out_channels))\n # self.fusion_bias.data.fill_(0)\n if self.w_rel.is_cuda:\n self.DTYPE = torch.cuda.FloatTensor\n else:\n self.DTYPE = torch.FloatTensor\n if num_base > 0:\n self.rel_wt = self.get_param([num_rel * 2, num_base])\n else:\n self.rel_wt = None\n\n def get_param(self, shape):\n param = nn.Parameter(torch.Tensor(*shape))\n nn.init.xavier_normal_(param, gain=nn.init.calculate_gain('relu'))\n return param\n\n def message_func(self, edges: dgl.EdgeBatch):\n edge_type = edges.data['type'] # [E, 1]\n edge_num = edge_type.shape[0]\n edge_data_h = self.rel[edge_type]\n msg = 
torch.cat([torch.matmul(edge_data_h[:edge_num // 2, :], self.in_w),\n torch.matmul(edge_data_h[edge_num // 2:, :], self.out_w)])\n msg = torch.nn.functional.softmax(msg)\n msg = msg * edges.data['norm'].reshape(-1, 1) # [E, D] * [E, 1]\n return {'msg': msg}\n\n def reduce_func(self, nodes: dgl.NodeBatch):\n return {'h': self.drop(nodes.data['h']) / 3}\n\n def comp(self, h, edge_data):\n def com_mult(a, b):\n r1, i1 = a[..., 0], a[..., 1]\n r2, i2 = b[..., 0], b[..., 1]\n return torch.stack([r1 * r2 - i1 * i2, r1 * i2 + i1 * r2], dim=-1)\n\n def conj(a):\n a[..., 1] = -a[..., 1]\n return a\n\n def ccorr(a, b):\n return torch.irfft(com_mult(conj(torch.rfft(a, 1)), torch.rfft(b, 1)), 1, signal_sizes=(a.shape[-1],))\n\n if self.opn == 'mult':\n return h * edge_data\n elif self.opn == 'sub':\n return h - edge_data\n elif self.opn == 'corr':\n return ccorr(h, edge_data.expand_as(h))\n else:\n raise KeyError(f'composition operator {self.opn} not recognized.')\n\n def forward(self, g: dgl.DGLGraph, x, rel_repr, edge_type, edge_norm):\n \"\"\"\n :param g: dgl Graph, a graph without self-loop\n :param x: input node features, [V, in_channel]\n :param rel_repr: input relation features: 1. not using bases: [num_rel*2, in_channel]\n 2. 
using bases: [num_base, in_channel]\n :param edge_type: edge type, [E]\n :param edge_norm: edge normalization, [E]\n :return: x: output node features: [V, out_channel]\n rel: output relation features: [num_rel*2, out_channel]\n \"\"\"\n self.device = x.device\n g = g.local_var()\n g.ndata['h'] = x\n g.edata['type'] = edge_type\n g.edata['norm'] = edge_norm\n if self.rel_wt is None:\n self.rel = rel_repr\n else:\n self.rel = torch.mm(self.rel_wt, rel_repr) # [num_rel*2, num_base] @ [num_base, in_c]\n g.update_all(self.message_func, fn.sum(msg='msg', out='h'), self.reduce_func)\n x = g.ndata.pop('h') + torch.mm(self.comp(x, self.loop_rel), self.loop_w) / 3\n if self.bias is not None:\n x = x + self.bias\n x = self.bn(x)\n\n return self.act(x), torch.matmul(self.rel, self.w_rel)\n\n\n\nif __name__ == '__main__':\n RGAT = RGATConv(in_channels=10, out_channels=5)\n src, tgt = [0, 1, 0, 3, 2], [1, 3, 3, 4, 4]\n g = dgl.DGLGraph()\n g.add_nodes(5)\n g.add_edges(src, tgt) # src -> tgt\n g.add_edges(tgt, src) # tgt -> src\n edge_type = torch.tensor([0, 0, 0, 1, 1] + [2, 2, 2, 3, 3])\n in_deg = g.in_degrees(range(g.number_of_nodes())).float().numpy()\n norm = in_deg ** -0.5\n norm[np.isinf(norm)] = 0\n g.ndata['xxx'] = norm\n g.apply_edges(lambda edges: {'xxx': edges.dst['xxx'] * edges.src['xxx']})\n edge_norm = g.edata.pop('xxx').squeeze()\n\n x = torch.randn([5, 10])\n rel = torch.randn([4, 10]) # 2*2+1\n x, rel = RGAT(g, x, rel, edge_type, edge_norm)\n print(x.shape, rel.shape)\n" }, { "alpha_fraction": 0.7936508059501648, "alphanum_fraction": 0.7936508059501648, "avg_line_length": 63, "blob_id": "769b39f4e7a9356f413301e47f6629b65c85e636", "content_id": "76642b9baf1c398d7ae707879ecdc452152a8fcd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 63, "license_type": "no_license", "max_line_length": 63, "num_lines": 1, "path": "/model/__init__.py", "repo_name": "broliang/HRGAT", "src_encoding": "UTF-8", "text": "from 
.model import RGAT_DistMult, RGAT_ConvE, RGCN_ConvE, ConvE" }, { "alpha_fraction": 0.8172042965888977, "alphanum_fraction": 0.8172042965888977, "avg_line_length": 45.5, "blob_id": "1a3515386bf81eebb7b60a74d6951796fba94df3", "content_id": "7cbb9d5a09fd82f7a4dc77c35589a810c1e3487f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 93, "license_type": "no_license", "max_line_length": 47, "num_lines": 2, "path": "/utils/__init__.py", "repo_name": "broliang/HRGAT", "src_encoding": "UTF-8", "text": "from .data_process import process, load_link\nfrom .data_set import TestDataset, TrainDataset\n" }, { "alpha_fraction": 0.5599697232246399, "alphanum_fraction": 0.5706110596656799, "avg_line_length": 55.456520080566406, "blob_id": "78c1d0b5ce4437d6b4c96ad539be1b867e7567c2", "content_id": "7785270c8476cbf8bac66a84cf477154067d013c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 21144, "license_type": "no_license", "max_line_length": 182, "num_lines": 368, "path": "/run.py", "repo_name": "broliang/HRGAT", "src_encoding": "UTF-8", "text": "import argparse\r\nimport time\r\nfrom pprint import pprint\r\nimport numpy as np\r\nimport random\r\nfrom pathlib import Path\r\nimport torch\r\nfrom torch.utils.data import DataLoader\r\nimport dgl\r\nfrom dgl.contrib.data import load_data\r\nimport os\r\nimport sys\r\nimport logging\r\n\r\nfrom model import RGAT_DistMult, RGAT_ConvE, RGCN_ConvE, ConvE\r\nfrom utils import process, TrainDataset, TestDataset, load_link\r\n\r\n\r\nos.environ[\"TOKENIZERS_PARALLELISM\"] = \"false\"\r\n\r\nclass Runner(object):\r\n def __init__(self, params):\r\n self.p = params\r\n self.prj_path = Path(__file__).parent.resolve()\r\n self.data = load_link(self.p.dataset)\r\n self.num_ent, self.train_data, self.valid_data, self.test_data, self.num_rels = self.data.num_nodes, self.data.train, self.data.valid, self.data.test, self.data.num_rels\r\n 
self.ent2textvector, self.rel2textvector, self.attr2vector = self.data.ent2textvector, self.data.rel2textvector, self.data.attr2vector\r\n self.attrname, self.ent2value, self.ent2attrlabel = self.data.attrname, self.data.ent2value, self.data.ent2attrlabel\r\n self.ent2imgvector = self.data.ent2imgvector\r\n self.triplets = process({'train': self.train_data, 'valid': self.valid_data, 'test': self.test_data},\r\n self.num_rels)\r\n if self.p.gpu != -1 and torch.cuda.is_available():\r\n self.device = torch.device(f'cuda:{self.p.gpu}')\r\n else:\r\n self.device = torch.device('cpu')\r\n self.p.embed_dim = self.p.k_w * self.p.k_h if self.p.embed_dim is None else self.p.embed_dim # output dim of gnn\r\n self.data_iter = self.get_data_iter()\r\n self.g = self.build_graph()\r\n self.edge_type, self.edge_norm = self.get_edge_dir_and_norm()\r\n self.model = self.get_model()\r\n self.optimizer = torch.optim.Adam(self.model.parameters(), lr=self.p.lr, weight_decay=self.p.l2)\r\n self.best_val_mrr, self.best_epoch, self.best_val_results = 0., 0., {}\r\n self.logger = logging.getLogger()\r\n self.log_name = self.p.name + '.log'\r\n self.log_path = '/home/liangshuang/NewWork/logs'\r\n pprint(vars(self.p))\r\n\r\n def fit(self):\r\n epoch_hits1 = []\r\n epoch_hits3 = []\r\n epoch_hits10 = []\r\n epoch_mrr = []\r\n epoch_mr = []\r\n save_root = self.prj_path / 'checkpoints'\r\n if not save_root.exists():\r\n save_root.mkdir()\r\n save_path = save_root / (self.p.name + '.pt')\r\n\r\n if self.p.restore:\r\n self.load_model(save_path)\r\n print('Successfully Loaded previous model')\r\n\r\n for epoch in range(self.p.max_epochs):\r\n start_time = time.time()\r\n train_loss = self.train()\r\n val_results = self.evaluate('test')\r\n epoch_mr.append(val_results['mr'])\r\n epoch_mrr.append(val_results['mrr'])\r\n epoch_hits1.append(val_results['hits@1'])\r\n epoch_hits3.append(val_results['hits@3'])\r\n epoch_hits10.append(val_results['hits@10'])\r\n if val_results['mrr'] > 
self.best_val_mrr:\r\n self.best_val_results = val_results\r\n self.best_val_mrr = val_results['mrr']\r\n self.best_epoch = epoch\r\n self.save_model(save_path)\r\n print(\r\n f\"[Epoch {epoch}]: Training Loss: {train_loss:.5}, Valid MRR: {val_results['mrr']:.5}, Best Valid MRR: {self.best_val_mrr:.5}, Cost: {time.time() - start_time:.2f}s\")\r\n pprint(vars(self.p))\r\n self.load_model(save_path)\r\n print(f'Loading best model in {self.best_epoch} epoch, Evaluating on Test data')\r\n self.model.eval()\r\n # entity_embedding, rel_embedding = self.model.get_embedding(self.g)\r\n # np.save(os.path.join('./embedding', self.p.name + '_entity_embedding'), entity_embedding.cpu().numpy())\r\n # np.save(os.path.join('./embedding', self.p.name + '_rel_embedding'), rel_embedding.cpu().numpy())\r\n test_results = self.evaluate('test')\r\n print(\r\n f\"MRR: Tail {test_results['left_mrr']:.5}, Head {test_results['right_mrr']:.5}, Avg {test_results['mrr']:.5}\")\r\n print(f\"MR: Tail {test_results['left_mr']:.5}, Head {test_results['right_mr']:.5}, Avg {test_results['mr']:.5}\")\r\n print(f\"hits@1 = {test_results['hits@1']:.5}\")\r\n print(f\"hits@3 = {test_results['hits@3']:.5}\")\r\n print(f\"hits@10 = {test_results['hits@10']:.5}\")\r\n print(\"rank: \", self.model.rank)\r\n np.save(os.path.join('./result', self.p.name + '_epoch_mr'), epoch_mr)\r\n np.save(os.path.join('./result', self.p.name + '_epoch_mrr'), epoch_mrr)\r\n np.save(os.path.join('./result', self.p.name + '_epoch_hits@1'), epoch_hits1)\r\n np.save(os.path.join('./result', self.p.name + '_epoch_hits@3'), epoch_hits1)\r\n np.save(os.path.join('./result', self.p.name + '_epoch_hits@10'), epoch_hits1)\r\n\r\n\r\n def train(self):\r\n self.model.train()\r\n losses = []\r\n train_iter = self.data_iter['train']\r\n for step, (triplets, labels) in enumerate(train_iter):\r\n triplets, labels = triplets.to(self.device), labels.to(self.device)\r\n subj, rel = triplets[:, 0], triplets[:, 1]\r\n pred = self.model(self.g, 
subj, rel) # [batch_size, num_ent]\r\n loss = self.model.calc_loss(pred, labels)\r\n\r\n self.optimizer.zero_grad()\r\n loss.backward()\r\n self.optimizer.step()\r\n losses.append(loss.item())\r\n\r\n loss = np.mean(losses)\r\n return loss\r\n\r\n def evaluate(self, split):\r\n \"\"\"\r\n Function to evaluate the model on validation or test set\r\n :param split: valid or test, set which data-set to evaluate on\r\n :return: results['mr']: Average of ranks_left and ranks_right\r\n results['mrr']: Mean Reciprocal Rank\r\n results['hits@k']: Probability of getting the correct prediction in top-k ranks based on predicted score\r\n results['left_mrr'], results['left_mr'], results['right_mrr'], results['right_mr']\r\n results['left_hits@k'], results['right_hits@k']\r\n \"\"\"\r\n\r\n def get_combined_results(left, right):\r\n results = dict()\r\n assert left['count'] == right['count']\r\n count = float(left['count'])\r\n results['left_mr'] = round(left['mr'] / count, 5)\r\n results['left_mrr'] = round(left['mrr'] / count, 5)\r\n results['right_mr'] = round(right['mr'] / count, 5)\r\n results['right_mrr'] = round(right['mrr'] / count, 5)\r\n results['mr'] = round((left['mr'] + right['mr']) / (2 * count), 5)\r\n results['mrr'] = round((left['mrr'] + right['mrr']) / (2 * count), 5)\r\n for k in [1, 3, 10]:\r\n results[f'left_hits@{k}'] = round(left[f'hits@{k}'] / count, 5)\r\n results[f'right_hits@{k}'] = round(right[f'hits@{k}'] / count, 5)\r\n results[f'hits@{k}'] = round((results[f'left_hits@{k}'] + results[f'right_hits@{k}']) / 2, 5)\r\n return results\r\n\r\n self.model.eval()\r\n left_result = self.predict(split, 'tail')\r\n right_result = self.predict(split, 'head')\r\n res = get_combined_results(left_result, right_result)\r\n return res\r\n\r\n def predict(self, split='valid', mode='tail'):\r\n \"\"\"\r\n Function to run model evaluation for a given mode\r\n :param split: valid or test, set which data-set to evaluate on\r\n :param mode: head or tail\r\n :return: 
results['mr']: Sum of ranks\r\n results['mrr']: Sum of Reciprocal Rank\r\n results['hits@k']: counts of getting the correct prediction in top-k ranks based on predicted score\r\n results['count']: number of total predictions\r\n \"\"\"\r\n with torch.no_grad():\r\n results = dict()\r\n test_iter = self.data_iter[f'{split}_{mode}']\r\n for step, (triplets, labels) in enumerate(test_iter):\r\n triplets, labels = triplets.to(self.device), labels.to(self.device)\r\n subj, rel, obj = triplets[:, 0], triplets[:, 1], triplets[:, 2]\r\n pred = self.model(self.g, subj, rel)\r\n b_range = torch.arange(pred.shape[0], device=self.device)\r\n target_pred = pred[b_range, obj] # [batch_size, 1], get the predictive score of obj\r\n # label=>-1000000, not label=>pred, filter out other objects with same sub&rel pair\r\n pred = torch.where(labels.byte(), -torch.ones_like(pred) * 10000000, pred)\r\n pred[b_range, obj] = target_pred # copy predictive score of obj to new pred\r\n ranks = 1 + torch.argsort(torch.argsort(pred, dim=1, descending=True), dim=1, descending=False)[\r\n b_range, obj] # get the rank of each (sub, rel, obj)\r\n ranks = ranks.float()\r\n results['count'] = torch.numel(ranks) + results.get('count', 0) # number of predictions\r\n results['mr'] = torch.sum(ranks).item() + results.get('mr', 0)\r\n results['mrr'] = torch.sum(1.0 / ranks).item() + results.get('mrr', 0)\r\n\r\n for k in [1, 3, 10]:\r\n results[f'hits@{k}'] = torch.numel(ranks[ranks <= k]) + results.get(f'hits@{k}', 0)\r\n return results\r\n\r\n def save_model(self, path):\r\n \"\"\"\r\n Function to save a model. 
It saves the model parameters, best validation scores,\r\n best epoch corresponding to best validation, state of the optimizer and all arguments for the run.\r\n :param path: path where the model is saved\r\n :return:\r\n \"\"\"\r\n state = {\r\n 'model': self.model.state_dict(),\r\n 'best_val': self.best_val_results,\r\n 'best_epoch': self.best_epoch,\r\n 'optimizer': self.optimizer.state_dict(),\r\n 'args': vars(self.p)\r\n }\r\n torch.save(state, path)\r\n\r\n def load_model(self, path):\r\n \"\"\"\r\n Function to load a saved model\r\n :param path: path where model is loaded\r\n :return:\r\n \"\"\"\r\n state = torch.load(path)\r\n self.best_val_results = state['best_val']\r\n self.best_val_mrr = self.best_val_results['mrr']\r\n self.best_epoch = state['best_epoch']\r\n self.model.load_state_dict(state['model'])\r\n self.optimizer.load_state_dict(state['optimizer'])\r\n\r\n def build_graph(self):\r\n g = dgl.DGLGraph()\r\n g.add_nodes(self.num_ent)\r\n g.add_edges(self.train_data[:, 0], self.train_data[:, 2])\r\n g.add_edges(self.train_data[:, 2], self.train_data[:, 0])\r\n return g\r\n\r\n def get_data_iter(self):\r\n \"\"\"\r\n get data loader for train, valid and test section\r\n :return: dict\r\n \"\"\"\r\n\r\n def get_data_loader(dataset_class, split):\r\n return DataLoader(\r\n dataset_class(self.triplets[split], self.num_ent, self.p),\r\n batch_size=self.p.batch_size,\r\n shuffle=True,\r\n num_workers=self.p.num_workers\r\n )\r\n\r\n return {\r\n 'train': get_data_loader(TrainDataset, 'train'),\r\n 'valid_head': get_data_loader(TestDataset, 'valid_head'),\r\n 'valid_tail': get_data_loader(TestDataset, 'valid_tail'),\r\n 'test_head': get_data_loader(TestDataset, 'test_head'),\r\n 'test_tail': get_data_loader(TestDataset, 'test_tail')\r\n }\r\n\r\n def get_edge_dir_and_norm(self):\r\n \"\"\"\r\n :return: edge_type: indicates type of each edge: [E]\r\n \"\"\"\r\n in_deg = self.g.in_degrees(range(self.g.number_of_nodes())).float().numpy()\r\n norm = in_deg ** 
-0.5\r\n norm[np.isinf(norm)] = 0\r\n self.g.ndata['xxx'] = norm\r\n self.g.apply_edges(lambda edges: {'xxx': edges.dst['xxx'] * edges.src['xxx']})\r\n norm = self.g.edata.pop('xxx').squeeze().to(self.device)\r\n edge_type = torch.tensor(np.concatenate([self.train_data[:, 1], self.train_data[:, 1] + self.num_rels])).to(\r\n self.device)\r\n return edge_type, norm\r\n\r\n def get_model(self):\r\n if self.p.model_func.lower() == 'distmult':\r\n model = RGAT_DistMult(num_ent=self.num_ent, num_rel=self.num_rels, num_base=self.p.num_bases,\r\n init_dim=self.p.init_dim, gcn_dim=self.p.gcn_dim, embed_dim=self.p.embed_dim,\r\n n_layer=self.p.n_layer, edge_type=self.edge_type, edge_norm=self.edge_norm,\r\n ent2textvector = self.ent2textvector, rel2textvector = self.rel2textvector,\r\n ent2attr = self.ent2value, ent2attrlabel = self.ent2attrlabel, attr2vector = self.attr2vector, ent2imgvector = self.ent2imgvector,\r\n use_text = self.p.text, use_img = self.p.img, use_attr = self.p.attr,\r\n device = self.device, bias=self.p.bias, gcn_drop=self.p.gcn_drop, opn=self.p.opn,\r\n hid_drop=self.p.hid_drop)\r\n elif self.p.model_func.lower() == 'conve':\r\n model = RGAT_ConvE(num_ent=self.num_ent, num_rel=self.num_rels, num_base=self.p.num_bases,\r\n init_dim=self.p.init_dim, gcn_dim=self.p.gcn_dim, embed_dim=self.p.embed_dim,\r\n n_layer=self.p.n_layer, edge_type=self.edge_type, edge_norm=self.edge_norm,\r\n ent2textvector=self.ent2textvector, rel2textvector=self.rel2textvector,\r\n ent2attr=self.ent2value, ent2attrlabel=self.ent2attrlabel, attr2vector=self.attr2vector, ent2imgvector = self.ent2imgvector,\r\n use_text=self.p.text, use_img=self.p.img, use_attr=self.p.attr,\r\n device=self.device,bias=self.p.bias, gcn_drop=self.p.gcn_drop, opn=self.p.opn,\r\n hid_drop=self.p.hid_drop, input_drop=self.p.input_drop,\r\n conve_hid_drop=self.p.conve_hid_drop, feat_drop=self.p.feat_drop,\r\n num_filt=self.p.num_filt, ker_sz=self.p.ker_sz, k_h=self.p.k_h, k_w=self.p.k_w)\r\n elif 
self.p.model_func.lower() == 'rgcn':\r\n model = RGCN_ConvE(num_ent=self.num_ent, num_rel=self.num_rels, num_base=self.p.num_bases,\r\n init_dim=self.p.init_dim, gcn_dim=self.p.gcn_dim, embed_dim=self.p.embed_dim,\r\n n_layer=self.p.n_layer, edge_type=self.edge_type, edge_norm=self.edge_norm,\r\n ent2textvector=self.ent2textvector, rel2textvector=self.rel2textvector,\r\n ent2attr=self.ent2value, ent2attrlabel=self.ent2attrlabel, attr2vector=self.attr2vector, ent2imgvector = self.ent2imgvector,\r\n use_text=self.p.text, use_img=self.p.img, use_attr=self.p.attr,\r\n device=self.device,bias=self.p.bias, gcn_drop=self.p.gcn_drop, opn=self.p.opn,\r\n hid_drop=self.p.hid_drop, input_drop=self.p.input_drop,\r\n conve_hid_drop=self.p.conve_hid_drop, feat_drop=self.p.feat_drop,\r\n num_filt=self.p.num_filt, ker_sz=self.p.ker_sz, k_h=self.p.k_h, k_w=self.p.k_w)\r\n elif self.p.model_func.lower() == 'simpleconve':\r\n model = ConvE(num_ent=self.num_ent, num_rel=self.num_rels, num_base=self.p.num_bases,\r\n init_dim=self.p.init_dim, gcn_dim=self.p.gcn_dim, embed_dim=self.p.embed_dim,\r\n n_layer=self.p.n_layer, edge_type=self.edge_type, edge_norm=self.edge_norm,\r\n ent2textvector=self.ent2textvector, rel2textvector=self.rel2textvector,\r\n ent2attr=self.ent2value, ent2attrlabel=self.ent2attrlabel, attr2vector=self.attr2vector, ent2imgvector = self.ent2imgvector,\r\n use_text=self.p.text, use_img=self.p.img, use_attr=self.p.attr,\r\n device=self.device,bias=self.p.bias, gcn_drop=self.p.gcn_drop, opn=self.p.opn,\r\n hid_drop=self.p.hid_drop, input_drop=self.p.input_drop,\r\n conve_hid_drop=self.p.conve_hid_drop, feat_drop=self.p.feat_drop,\r\n num_filt=self.p.num_filt, ker_sz=self.p.ker_sz, k_h=self.p.k_h, k_w=self.p.k_w)\r\n else:\r\n raise KeyError(f'score function {self.p.model_func} not recognized.')\r\n model.to(self.device)\r\n return model\r\n\r\n\r\nif __name__ == '__main__':\r\n parser = argparse.ArgumentParser(description='Parser For Arguments',\r\n 
formatter_class=argparse.ArgumentDefaultsHelpFormatter)\r\n\r\n parser.add_argument('--name', default='test_run', help='Set run name for saving/restoring models')\r\n parser.add_argument('--data', dest='dataset', default='FB15k-237', help='Dataset to use, default: FB15k-237')\r\n parser.add_argument('--model_func', dest='model_func', default='conve',\r\n help='Score Function for Link prediction')\r\n parser.add_argument('--opn', dest='opn', default='corr', help='Composition Operation to be used in CompGCN')\r\n\r\n parser.add_argument('--batch', dest='batch_size', default=256, type=int, help='Batch size')\r\n parser.add_argument('--gpu', type=int, default=0, help='Set GPU Ids : Eg: For CPU = -1, For Single GPU = 0')\r\n parser.add_argument('--epoch', dest='max_epochs', type=int, default=500, help='Number of epochs')\r\n parser.add_argument('--l2', type=float, default=0.0, help='L2 Regularization for Optimizer')\r\n parser.add_argument('--lr', type=float, default=0.001, help='Starting Learning Rate')\r\n parser.add_argument('--lbl_smooth', dest='lbl_smooth', type=float, default=0.1, help='Label Smoothing')\r\n parser.add_argument('--num_workers', type=int, default=8, help='Number of processes to construct batches')\r\n parser.add_argument('--seed', dest='seed', default=12345, type=int, help='Seed for randomization')\r\n\r\n parser.add_argument('--restore', dest='restore', action='store_true',\r\n help='Restore from the previously saved model')\r\n parser.add_argument('--bias', dest='bias', action='store_true', help='Whether to use bias in the model')\r\n\r\n parser.add_argument('--text', dest='text', action='store_true', help='Whether to use text in the model')\r\n\r\n parser.add_argument('--img', dest='img', action='store_true', help='Whether to use img in the model')\r\n\r\n parser.add_argument('--attr', dest='attr', action='store_true', help='Whether to use attr in the model')\r\n\r\n parser.add_argument('--num_bases', dest='num_bases', default=-1, type=int,\r\n 
help='Number of basis relation vectors to use')\r\n parser.add_argument('--init_dim', dest='init_dim', default=100, type=int,\r\n help='Initial dimension size for entities and relations')\r\n parser.add_argument('--gcn_dim', dest='gcn_dim', default=200, type=int, help='Number of hidden units in GCN')\r\n parser.add_argument('--embed_dim', dest='embed_dim', default=None, type=int,\r\n help='Embedding dimension to give as input to score function')\r\n parser.add_argument('--n_layer', dest='n_layer', default=1, type=int, help='Number of GCN Layers to use')\r\n parser.add_argument('--gcn_drop', dest='gcn_drop', default=0.1, type=float, help='Dropout to use in GCN Layer')\r\n parser.add_argument('--hid_drop', dest='hid_drop', default=0.3, type=float, help='Dropout after GCN')\r\n\r\n # ConvE specific hyperparameters\r\n parser.add_argument('--conve_hid_drop', dest='conve_hid_drop', default=0.3, type=float,\r\n help='ConvE: Hidden dropout')\r\n parser.add_argument('--feat_drop', dest='feat_drop', default=0.2, type=float, help='ConvE: Feature Dropout')\r\n parser.add_argument('--input_drop', dest='input_drop', default=0.2, type=float, help='ConvE: Stacked Input Dropout')\r\n parser.add_argument('--k_w', dest='k_w', default=20, type=int, help='ConvE: k_w')\r\n parser.add_argument('--k_h', dest='k_h', default=10, type=int, help='ConvE: k_h')\r\n parser.add_argument('--num_filt', dest='num_filt', default=200, type=int,\r\n help='ConvE: Number of filters in convolution')\r\n parser.add_argument('--ker_sz', dest='ker_sz', default=7, type=int, help='ConvE: Kernel size to use')\r\n\r\n args = parser.parse_args()\r\n if not args.restore:\r\n args.name = time.strftime('%Y_%m_%d') + '_' + time.strftime(\r\n '%H:%M:%S') + '-' + args.model_func.lower() + '-' + args.opn\r\n\r\n random.seed(args.seed)\r\n np.random.seed(args.seed)\r\n torch.manual_seed(args.seed)\r\n torch.cuda.manual_seed(args.seed)\r\n\r\n runner = Runner(args)\r\n runner.fit()\r\n" }, { "alpha_fraction": 
0.6604729890823364, "alphanum_fraction": 0.6790540814399719, "avg_line_length": 24.7391300201416, "blob_id": "a1882e01b11a6fcd0c97ed2444a1eeff954c89be", "content_id": "a9de97382ad2791dbe20044698198dcd9fc60cee", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 592, "license_type": "no_license", "max_line_length": 64, "num_lines": 23, "path": "/README.md", "repo_name": "broliang/HRGAT", "src_encoding": "UTF-8", "text": "# HRGAT\nThe code for hyper-node relational graph attention network\n\n## requirement\ntorch == 1.7.0,\ndgl == 0.4.2,\nsentence-bert\n\n### Train Model\nTo start a simple training process:\n\n```shell script\npython run.py --data FB15k-237 --text --img --attr\n```\n\n - `--model` denotes the link prediction score score function \n - `--gpu` for specifying the GPU to use\n - `--epoch` for number of epochs\n - `--batch` for batch size\n - `--text` for text information\n - `--img` for batch information\n - `--attr` for batch information\n - Rest of the arguments can be listed using `python run.py -h`\n" }, { "alpha_fraction": 0.5740241408348083, "alphanum_fraction": 0.5914322733879089, "avg_line_length": 47.21559524536133, "blob_id": "ad4bb50156705e155f4e1c059baac86bd54866a8", "content_id": "692dde325bf970a3e66931cdb0c7e49fae2851d9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 31537, "license_type": "no_license", "max_line_length": 157, "num_lines": 654, "path": "/utils/data_process.py", "repo_name": "broliang/HRGAT", "src_encoding": "UTF-8", "text": "from collections import defaultdict as ddict\nimport csv\nfrom itertools import islice\nimport numpy as np\nimport pandas as pd\nfrom sentence_transformers import SentenceTransformer\nimport os\nimport h5py\ndef _read_dictionary(filename):\n d = {}\n if 'id' in filename:\n with open(filename, 'r+') as f:\n num = 0\n for line in islice(f,1,None):\n line = line.strip().split('\\t')\n 
d[line[0]] = num\n num += 1\n else:\n with open(filename, 'r+') as f:\n for id, line in enumerate(f):\n line = line.strip()\n d[line] = int(id)\n d = {k: v for k, v in sorted(d.items(), key=lambda kv: (kv[1], kv[0]))}\n return d\n\ndef to_unicode(input):\n # FIXME (lingfan): not sure about python 2 and 3 str compatibility\n return str(input)\n \"\"\" lingfan: comment out for now\n if isinstance(input, unicode):\n return input\n elif isinstance(input, str):\n return input.decode('utf-8', errors='replace')\n return str(input).decode('utf-8', errors='replace')\n \"\"\"\n\ndef _read_triplets(filename):\n with open(filename, 'r+') as f:\n for line in f:\n processed_line = line.strip().split('\\t')\n yield processed_line\n\ndef _read_triplets_as_list(filename, entity_dict, relation_dict):\n l = []\n for triplet in _read_triplets(filename):\n s = entity_dict[triplet[0]]\n r = relation_dict[triplet[1]]\n o = entity_dict[triplet[2]]\n l.append([s, r, o])\n return l\n\ndef _read_same_link(filename):\n d = {}\n with open(filename, 'r+') as f:\n for line in f:\n line = line.strip().split(' ')\n d[line[2]] = line[0]\n return d\n\n\nclass RGCNLinkDataset(object):\n \"\"\"RGCN link prediction dataset\n The dataset contains a graph depicting the connectivity of a knowledge\n base. 
Currently, the knowledge bases from the\n `RGCN paper <https://arxiv.org/pdf/1703.06103.pdf>`_ supported are\n FB15k-237, FB15k, wn18\n The original knowledge base is stored as an RDF file, and this class will\n download and parse the RDF file, and performs preprocessing.\n An object of this class has 5 member attributes needed for link\n prediction:\n num_nodes: int\n number of entities of knowledge base\n num_rels: int\n number of relations (including reverse relation) of knowledge base\n train: numpy.array\n all relation triplets (src, rel, dst) for training\n valid: numpy.array\n all relation triplets (src, rel, dst) for validation\n test: numpy.array\n all relation triplets (src, rel, dst) for testing\n Usually, user don't need to directly use this class. Instead, DGL provides\n wrapper function to load data (see example below).\n Examples\n --------\n Load FB15k-237 dataset\n >>> from dgl.contrib.data import load_data\n >>> data = load_data(dataset='FB15k-237')\n \"\"\"\n\n def __init__(self, name):\n self.name = name\n self.dir = './data'\n self.dir = os.path.join(self.dir, self.name)\n\n def _read_text(self):\n self.ent2text = {}\n self.ent2text_short = {}\n with open(os.path.join(self.dir, \"entity2text.txt\"), 'r') as f:\n ent_lines = f.readlines()\n for line in ent_lines:\n temp = line.strip().split('\\t')\n if len(temp) == 2:\n end = temp[1] # .find(',')\n self.ent2text_short[temp[0]] = temp[1] # [:end]\n\n if self.dir.find(\"FB15\") != -1:\n with open(os.path.join(self.dir, \"entity2textlong.txt\"), 'r') as f:\n ent_lines = f.readlines()\n for line in ent_lines:\n temp = line.strip().split('\\t')\n # first_sent_end_position = temp[1].find(\".\")\n # self.ent2text[temp[0]] = temp[1] # [:first_sent_end_position + 1]\n self.ent2text[temp[0]] = temp[1].split('.')[0] # [:first_sent]\n\n self.entities_short = list(self.ent2text_short.keys())\n self.entities = list(self.ent2text.keys())\n self.rel2text = {}\n with open(os.path.join(self.dir, 
\"relation2text.txt\"), 'r') as f:\n rel_lines = f.readlines()\n for line in rel_lines:\n temp = line.strip().split('\\t')\n self.rel2text[temp[0]] = temp[1]\n\n def _laod_vgg(self):\n self.vgg_feature = h5py.File('/home/liangshuang/NewWork/data/FB15k-237/FB15K_ImageData.h5')\n self.img_index = {}\n with open(os.path.join('./data/FB15k-237/FB15K_ImageIndex.txt'), 'r') as f:\n ent_lines = f.readlines()\n for line in ent_lines:\n temp = line.strip().split('\\t')\n self.img_index[temp[0]] = temp[1] # [:end]\n self.ent2imgvector = []\n for k,v in self.entity_dict.items():\n if k in self.img_index.keys():\n self.ent2imgvector.append(self.vgg_feature[self.img_index[k]])\n else:\n self.ent2imgvector.append(np.zeros((1,4096)))\n\n def _read_numerical(self):\n self.ent2num = {}\n self.attributes = []\n with open(os.path.join(self.dir, \"FB15K_NumericalTriples.txt\"), 'r') as f:\n num_lines = f.readlines()\n for line in num_lines:\n temp = line.strip().split('\\t')\n attr = temp[1].split('/')[-1]\n self.attributes.append(attr)\n self.attributes = set(self.attributes)\n self.attributes_dict = {k: v+1 for v,k in enumerate(self.attributes)}\n self.ent2num = {k: {j: 0.0 for j in self.attributes} for k,v in self.entity_dict.items()} #every ent's all real numerical value(if don't have, its 0)\n with open(os.path.join(self.dir, \"FB15K_NumericalTriples.txt\"), 'r') as f:\n num_lines = f.readlines()\n for line in num_lines:\n temp = line.strip().split('\\t')\n ent = temp[0]\n attr = temp[1].split('/')[-1]\n value = temp[2]\n self.ent2num[ent][attr] = 1.0\n self.ent2numpd = pd.DataFrame(self.ent2num).T # row is ent, column is attr_name\n self.ent2attrlabel = self.ent2numpd.copy()\n self.ent2attrlabel[self.ent2attrlabel != 0] = 1.0 # which attr ent has\n # self.ent2numpd = self.ent2numpd / self.ent2numpd.max(axis = 0) #normalization num_feature to 0-1\n self.ent2attrlabel = self.ent2attrlabel.T\n if os.path.exists(os.path.join(self.dir,'attr2textvector.npz')):\n print('***********load 
bert attr vector successfully*************')\n self.attr2textvector = np.load(os.path.join(self.dir,'attr2textvector.npz'))\n self.attr2textvector = {k:v for k,v in zip(self.attr2textvector['x'],self.attr2textvector['y'])}\n else:\n self.bert = SentenceTransformer(\n '/home/liangshuang/.cache/torch/sentence_transformers/sbert.net_models_paraphrase-distilroberta-base-v1_part')\n self.attr2textvector = {k: self.bert.encode(k.replace('.', ' ')) for k in self.attributes}\n np.savez(os.path.join(self.dir, 'attr2textvector.npz'), x=np.array(list(self.attr2textvector.keys())),\n y=np.array(list(self.attr2textvector.values())))\n # self.ent2numpd = (self.ent2numpd - self.ent2numpd.mean(axis = 0)) / self.ent2numpd.std(axis = 0) #normalization num_feature\n def _load_bert(self):\n if os.path.exists(os.path.join(self.dir,'ent2textvector.npz')):\n print('***********load bert ent vector successfully*************')\n self.ent2textvector = np.load(os.path.join(self.dir,'ent2textvector.npz'))\n self.ent2textvector = {k:v for k,v in zip(self.ent2textvector['x'],self.ent2textvector['y'])}\n\n self.ent2textvector_short = np.load(os.path.join(self.dir,'ent2textvector_short.npz'))\n self.ent2textvector_short = {k:v for k,v in zip(self.ent2textvector_short['x'],self.ent2textvector_short['y'])}\n\n if os.path.exists(os.path.join(self.dir,'rel2textvector.npz')):\n print('***********load bert rel vector successfully*************')\n self.rel2textvector = np.load(os.path.join(self.dir,'rel2textvector.npz'))\n self.rel2textvector = {k:v for k,v in zip(self.rel2textvector['x'],self.rel2textvector['y'])}\n else:\n print('*********there is no bert vector, begin to encode text*************')\n self.bert = SentenceTransformer('/home/liangshuang/.cache/torch/sentence_transformers/sbert.net_models_paraphrase-distilroberta-base-v1_part')\n self.ent2textvector = {k: self.bert.encode(v) for k,v in self.ent2text.items()}\n self.ent2textvector_short = {k: self.bert.encode(v) for k,v in 
self.ent2text_short.items()}\n self.rel2textvector = {k: self.bert.encode(v) for k,v in self.rel2text.items()}\n np.savez(os.path.join(self.dir,'ent2textvector.npz'), x = np.array(list(self.ent2textvector.keys())),\n y = np.array(list(self.ent2textvector.values())))\n np.savez(os.path.join(self.dir,'ent2textvector_short.npz'), x = np.array(list(self.ent2textvector_short.keys())),\n y = np.array(list(self.ent2textvector_short.values())))\n np.savez(os.path.join(self.dir,'rel2textvector.npz'), x = np.array(list(self.rel2textvector.keys())),\n y = np.array(list(self.rel2textvector.values())))\n\n\n def load(self):\n if os.path.exists(os.path.join(self.dir, 'entity2id.txt')) and os.path.exists(os.path.join(self.dir, 'relation2id.txt')):\n entity_path = os.path.join(self.dir, 'entity2id.txt')\n relation_path = os.path.join(self.dir, 'relation2id.txt')\n else:\n entity_path = os.path.join(self.dir, 'entities.txt')\n relation_path = os.path.join(self.dir, 'relations.txt')\n train_path = os.path.join(self.dir, 'train.tsv')\n valid_path = os.path.join(self.dir, 'dev.tsv')\n test_path = os.path.join(self.dir, 'test.tsv')\n entity_dict = _read_dictionary(entity_path)\n relation_dict = _read_dictionary(relation_path)\n self._read_text()\n self._load_bert()\n self.entity_dict = entity_dict\n self.relation_dict = relation_dict\n self.train = np.asarray(_read_triplets_as_list(train_path, entity_dict, relation_dict))\n self.valid = np.asarray(_read_triplets_as_list(valid_path, entity_dict, relation_dict))\n self.test = np.asarray(_read_triplets_as_list(test_path, entity_dict, relation_dict))\n\n self._laod_vgg()\n self._read_numerical()\n self.ent2num = self.ent2numpd.to_numpy()\n self.attr2vector = np.array(\n [self.attr2textvector[i] for i in self.attributes]) # attribute bert vector (116, 768)\n self.attr2vector = np.matmul(self.ent2num , self.attr2vector)\n self.attrname = self.ent2numpd.columns # attribute name\n self.ent2value = None\n self.ent2attrlabel = None\n\n 
self.ent2textvector = [self.ent2textvector[k] for k,v in self.entity_dict.items()]\n self.ent2textvector = np.array(self.ent2textvector)\n\n self.rel2textvector_ = []\n for k,v in self.relation_dict.items():\n if k in self.rel2textvector.keys():\n self.rel2textvector_.append(self.rel2textvector[k])\n else:\n self.rel2textvector_.append(np.zeros(768))\n self.rel2textvector = np.array(self.rel2textvector_)\n\n # self.ent2textvector = {self.entity_dict[k]: v for k,v in self.ent2textvector.items()}\n # self.rel2textvector = {self.relation_dict[k]: v for k,v in self.rel2textvector.items()}\n # self.attr2textvector = {self.attributes_dict[k]: v for k,v in self.attr2textvector.items()}\n\n self.num_nodes = len(entity_dict)\n print(\"# entities: {}\".format(self.num_nodes))\n self.num_rels = len(relation_dict)\n print(\"# relations: {}\".format(self.num_rels))\n print(\"# edges: {}\".format(len(self.train)))\n\n # for ent,v in self.ent2num.items():\n # temp = {}\n # attr_label = []\n # for attr_name, value in v.items():\n # if value != 0:\n # temp[attr_name] = (self.attr2textvector[attr_name], value, self.ent2numpd[attr_name][ent])\n # attr_label.append(1)\n # else:\n # attr_label.append(0)\n\n # if len(temp) == 0:\n # self.ent2numreal[ent] = np.zeros(768)\n # self.ent2numreal[ent] = temp\n # self.ent2attr[ent] = attr_label\n\n\n\n\nclass RGCNLinkDataset_DB(object):\n \"\"\"RGCN link prediction dataset\n The dataset contains a graph depicting the connectivity of a knowledge\n base. 
Currently, the knowledge bases from the\n `RGCN paper <https://arxiv.org/pdf/1703.06103.pdf>`_ supported are\n FB15k-237, FB15k, wn18\n The original knowledge base is stored as an RDF file, and this class will\n download and parse the RDF file, and performs preprocessing.\n An object of this class has 5 member attributes needed for link\n prediction:\n num_nodes: int\n number of entities of knowledge base\n num_rels: int\n number of relations (including reverse relation) of knowledge base\n train: numpy.array\n all relation triplets (src, rel, dst) for training\n valid: numpy.array\n all relation triplets (src, rel, dst) for validation\n test: numpy.array\n all relation triplets (src, rel, dst) for testing\n Usually, user don't need to directly use this class. Instead, DGL provides\n wrapper function to load data (see example below).\n Examples\n --------\n Load FB15k-237 dataset\n >>> from dgl.contrib.data import load_data\n >>> data = load_data(dataset='FB15k-237')\n \"\"\"\n\n def __init__(self, name):\n self.name = name\n self.dir = '/home/liangshuang/MultiGCN/data'\n self.dir = os.path.join(self.dir, self.name)\n\n def _read_text(self):\n self.ent2text = {}\n self.ent2text_short = {}\n with open(os.path.join(self.dir, \"entity2text.txt\"), 'r') as f:\n ent_lines = f.readlines()\n for line in ent_lines:\n temp = line.strip().split('\\t')\n if len(temp) == 2:\n end = temp[1] # .find(',')\n self.ent2text_short[temp[0]] = temp[1] # [:end]\n\n with open(os.path.join(self.dir, \"entity2textlong.txt\"), 'r') as f:\n ent_lines = f.readlines()\n for line in ent_lines:\n temp = line.strip().split('\\t')\n # first_sent_end_position = temp[1].find(\".\")\n # self.ent2text[temp[0]] = temp[1] # [:first_sent_end_position + 1]\n self.ent2text[temp[0]] = temp[1].split('.')[0] # [:first_sent]\n\n self.entities_short = list(self.ent2text_short.keys())\n self.entities = list(self.ent2text.keys())\n self.rel2text = {}\n with open(os.path.join(self.dir, \"relation2text.txt\"), 'r') 
as f:\n rel_lines = f.readlines()\n for line in rel_lines:\n temp = line.strip().split('\\t')\n self.rel2text[temp[0]] = temp[1]\n\n def _laod_vgg(self):\n self.vgg_feature = h5py.File(self.dir + '/' + self.name + '_ImageData.h5')\n self.img_index = {}\n with open(self.dir + '/' + self.name + '_ImageIndex.txt', 'r') as f:\n ent_lines = f.readlines()\n for line in ent_lines:\n temp = line.strip().split('\\t')\n self.img_index[temp[0]] = temp[1] # [:end]\n self.ent2imgvector = []\n for k,v in self.entity_dict.items():\n if k in self.img_index.keys():\n self.ent2imgvector.append(self.vgg_feature[self.img_index[k]])\n else:\n self.ent2imgvector.append(np.zeros((1,4096)))\n\n def _read_numerical(self):\n self.ent2num = {}\n self.attributes = []\n\n\n with open(os.path.join(self.dir + '/' + self.name + '_NumericalTriples.txt'), 'r') as f:\n num_lines = f.readlines()\n for line in num_lines:\n if '\\t' in line:\n temp = line.strip().split('\\t')\n else:\n temp = line.strip().split(' ')\n attr = temp[1].split('/')[-1]\n self.attributes.append(attr)\n self.attributes = set(self.attributes)\n self.attributes_dict = {k: v+1 for v,k in enumerate(self.attributes)}\n\n if os.path.exists(os.path.join(self.dir,'attr2textvector.npz')):\n print('***********load bert attr vector successfully*************')\n self.attr2textvector = np.load(os.path.join(self.dir,'attr2textvector.npz'))\n self.attr2textvector = {k:v for k,v in zip(self.attr2textvector['x'],self.attr2textvector['y'])}\n else:\n self.bert = SentenceTransformer(\n '/home/liangshuang/.cache/torch/sentence_transformers/sbert.net_models_paraphrase-distilroberta-base-v1_part')\n self.attr2textvector = {k: self.bert.encode(k.replace('.', ' ')) for k in self.attributes}\n np.savez(os.path.join(self.dir, 'attr2textvector.npz'), x=np.array(list(self.attr2textvector.keys())),\n y=np.array(list(self.attr2textvector.values())))\n\n self.ent2num = {k: {j: 0.0 for j in self.attributes} for k in self.entity_dict.keys()} #every ent's all 
real numerical value(if don't have, its 0)\n with open(os.path.join(self.dir + '/' + self.name + '_NumericalTriples.txt'), 'r') as f:\n num_lines = f.readlines()\n for line in num_lines:\n if '\\t' in line:\n temp = line.strip().split('\\t')\n else:\n temp = line.strip().split(' ')\n ent = temp[0]\n attr = temp[1].split('/')[-1]\n value = self.attr2textvector[attr]\n # if '\"' in temp[2]:\n # value = temp[2].split('\"')[1]\n # elif '-' in temp[2]:\n # value = temp[2].split('-')[1]\n self.ent2num[ent][attr] = 1.0\n self.ent2numpd = pd.DataFrame(self.ent2num).T # row is ent, column is attr_name\n self.ent2attrlabel = self.ent2numpd.copy()\n self.ent2attrlabel[self.ent2attrlabel != 0] = 1.0 # which attr ent has\n # self.ent2numpd = self.ent2numpd / self.ent2numpd.max(axis = 0) #normalization num_feature to 0-1\n self.ent2attrlabel = self.ent2attrlabel.T\n\n # self.ent2numpd = (self.ent2numpd - self.ent2numpd.mean(axis = 0)) / self.ent2numpd.std(axis = 0) #normalization num_feature\n def _load_bert(self):\n if os.path.exists(os.path.join(self.dir,'ent2textvector_short.npz')):\n print('***********load bert ent vector successfully*************')\n\n self.ent2textvector_short = np.load(os.path.join(self.dir,'ent2textvector_short.npz'))\n self.ent2textvector_short = {k:v for k,v in zip(self.ent2textvector_short['x'],self.ent2textvector_short['y'])}\n\n if os.path.exists(os.path.join(self.dir,'rel2textvector.npz')):\n print('***********load bert rel vector successfully*************')\n self.rel2textvector = np.load(os.path.join(self.dir,'rel2textvector.npz'))\n self.rel2textvector = {k:v for k,v in zip(self.rel2textvector['x'],self.rel2textvector['y'])}\n else:\n print('*********there is no bert vector, begin to encode text*************')\n self.bert = SentenceTransformer('/home/liangshuang/.cache/torch/sentence_transformers/sbert.net_models_paraphrase-distilroberta-base-v1_part')\n self.ent2textvector = {k: self.bert.encode(v) for k,v in self.ent2text.items()}\n 
self.ent2textvector_short = {k: self.bert.encode(v) for k,v in self.ent2text_short.items()}\n self.rel2textvector = {k: self.bert.encode(v) for k,v in self.rel2text.items()}\n np.savez(os.path.join(self.dir,'ent2textvector.npz'), x = np.array(list(self.ent2textvector.keys())),\n y = np.array(list(self.ent2textvector.values())))\n np.savez(os.path.join(self.dir,'ent2textvector_short.npz'), x = np.array(list(self.ent2textvector_short.keys())),\n y = np.array(list(self.ent2textvector_short.values())))\n np.savez(os.path.join(self.dir,'rel2textvector.npz'), x = np.array(list(self.rel2textvector.keys())),\n y = np.array(list(self.rel2textvector.values())))\n\n\n def load(self):\n if os.path.exists(os.path.join(self.dir, 'entity2id.txt')) and os.path.exists(os.path.join(self.dir, 'relation2id.txt')):\n entity_path = os.path.join(self.dir, 'entity2id.txt')\n relation_path = os.path.join(self.dir, 'relation2id.txt')\n else:\n entity_path = os.path.join(self.dir, 'entities.txt')\n relation_path = os.path.join(self.dir, 'relations.txt')\n train_path = os.path.join(self.dir, 'train.tsv')\n valid_path = os.path.join(self.dir, 'dev.tsv')\n test_path = os.path.join(self.dir, 'test.tsv')\n entity_dict = _read_dictionary(entity_path)\n\n relation_dict = _read_dictionary(relation_path)\n\n same_link = os.path.join(self.dir, self.name + '_SameAsLink.txt')\n self.same_link = _read_same_link(same_link)\n\n\n self._read_text()\n\n self._load_bert()\n\n self.entity_dict = entity_dict\n self.relation_dict = relation_dict\n self.train = np.asarray(_read_triplets_as_list(train_path, entity_dict, relation_dict))\n self.valid = np.asarray(_read_triplets_as_list(valid_path, entity_dict, relation_dict))\n self.test = np.asarray(_read_triplets_as_list(test_path, entity_dict, relation_dict))\n self._laod_vgg()\n self._read_numerical()\n self.ent2num = self.ent2numpd.to_numpy()\n self.attr2vector = np.array(\n [self.attr2textvector[i] for i in self.attributes]) # attribute bert vector (116, 
768)\n self.attr2vector = np.matmul(self.ent2num , self.attr2vector)\n self.attrname = self.ent2numpd.columns # attribute name\n\n # self.ent2value = [np.array(self.ent2numpd[k]) for k, v in\n # self.entity_dict.items()] # ent num value (ent_num, 116)\n # self.ent2attrlabel = [np.array(self.ent2attrlabel[k]) for k, v in\n # self.entity_dict.items()] # ent num attr label (ent_num, 116)\n self.ent2value = None\n self.ent2attrlabel = None\n self.ent2textvector = []\n for k,v in self.entity_dict.items():\n if k in self.same_link.keys():\n self.ent2textvector.append(self.ent2textvector_short[self.same_link[k]])\n else:\n self.ent2textvector.append(np.zeros(768))\n # self.ent2textvector = [self.ent2textvector_short[self.same_link[k]] for k,v in self.entity_dict.items()]\n # self.rel2textvector = [self.rel2textvector[k] for k,v in self.relation_dict.items()]\n self.rel2textvector = None\n # self.ent2textvector = {self.entity_dict[k]: v for k,v in self.ent2textvector.items()}\n # self.rel2textvector = {self.relation_dict[k]: v for k,v in self.rel2textvector.items()}\n # self.attr2textvector = {self.attributes_dict[k]: v for k,v in self.attr2textvector.items()}\n\n self.num_nodes = len(entity_dict)\n print(\"# entities: {}\".format(self.num_nodes))\n self.num_rels = len(relation_dict)\n print(\"# relations: {}\".format(self.num_rels))\n print(\"# edges: {}\".format(len(self.train)))\n\n\n\nclass RGCNLinkDataset_WN(object):\n \"\"\"RGCN link prediction dataset\n The dataset contains a graph depicting the connectivity of a knowledge\n base. 
Currently, the knowledge bases from the\n `RGCN paper <https://arxiv.org/pdf/1703.06103.pdf>`_ supported are\n FB15k-237, FB15k, wn18\n The original knowledge base is stored as an RDF file, and this class will\n download and parse the RDF file, and performs preprocessing.\n An object of this class has 5 member attributes needed for link\n prediction:\n num_nodes: int\n number of entities of knowledge base\n num_rels: int\n number of relations (including reverse relation) of knowledge base\n train: numpy.array\n all relation triplets (src, rel, dst) for training\n valid: numpy.array\n all relation triplets (src, rel, dst) for validation\n test: numpy.array\n all relation triplets (src, rel, dst) for testing\n Usually, user don't need to directly use this class. Instead, DGL provides\n wrapper function to load data (see example below).\n Examples\n --------\n Load FB15k-237 dataset\n >>> from dgl.contrib.data import load_data\n >>> data = load_data(dataset='FB15k-237')\n \"\"\"\n\n def __init__(self, name):\n self.name = name\n self.dir = '/home/liangshuang/MultiGCN/data'\n self.dir = os.path.join(self.dir, self.name)\n\n def _read_text(self):\n self.ent2text = {}\n with open(os.path.join(self.dir, \"entity2text.txt\"), 'r') as f:\n ent_lines = f.readlines()\n for line in ent_lines:\n temp = line.strip().split('\\t')\n self.ent2text[temp[0]] = temp[1] # [:end]\n\n self.entities = list(self.ent2text.keys())\n self.rel2text = {}\n with open(os.path.join(self.dir, \"relation2text.txt\"), 'r') as f:\n rel_lines = f.readlines()\n for line in rel_lines:\n temp = line.strip().split('\\t')\n self.rel2text[temp[0]] = temp[1]\n\n def _load_bert(self):\n if os.path.exists(os.path.join(self.dir,'ent2textvector.npz')):\n print('***********load bert ent vector successfully*************')\n self.ent2textvector = np.load(os.path.join(self.dir,'ent2textvector.npz'))\n self.ent2textvector = {k:v for k,v in zip(self.ent2textvector['x'],self.ent2textvector['y'])}\n\n if 
os.path.exists(os.path.join(self.dir,'rel2textvector.npz')):\n print('***********load bert rel vector successfully*************')\n self.rel2textvector = np.load(os.path.join(self.dir,'rel2textvector.npz'))\n self.rel2textvector = {k:v for k,v in zip(self.rel2textvector['x'],self.rel2textvector['y'])}\n else:\n print('*********there is no bert vector, begin to encode text*************')\n self.bert = SentenceTransformer('/home/liangshuang/.cache/torch/sentence_transformers/sbert.net_models_paraphrase-distilroberta-base-v1_part')\n self.ent2textvector = {k: self.bert.encode(v) for k,v in self.ent2text.items()}\n self.rel2textvector = {k: self.bert.encode(v) for k,v in self.rel2text.items()}\n np.savez(os.path.join(self.dir,'ent2textvector.npz'), x = np.array(list(self.ent2textvector.keys())),\n y = np.array(list(self.ent2textvector.values())))\n np.savez(os.path.join(self.dir,'rel2textvector.npz'), x = np.array(list(self.rel2textvector.keys())),\n y = np.array(list(self.rel2textvector.values())))\n\n\n def load(self):\n entity_path = os.path.join(self.dir, 'entities.txt')\n relation_path = os.path.join(self.dir, 'relations.txt')\n train_path = os.path.join(self.dir, 'train.tsv')\n valid_path = os.path.join(self.dir, 'dev.tsv')\n test_path = os.path.join(self.dir, 'test.tsv')\n entity_dict = _read_dictionary(entity_path)\n relation_dict = _read_dictionary(relation_path)\n self.entity_dict = entity_dict\n self.relation_dict = relation_dict\n self._read_text()\n self._load_bert()\n self.train = np.asarray(_read_triplets_as_list(train_path, entity_dict, relation_dict))\n self.valid = np.asarray(_read_triplets_as_list(valid_path, entity_dict, relation_dict))\n self.test = np.asarray(_read_triplets_as_list(test_path, entity_dict, relation_dict))\n\n\n self.attr2vector = None\n self.attrname = None # attribute name\n self.ent2value = None\n self.ent2attrlabel = None\n\n self.ent2textvector = [self.ent2textvector[k] for k,v in self.entity_dict.items()]\n self.ent2textvector 
= np.array(self.ent2textvector)\n self.ent2imgvector = None\n self.rel2textvector_ = []\n for k,v in self.relation_dict.items():\n if k in self.rel2textvector.keys():\n self.rel2textvector_.append(self.rel2textvector[k])\n else:\n self.rel2textvector_.append(np.zeros(768))\n self.rel2textvector = np.array(self.rel2textvector_)\n\n # self.ent2textvector = {self.entity_dict[k]: v for k,v in self.ent2textvector.items()}\n # self.rel2textvector = {self.relation_dict[k]: v for k,v in self.rel2textvector.items()}\n # self.attr2textvector = {self.attributes_dict[k]: v for k,v in self.attr2textvector.items()}\n\n self.num_nodes = len(entity_dict)\n print(\"# entities: {}\".format(self.num_nodes))\n self.num_rels = len(relation_dict)\n print(\"# relations: {}\".format(self.num_rels))\n print(\"# edges: {}\".format(len(self.train)))\n\n\ndef load_link(dataset):\n if 'DB' in dataset or 'YAGO' in dataset:\n data = RGCNLinkDataset_DB(dataset)\n elif 'WN' in dataset:\n data = RGCNLinkDataset_WN(dataset)\n else:\n data = RGCNLinkDataset(dataset)\n data.load()\n return data\n\ndef process(dataset, num_rel):\n \"\"\"\n pre-process dataset\n :param dataset: a dictionary containing 'train', 'valid' and 'test' data.\n :param num_rel: relation number\n :return:\n \"\"\"\n sr2o = ddict(set)\n for subj, rel, obj in dataset['train']:\n sr2o[(subj, rel)].add(obj)\n sr2o[(obj, rel + num_rel)].add(subj)\n\n # for subj, rel, obj in dataset['train']:\n # sr2o[(subj, rel)].add(obj)\n # sr2o[(obj, rel)].add(subj)\n sr2o_train = {k: list(v) for k, v in sr2o.items()}\n for split in ['valid', 'test']:\n for subj, rel, obj in dataset[split]:\n sr2o[(subj, rel)].add(obj)\n sr2o[(obj, rel + num_rel)].add(subj)\n sr2o_all = {k: list(v) for k, v in sr2o.items()} # sr2o[(obj, rel + num_rel)].add(subj)\n triplets = ddict(list)\n\n # for subj, rel, obj in dataset['train']:\n # triplets['train'].append({'triple':(subj, rel, -1), 'label': sr2o[(subj, rel)]})\n # 
triplets['train'].append({'triple_reverse':(obj, rel + num_rel, -1), 'label_reverse': sr2o[(obj, rel + num_rel)]})\n\n # for (subj, rel), obj in sr2o_train.items():\n # triplets['train'].append({'triple': (subj, rel, -1), 'label': sr2o_train[(subj, rel)]})\n\n\n for (subj, rel), obj in sr2o_train.items():\n triplets['train'].append({'triple': (subj, rel, -1), 'label': sr2o_train[(subj, rel)]})\n for (obj, rel), subj in sr2o_train.items():\n triplets['train'].append({'triple': (obj, rel, -1), 'label': sr2o_train[(obj, rel)]})\n\n\n for split in ['valid', 'test']:\n for subj, rel, obj in dataset[split]:\n triplets[f\"{split}_tail\"].append({'triple': (subj, rel, obj), 'label': sr2o_all[(subj, rel)]})\n triplets[f\"{split}_head\"].append(\n {'triple': (obj, rel + num_rel, subj), 'label': sr2o_all[(obj, rel + num_rel)]})\n triplets = dict(triplets)\n return triplets\n\n\n\n\n" } ]
8
uglkjgj/morse-code-tranlator
https://github.com/uglkjgj/morse-code-tranlator
136c9e4b9f8d1deb759b1b9b668629873c9232bd
7d397846001c676c64fb19153cd1defa70f3b864
f212a24fc2a4eed5e027a8567d3732069f030d2e
refs/heads/main
2023-04-19T23:15:37.515345
2021-05-16T02:36:27
2021-05-16T02:36:27
367,769,886
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.637005627155304, "alphanum_fraction": 0.6398305296897888, "avg_line_length": 24.321428298950195, "blob_id": "48cca42ed59a5f3e3065ed216506b8f1e9cbbd5e", "content_id": "3ba0f702ad7992ab85531253c06bb69f9cd5b4dc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 708, "license_type": "no_license", "max_line_length": 99, "num_lines": 28, "path": "/more_code_translator.py", "repo_name": "uglkjgj/morse-code-tranlator", "src_encoding": "UTF-8", "text": "import requests, json\n\nurl = \"https://gsamuel-morse-code-v1.p.rapidapi.com/\"\n\nfromw = input(\"Do you want to translate to morse code or text\\n\")\ntrans = input(\"Type what you want to be translated\\nNote: It should be morse code or plain text\\n\")\n\nif fromw == \"text\":\n payload = {\n \"code\": trans\n }\nelse:\n payload = {\n \"text\": trans\n }\n\nheaders = {\n 'content-type': \"application/json\",\n 'x-rapidapi-key': \"GET KEY FROM RAPIDAPI\",\n 'x-rapidapi-host': \"gsamuel-morse-code-v1.p.rapidapi.com\"\n }\n\nresponse = requests.request(\"POST\", url, data=json.dumps(payload), headers=headers)\n\nif fromw == \"text\":\n print(response.json()['text'])\nelse:\n print(response.json()['code'])" } ]
1
lingzhic/BayesianOpt4dftu
https://github.com/lingzhic/BayesianOpt4dftu
d5fef54c7261ce4d5b0f9a43c57248507dc4e0df
44ae2293169f72adbc2bab8ebb6288bb16f0a695
4c125e091f515e722c811a7b7b78f7478e74c2a1
refs/heads/master
2023-08-05T13:13:28.599842
2021-09-14T16:54:27
2021-09-14T16:54:27
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5264005064964294, "alphanum_fraction": 0.5384508967399597, "avg_line_length": 37.82500076293945, "blob_id": "aa603bea91bc08cbb3637188057bdd7c65054e4f", "content_id": "c3e946dd9eb371434be5b4353ae633e75fb75ee6", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 21742, "license_type": "permissive", "max_line_length": 124, "num_lines": 560, "path": "/BayesOpt4dftu/core.py", "repo_name": "lingzhic/BayesianOpt4dftu", "src_encoding": "UTF-8", "text": "import os\nimport json\nimport bayes_opt\nimport subprocess\nimport numpy as np\nimport pandas as pd\nimport pymatgen as mg\nimport xml.etree.ElementTree as ET\n\nfrom ase import Atoms, Atom\nfrom ase.calculators.vasp.vasp import Vasp\nfrom ase.dft.kpoints import *\n\nfrom pymatgen.io.vasp.inputs import Incar, Kpoints, Potcar, Poscar\nfrom pymatgen.core.lattice import Lattice\nfrom pymatgen.core.structure import Structure, Molecule\nfrom pymatgen.io.vasp.outputs import BSVasprun, Vasprun\n\nfrom bayes_opt import UtilityFunction\nfrom bayes_opt import BayesianOptimization\nfrom string import ascii_lowercase\nfrom BayesOpt4dftu.special_kpath import kpath_dict\n\nfrom vaspvis import Band\nfrom vaspvis.utils import get_bandgap\n\nfrom matplotlib import pyplot as plt\nfrom matplotlib import cm, gridspec\n\n# TODO: 1. SCF calculation in DFT+U missing U tags in INCAR.\n# 2. Check whether the U value has an duplicate in u.txt.\n# 3. Modify the BO for multi-U condition (More than 2 U values need to be optimized).\n# 4. 
Fix the bug that code output incorrect U in when U values are optimized for elements without the first one.\n\n\ndef readgap(vasprun, kpoints):\n run = BSVasprun(vasprun)\n bs = run.get_band_structure(kpoints)\n if (bs.is_metal() == False):\n return bs.get_cbm()['energy']-bs.get_vbm()['energy']\n else:\n return 0\n\n\nclass vasp_init(object):\n def __init__(self, input_path):\n with open(input_path, 'r') as f:\n self.input_dict = json.load(f)\n self.struct_info = self.input_dict['structure_info']\n self.general_flags = self.input_dict['general_flags']\n self.atoms = None\n\n def init_atoms(self):\n lattice_param = self.struct_info['lattice_param']\n cell = np.array(self.struct_info['cell'])\n self.atoms = Atoms(cell=cell*lattice_param)\n for atom in self.struct_info['atoms']:\n self.atoms.append(Atom(atom[0], atom[1], magmom=atom[2]))\n\n return self.atoms\n\n def modify_poscar(self, path='./'):\n with open(path + '/POSCAR', 'r') as f:\n poscar = f.readlines()\n poscar[7] = 'Direct\\n'\n f.close()\n\n with open(path + '/POSCAR', 'w') as d:\n d.writelines(poscar)\n d.close()\n\n def kpt4pbeband(self, path, import_kpath):\n if import_kpath:\n special_kpoints = kpath_dict\n else:\n special_kpoints = get_special_points(self.atoms.cell)\n\n num_kpts = self.struct_info['num_kpts']\n labels = self.struct_info['kpath']\n kptset = list()\n lbs = list()\n if labels[0] in special_kpoints.keys():\n kptset.append(special_kpoints[labels[0]])\n lbs.append(labels[0])\n\n for i in range(1, len(labels)-1):\n if labels[i] in special_kpoints.keys():\n kptset.append(special_kpoints[labels[i]])\n lbs.append(labels[i])\n kptset.append(special_kpoints[labels[i]])\n lbs.append(labels[i])\n if labels[-1] in special_kpoints.keys():\n kptset.append(special_kpoints[labels[-1]])\n lbs.append(labels[-1])\n\n # Hardcoded for EuS and EuTe since one of the k-point is not in the special kpoints list.\n if 'EuS' in self.atoms.symbols or 'EuTe' in self.atoms.symbols:\n kptset[0] = np.array([0.5, 0.5, 
1])\n\n kpt = Kpoints(comment='band', kpts=kptset, num_kpts=num_kpts,\n style='Line_mode', coord_type=\"Reciprocal\", labels=lbs)\n kpt.write_file(path+'/KPOINTS')\n\n def kpt4hseband(self, path, import_kpath):\n ibz = open(path+'/IBZKPT', 'r')\n num_kpts = self.struct_info['num_kpts']\n labels = self.struct_info['kpath']\n ibzlist = ibz.readlines()\n ibzlist[1] = str(num_kpts*(len(labels)-1) +\n int(ibzlist[1].split('\\n')[0])) + '\\n'\n if import_kpath:\n special_kpoints = kpath_dict\n else:\n special_kpoints = get_special_points(self.atoms.cell)\n for i in range(len(labels)-1):\n k_head = special_kpoints[labels[i]]\n k_tail = special_kpoints[labels[i+1]]\n increment = (k_tail-k_head)/(num_kpts-1)\n ibzlist.append(' '.join(map(str, k_head)) +\n ' 0 ' + labels[i] + '\\n')\n for j in range(1, num_kpts-1):\n k_next = k_head + increment*j\n ibzlist.append(' '.join(map(str, k_next)) + ' 0\\n')\n ibzlist.append(' '.join(map(str, k_tail)) +\n ' 0 ' + labels[i+1] + '\\n')\n with open(path+'/KPOINTS', 'w') as f:\n f.writelines(ibzlist)\n\n def generate_input(self, directory, step, xc, import_kpath):\n flags = {}\n flags.update(self.general_flags)\n flags.update(self.input_dict[step])\n if step == 'scf':\n if xc == 'pbe':\n flags.update(self.input_dict[xc])\n calc = Vasp(self.atoms, directory=directory,\n kpts=self.struct_info['kgrid_'+xc], gamma=True, **flags)\n calc.write_input(self.atoms)\n if str(self.atoms.symbols) in ['Ni2O2']:\n mom_list = {'Ni': 2, 'Mn': 5, 'Co': 3, 'Fe': 4}\n s = str(self.atoms.symbols[0])\n incar_scf = Incar.from_file(directory+'/INCAR')\n incar_scf['MAGMOM'] = '%s -%s 0 0' % (mom_list[s], mom_list[s])\n incar_scf.write_file(directory+'/INCAR')\n\n self.modify_poscar(path=directory)\n elif step == 'band':\n flags.update(self.input_dict[xc])\n calc = Vasp(self.atoms, directory=directory, gamma=True, **flags)\n calc.write_input(self.atoms)\n self.modify_poscar(path=directory)\n if xc == 'pbe':\n self.kpt4pbeband(directory, import_kpath)\n elif xc 
== 'hse':\n self.kpt4hseband(directory, import_kpath)\n\n\nclass delta_band(object):\n def __init__(self, bandrange=10, path='./', iteration=1, interpolate=False):\n self.path = path\n self.br = bandrange\n self.interpolate = interpolate\n self.vasprun_hse = os.path.join(path, 'hse/band/vasprun.xml')\n self.kpoints_hse = os.path.join(path, 'hse/band/KPOINTS')\n self.vasprun_dftu = os.path.join(path, 'dftu/band/vasprun.xml')\n self.kpoints_dftu = os.path.join(path, 'dftu/band/KPOINTS')\n self.iteration = iteration\n\n def readInfo(self, filepath):\n tree = ET.parse(filepath)\n root = tree.getroot()\n ispin = int(root.findall(\n './parameters/separator/.[@name=\"electronic\"]/separator/.[@name=\"electronic spin\"]/i/.[@name=\"ISPIN\"]')[0].text)\n nbands = int(root.findall(\n './parameters/separator/.[@name=\"electronic\"]/i/.[@name=\"NBANDS\"]')[0].text)\n nkpts = len(root.findall('./kpoints/varray/.[@name=\"kpointlist\"]/v'))\n\n return ispin, nbands, nkpts\n\n def access_eigen(self, b, interpolate=False):\n wave_vectors = b._get_k_distance()\n eigenvalues = b.eigenvalues\n\n if interpolate:\n _, eigenvalues_interp = b._get_interpolated_data(\n wave_vectors=wave_vectors,\n data=eigenvalues\n )\n\n if interpolate:\n return eigenvalues_interp\n else:\n return eigenvalues\n\n def locate_and_shift_bands(self, eigenvalues):\n band_mean = eigenvalues.mean(axis=1)\n\n below_index = np.where(band_mean < 0)[0]\n above_index = np.where(band_mean >= 0)[0]\n\n vbm = np.max(eigenvalues[below_index])\n cbm = np.min(eigenvalues[above_index])\n\n if cbm < vbm:\n vbm = 0.0\n cbm = 0.0\n\n valence_bands = eigenvalues[below_index[-self.br:]]\n conduction_bands = eigenvalues[above_index[:self.br]]\n\n valence_bands -= vbm\n conduction_bands -= cbm\n\n shifted_bands = np.r_[conduction_bands, valence_bands]\n\n return shifted_bands\n\n def deltaBand(self):\n ispin_hse, nbands_hse, nkpts_hse = self.readInfo(self.vasprun_hse)\n ispin_dftu, nbands_dftu, nkpts_dftu = 
self.readInfo(self.vasprun_dftu)\n\n \n if nbands_hse != nbands_dftu:\n raise Exception('The band number of HSE and GGA+U are not match!')\n\n kpoints = [line for line in open(self.kpoints_hse) if line.strip()]\n kpts_diff = 0\n for ii, line in enumerate(kpoints[3:]):\n if line.split()[3] != '0':\n kpts_diff += 1\n\n if nkpts_hse - kpts_diff != nkpts_dftu:\n raise Exception(\n 'The kpoints number of HSE and GGA+U are not match!')\n\n new_n = 500\n\n if ispin_hse == 1 and ispin_dftu == 1:\n band_hse = Band(\n folder=os.path.join(self.path, 'hse/band'),\n spin='up',\n interpolate=self.interpolate,\n new_n=new_n,\n projected=False,\n )\n band_dftu = Band(\n folder=os.path.join(self.path, 'dftu/band'),\n spin='up',\n interpolate=self.interpolate,\n new_n=new_n,\n projected=False,\n bandgap=True,\n printbg=False,\n )\n\n eigenvalues_hse = self.access_eigen(band_hse, interpolate=self.interpolate)\n eigenvalues_dftu = self.access_eigen(band_dftu, interpolate=self.interpolate)\n\n shifted_hse = self.locate_and_shift_bands(eigenvalues_hse)\n shifted_dftu = self.locate_and_shift_bands(eigenvalues_dftu)\n\n n = shifted_hse.shape[0] * shifted_hse.shape[1]\n delta_band = sum((1/n)*sum((shifted_hse - shifted_dftu)**2))**(1/2)\n\n bg = get_bandgap(\n folder=os.path.join(self.path, 'dftu/band'),\n printbg=False,\n method=1,\n spin='both',\n )\n\n incar = Incar.from_file('./dftu/band/INCAR')\n u = incar['LDAUU']\n u.append(bg)\n u.append(delta_band)\n output = ' '.join(str(x) for x in u)\n\n with open('u_tmp.txt', 'a') as f:\n f.write(output + '\\n')\n f.close\n\n return delta_band\n\n elif ispin_hse == 2 and ispin_dftu == 2:\n band_hse_up = Band(\n folder=os.path.join(self.path, 'hse/band'),\n spin='up',\n interpolate=self.interpolate,\n new_n=new_n,\n projected=False,\n )\n\n band_dftu_up = Band(\n folder=os.path.join(self.path, 'dftu/band'),\n spin='up',\n interpolate=self.interpolate,\n new_n=new_n,\n projected=False,\n bandgap=True,\n printbg=False,\n )\n\n band_hse_down = 
Band(\n folder=os.path.join(self.path, 'hse/band'),\n spin='down',\n interpolate=self.interpolate,\n new_n=new_n,\n projected=False,\n )\n\n band_dftu_down = Band(\n folder=os.path.join(self.path, 'dftu/band'),\n spin='down',\n interpolate=self.interpolate,\n new_n=new_n,\n projected=False,\n )\n\n eigenvalues_hse_up = self.access_eigen(band_hse_up, interpolate=self.interpolate)\n eigenvalues_dftu_up = self.access_eigen(band_dftu_up, interpolate=self.interpolate)\n\n shifted_hse_up = self.locate_and_shift_bands(eigenvalues_hse_up)\n shifted_dftu_up = self.locate_and_shift_bands(eigenvalues_dftu_up)\n\n n_up = shifted_hse_up.shape[0] * shifted_hse_up.shape[1]\n delta_band_up = sum((1/n_up)*sum((shifted_hse_up - shifted_dftu_up)**2))**(1/2)\n\n eigenvalues_hse_down = self.access_eigen(band_hse_down, interpolate=self.interpolate)\n eigenvalues_dftu_down = self.access_eigen(band_dftu_down, interpolate=self.interpolate)\n\n shifted_hse_down = self.locate_and_shift_bands(eigenvalues_hse_down)\n shifted_dftu_down = self.locate_and_shift_bands(eigenvalues_dftu_down)\n\n n_down = shifted_hse_down.shape[0] * shifted_hse_down.shape[1]\n delta_band_down = sum((1/n_down)*sum((shifted_hse_down - shifted_dftu_down)**2))**(1/2)\n\n delta_band = np.mean([delta_band_up, delta_band_down])\n\n bg = get_bandgap(\n folder=os.path.join(self.path, 'dftu/band'),\n printbg=False,\n method=1,\n spin='both',\n )\n\n incar = Incar.from_file('./dftu/band/INCAR')\n u = incar['LDAUU']\n\n u.append(bg)\n u.append(delta_band)\n output = ' '.join(str(x) for x in u)\n\n with open('u_tmp.txt', 'a') as f:\n f.write(output + '\\n')\n f.close\n\n return delta_band\n else:\n raise Exception('The spin number of HSE and GGA+U are not match!')\n\n\nclass bayesOpt_DFTU(object):\n def __init__(self, path, opt_u_index=(1, 1), u_range=(0, 10), kappa=2.5, alpha_1=1, alpha_2=1):\n self.input = path + 'u_tmp.txt'\n self.gap = readgap(path + '/hse/band/vasprun.xml',\n path + '/hse/band/KPOINTS')\n self.kappa = 
kappa\n self.a1 = alpha_1\n self.a2 = alpha_2\n self.opt_u_index = np.array(opt_u_index) > 0\n self.u_range = u_range\n self.elements = {}\n\n def loss(self, y, y_hat, delta_band, alpha_1, alpha_2):\n return -alpha_1 * (y - y_hat) ** 2 - alpha_2 * delta_band ** 2\n \n def get_optimizer(self):\n data = pd.read_csv(self.input, header=0,\n delimiter=\"\\s\", engine='python')\n num_rows, d = data.shape\n num_variables = sum(self.opt_u_index)\n if num_variables > 2:\n raise ValueError(\"BO larger than 2D are not supported yet!\")\n variables_string = ascii_lowercase[:num_variables]\n pbounds = {}\n if num_variables == 1:\n pbounds[variables_string[0]] = self.u_range\n elif num_variables == 2:\n for variable in variables_string:\n pbounds[variable] = self.u_range\n utility_function = UtilityFunction(kind=\"ucb\", kappa=self.kappa, xi=0)\n optimizer = BayesianOptimization(\n f=None,\n pbounds=pbounds,\n verbose=2,\n random_state=1,\n )\n \n check_duplicates = []\n for i in range(num_rows):\n values = list()\n for j in range(len(self.opt_u_index)):\n if self.opt_u_index[j]:\n values.append(data.iloc[i][j])\n # Avoid duplicates in the sample space.\n if values in check_duplicates:\n values[0] = values[0] + 0.001\n check_duplicates.append(values)\n\n params = {}\n for (value, variable) in zip(values, variables_string):\n params[variable] = value\n target = self.loss(self.gap, list(\n data.iloc[i])[-2], list(data.iloc[i])[-1], self.a1, self.a2)\n\n optimizer.register(\n params=params,\n target=target,\n )\n \n return utility_function, optimizer, target\n\n def plot_bo(self, ratio=1):\n utility_function, optimizer, target = self.get_optimizer()\n plot_size = len(optimizer.res)*ratio\n opt_eles = [ele for i, ele in enumerate(self.elements) if self.opt_u_index[i]]\n\n if sum(self.opt_u_index) == 1:\n x = np.linspace(self.u_range[0], self.u_range[1], 10000).reshape(-1, 1)\n x_obs = np.array([res[\"params\"]['a'] for res in optimizer.res]).reshape(-1,1)[:plot_size]\n y_obs = 
np.array([res[\"target\"] for res in optimizer.res])[:plot_size]\n\n mu, sigma = posterior(optimizer, x_obs, y_obs, x)\n\n fig = plt.figure()\n gs = gridspec.GridSpec(2, 1) \n axis = plt.subplot(gs[0])\n acq = plt.subplot(gs[1])\n axis.plot(x_obs.flatten(), y_obs, 'D', markersize=8, label=u'Observations', color='r')\n axis.plot(x, mu, '--', color='k', label='Prediction')\n axis.fill(np.concatenate([x, x[::-1]]), \n np.concatenate([mu - 1.9600 * sigma, (mu + 1.9600 * sigma)[::-1]]),\n alpha=.6, fc='c', ec='None', label='95% confidence interval')\n \n axis.set_xlim(self.u_range)\n axis.set_ylim((None, None))\n axis.set_ylabel('f(x)')\n\n acq.plot(x, utility_function, label='Acquisition Function', color='purple')\n acq.plot(x[np.argmax(utility_function)], np.max(utility_function), '*', markersize=15, \n label=u'Next Best Guess', markerfacecolor='gold', markeredgecolor='k', markeredgewidth=1)\n acq.set_xlim(self.u_range)\n acq.set_ylim((np.min(utility_function)-0.5,np.max(utility_function)+0.5))\n acq.set_ylabel('Acquisition')\n acq.set_xlabel('U (eV)')\n axis.legend(loc=4, borderaxespad=0.)\n acq.legend(loc=4, borderaxespad=0.)\n\n plt.savefig('1D_kappa_%s_a1_%s_a2_%s.png' %(self.kappa, self.a1, self.a2), dpi = 400)\n\n if sum(self.opt_u_index) == 2:\n x = y = np.linspace(self.u_range[0], self.u_range[1], 300)\n X, Y = np.meshgrid(x, y)\n x = X.ravel()\n y = Y.ravel()\n X = np.vstack([x, y]).T[:, [1, 0]]\n\n x1_obs = np.array([[res[\"params\"][\"a\"]] for res in optimizer.res])[:plot_size]\n x2_obs = np.array([[res[\"params\"][\"b\"]] for res in optimizer.res])[:plot_size]\n y_obs = np.array([res[\"target\"] for res in optimizer.res])[:plot_size]\n obs = np.column_stack((x1_obs, x2_obs))\n\n optimizer._gp.fit(obs, y_obs)\n mu, sigma = optimizer._gp.predict(X, eval)\n\n fig, axis = plt.subplots(1, 2, figsize=(15,5))\n plt.subplots_adjust(wspace = 0.2)\n \n axis[0].plot(x1_obs, x2_obs, 'D', markersize=4, color='k', label='Observations')\n axis[0].set_title('Gaussian 
Process Predicted Mean',pad=10)\n im1 = axis[0].hexbin(y, x, C=mu, cmap=cm.jet, bins=None)\n axis[0].axis([x.min(), x.max(), y.min(), y.max()])\n axis[0].set_xlabel('U_%s (eV)' %opt_eles[0],labelpad=5)\n axis[0].set_ylabel('U_%s (eV)' %opt_eles[1],labelpad=10,va='center')\n cbar1 = plt.colorbar(im1, ax = axis[0])\n\n utility = utility_function.utility(X, optimizer._gp, optimizer.max)\n axis[1].plot(x1_obs, x2_obs, 'D', markersize=4, color='k', label='Observations')\n axis[1].set_title('Acquisition Function',pad=10)\n axis[1].set_xlabel('U_%s (eV)' %opt_eles[0],labelpad=5)\n axis[1].set_ylabel('U_%s (eV)' %opt_eles[1],labelpad=10,va='center')\n im3 = axis[1].hexbin(y, x, C=utility, cmap=cm.jet, bins=None)\n axis[1].axis([x.min(), x.max(), y.min(), y.max()])\n cbar3 = plt.colorbar(im3, ax = axis[1])\n\n plt.savefig('2D_kappa_%s_a1_%s_a2_%s.png' %(self.kappa, self.a1, self.a2), dpi = 400)\n\n\n def bo(self):\n utility_function, optimizer, target = self.get_optimizer()\n next_point_to_probe = optimizer.suggest(utility_function)\n\n points = list(next_point_to_probe.values())\n points = [round(elem, 6) for elem in points]\n\n U = [str(x) for x in points]\n with open('input.json', 'r') as f:\n data = json.load(f)\n elements = list(data[\"pbe\"][\"ldau_luj\"].keys())\n self.elements = elements\n for i in range(len(self.opt_u_index)):\n if self.opt_u_index[i]:\n try:\n data[\"pbe\"][\"ldau_luj\"][elements[i]\n ][\"U\"] = round(float(U[i]), 6)\n except:\n data[\"pbe\"][\"ldau_luj\"][elements[i]\n ][\"U\"] = round(float(U[i-1]), 6)\n f.close()\n\n with open('input.json', 'w') as f:\n json.dump(data, f, indent=4)\n f.close()\n\n return target\n\n\ndef calculate(command, outfilename, method, import_kpath):\n olddir = os.getcwd()\n calc = vasp_init(olddir+'/input.json')\n calc.init_atoms()\n\n if method == 'dftu':\n calc.generate_input(olddir+'/%s/scf' %\n method, 'scf', 'pbe', import_kpath)\n calc.generate_input(olddir+'/%s/band' %\n method, 'band', 'pbe', import_kpath)\n\n if 
os.path.isfile(f'{olddir}/{method}/band/eigenvalues.npy'):\n os.remove(f'{olddir}/{method}/band/eigenvalues.npy')\n\n elif method == 'hse':\n calc.generate_input(olddir+'/%s/scf' %\n method, 'scf', 'hse', import_kpath)\n if not os.path.exists(olddir+'/%s/band' % method):\n os.mkdir(olddir+'/%s/band' % method)\n\n try:\n os.chdir(olddir+'/%s/scf' % method)\n errorcode_scf = subprocess.call(\n '%s > %s' % (command, outfilename), shell=True)\n os.system('cp CHG* WAVECAR IBZKPT %s/%s/band' % (olddir, method))\n if method == 'hse':\n calc.generate_input(olddir+'/%s/band' %\n method, 'band', 'hse', import_kpath)\n finally:\n os.chdir(olddir+'/%s/band' % method)\n errorcode_band = subprocess.call(\n '%s > %s' % (command, outfilename), shell=True)\n os.chdir(olddir)\n" }, { "alpha_fraction": 0.6330645084381104, "alphanum_fraction": 0.6451612710952759, "avg_line_length": 28.176469802856445, "blob_id": "9d34f3e0d45c5ad517defcd3b8e8c9f08b314954", "content_id": "04dc1fbd1aa57a5a6d28500e6966881f1a44919a", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 496, "license_type": "permissive", "max_line_length": 95, "num_lines": 17, "path": "/setup.py", "repo_name": "lingzhic/BayesianOpt4dftu", "src_encoding": "UTF-8", "text": "from setuptools import setup\n\nwith open(\"README.md\", \"r\") as fh:\n long_description = fh.read()\n\nsetup(\n name='BayesOpt4dftu',\n version='0.1.0',\n# description='???',\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n# author='???',\n# author_email=\"???\",\n url='https://github.com/maituoy/BayesianOpt4dftu',\n packages=['BayesOpt4dftu'],\n install_requires=['numpy', 'ase', 'pymatgen', 'bayesian-optimization', 'pandas','vaspvis'],\n)\n" }, { "alpha_fraction": 0.6371463537216187, "alphanum_fraction": 0.6527265310287476, "avg_line_length": 34.77941131591797, "blob_id": "2b6ff58f583ece341f6ed32d49aae236c84b0b6a", "content_id": 
"5404f98371cd1038a149175a7aa0d419b47a2748", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2439, "license_type": "permissive", "max_line_length": 112, "num_lines": 68, "path": "/example/1d/example.py", "repo_name": "lingzhic/BayesianOpt4dftu", "src_encoding": "UTF-8", "text": "from BayesOpt4dftu.core import *\nimport os\nimport argparse\n\n# Command to run VASP executable.\nVASP_RUN_COMMAND = 'mpirun -np 56 vasp_ncl'\n# Define the name for output file.\nOUTFILENAME = 'vasp.out'\n# Define the path direct to the VASP pesudopotential.\nVASP_PP_PATH = '/home/maituoy/pp_vasp/'\n\n\ndef parse_argument():\n \"\"\"\n kappa: The parameter to control exploration and exploitation.\n exploitation 0 <-- kappa --> 10 exploration\n\n alpha1: Weight coefficient of band gap in the objective function.\n\n alpha2: Weight coefficient of delta band in the objective function.\n \n threshold: Convergence threshold of Bayesian optimization process.\n \"\"\"\n parser = argparse.ArgumentParser(description='params')\n parser.add_argument('--which_u', dest='which_u', type=tuple, default=(1,0))\n parser.add_argument('--br', dest='br', type=int, default=4)\n parser.add_argument('--kappa', dest='kappa', type=float, default=2.5)\n parser.add_argument('--alpha1', dest='alpha1', type=float, default=1)\n parser.add_argument('--alpha2', dest='alpha2', type=float, default=1)\n parser.add_argument('--threshold', dest='threshold', type=float, default=0.001)\n parser.add_argument('--urange', dest='urange', type=tuple, default=(-10,10))\n parser.add_argument('--import_kpath', dest='import_kpath', type=bool, default=False)\n\n return parser.parse_args()\n\ndef main():\n args = parse_argument()\n k = args.kappa\n a1 = args.alpha1\n a2 = args.alpha2\n which_u = tuple(int(x) for x in args.which_u)\n urange = tuple(float(x) for x in args.urange)\n br = args.br\n import_kpath = args.import_kpath\n\n os.environ['VASP_PP_PATH'] = VASP_PP_PATH\n\n 
calculate(command=VASP_RUN_COMMAND, outfilename=OUTFILENAME, method='hse', import_kpath = import_kpath)\n \n if os.path.exists('u.txt'):\n os.remove('u.txt')\n\n obj = 0 \n threshold = args.threshold\n for i in range(50):\n calculate(command=VASP_RUN_COMMAND, outfilename=OUTFILENAME, method='dftu', import_kpath = import_kpath)\n db = delta_band(bandrange=br, path='./')\n db.deltaBand()\n\n bayesianOpt = bayesOpt_DFTU(path='./', kappa=k, alpha_1=a1, alpha_2=a2 )\n obj_next = bayesianOpt.bo(which_u, urange)\n if abs(obj_next - obj) <= threshold:\n print(\"Optimization has been finished!\")\n break\n obj = obj_next \n\nif __name__ == \"__main__\":\n main()\n \n\n" }, { "alpha_fraction": 0.6669233441352844, "alphanum_fraction": 0.684636116027832, "avg_line_length": 32.25640869140625, "blob_id": "7d7ab9c6dba7af24e337535527c7ae6842ae3880", "content_id": "113d60602573c5ee625d48c1d157772075aeb1ab", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2597, "license_type": "permissive", "max_line_length": 111, "num_lines": 78, "path": "/example/2d/example.py", "repo_name": "lingzhic/BayesianOpt4dftu", "src_encoding": "UTF-8", "text": "from BayesOpt4dftu.core import *\nimport os\nimport argparse\n\n# Command to run VASP executable.\nVASP_RUN_COMMAND = 'mpirun -np 56 vasp_ncl'\n# Define the name for output file.\nOUTFILENAME = 'vasp.out'\n# Define the path direct to the VASP pesudopotential.\nVASP_PP_PATH = '/home/maituoy/pp_vasp/'\n\n\ndef parse_argument():\n\t\"\"\"\n\tkappa: The parameter to control exploration and exploitation.\n\t\t exploitation 0 <-- kappa --> 10 exploration\n\n\talpha1: Weight coefficient of band gap in the objective function.\n\n\talpha2: Weight coefficient of delta band in the objective function.\n\t\n\tthreshold: Convergence threshold of Bayesian optimization process.\n\t\"\"\"\n\tparser = argparse.ArgumentParser(description='params')\n\tparser.add_argument('--which_u', dest='which_u', 
type=tuple, default=(1,1))\n\tparser.add_argument('--br', dest='br', type=int, default=5)\n\tparser.add_argument('--kappa', dest='kappa', type=float, default=7.5)\n\tparser.add_argument('--alpha1', dest='alpha1', type=float, default=0.5)\n\tparser.add_argument('--alpha2', dest='alpha2', type=float, default=0.5)\n\tparser.add_argument('--threshold', dest='threshold', type=float, default=0.0001)\n\tparser.add_argument('--urange', dest='urange', type=tuple, default=(-10,10))\n\tparser.add_argument('--import_kpath', dest='import_kpath', type=bool, default=False)\n\n\treturn parser.parse_args()\n\ndef main():\n\targs = parse_argument()\n\tk = args.kappa\n\ta1 = args.alpha1\n\ta2 = args.alpha2\n\twhich_u = tuple(int(x) for x in args.which_u)\n\turange = tuple(float(x) for x in args.urange)\n\tbr = args.br\n\timport_kpath = args.import_kpath\n\n\tos.environ['VASP_PP_PATH'] = VASP_PP_PATH\n\n\tcalculate(command=VASP_RUN_COMMAND, outfilename=OUTFILENAME, method='hse', import_kpath = import_kpath)\n\t\n\theader = []\n\tfor i, u in enumerate(which_u):\n\t\theader.append('U_ele_%s' % str(i+1))\n\t\n\tif os.path.exists('./u_tmp.txt'):\n\t\tos.remove('./u_tmp.txt')\n\t\t\n\twith open('./u_tmp.txt', 'w+') as f:\n\t\tf.write('%s band_gap(eV) delta_band(eV) \\n' % (' '.join(header)))\n\n\tobj = 0 \n\tthreshold = args.threshold\n\tfor i in range(50):\n\t\tcalculate(command=VASP_RUN_COMMAND, outfilename=OUTFILENAME, method='dftu', import_kpath = import_kpath)\n\t\tdb = delta_band(bandrange=br, path='./')\n\t\tdb.deltaBand()\n\t\t\n\t\tbayesianOpt = bayesOpt_DFTU(path='./', opt_u_index=which_u, u_range=urange, kappa=k, alpha_1=a1, alpha_2=a2 )\n\t\tobj_next = bayesianOpt.bo()\n\t\tif abs(obj_next - obj) <= threshold:\n\t\t\tprint(\"Optimization has been finished!\")\n\t\t\tbreak\n\t\tobj = obj_next \n\n\tbayesianOpt.plot_bo() \n\tos.system('mv ./u_tmp.txt ./u_kappa_%s_a1_%s_a2_%s.txt' %(k, a1, a2)) \n\nif __name__ == \"__main__\":\n\tmain()\n\t\n\n" }, { "alpha_fraction": 
0.7489905953407288, "alphanum_fraction": 0.7637954354286194, "avg_line_length": 36.125, "blob_id": "167193e9702c679bef2aa7d6856bd2b57d57b4df", "content_id": "d1a2ff4272e47c2eb9e85e845e2db51bdf2b9e6d", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1488, "license_type": "permissive", "max_line_length": 160, "num_lines": 40, "path": "/README.md", "repo_name": "lingzhic/BayesianOpt4dftu", "src_encoding": "UTF-8", "text": "# BayesianOpt4dftu #\n\nThis code determines the Hubbard U parameters in DFT+U via Bayesian Optimization approach.\n\n## Requirements ##\n\n1. Python 3.6+\n2. NumPy\n3. Pandas\n4. ASE (https://wiki.fysik.dtu.dk/ase/)\n5. pymatgen (https://pymatgen.org/)\n6. bayesian-optimization https://github.com/fmfn/BayesianOptimization\n7. Vienna Ab initio Simulation Package (VASP) https://www.vasp.at/\n\n## Set up the input file (input.json) before running the code \n\nThe input file contains these parts:\n- structure_info : Includes geometry information (such as lattice parameter, lattice vectors, atomic position, etc) of the \ntarget materials.\n- general_flags: Includes general flags required in the VASP calculation.\n- scf: Flags required particularly in SCF calculation.\n- band: Flags required particularly in band structure calculation.\n- pbe: Flags required when using PBE as exchange-correlation functional.\n- hse: Flags required when using HSE06 as exchange-correlation functional.\nThe flags can be added or removed. More flag keys can be found in the ASE VASP calculator.\n\n## Installation\n\n* `pip install BayesOpt4dftu`\n\n## Usage\nBefore running, change the environment variables VASP_RUN_COMMAND, OUTFILENAME, and VASP_PP_PATH.\n\n* `cd example/`\n* `python ./example.py`\n\n## Citation\nPlease cite the following work if you use this code.\n\n[1] M. Yu, S. Yang, C. Wu, N. 
Marom, Machine learning the Hubbard U parameter in DFT+ U using Bayesian optimization, npj Computational Materials, 6(1):1–6, 2020.\n\n" } ]
5
kiss8981/nuggulman
https://github.com/kiss8981/nuggulman
1eea1df6f4efbaed157ae19c24fdf78ee2654b22
890b6ddd58d3c882475decc85dcb3ec43fb463e5
94a9ae1c6a3a3aed738dd589204a91cead1c3287
refs/heads/master
2020-06-25T04:30:35.657192
2019-07-28T13:28:46
2019-07-28T13:28:46
199,202,236
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5535714030265808, "alphanum_fraction": 0.5535714030265808, "avg_line_length": 27, "blob_id": "f9de3fda47d78243ea6547ec3c0ad0af909e62dc", "content_id": "713b8bb0399a0250dda3959a4e476e0f11e26704", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 112, "license_type": "no_license", "max_line_length": 42, "num_lines": 4, "path": "/general_settings.py", "repo_name": "kiss8981/nuggulman", "src_encoding": "UTF-8", "text": "class Settings:\n def __init__(self):\n self.token = \"NjA0NzE4NDIzMzM0NzE1NDIz.XT2e0w.Qp-s7ivn5lNkKwmS8aS4veo4tTM\"\n self.clientPassword = \"850501\"\n" } ]
1
eriixon/telegram-bot
https://github.com/eriixon/telegram-bot
f4c19d437ec152054e3bcc877142ee3698faeb42
1c498e07a7081b1e9a6ee36355b0f926c3755be2
60364a20253723a181636acc7fc6611efc317a83
refs/heads/master
2022-12-12T08:26:54.029345
2018-10-19T18:09:28
2018-10-19T18:09:28
153,727,639
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5935114622116089, "alphanum_fraction": 0.5935114622116089, "avg_line_length": 42.58333206176758, "blob_id": "8a837a060807e9439826708c9c6686dad3d49759", "content_id": "9f8c30b904ca6e46f744d3b2c1f8fc91c1ddda14", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 524, "license_type": "no_license", "max_line_length": 104, "num_lines": 12, "path": "/content.py", "repo_name": "eriixon/telegram-bot", "src_encoding": "UTF-8", "text": "content_list = {\n \"start\": \"I can help you create your own list of good places. To get a list of command input /help\",\n \"help\": \"List of commands:\\n\"\n \"/add - help you to add a place\\n\"\n \"/list - get a list of your saved places\\n\"\n \"/remove - delete a list of your places\",\n \"add\": \"Great! Give me a name of the place\",\n \"list\": \"Here is the list of your places\",\n \"remove\": \"Are you sure?\",\n \"removed\": \"Your list is empty\",\n \"cancel_remove\": \"Your list is still here\"\n}\n\n" }, { "alpha_fraction": 0.6831181049346924, "alphanum_fraction": 0.6849631071090698, "avg_line_length": 23.88505744934082, "blob_id": "d729b4f7875914dcc260297bc3d2016194c72c25", "content_id": "bd3f0e0237ea37378cf5b7203b1c64b5d4454dcd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2168, "license_type": "no_license", "max_line_length": 120, "num_lines": 87, "path": "/bot.py", "repo_name": "eriixon/telegram-bot", "src_encoding": "UTF-8", "text": "import os\nimport json\nimport tornado.ioloop\nimport tornado.web\nimport telebot\nfrom telebot import types\nfrom content import content_list\n\n\ndef get_token():\n with open('token.json') as data:\n jdata = json.load(data)\n return jdata[\"token\"]\n\t\n\ntoken = os.environ.get('TOKEN', get_token())\nbot = telebot.TeleBot(token)\n\n\ndef get_keyboard():\n kb = types.InlineKeyboardMarkup()\n kb.add(types.InlineKeyboardButton('Yes', 
callback_data='yes'), types.InlineKeyboardButton('No', callback_data='no'))\n return kb\n\n@bot.message_handler(commands=['start'])\ndef send_welcome(message):\n text = \"Hello {}! {}\".format(message.chat.first_name, content_list[\"start\"])\n bot.send_message(message.chat.id, text)\n\n\n@bot.message_handler(commands=['help'])\ndef send_help(message):\n text = content_list[\"help\"]\n bot.send_message(message.chat.id, text)\n\n\n@bot.message_handler(commands=['add'])\ndef add_place(message):\n text = content_list[\"add\"]\n # TODO add a row to DB\n bot.send_message(message.chat.id, text)\n\n\n@bot.message_handler(commands=['list'])\ndef list_places(message):\n text = content_list[\"list\"]\n # TODO get all rows in DB\n bot.send_message(message.chat.id, text)\n\n\n@bot.message_handler(commands=['remove'])\ndef remove_list(message):\n text = content_list[\"remove\"]\n bot.send_message(message.chat.id, text, reply_markup=get_keyboard())\n\n\n@bot.message_handler()\ndef handle_message(message):\n bot.send_message(message.chat.id, message.text)\n\n\n@bot.callback_query_handler(func=lambda x: True)\ndef callback_handler(callback):\n message = callback.message\n response = callback.data\n if response == 'yes':\n # TODO remove all rows in DB\n bot.send_message(message.chat.id, content_list[\"removed\"])\n else:\n bot.send_message(message.chat.id, content_list[\"cancel_remove\"])\n\n\nbot.polling()\n\n\nclass MainHandler(tornado.web.RequestHandler):\n def get(self):\n self.write(\"Hello, world\")\n\n\napplication = tornado.web.Application([(r\"/\", MainHandler)])\n\n\nif __name__ == \"__main__\":\n port = int(os.environ.get(\"PORT\", 5000))\n application.listen(port)\n tornado.ioloop.IOLoop.instance().start()\n\n\n\n" } ]
2
eractnodi/MontyHall
https://github.com/eractnodi/MontyHall
91f7daf5d3c733263a6de7ad8531b1a08c9c4316
4fd494e1ea6dd69aa96ebf776a8a364843ea07c6
753c6a28312ab3e9fcbdd6b87dd2d740a08d204b
refs/heads/master
2016-09-05T17:16:03.912323
2015-02-22T20:07:03
2015-02-22T20:07:03
31,177,566
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7080344557762146, "alphanum_fraction": 0.7209469079971313, "avg_line_length": 30.409090042114258, "blob_id": "a72a6b3826e2d92cc34c95edd639f5567c2f5d32", "content_id": "a1e00f42b725f185e1fc40d71520cb01b201db5a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1394, "license_type": "no_license", "max_line_length": 125, "num_lines": 44, "path": "/MontyHall.py", "repo_name": "eractnodi/MontyHall", "src_encoding": "UTF-8", "text": "from itertools import permutations\nfrom random import randint\n\n#copied this from the interwebs\ndef remove_duplicates(values):\n output = []\n seen = set()\n for value in values:\n # If value has not been encountered yet,\n # ... add it to both list and set.\n if value not in seen:\n output.append(value)\n seen.add(value)\n return output\n\npossiblePrizesList = ['car', 'goat', 'goat']\n\niterable = possiblePrizesList\n\npossiblePrizePermutationsList = []\n\nfor prizePermutation in permutations(iterable, r=None):\n possiblePrizePermutationsList.append(prizePermutation)\n\npossiblePrizePermutationsList = remove_duplicates(possiblePrizePermutationsList)\n\nnoSwitchCount = 0\nswitchCount = 0\n\ncount = 0\nwhile count < 1000:\n currentDoorPermutation = possiblePrizePermutationsList[randint(0,len(possiblePrizePermutationsList)-1)]\n contestantChoosesInt = randint(0,2)\n montyRevealsInt = -1\n while montyRevealsInt == contestantChoosesInt or currentDoorPermutation[montyRevealsInt] == 'car' or montyRevealsInt < 0:\n montyRevealsInt = randint(0,2)\n if contestantChoosesInt == currentDoorPermutation.index('car'):\n noSwitchCount = noSwitchCount+1\n else:\n switchCount = switchCount+1\n count=count+1\n\nprint('should not switch times: '+str(noSwitchCount))\nprint('should have switched times: '+str(switchCount))\n \n \n\n\n" }, { "alpha_fraction": 0.7272727489471436, "alphanum_fraction": 0.7662337422370911, "avg_line_length": 24.66666603088379, "blob_id": 
"8fdc05d6b3e7b28e40541bbf44a178089aebf1ca", "content_id": "08155b19e45f380f1c1eff8116a8a38883ef08ea", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 77, "license_type": "no_license", "max_line_length": 40, "num_lines": 3, "path": "/README.md", "repo_name": "eractnodi/MontyHall", "src_encoding": "UTF-8", "text": "# MontyHall\nA script to test the Monty Hall problem.\nwritten in python 3.4.1\n" } ]
2
gsk12/UdacityDAND
https://github.com/gsk12/UdacityDAND
154c9b05d1afde0fce4b79983bf7bc9a883c364a
16840aa5c125493730de5f8e2c0ef970846bb93a
19ee8c35422383ad4206732c34d5c06d606f3ec6
refs/heads/master
2021-07-06T12:24:34.149048
2017-10-02T15:06:40
2017-10-02T15:06:40
105,547,141
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6203522682189941, "alphanum_fraction": 0.6203522682189941, "avg_line_length": 37.38461685180664, "blob_id": "da4b6d22dc55982093d7e6a7b1c47e4d61f4823c", "content_id": "02863df36667264e0da4fe4fbb7000b0d758ed2a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1022, "license_type": "no_license", "max_line_length": 91, "num_lines": 26, "path": "/Project_3/Python Scripts/Audit_streetnames.py", "repo_name": "gsk12/UdacityDAND", "src_encoding": "UTF-8", "text": "def audit_street_type(street_types, street_name):\r\n # add unexpected street name to a list\r\n m = street_type_re.search(street_name)\r\n if m:\r\n street_type = m.group()\r\n if street_type not in expected:\r\n street_types[street_type].add(street_name)\r\n \r\ndef is_street_name(elem):\r\n # determine whether a element is a street name\r\n return (elem.attrib['k'] == \"addr:street\")\r\n\r\ndef audit_street(osmfile):\r\n # iterate through all street name tag under node or way and audit the street name value\r\n osm_file = open(osmfile, \"r\")\r\n street_types = defaultdict(set)\r\n for event, elem in cET.iterparse(osm_file, events=(\"start\",)):\r\n if elem.tag == \"node\" or elem.tag == \"way\":\r\n for tag in elem.iter(\"tag\"):\r\n if is_street_name(tag):\r\n audit_street_type(street_types, tag.attrib['v'])\r\n return street_types\r\n\r\nst_types = audit_street(bangaloreOSM)\r\n# print out unexpected street names\r\npprint.pprint(dict(st_types))" }, { "alpha_fraction": 0.48664259910583496, "alphanum_fraction": 0.4877256453037262, "avg_line_length": 34.959999084472656, "blob_id": "bd6fcbaf746f6ac49c5071c2ac863001554531a2", "content_id": "ad168051278d0faa3918c264b9478fbc4017bfa0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2770, "license_type": "no_license", "max_line_length": 110, "num_lines": 75, "path": "/Project_3/Python Scripts/osmtoxml.py", "repo_name": 
"gsk12/UdacityDAND", "src_encoding": "UTF-8", "text": "CREATED = [ \"version\", \"changeset\", \"timestamp\", \"user\", \"uid\"]\r\n\r\ndef shape_element(element):\r\n node = {}\r\n node[\"created\"]={}\r\n node[\"address\"]={}\r\n node[\"pos\"]=[]\r\n refs=[]\r\n \r\n # we only process the node and way tags\r\n if element.tag == \"node\" or element.tag == \"way\" :\r\n if \"id\" in element.attrib:\r\n node[\"id\"]=element.attrib[\"id\"]\r\n node[\"type\"]=element.tag\r\n\r\n if \"visible\" in element.attrib.keys():\r\n node[\"visible\"]=element.attrib[\"visible\"]\r\n \r\n # the key-value pairs with attributes in the CREATED list are added under key \"created\"\r\n for elem in CREATED:\r\n if elem in element.attrib:\r\n node[\"created\"][elem]=element.attrib[elem]\r\n \r\n # attributes for latitude and longitude are added to a \"pos\" array\r\n # include latitude value \r\n if \"lat\" in element.attrib:\r\n node[\"pos\"].append(float(element.attrib[\"lat\"]))\r\n # include longitude value \r\n if \"lon\" in element.attrib:\r\n node[\"pos\"].append(float(element.attrib[\"lon\"]))\r\n\r\n \r\n for tag in element.iter(\"tag\"):\r\n if not(problemchars.search(tag.attrib['k'])):\r\n if tag.attrib['k'] == \"addr:housenumber\":\r\n node[\"address\"][\"housenumber\"]=tag.attrib['v']\r\n \r\n if tag.attrib['k'] == \"addr:postcode\":\r\n node[\"address\"][\"postcode\"]=tag.attrib['v']\r\n \r\n # handling the street attribute, update incorrect names using the strategy developed before \r\n if tag.attrib['k'] == \"addr:street\":\r\n node[\"address\"][\"street\"]=tag.attrib['v']\r\n \r\n\r\n if tag.attrib['k'].find(\"addr\")==-1:\r\n node[tag.attrib['k']]=tag.attrib['v']\r\n \r\n for nd in element.iter(\"nd\"):\r\n refs.append(nd.attrib[\"ref\"])\r\n \r\n if node[\"address\"] =={}:\r\n node.pop(\"address\", None)\r\n\r\n if refs != []:\r\n node[\"node_refs\"]=refs\r\n \r\n return node\r\n else:\r\n return None\r\n\r\n# process the OSM file, write a json out file and return a 
list of dictionaries\r\ndef process_map(file_in, pretty = False):\r\n file_out = \"{0}.json\".format(file_in)\r\n data = []\r\n with codecs.open(file_out, \"w\") as fo:\r\n for _, element in cET.iterparse(file_in):\r\n el = shape_element(element)\r\n if el:\r\n data.append(el)\r\n if pretty:\r\n fo.write(json.dumps(el, indent=2)+\"\\n\")\r\n else:\r\n fo.write(json.dumps(el) + \"\\n\")\r\n return data" }, { "alpha_fraction": 0.5216741561889648, "alphanum_fraction": 0.5336322784423828, "avg_line_length": 31.549999237060547, "blob_id": "1cb96851cde6bd1f42a91a6e04009316c7f0b523", "content_id": "e5f88440958589291d0de5ba0ca7f3aa263c1afe", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 669, "license_type": "no_license", "max_line_length": 95, "num_lines": 20, "path": "/Project_3/Python Scripts/cleanup_zipcode.py", "repo_name": "gsk12/UdacityDAND", "src_encoding": "UTF-8", "text": "def update_name(zipcode):\r\n testNum = re.findall('[a-zA-Z]*', zipcode)\r\n if testNum:\r\n testNum = testNum[0]\r\n testNum.strip()\r\n if testNum == \"- 5\":\r\n convertedZipcode = (re.findall(r'\\d+', zipcode))\r\n if convertedZipcode:\r\n if convertedZipcode.__len__() == 3:\r\n return (re.findall(r'\\d+', zipcode))[0] + \"-\" +(re.findall(r'\\d+', zipcode))[1]\r\n else:\r\n return (re.findall(r'\\d+', zipcode))[0]\r\n\r\n## or\r\n# zipcodes = audit_zipcodes(bangaloreOSM)\r\n\r\n# for zipcode in zipcodes:\r\n# if zipcode.startswith('- 5'):\r\n# zipcode = zipcode[2:]\r\n# print zipcode, zipcodes[zipcode]" }, { "alpha_fraction": 0.7528089880943298, "alphanum_fraction": 0.7528089880943298, "avg_line_length": 20, "blob_id": "a1371b32becbcf865a1b556b62b260e223fcced9", "content_id": "5efdb546627970a69acf8d44e06178ebb8995a8e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 178, "license_type": "no_license", "max_line_length": 32, "num_lines": 8, "path": "/Project_3/Python 
Scripts/mongoconnection.py", "repo_name": "gsk12/UdacityDAND", "src_encoding": "UTF-8", "text": "# Processing the file\r\ndata = process_map(bangaloreOSM)\r\n\r\nclient = MongoClient()\r\ndb = client.bangaloreOSM\r\ncollection = db.bangaloreMAP\r\ncollection.insert(data)\r\ncollection\r\n\r\n" }, { "alpha_fraction": 0.6012157797813416, "alphanum_fraction": 0.6066869497299194, "avg_line_length": 31.612245559692383, "blob_id": "77d8dce99d53c173cc047a369077b6a7446fb746", "content_id": "f4683722ee289d24558602d31e22d5fda98c2d2d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1645, "license_type": "no_license", "max_line_length": 96, "num_lines": 49, "path": "/Project_3/Python Scripts/setup.py", "repo_name": "gsk12/UdacityDAND", "src_encoding": "UTF-8", "text": "# loading libraries\r\nimport os\r\nimport xml.etree.cElementTree as cET\r\nfrom collections import defaultdict\r\nimport pprint\r\nimport re\r\nimport codecs\r\nimport json\r\nimport string\r\nfrom pymongo import MongoClient\r\n\r\nfilename = \"bangalore.osm\" # osm filename\r\npath = \"C:\\Users\\sampa\\Desktop\\Other\\Udacity\\Lesson 4\" # directory contain the osm file\r\nbangaloreOSM = os.path.join(path, filename)\r\n\r\n# regular expression to check tags that contain ony lowercase are valid, lower case with colon \r\n# or with problematic characters otherwise valid\r\nlower = re.compile(r'^([a-z]|_)*$') \r\nlower_colon = re.compile(r'^([a-z]|_)*:([a-z]|_)*$')\r\nproblemchars = re.compile(r'[=\\+/&<>;\\'\"\\?%#$@\\,\\. 
\\t\\r\\n]')\r\nstreet_type_re = re.compile(r'\\b\\S+\\.?$', re.IGNORECASE)\r\n\r\n# expected street names , Halli , nagar, Palya are locally common street names\r\nexpected = [\"Street\", \"Avenue\", \"Halli\", \"Nagar\", \"Palya\", \"Place\", \"Square\", \"Lane\", \"Road\"]\r\n\r\ndef key_type(element, keys):\r\n if element.tag == \"tag\":\r\n for tag in element.iter('tag'):\r\n k = tag.get('k')\r\n if lower.search(k):\r\n keys['lower'] += 1\r\n elif lower_colon.search(k):\r\n keys['lower_colon'] += 1\r\n elif problemchars.search(k):\r\n keys['problemchars'] += 1\r\n else:\r\n keys['other'] += 1\r\n return keys\r\n\r\n\r\ndef process_map(filename):\r\n keys = {\"lower\": 0, \"lower_colon\": 0, \"problemchars\": 0, \"other\": 0}\r\n for _, element in ET.iterparse(filename):\r\n keys = key_type(element, keys)\r\n\r\n return keys\r\n\r\nban_keys = process_map(bangaloreOSM)\r\npprint.pprint(ban_keys)" }, { "alpha_fraction": 0.6986855864524841, "alphanum_fraction": 0.7163597345352173, "avg_line_length": 42.51900863647461, "blob_id": "ec7b84cbc13df0c18394e0a3d44d33ec2d692771", "content_id": "e2c6c8fdbd6875bfb3f9aa2aa58347994d6c0917", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "RMarkdown", "length_bytes": 26932, "license_type": "no_license", "max_line_length": 697, "num_lines": 605, "path": "/Project_4/Red Wine Quality.rmd", "repo_name": "gsk12/UdacityDAND", "src_encoding": "UTF-8", "text": "Explore and Summarize Red Wine Quality Analysis Data by Sampath Grandhi\r\n========================================================\r\n\r\n\r\n```{r echo=FALSE, message=FALSE, warning=FALSE, packages}\r\n# Load all of the packages that you end up using in your analysis in this code\r\n\r\nlibrary(ggplot2)\r\nlibrary(grid)\r\nlibrary(gridExtra)\r\nlibrary(dplyr)\r\nlibrary(tidyr)\r\nlibrary(RColorBrewer)\r\nlibrary(GGally)\r\nlibrary(corrplot)\r\n\r\n\r\n```\r\n\r\n```{r echo=FALSE, Load_the_Data}\r\n# Load the Data\r\n\r\nwine <- 
read.csv(\"wineQualityReds.csv\",row.names=NULL)\r\n\r\n# Remove x column as it's just an index\r\n\r\nwine$X <- NULL\r\n\r\nwine <- wine[wine$fixed.acidity < quantile(wine$fixed.acidity, 0.99),]\r\nwine <- wine[wine$residual.sugar < quantile(wine$residual.sugar, 0.99),]\r\nwine <- wine[wine$total.sulfur.dioxide < quantile(wine$total.sulfur.dioxide, 0.99),]\r\nwine <- wine[wine$free.sulfur.dioxide < quantile(wine$free.sulfur.dioxide, 0.99),]\r\n\r\n```\r\n\r\n\r\n\r\n\r\n``` {r echo =FALSE, SUMMARY}\r\n\r\nsummary(wine)\r\n\r\nnames(wine)\r\n\r\nstr(wine)\r\n\r\n```\r\n\r\nThis [dataset](https://www.google.com/url?q=https://s3.amazonaws.com/udacity-hosted-downloads/ud651/wineQualityReds.csv&sa=D&ust=1496596163158000&usg=AFQjCNHuSdYQfa2QwgHKVl-gGcYzrCdctQ \"Red Wine Data\") contains 1,599 red wines observations. wWth 12 variables, 11 on the chemical properties of the wine. At least 3 wine experts rated the quality of each wine, providing a rating between 0 (bad) and 10 (excellent). Quality variable is discrete and the others are continuous.\r\n\r\n5 is the most frequent the quality rating and most of the ratings are either 5 or 6. Quality is a categorical discrete variable, but if we were to treat it as continuous, the mean would be 5.63 and the median would be 6. The highest rating was 8, and the lowest was 3. Furthermore, total sulfur dioxide and free sulfur dioxide seem to be discrete variables. This might be due to rounding issues. We could think that citric acid is a subset of fixed acidity and potentially volatile acidity.\r\n\r\nFixed acidity, residual sugar, total sulfur dioxide, and free sulfur dioxide were all stripped from their top 1% values as they appeared to be large outliers. 
Now there are 1,534 red wines observations.\r\n\r\n# Univariate Plots Section\r\n\r\n\r\n```{r echo=FALSE, Univariate_Plots}\r\n\r\n\r\nwine$quality.level <- ifelse(wine$quality < 5, \"low\", \r\n ifelse(wine$quality < 7, \"average\", \"high\"))\r\nwine$quality.level <- factor(wine$quality.level, \r\n levels=c(\"high\", \"average\", \"low\"), ordered=TRUE)\r\nattach(wine)\r\n\r\n\r\n```\r\n```{r echo=FALSE, Univariate_Plots1}\r\nqplot(factor(quality.level), data=wine, geom=\"bar\", xlab=\"Quality\") + theme_bw()\r\nsummary(wine$quality.level)\r\n```\r\n\r\nA newly quality level variable is created where red wine obseravtions with quality graded less than 5 is graded as low and above 5 and below 7 is graded as Average and above 7 is graded as high. Most of the red wine observations quality were in the average quality grade and very few were graded low. \r\n\r\n\r\n```{r echo=FALSE, Univariate_Plots2}\r\nqplot(factor(quality), data=wine, geom=\"bar\", xlab=\"Quality\") + theme_bw()\r\nsummary(wine$quality)\r\n```\r\n\r\nRed wine quality is normally distributed and concentrated around 5 and 6.\r\n\r\n\r\n\r\n```{r echo=FALSE, Univariate_Plots3}\r\n uni_qplot <- function(x, dat=NULL, xlab, binwidth=0.01) {\r\n if(missing(dat)) {\r\n qplot(x, data=wine, xlab=xlab, binwidth=binwidth) + theme_bw()\r\n }\r\n else {\r\n qplot(x, data=dat, xlab=xlab, binwidth=binwidth) + theme_bw()\r\n }\r\n}\r\nuni_qplot(x=fixed.acidity, xlab=\"Fixed acidity (g/dm^3)\", binwidth=0.1)\r\n```\r\n\r\nFixed acidity's distribution is right skewed, and concentrated around 7\r\n\r\n\r\n\r\n```{r echo=FALSE, Univariate_Plots4}\r\nuni_qplot(x=volatile.acidity, xlab=\"Volatile acidity (g/dm^3)\")\r\nsummary(wine$volatile.acidity)\r\n```\r\nVolatile acidity's distribution appears to be vague whether it is bimodal or unimodel, right skewed or normal.\r\n\r\n\r\n\r\n```{r echo=FALSE, Univariate_Plots5}\r\nuni_qplot(citric.acid, xlab=\"Citric acid (g/dm^3)\")\r\nsummary(wine$citric.acid)\r\n```\r\nCitric 
acid's distribution is not normal.\r\n\r\n\r\n\r\n```{r echo=FALSE, Univariate_Plots6}\r\nuni_qplot(residual.sugar, xlab=\"Residual sugar (g/dm^3)\", binwidth=0.5)\r\nsummary(wine$residual.sugar)\r\n```\r\nResidual sugar's distribution is right skewed, and is concentrated around 2. The plot has some outliers which were removed.\r\n\r\n\r\n```{r echo=FALSE, Univariate_Plots7}\r\nuni_qplot(x=chlorides, xlab=\"Chlorides (g/dm^3)\")\r\nsummary(wine$chlorides)\r\n```\r\nChloride's distribution is normal, and is concentrated around 0.08. The plot has some outliers.\r\n\r\n\r\n```{r echo=FALSE, Univariate_Plots8}\r\nuni_qplot(free.sulfur.dioxide, xlab=\"Free sulfur dioxide (mg/dm^3)\", binwidth=0.5)\r\nsummary(wine$free.sulfur.dioxide)\r\n```\r\nFree sulfur dioxide's distribution is right skewed and is concentrated around 14.\r\n\r\n\r\n```{r echo=FALSE, Univariate_Plots9}\r\nuni_qplot(total.sulfur.dioxide, xlab=\"Total sulfur dioxide (mg/dm^3)\", binwidth=5)\r\nsummary(wine$total.sulfur.dioxide)\r\n```\r\nTotal sulfur dioxide's distribution is right skewed and concentrated around 37. The plot has few outliers which were removed.\r\n\r\n\r\n```{r echo=FALSE, Univariate_Plots10}\r\nuni_qplot(density, xlab=\"Density (g/cm^3)\", binwidth=0.001)\r\nsummary(wine$density)\r\n```\r\nThe distribution of density is normal and is concentrated around 0.9967.\r\n\r\n\r\n```{r echo=FALSE, Univariate_Plots11}\r\nuni_qplot(wine$pH, xlab=\"pH\")\r\nsummary(wine$pH)\r\n```\r\nThe distribution of pH is normal and is concentrated around 3.31.\r\n\r\n\r\n```{r echo=FALSE, Univariate_Plots12}\r\nuni_qplot(sulphates, xlab=\"Sulphates (g/dm^3)\")\r\nsummary(wine$sulphates)\r\n```\r\nSulphate's distribution is right skewed and concentrated around 0.6569. 
The plot has few outliers.\r\n\r\n\r\n```{r echo=FALSE, Univariate_Plots13}\r\nuni_qplot(alcohol, xlab=\"Alcohol (%)\", binwidth=0.4)\r\nsummary(wine$alcohol)\r\n```\r\nAlcohol's distribution is right skewed and concentrated around 10.20.\r\n\r\nThe data is divided into 2 groups: a high quality group contains observations whose quality is 7 or 8, and a low quality group which has observations of whose quality is 3 or 4. Examining the difference in each feature between the two groups, we could see that volatile acidity, density, and citric acid may have some correation with quality. Let's visualize the data to see the difference.\r\n\r\n```{r echo=FALSE, Univariate_Plots14}\r\nquality78 <- subset(wine, quality == 8 | quality == 7)\r\nquality34 <- subset(wine, quality == 3 | quality == 4)\r\nvolatile78 <- uni_qplot(quality78$volatile.acidity, dat=quality78, \r\n xlab=\"Volatile acidity (g/dm^3), quality 7 & 8\", \r\n binwidth=0.1)\r\nvolatile34 <- uni_qplot(quality34$volatile.acidity, dat=quality34, \r\n xlab=\"Volatile acidity (g/dm^3), quality 3 & 4\", \r\n binwidth=0.1)\r\n\r\ndensity78 <- uni_qplot(quality78$density, dat=quality78, \r\n xlab=\"Density (g/cm^3), quality 7 & 8\", binwidth=0.001)\r\ndensity34 <- uni_qplot(quality34$density, dat=quality34, \r\n xlab=\"Density (g/cm^3), quality 3 & 4\", binwidth=0.001)\r\n\r\ncitric78 <- uni_qplot(quality78$citric.acid, dat=quality78, \r\n xlab=\"Citric acid (g/dm^3), quality 7 & 8\")\r\ncitric34 <- uni_qplot(quality34$citric.acid, dat=quality34, \r\n xlab=\"Citric acid (g/dm^3), quality 3 & 4\")\r\n\r\nalcohol78 <- uni_qplot(quality78$alcohol, dat=quality78, \r\n xlab=\"Alcohol (%), quality 7 & 8\", binwidth=0.1)\r\nalcohol34 <- uni_qplot(quality34$alcohol, dat=quality34, \r\n xlab=\"Alcohol (%), quality 3 & 4\", binwidth=0.1)\r\n\r\ngrid.arrange(volatile34, volatile78, density34, density78, \r\n citric34, citric78, alcohol34, alcohol78, ncol=2)\r\n```\r\n\r\n\r\n# Univariate Analysis\r\n\r\n> \r\n\r\n### What is the 
structure of your dataset?\r\n\r\nThere are 1,599 red wines in the dataset with 11 features on the chemical properties of the wine. ( fixed.acidity, volatile.acidity, citric.acid, residual.sugar, chlorides, free.sulfur.dioxide, total.sulfur.dioxide, density, pH, sulphates, alcohol, and quality). There are 1534 observations after slicing out the top 1% from the variables that had large outliers (Fixed acidity, residual sugar, total sulfur dioxide, and free sulfur dioxide)\r\n\r\n\r\n\r\nObservations:\r\n\r\n1. The median quality is 6. \r\n2. Most wines have a pH of 3.4 or higher. \r\n3. About 75% of wine have quality that is lower than 6 and 75% of the wines have less than 11.10% alcohol.\r\n4. The median percent alcohol content is 10.20 and the max percent alcohol content is 14.90.\r\n5. The quality is an integer number with 6 values.\r\n6. The suger distribution is right skewed with many outliers.\r\n\r\n\r\n### What is/are the main feature(s) of interest in your dataset?\r\n\r\nAs given in the dataset description, it appears that diffrent types of acids have significant impact on the quality.\r\nThe main features in the data set are pH and quality. I'd like to find which features are best for predicting the quality of a wine. I suspect volatile acidity has the most impact on the quality. We could use pH or some combination of the other variables can be used to build a predictive model to grade the quality of wines.\r\n\r\n\r\n### What other features in the dataset do you think will help support your \\\r\ninvestigation into your feature(s) of interest?\r\n\r\nVolatile acidity, citric acid, and alcohol likely contribute to the quality of a wine. 
Volatile acidity (the amount of acetic acid in wine) and alcohol (the percent alcohol content of the wine) probably contribute most to the quality of wine after looking them up on the internet.\r\n\r\n\r\n### Did you create any new variables from existing variables in the dataset?\r\n\r\nA new variable called \"quality.level\" which is categorically divided into \"low\", \"average\", and \"high\" was created. This categorization will help us identify the difference among each group more easily.\r\n\r\n\r\n### Of the features you investigated, were there any unusual distributions? \\\r\nDid you perform any operations on the data to tidy, adjust, or change the form \\\r\nof the data? If so, why did you do this?\r\n\r\nVolatile acidity data has some unusual distributions, so this might have some correlation with the quality of red wine. \r\nThe top 1% of values were stripped off of fixed acidity, residual sugar, total sulfur dioxide, and free sulfur dioxide.\r\n\r\nThe x column was removed as it was just an index value . Since the rest of the data is clean, I did not perform any cleaning process or modification of the data.\r\n\r\n\r\n\r\n# Bivariate Plots Section\r\n\r\n\r\n```{r echo=FALSE, Bivariate_Plots1}\r\nbi_qplot <- function(x, y, z=\"jitter\") {\r\n if(z==\"boxplot\") {\r\n qplot(x=x, y=y, data=wine, geom=\"jitter\", alpha=0.01) + \r\n geom_boxplot() +\r\n guides(alpha=\"none\") +\r\n theme_bw()\r\n }\r\n else {\r\n qplot(x=x, y=y, data=wine, geom=z, alpha=0.01) + \r\n guides(alpha=\"none\") +\r\n theme_bw()\r\n }\r\n}\r\n\r\nbi_qplot(quality.level, volatile.acidity, \"boxplot\") +\r\n xlab(\"Quality level\") +\r\n ylab(\"Volatile acidity (g/dm^3)\")\r\n```\r\n\r\nThe graph shows that lower the volatile acidity is, the higher the quality becomes. The correlation coefficient between quality and volatile acidity is -0.39. 
This is a fact as volatile acidity at too high of levels can lead to an unpleasant, vinegar taste.\r\n\r\n\r\n\r\n```{r echo=FALSE, Bivariate_Plots2}\r\nbi_qplot(quality.level, citric.acid, \"boxplot\") +\r\n xlab(\"Quality level\") +\r\n ylab(\"Citric acid\")\r\ngrp <- group_by(wine, quality.level)\r\ncnts <- summarize(grp, count=n(), \r\n median=median(citric.acid), \r\n mean=mean(citric.acid), \r\n variance=var(citric.acid), \r\n Q1=quantile(citric.acid, 0.25), \r\n Q3=quantile(citric.acid, 0.75))\r\nprint(cnts)\r\n```\r\nThe correlation coefficient is 0.226; the graph shows a slight positive relationship between quality level and citric acid.\r\n\r\n\r\n\r\n```{r echo=FALSE, Bivariate_Plots3}\r\nbi_qplot(quality.level, alcohol) +\r\n xlab(\"Quality level\") +\r\n ylab(\"Alcohol\")\r\ncor.test(wine$quality, wine$alcohol)\r\n```\r\n\r\nWith the correlation coefficient of 0.476, the graph shows quality seems to have a semi strong correlation with alcohol. Average quality and low quality wines have their percent alcohol contents concentrated around 10 whereas high quality wines have their percent alcohol contents concentrated around 12.\r\nAnd the general trend between the alcohol and the quality shows that as alcohol increases, the quality increases as well.\r\n\r\n\r\n```{r echo=FALSE, Bivariate_Plots4}\r\nbi_qplot(alcohol, volatile.acidity) +\r\n xlab(\"Alcohol (%)\") +\r\n ylab(\"Volatile acidity (g/dm^3)\") +\r\n geom_smooth(method='lm')\r\n\r\n```\r\n\r\nThere is a weak negative correlation of -0.2 between percentage of alcohol content and volatile acidity.\r\n\r\n\r\n\r\n```{r echo=FALSE, Bivariate_Plots5}\r\nbi_qplot(residual.sugar, alcohol) +\r\n xlab(\"Residual sugar (g/dm^3)\") +\r\n ylab(\"Alcohol (%)\") + geom_smooth(method='lm')\r\n```\r\n\r\nThe correlation coefficient between residual sugar and percent alcohol content is slightly positive, which says there is some relationship between them. 
\r\nThis is supported by In actual winemaking process, wines are made from ripe to overly ripe grape fruit. To keep wines from staying too sweet, the fermentation process is left to continue until most of the sugar is consumed, but as a byproduct, more alcohol is present in the wines.\r\n\r\n\r\n\r\n```{r echo=FALSE, Bivariate_Plots6}\r\nbi_qplot(citric.acid, volatile.acidity) +\r\n xlab(\"Citric acid (g/dm^3)\") +\r\n ylab(\"Volatile acidity (g/dm^3)\") + geom_smooth(method='lm')\r\ncor.test(wine$citric.acid, wine$volatile.acidity)\r\n```\r\n\r\nThere is a negative correlation between citric acid and volatile acidity.\r\n\r\n\r\n\r\n```{r echo=FALSE, Bivariate_Plots7}\r\nbi_qplot(alcohol, density) + \r\n xlab(\"Alcohol (%)\") + \r\n ylab(\"Density (g/cm^3)\") + geom_smooth(method='lm')\r\n```\r\n\r\nThe correlation coefficient between percentage alcohol and density is -0.5, so the relationship is quite clear as percentage alcohol content increases, the density decreases. This could be explained by the fact the density of wine is lower than the density of pure water.\r\n\r\n\r\n\r\n```{r echo=FALSE, Bivariate_Plots8}\r\naddFeatures <- wine[,!colnames(wine) %in% c(\"volatile.acidity\", \r\n \"quality\", \"quality.level\")]\r\nggpairs(addFeatures, \r\n columnLabels=c(\"f.aci\", \"ci.aci\", \"res.sug\", \"chlo\", \"fr.su.dio\", \r\n \"to.su.dio\", \"dens\", \"pH\", \"sulph\", \"alco\"), \r\n lower = list(continuous = wrap(\"points\", size=1, shape = I('.'))),\r\n upper = list(combo = wrap(\"box\", outlier.shape = I('.')))) + \r\n theme(axis.ticks=element_blank(),\r\n axis.line=element_blank(), \r\n axis.text=element_blank(), \r\n panel.grid.major= element_blank())\r\n\r\n```\r\n\r\nThis graph shows positive relationship between density and fixed acidity, positive relationship between fixed acidity and citric acid, negative relationship between pH and acidity.\r\n\r\n\r\n```{r echo=FALSE, Multivariate_Plotsa}\r\nden_qplot <- function(x, color, xlab) {\r\n 
ggplot(data=wine, aes(x, colour=color)) + \r\n geom_density() + \r\n xlab(xlab) + \r\n labs(colour=\"Quality level\") +\r\n theme_bw()\r\n}\r\nden_qplot(fixed.acidity, quality.level, \"Fixed acidity (g/dm^3)\")\r\n```\r\n\r\nThe distribution of low and average quality wines seem to be concentrated at fixed acidity values that are between 6 and 10. pH increases as fixed acidity decreases, and citric acid increases as fixed acidity increases.\r\n\r\n\r\n```{r echo=FALSE, Multivariate_Plots3}\r\nalcoQuaDensity <- den_qplot(alcohol, quality.level, \"Alcohol (%)\")\r\nprint(alcoQuaDensity)\r\n\r\nalcohol_lm <- lm(data=wine, quality~alcohol)\r\nsummary(alcohol_lm)\r\n```\r\n\r\nHigh quality wine density line is distinct from the others, and mostly distributed between 11 and 12.\r\n\r\n\r\n```{r echo=FALSE, Multivariate_Plots4}\r\nvolaQuaDensity <- den_qplot(volatile.acidity, quality.level, \r\n \"Volatile acidity (g/dm^3)\")\r\nprint(volaQuaDensity)\r\n\r\nvolacid_lm <- lm(data=wine, quality~volatile.acidity)\r\nsummary(volacid_lm)\r\n```\r\n\r\nThis chart shows a very clear trend; as volatile acidity decreases, the quality of wine increases. Wines with volatile acidity exceeding 1 are almost rated as low quality. The linear model of volatile acidity has an R-squared of 0.152 which means this feature alone does not explain much of the variability of red wine quality.\r\n\r\n\r\n```{r echo=FALSE, Multivariate_Plots5}\r\nfeaInterest_lm <- lm(data=wine, quality~volatile.acidity + alcohol)\r\nsummary(feaInterest_lm)\r\n```\r\n\r\nR-squared increases by two times after adding alcohol to the linear model.\r\n\r\n\r\n# Bivariate Analysis\r\n\r\n\r\n### Talk about some of the relationships you observed in this part of the \\\r\ninvestigation. How did the feature(s) of interest vary with other features in \\\r\nthe dataset?\r\n\r\nThere is a negative relationship between quality level and volatile acidity, and positive correlation between quality level and alcohol. 
This is not surprising as stronger wines are graded as high quality, where as wines with low percent alcohol are often not graded high. High volatile acidity is reagrded to be undesirable because it impacts the taste of wines. Alcohol and volatile acidity don't have any clear relationship between each other.\r\n\r\n\r\n### Did you observe any interesting relationships between the other features \\\r\n(not the main feature(s) of interest)?\r\n\r\nThere is a positive relationship between density and fixed acidity, positive relationship between fixed acidity and citric acid, positive realtionship between alochol and residual suagr, Fixed acidity and pH were negatively correlated. Other variables either show very weak relationship or do not show any relationship.\r\n\r\n\r\n### What was the strongest relationship you found?\r\n\r\nQuality is positively and strongly correlated with alcohol, and it is also negatively correlated with volatile acidity. Using Alcohol and volatile acidity we could build a model to predict the quality of wine.\r\n\r\n\r\n# Multivariate Plots Section\r\n\r\n\r\n\r\n```{r echo=FALSE, Multivariate_Plots}\r\nmulti_qplot <- function(x, y, z, alpha=0.4) {\r\n qplot(x, y, data=wine, color=z) +\r\n guides(alpha=FALSE)\r\n}\r\nmulti_qplot(density, volatile.acidity, quality.level) +\r\n xlab(\"Density (g/cm^3)\") +\r\n ylab(\"Volatile acidity (g/cm^3)\") +\r\n labs(color=\"Quality level\") +\r\n geom_point(alpha=1, size = 1) +\r\n geom_smooth(method = \"lm\", se = FALSE,size=1) \r\n\r\n \r\n\r\n```\r\n\r\nThe densities of high quality wines are concentrated on the lower part of volatile acidity (y axis) and between 0.994 and 0.998 values. 
\r\n\r\n\r\n```{r echo=FALSE, Multivariate_Plots1}\r\nmulti_qplot(volatile.acidity, alcohol, quality.level) + aes(size=citric.acid) +\r\n xlab(\"Volatile acidity (g/dm^3)\") +\r\n ylab(\"Alcohol (%)\") + \r\n labs(color=\"Quality level\", size=\"Citric acid\")\r\nprint(\"Percent alcohol contents by quality level:\")\r\nwine %>% \r\n group_by(quality.level) %>% \r\n summarize(mean=mean(alcohol),sd=sd(alcohol))\r\nprint(\"Volatile acidities by quality level:\")\r\nwine %>% \r\n group_by(quality.level) %>% \r\n summarize(mean=mean(volatile.acidity),sd=sd(volatile.acidity))\r\n```\r\n\r\nHigh quality feature gieves an impression to be related with alcohol ranging from 11 to 13, volatile acidity from 0.2 to 0.5, and citric acid from 0.25 to 0.75 .\r\n\r\n\r\n```{r echo=FALSE, Multivariate_Plots2}\r\nmulti_qplot(fixed.acidity, volatile.acidity, quality.level) + \r\n aes(size=pH) +\r\n xlab(\"Fixed acidity (g/dm^3)\") +\r\n ylab(\"Volatile acidity (g/dm^3)\") +\r\n labs(color=\"Quality level\")\r\n\r\nmulti_qplot(residual.sugar, alcohol, quality.level) + \r\n xlab(\"Residual sugar (g/dm^3)\") +\r\n ylab(\"Alcohol (%)\") +\r\n labs(color=\"Quality level\")\r\n\r\nmulti_qplot(fixed.acidity, alcohol, quality.level) + \r\n aes(size=citric.acid) +\r\n xlab(\"Fixed acidity (g/dm^3)\") +\r\n ylab(\"Alcohol (%)\") + \r\n labs(color=\"Quality level\", size=\"Citric acid\")\r\n```\r\n\r\n\r\n\r\n# Multivariate Analysis\r\n\r\n\r\n### Talk about some of the relationships you observed in this part of the \\\r\ninvestigation. Were there features that strengthened each other in terms of \\\r\nlooking at your feature(s) of interest?\r\n\r\nThe relationship between volatile acidity and alcohol is significant: a low volatile acidity rating appeared to be a requirement in order for a wine to be rated high. When looking at wine quality level, we see a positive relationship between fixed acidity and citric acid.\r\nThis analysis suggests that bad wines generally have higher volatile acidity. 
Excellent wines seem to have low volatile acidity, higher citric acid, and higher alcohol content. However, higher alcohol content might have the final say in making a wine excellent.\r\n\r\n\r\n\r\n### Were there any interesting or surprising interactions between features?\r\n\r\nResidual sugar which was supposed to play an important part in wine taste, actually has very little impact on wine quality but after removing outliers it does seem to have a positive impact on the wine quality.\r\n\r\n\r\n\r\n### OPTIONAL: Did you create any models with your dataset? Discuss the \\\r\nstrengths and limitations of your model.\r\n\r\nYes, 3 models were created. Quality has a weak positive relationship with alcohol, and weak negative relationship with volatile acid. The R squared values are low but p-values are significant; this result indicates that the regression models have significant variable but explains little of the variability. The quality of wine does not solely depends on volatile acidity and alcohol but also other features. Therefore, it is hard to build a predictive model that can accurately predict the quality of wine. Their p-values are significant; however, the R squared values are under 0.4, so they do not provide us with enough explanation about the variability of the response data around their means.\r\n\r\n------\r\n\r\n# Final Plots and Summary\r\n\r\n\r\n### Plot One\r\n```{r echo=FALSE, Plot_One}\r\nggplot(data=wine, aes(factor(quality), fill=quality.level)) + \r\n geom_bar() + \r\n ggtitle(\"Quality of Red Wine Samples\") +\r\n xlab(\"Quality\") + \r\n ylab(\"Number of wines\")\r\n```\r\n\r\n### Description One\r\nThe distribution of red wine quality appears to be normal. 82.5% of wines are rated 5 and 6 (average quality). 
Although the rating scale is between 0 and 10, there are no wine's which are rated 1, 2, 9 or 10.\r\n\r\n\r\n\r\n### Plot Two\r\n```{r echo=FALSE, Plot_Two}\r\nbi_qplot(quality.level, citric.acid, \"boxplot\") +\r\n ggtitle(\"Citric Acid concentration compared to Quality Level of Red Wine samples\") +\r\n xlab(\"Quality level\") +\r\n ylab(\"Citric acid (g/dm^3)\")\r\n```\r\n\r\n### Description Two\r\nWhile citric acid does not have a strong correlation with quality, it is an important component in the quality of wine. Citric acid is an organic acid that contributes to the total acidity of a wine, it is crucial to have a good amount of citric acid in wine. Citric acid causes the wine to be \"fresh\" , if it's not present the wine would be more acidic. Wines with citric acid exceeding 0.75 are hardly rated as high quality. 50% of high quality wines have a relatively high citric acid that ranges between 0.3 and 0.49, whereas average and low quality wines have lower amount of citric acid.\r\n\r\n\r\n### Plot Three\r\n```{r echo=FALSE, Plot_Three}\r\nvol.alco <- multi_qplot(volatile.acidity, alcohol, quality.level) + \r\n geom_point(size=4, shape=2, colour=\"steelblue\", alpha=0.002) +\r\n ggtitle(\"Volatile acidity and Alcohol concentration effects on the Quality of Red Wine samples \") +\r\n xlab(\"Volatile acidity (g/dm^3)\") +\r\n ylab(\"Alcohol (%)\") +\r\n labs(color=\"Quality level\") +\r\n scale_colour_grey() + \r\n theme_bw()\r\n\r\n# Move to a new page\r\ngrid.newpage()\r\n# Create layout : nrow = 2, ncol = 2\r\npushViewport(viewport(layout = grid.layout(2, 2)))\r\n# A helper function to define a region on the layout\r\ndefine_region <- function(row, col){\r\n viewport(layout.pos.row = row, layout.pos.col = col)\r\n} \r\n# Arrange the plots\r\nprint(vol.alco, vp=define_region(1, 1:2))\r\nprint(volaQuaDensity, vp = define_region(2, 1))\r\nprint(alcoQuaDensity, vp = define_region(2, 2))\r\n```\r\n\r\n### Description Three\r\n\r\nWe observed the opposite 
direction to which quality levels are heading. Wine with high percent alcohol content and low volatile acidity tends to be rated as high quality wine. Based on the result, we can see that the volatile acidity in wine and percent alcohol content are two important components in the quality and taste of red wines.\r\n\r\n\r\n------\r\n\r\n# Reflection\r\n\r\nThe wines data set contains information on 1599 wines across 12 variables with 11 variables on the chemical properties. In this analysis the effects of different features of red wine on it's quality were explored based on the data set. Initially univariate plots were created by understanding the individual variables in the data set and observations were made on plots. Finally, the quality of wines across many variables were explored and tried to create models to predict red wine quality.\r\n\r\nThere was a trend between the alcohol and its quality. Alcohol content appeared to be the number one factor for determining an excellent wine. There was also a trend between the volatile acidity of a wine and its quality. Volatile acidity essentially made a wine bad in large amounts, regardless of the circumstances. The wine should also have good amount of acetic acid to be graded excellent. After removing outliers residual sugar does seem to have a positive impact on the wine quality. \r\n\r\nFor the linear model, all wines were included since data on quality, volatile acidity and alcohol were there for all the wines. The third linear model with 2 variables represented for 31.6% of the variance in the dataset. \r\n\r\nThere are very few wines that were rated low or high quality. We could improve the quality of our analysis by collecting more data, and creating more variables that may contribute to the quality of wine. This will improve the accuracy of the prediction models.\r\n\r\nClear shortcomings in this data are due to biases in the wine tasters' preferences. 
A wine taster's perference may not match an average persons taste preference. Data in which a normal person is allowed to grade the wine's taste would be interesting and we could analyse the difference between an expert and a normal person. Choosing different populations/levels of wine tasters would further strengthen similarities in the data.\r\n\r\nWe have successfully identified features that effect the quality of red wine, visualized their relationships and summarized their statistics.\r\n\r\n-----\r\n#References\r\n\r\n1. https://s3.amazonaws.com/udacity-hosted-downloads/ud651/wineQualityInfo.txt\r\n2. https://en.wikipedia.org/wiki/Fermentation_in_winemaking\r\n3. http://waterhouse.ucdavis.edu/whats-in-wine/volatile-acidity\r\n4. https://s3.amazonaws.com/content.udacity-data.com/courses/ud651/diamondsExample_2016-05.html\r\n5. https://onlinecourses.science.psu.edu/stat857/node/223" }, { "alpha_fraction": 0.5387647747993469, "alphanum_fraction": 0.5492772459983826, "avg_line_length": 42.882354736328125, "blob_id": "bc115f4172d1a8f6e6c9703f76ae2c53c4939916", "content_id": "d3d65387dbee70eedabd90e60da56f4f6c9c9524", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 761, "license_type": "no_license", "max_line_length": 96, "num_lines": 17, "path": "/Project_3/Python Scripts/audit_zipcode.py", "repo_name": "gsk12/UdacityDAND", "src_encoding": "UTF-8", "text": "def audit_zipcodes(osmfile):\r\n # iter through all zip codes, collect all the zip codes that does not start with 560\r\n osm_file = open(osmfile, \"r\")\r\n zip_codes = {}\r\n for event, elem in cET.iterparse(osm_file, events=(\"start\",)):\r\n if elem.tag == \"node\" or elem.tag == \"way\":\r\n for tag in elem.iter(\"tag\"):\r\n if tag.attrib['k'] == \"addr:postcode\" and not tag.attrib['v'].startswith('560'):\r\n if tag.attrib['v'] not in zip_codes:\r\n zip_codes[tag.attrib['v']] = 1\r\n else:\r\n zip_codes[tag.attrib['v']] += 1\r\n return 
zip_codes\r\n\r\nzipcodes = audit_zipcodes(bangaloreOSM)\r\nfor zipcode in zipcodes:\r\n print zipcode, zipcodes[zipcode]" }, { "alpha_fraction": 0.7567185163497925, "alphanum_fraction": 0.7821782231330872, "avg_line_length": 60.4782600402832, "blob_id": "828a8eedb8473a7a99d80080adb2651a581aab4d", "content_id": "74ffb42beea103cffd2c1e1b4939fdb354b7633d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1414, "license_type": "no_license", "max_line_length": 152, "num_lines": 23, "path": "/README.md", "repo_name": "gsk12/UdacityDAND", "src_encoding": "UTF-8", "text": "# UdacityDAND\n\n## About\nThis repository contains my project work for Udacity's [Data Analyst Nanodegree](https://www.udacity.com/course/data-analyst-nanodegree--nd002).\n\n## Projects\n* P0: Analyze Bay Area Bike Share Data. [Link to project](https://github.com/gsk12/UdacityDAND/blob/master/Project_0/Bay_Area_Bike_Share_Analysis.ipynb)\n* P1: Test a Perceptual Phenomenon. [Link to project](https://github.com/gsk12/UdacityDAND/blob/master/Project_1/uda_statistics.pdf)\n* P2: Investigate a Baseball Dataset. [Link to project](https://github.com/gsk12/UdacityDAND/blob/master/Project_2/Titanic%20Dataset.ipynb)\n* P3: Wrangle OpenStreetMap Data with Python and SQL. [Link to project](https://github.com/gsk12/UdacityDAND/blob/master/Project_3/OSM.ipynb)\n* P4: Explore and Summarize Data with R. [Link to project](https://github.com/gsk12/UdacityDAND/blob/master/Project_4/Red%20Wine%20Quality.rmd)\n* P5: Identify Fraud from Enron Email. [Link to project](https://github.com/gsk12/UdacityDAND/blob/master/Project_5/project5.pdf)\n* P6: Make Effective Data Visualization. 
[Link to project](https://public.tableau.com/profile/sampath.grandhi#!/vizhome/BaseBallData/Story1)\n\n## Courses\n* Statistics\n* Intro to Data Analysis with Python\n* Data Wrangling with Python and SQL\n* Data Analysis with R\n* Intro to Machine Learning\n* Data Visualization with Tableau\n\n![Udacity Data Analyst Nanodegree certificate](Certificate.pdf)\n" }, { "alpha_fraction": 0.4224037230014801, "alphanum_fraction": 0.4224037230014801, "avg_line_length": 29.814815521240234, "blob_id": "7d88717c000e168dac9dc245391d5099978d465a", "content_id": "ab86e3ad8c6b36f0d5da93eaa50aeb802b1fb6c1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 857, "license_type": "no_license", "max_line_length": 75, "num_lines": 27, "path": "/Project_3/Python Scripts/cleanup_streetname.py", "repo_name": "gsk12/UdacityDAND", "src_encoding": "UTF-8", "text": "# creating a dictionary for correcting some common street name abbrevations\r\nmapping = { \"Ct\": \"Court\",\r\n \"St\": \"Street\",\r\n \"st\": \"Street\",\r\n \"St.\": \"Street\",\r\n \"St,\": \"Street\",\r\n \"ST\": \"Street\",\r\n \"street\": \"Street\",\r\n \"Street.\": \"Street\",\r\n \"Ave\": \"Avenue\",\r\n \"Ave.\": \"Avenue\",\r\n \"ave\": \"Avenue\",\r\n \"Rd.\": \"Road\", \r\n \"rd.\": \"Road\",\r\n \"Rd\": \"Road\", \r\n \"Hwy\": \"Highway\",\r\n \"HIghway\": \"Highway\",\r\n \"Pl\": \"Place\", \r\n \"place\": \"Place\",\r\n }\r\n\r\n# function that corrects incorrect street names\r\ndef update_name(name, mapping): \r\n for key in mapping:\r\n if key in name:\r\n name = string.replace(name,key,mapping[key])\r\n return name" } ]
9
FunkyC0ders/answery-api
https://github.com/FunkyC0ders/answery-api
f4b5c1628a6b8c340cbcc129f83222a75a225042
c5535f7f6092c1970c9401f6af6814b2dddb8df6
e59836f2a9790450689a26fef57042bf0e03d07e
refs/heads/master
2022-06-27T05:25:01.062108
2020-03-24T13:41:22
2020-03-24T13:41:22
248,129,484
0
0
null
2020-03-18T03:27:17
2020-03-25T22:24:42
2022-05-25T03:18:38
Python
[ { "alpha_fraction": 0.6783784031867981, "alphanum_fraction": 0.6783784031867981, "avg_line_length": 20.764705657958984, "blob_id": "0af52fdde35223f8f2ed9fc15e342e931e512a13", "content_id": "07b7ce7a91f2a5775861c690c1c8cbd26a00d25e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 370, "license_type": "no_license", "max_line_length": 52, "num_lines": 17, "path": "/app/models/category.py", "repo_name": "FunkyC0ders/answery-api", "src_encoding": "UTF-8", "text": "from flask_mongoengine import Document\nfrom . import db\nfrom .general import Translation\n\n\nclass Category(Document):\n meta = {\"collection\": \"category\"}\n\n name = db.EmbeddedDocumentListField(Translation)\n\n @classmethod\n def find_by_id(cls, _id):\n return cls.objects(id=_id).first()\n\n @classmethod\n def find_all(cls):\n return cls.objects()\n" }, { "alpha_fraction": 0.6706587076187134, "alphanum_fraction": 0.6716566681861877, "avg_line_length": 24.69230842590332, "blob_id": "1168e38008a39544e8dc9021395d7db39e510d96", "content_id": "d5264ffc42751210419769bab9eb8d3d3710b3ed", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1002, "license_type": "no_license", "max_line_length": 92, "num_lines": 39, "path": "/app/models/location.py", "repo_name": "FunkyC0ders/answery-api", "src_encoding": "UTF-8", "text": "from flask_mongoengine import Document\nfrom . 
import db\nfrom .general import Translation\n\n\nclass Country(Document):\n meta = {\"collection\": \"countries\"}\n\n name = db.EmbeddedDocumentListField(Translation, required=True, unique=True)\n\n @classmethod\n def find_by_id(cls, _id):\n return cls.objects(id=_id).first()\n\n @classmethod\n def find_all(cls):\n return cls.objects()\n\n\nclass City(Document):\n meta = {\"collection\": \"cities\"}\n\n name = db.EmbeddedDocumentListField(Translation, required=True, unique_with=[\"country\"])\n country = db.ReferenceField(Country, required=True, reverse_delete_rule=2)\n\n def to_location(self):\n return dict(id=self.id, city=self.name, country=self.country.name)\n\n @classmethod\n def find_by_id(cls, _id):\n return cls.objects(id=_id).first()\n\n @classmethod\n def find_all(cls):\n return cls.objects()\n\n @classmethod\n def find_by_country(cls, country_id):\n return cls.objects(country=country_id)\n" }, { "alpha_fraction": 0.6983655095100403, "alphanum_fraction": 0.6983655095100403, "avg_line_length": 29.590909957885742, "blob_id": "a0ebcc6799d97fc4eb1fb9932d7b5830e896c86b", "content_id": "23ddc94a05bcdae75403bbe4f9196bd3aa111919", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 673, "license_type": "no_license", "max_line_length": 76, "num_lines": 22, "path": "/app/models/user.py", "repo_name": "FunkyC0ders/answery-api", "src_encoding": "UTF-8", "text": "from . 
import db\nfrom flask_mongoengine import Document\nfrom datetime import datetime\n\n\nclass User(Document):\n meta = {\"collection\": \"users\", \"allow_inheritance\": True}\n\n name = db.StringField(required=True)\n email = db.EmailField(required=True, unique=True)\n password = db.StringField(required=True)\n avatar = db.StringField()\n creation_date = db.DateTimeField(required=True, default=datetime.utcnow)\n verified = db.BooleanField(required=True, default=False)\n\n @classmethod\n def find_by_id(cls, _id):\n return cls.objects(id=_id).first()\n\n @classmethod\n def find_by_email(cls, email):\n return cls.objects(email=email).first()\n" }, { "alpha_fraction": 0.734778106212616, "alphanum_fraction": 0.7358101010322571, "avg_line_length": 33.60714340209961, "blob_id": "dec3c606358f44e45ba27d675a525f6d8492525a", "content_id": "ff0544836791b4cd70f34d6631bfc5fca4415af6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 969, "license_type": "no_license", "max_line_length": 80, "num_lines": 28, "path": "/app/models/question.py", "repo_name": "FunkyC0ders/answery-api", "src_encoding": "UTF-8", "text": "from . 
import db\nfrom flask_mongoengine import Document\nfrom datetime import datetime\nfrom .user import User\nfrom .answer import Answer\nfrom .general import Reaction\nfrom .location import City\nfrom .category import Category\n\n\nclass Question(Document):\n meta = {\"collection\": \"questions\"}\n\n created_by = db.ReferenceField(User, required=True)\n title = db.StringField(required=True)\n content = db.StringField()\n approved = db.BooleanField(required=True, default=False)\n creation_date = db.DateTimeField(required=True, default=datetime.utcnow)\n view_count = db.IntField()\n reactions = db.EmbeddedDocumentListField(Reaction)\n images = db.ListField(db.StringField())\n category = db.ReferenceField(Category, required=True, reverse_delete_rule=1)\n location = db.ReferenceField(City, required=True)\n answers = db.ListField(db.ReferenceField(Answer))\n\n @classmethod\n def find_by_id(cls, _id):\n return cls.objects(id=_id).first()\n" }, { "alpha_fraction": 0.6152416467666626, "alphanum_fraction": 0.6263940334320068, "avg_line_length": 25.633663177490234, "blob_id": "cbffb6ffe1e4d96aa8d6b96d45138780b437e00e", "content_id": "144c1849ad1da06b3f9fc945266c39c54d1a5994", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2690, "license_type": "no_license", "max_line_length": 90, "num_lines": 101, "path": "/app/web.py", "repo_name": "FunkyC0ders/answery-api", "src_encoding": "UTF-8", "text": "from flask import Blueprint, send_file, url_for, request, abort, redirect\nfrom flask_jwt_extended import jwt_required, get_jwt_identity\nfrom app.models.user import User\nfrom app.models.question import Question\nimport os\nfrom secrets import token_urlsafe\n\n\nweb = Blueprint(\"web\", __name__)\n\nROOT_PATH = os.path.dirname(__file__)\nALLOWED_FORMATS = [\"jpeg\", \"jpg\", \"png\"]\n\n\n@web.route(\"/avatar/<string:file_name>\", methods=[\"GET\"])\ndef avatar(file_name):\n file_path = ROOT_PATH + url_for(\"static\", 
filename=\"avatar/{}\".format(file_name))\n\n return send_file(file_path)\n\n\n@web.route(\"/avatar\", methods=[\"POST\"])\n@jwt_required\ndef upload_avatar():\n if 'avatar' not in request.files:\n print(\"'avatar' key not found\")\n abort(404) # Not Found\n\n file = request.files['avatar']\n if file.filename == '':\n abort(400) # Bad Request\n\n current_user = get_jwt_identity()\n user = User.find_by_id(current_user[\"id\"])\n\n if not user:\n abort(404) # Not Found\n\n file_name = file.filename\n _, ext = file_name.rsplit('.', 1)\n \n if ext.lower() not in ALLOWED_FORMATS:\n abort(406) # Not Acceptable\n\n file_name = \"{}.{}\".format(token_urlsafe(16), ext)\n path = ROOT_PATH + url_for(\"static\", filename=\"avatar/{}\".format(file_name))\n\n file.save(path)\n\n if user.avatar:\n old_path = ROOT_PATH + url_for(\"static\", filename=\"avatar/{}\".format(user.avatar))\n os.remove(old_path)\n\n user.avatar = file_name\n user.save()\n\n return redirect(url_for(\"web.avatar\", file_name=file_name))\n\n\n@web.route(\"/img/<string:file_name>\", methods=[\"GET\"])\ndef img(file_name):\n file_path = ROOT_PATH + url_for(\"static\", filename=\"img/{}\".format(file_name))\n\n return send_file(file_path)\n\n\n@web.route(\"/img/<string:question_id>\", methods=[\"POST\"])\n@jwt_required\ndef upload_img(question_id):\n if 'img' not in request.files:\n print(\"'img' key not found\")\n abort(404) # Not Found\n\n file = request.files['img']\n if file.filename == '':\n abort(400) # Bad Request\n\n question = Question.find_by_id(question_id)\n if not question:\n print(\"Question not found\")\n abort(404) # Not Found\n\n file_name = file.filename\n _, ext = file_name.rsplit('.', 1)\n\n if ext.lower() not in ALLOWED_FORMATS:\n abort(406) # Not Acceptable\n\n file_name = \"{}.{}\".format(token_urlsafe(16), ext)\n path = ROOT_PATH + url_for(\"static\", filename=\"img/{}\".format(file_name))\n\n file.save(path)\n\n if question.images:\n question.images.append(file_name)\n else:\n 
question.images = [file_name]\n\n question.save()\n\n return redirect(url_for(\"web.img\", file_name=file_name))\n" }, { "alpha_fraction": 0.6128771901130676, "alphanum_fraction": 0.6135226488113403, "avg_line_length": 26.54222297668457, "blob_id": "6bc3b673989168bf6fcd944ce4a96a645b626412", "content_id": "f48811e850bb68dc4895ef3a1eee03d8e0d9ded2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6197, "license_type": "no_license", "max_line_length": 118, "num_lines": 225, "path": "/app/api/answer.py", "repo_name": "FunkyC0ders/answery-api", "src_encoding": "UTF-8", "text": "from graphene import ObjectType, Mutation, InputObjectType, Interface, String, ID, Boolean, Int, DateTime, Field, List\nfrom graphql import GraphQLError\nfrom flask_jwt_extended import jwt_required, get_jwt_identity\nfrom .user import User\nfrom .reaction import Reaction, ReactionInput\nfrom app.models.user import User as UserModel\nfrom app.models.question import Question as QuestionModel\nfrom app.models.answer import Answer as AnswerModel, Reply as ReplyModel\nfrom app.models.general import Reaction as ReactionModel\nimport json\n\n\nclass CommonAttributes(object):\n content = String(required=True)\n\n\nclass ReplyInterface(CommonAttributes, Interface):\n id = ID(required=True)\n created_by = Field(User, required=True)\n creation_date = DateTime(required=True)\n reactions = List(Reaction, required=True)\n\n\nclass Reply(ObjectType):\n class Meta:\n name = \"Reply\"\n description = \"...\"\n interfaces = (ReplyInterface,)\n\n\nclass Answer(ObjectType):\n class Meta:\n name = \"Answer\"\n description = \"...\"\n interfaces = (ReplyInterface,)\n\n replies = List(Reply)\n\n\nclass NewReply(CommonAttributes, InputObjectType):\n pass\n\n\nclass NewAnswer(CommonAttributes, InputObjectType):\n pass\n\n\nclass ReplyToAnswer(Mutation):\n class Meta:\n name = \"ReplyToAnswer\"\n description = \"...\"\n\n class Arguments:\n answer_id = 
ID(required=True)\n reply_data = NewReply(required=True)\n\n ok = Boolean(required=True)\n reply = Field(lambda: Reply, required=True)\n\n @staticmethod\n @jwt_required\n def mutate(root, info, answer_id, reply_data):\n errors = {}\n\n current_user = get_jwt_identity()\n user = UserModel.find_by_id(current_user[\"id\"])\n if not user:\n errors[\"user\"] = \"not found\"\n\n answer = AnswerModel.find_by_id(answer_id)\n if not answer:\n errors[\"answer\"] = \"not found\"\n\n if errors:\n raise GraphQLError(json.dumps(errors))\n\n reply = ReplyModel(created_by=user, **reply_data)\n reply.save()\n\n if answer.replies:\n answer.replies.append(reply)\n else:\n answer.replies = [reply]\n\n answer.save()\n\n return ReplyToAnswer(reply=reply, ok=True)\n\n\nclass AnswerQuestion(Mutation):\n class Meta:\n name = \"AnswerQuestion\"\n description = \"...\"\n\n class Arguments:\n question_id = ID(required=True)\n answer_data = NewAnswer(required=True)\n\n ok = Boolean(required=True)\n answer = Field(lambda: Answer, required=True)\n\n @staticmethod\n @jwt_required\n def mutate(root, info, question_id, answer_data):\n errors = {}\n\n current_user = get_jwt_identity()\n user = UserModel.find_by_id(current_user[\"id\"])\n if not user:\n errors[\"user\"] = \"not found\"\n\n question = QuestionModel.find_by_id(question_id)\n if not question:\n errors[\"question\"] = \"not found\"\n\n if errors:\n raise GraphQLError(json.dumps(errors))\n\n answer = AnswerModel(created_by=user, **answer_data)\n answer.save()\n\n if question.answers:\n question.answers.append(answer)\n else:\n question.answers = [answer]\n\n question.save()\n\n return AnswerQuestion(answer=answer, ok=True)\n\n\nclass ReactToReply(Mutation):\n class Meta:\n name = \"ReactToReply\"\n description = \"...\"\n\n class Arguments:\n reply_id = ID(required=True)\n reaction_data = ReactionInput(required=True)\n\n ok = Boolean(required=True)\n reply = Field(lambda: Reply, required=True)\n reactions = List(lambda: Reaction, 
required=True)\n\n @staticmethod\n @jwt_required\n def mutate(root, info, reply_id, reaction_data):\n errors = {}\n\n current_user = get_jwt_identity()\n user = UserModel.find_by_id(current_user[\"id\"])\n if not user:\n errors[\"user\"] = \"not found\"\n\n reply = ReplyModel.find_by_id(reply_id)\n if not reply:\n errors[\"reply\"] = \"not found\"\n\n if errors:\n raise GraphQLError(json.dumps(errors))\n\n reaction = ReactionModel(user=user, **reaction_data)\n\n if not reply.reactions:\n reply.reactions = [reaction]\n\n else:\n old_reaction = list(filter(lambda r: r.user == reaction.user, reply.reactions))\n if old_reaction:\n if old_reaction[0].reaction != reaction.reaction:\n reply.reactions.remove(old_reaction[0])\n reply.reactions.append(reaction)\n else:\n reply.reactions.append(reaction)\n\n reply.save()\n return ReactToReply(reply=reply, reactions=reply.reactions, ok=True)\n\n\nclass ReactToAnswer(Mutation):\n class Meta:\n name = \"ReactToAnswer\"\n description = \"...\"\n\n class Arguments:\n answer_id = ID(required=True)\n reaction_data = ReactionInput(required=True)\n\n ok = Boolean(required=True)\n answer = Field(lambda: Answer, required=True)\n reactions = List(lambda: Reaction, required=True)\n\n @staticmethod\n @jwt_required\n def mutate(root, info, answer_id, reaction_data):\n errors = {}\n\n current_user = get_jwt_identity()\n user = UserModel.find_by_id(current_user[\"id\"])\n if not user:\n errors[\"user\"] = \"not found\"\n\n answer = AnswerModel.find_by_id(answer_id)\n if not answer:\n errors[\"answer\"] = \"not found\"\n\n if errors:\n raise GraphQLError(json.dumps(errors))\n\n reaction = ReactionModel(user=user, **reaction_data)\n\n if not answer.reactions:\n answer.reactions = [reaction]\n\n else:\n old_reaction = list(filter(lambda r: r.user == reaction.user, answer.reactions))\n if old_reaction:\n if old_reaction[0].reaction != reaction.reaction:\n answer.reactions.remove(old_reaction[0])\n answer.reactions.append(reaction)\n else:\n 
answer.reactions.append(reaction)\n\n answer.save()\n return ReactToAnswer(answer=answer, reactions=answer.reactions, ok=True)\n" }, { "alpha_fraction": 0.6889312863349915, "alphanum_fraction": 0.6940203309059143, "avg_line_length": 26.10344886779785, "blob_id": "4cd6930db7049f2975a14071cfb49a199d3a64e9", "content_id": "c8916792107aa4bbe4e38d10d557371d8b1e7cc3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1572, "license_type": "no_license", "max_line_length": 103, "num_lines": 58, "path": "/app/api/auth.py", "repo_name": "FunkyC0ders/answery-api", "src_encoding": "UTF-8", "text": "from app import jwt\nfrom datetime import timedelta\nfrom graphene import ObjectType, ID\nfrom flask_jwt_extended import create_access_token, create_refresh_token\n\n\nblacklist = set()\n\n\nclass Token(ObjectType):\n class Meta:\n name = \"Token\"\n description = \"...\"\n\n access = ID(required=True)\n refresh = ID() # required=True)\n\n\n@jwt.token_in_blacklist_loader\ndef blacklist_lookup(token):\n jti = token[\"jti\"]\n return jti in blacklist\n\n\n@jwt.user_claims_loader\ndef add_claims_to_access_token(user):\n claims = {\"verified\": user.verified,\n \"state\": \"development\"}\n\n return claims\n\n\n@jwt.user_identity_loader\ndef user_identity_lookup(user):\n return {\"id\": str(user.id),\n \"email\": user.email,\n \"name\": user.name}\n\n\ndef create_tokens(user, remember_me=False):\n access_token = create_access_token(identity=user, expires_delta=timedelta(minutes=10), fresh=False)\n if remember_me:\n refresh_token = create_refresh_token(identity=user, expires_delta=timedelta(days=30))\n else:\n refresh_token = create_refresh_token(identity=user, expires_delta=timedelta(days=1))\n\n return Token(access=access_token, refresh=refresh_token)\n\n\ndef create_fresh_token(user):\n access_token = create_access_token(identity=user, expires_delta=timedelta(minutes=1), fresh=True)\n return Token(access=access_token)\n\n\ndef 
refresh_access_token(user):\n print(\"Refreshing...\")\n new_token = create_access_token(identity=user, expires_delta=timedelta(minutes=10), fresh=False)\n return Token(access=new_token)\n" }, { "alpha_fraction": 0.6600660085678101, "alphanum_fraction": 0.6600660085678101, "avg_line_length": 23.567567825317383, "blob_id": "9e156f339a2e59e4cd4c9b830b80313d9d64a0a1", "content_id": "90337bc5ca7da1154821e70ac073446925d3f2c9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2727, "license_type": "no_license", "max_line_length": 118, "num_lines": 111, "path": "/app/api/location.py", "repo_name": "FunkyC0ders/answery-api", "src_encoding": "UTF-8", "text": "from graphene import ObjectType, Mutation, InputObjectType, Interface, String, ID, Boolean, Int, DateTime, Field, List\nfrom graphql import GraphQLError\nfrom flask_jwt_extended import jwt_required\nfrom .translation import Translation, TranslationInput\nfrom app.models.location import City as CityModel, Country as CountryModel\nimport json\n\n\nclass CommonAttributes(object):\n pass\n\n\nclass CountryInterface(Interface):\n id = ID(required=True)\n name = List(Translation, required=True)\n\n\nclass Country(ObjectType):\n class Meta:\n name = \"Country\"\n description = \"...\"\n interfaces = (CountryInterface,)\n\n\nclass CityInterface(Interface):\n id = ID(required=True)\n name = List(Translation, required=True)\n country = Field(Country, required=True)\n\n\nclass City(ObjectType):\n class Meta:\n name = \"City\"\n description = \"...\"\n interfaces = (CityInterface,)\n\n\nclass LocationInterface(CommonAttributes, Interface):\n id = ID(required=True)\n country = List(Translation, required=True)\n city = List(Translation, required=True)\n\n\nclass Location(ObjectType):\n class Meta:\n name = \"Location\"\n description = \"...\"\n interfaces = (LocationInterface,)\n\n\nclass NewCountry(CommonAttributes, InputObjectType):\n name = List(TranslationInput, 
required=True)\n\n\nclass AddCountry(Mutation):\n class Meta:\n name = \"AddCountry\"\n description = \"...\"\n\n class Arguments:\n country_data = NewCountry(required=True)\n\n ok = Boolean(required=True)\n country = Field(lambda: Country, required=True)\n\n @staticmethod\n @jwt_required\n def mutate(root, info, country_data):\n errors = {}\n\n country = CountryModel(**country_data)\n country.save()\n\n return AddCountry(country=country, ok=True)\n\n\nclass NewCity(CommonAttributes, InputObjectType):\n name = List(TranslationInput, required=True)\n country_id = ID(required=True)\n\n\nclass AddCity(Mutation):\n class Meta:\n name = \"AddCity\"\n description = \"...\"\n\n class Arguments:\n city_data = NewCity(required=True)\n\n ok = Boolean(required=True)\n city = Field(lambda: City, required=True)\n location = Field(lambda: Location, required=True)\n\n @staticmethod\n @jwt_required\n def mutate(root, info, city_data):\n errors = {}\n\n country = CountryModel.find_by_id(city_data[\"country_id\"])\n if not country:\n errors[\"country\"] = \"not found\"\n\n if errors:\n raise GraphQLError(json.dumps(errors))\n\n del city_data[\"country_id\"]\n\n city = CityModel(country=country, **city_data)\n city.save()\n\n return AddCity(city=city, location=city.to_location(), ok=True)\n" }, { "alpha_fraction": 0.4975247383117676, "alphanum_fraction": 0.6930692791938782, "avg_line_length": 16.565217971801758, "blob_id": "1a0402b228746e0a54febf42a96fbb8815b128ff", "content_id": "34af148a894153f80745e3d1e4b64628816d9a6b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 404, "license_type": "no_license", "max_line_length": 26, "num_lines": 23, "path": "/requirements.txt", "repo_name": "FunkyC0ders/answery-api", "src_encoding": "UTF-8", "text": 
"aniso8601==7.0.0\nclick==7.1.1\nFlask==1.1.1\nFlask-Cors==3.0.8\nFlask-GraphQL==2.0.1\nFlask-JWT-Extended==3.24.1\nflask-mongoengine==0.9.5\nFlask-WTF==0.14.3\ngraphene==2.1.8\ngraphql-core==2.3.1\ngraphql-relay==2.0.1\ngraphql-server-core==1.2.0\nitsdangerous==1.1.0\nJinja2==2.11.1\nMarkupSafe==1.1.1\nmongoengine==0.19.1\npromise==2.3\nPyJWT==1.7.1\npymongo==3.10.1\nRx==1.6.1\nsix==1.14.0\nWerkzeug==1.0.0\nWTForms==2.2.1\n" }, { "alpha_fraction": 0.7290322780609131, "alphanum_fraction": 0.7290322780609131, "avg_line_length": 27.18181800842285, "blob_id": "78d6eb1e5ee2573c495397e521b03626e3f72c08", "content_id": "dad5385081291d73dbfed1fa6e8cd8ca87e1cf2f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 310, "license_type": "no_license", "max_line_length": 73, "num_lines": 11, "path": "/app/models/general.py", "repo_name": "FunkyC0ders/answery-api", "src_encoding": "UTF-8", "text": "from . import db\n\n\nclass Translation(db.EmbeddedDocument):\n language = db.StringField(required=True)\n text = db.StringField(required=True)\n\n\nclass Reaction(db.EmbeddedDocument):\n user = db.ReferenceField(\"User\", required=True)\n reaction = db.StringField(required=True, choices=[\"like\", \"dislike\"])\n" }, { "alpha_fraction": 0.7195253372192383, "alphanum_fraction": 0.7238403558731079, "avg_line_length": 20.06818199157715, "blob_id": "c9f330d5c5824ade9b8945bdc2ce7e6eb12ca2c6", "content_id": "4ba0dd9a1e7dae257b0f826fff601e0fdbca7298", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 927, "license_type": "no_license", "max_line_length": 92, "num_lines": 44, "path": "/app/__init__.py", "repo_name": "FunkyC0ders/answery-api", "src_encoding": "UTF-8", "text": "# App\nfrom flask import Flask\n\napp = Flask(__name__)\n\napp.config[\"HOST\"] = \"localhost\"\napp.config[\"PORT\"] = 5000\napp.config[\"DEBUG\"] = True\n\n# Cross-Origin Resource Sharing\nfrom flask_cors 
import CORS\n\nCORS(app)\n\n\n# JWT\nfrom flask_jwt_extended import JWTManager\n\napp.config['JWT_SECRET_KEY'] = 'g07USLZd3WzytP-62w8CmVAJaXtW7v0galtJsH4UVgY'\napp.config['JWT_BLACKLIST_ENABLED'] = True\napp.config['JWT_BLACKLIST_TOKEN_CHECKS'] = ['access', 'refresh']\njwt = JWTManager(app)\n\n\n# Database\nfrom .models import db, DB_HOST, DB_NAME, DB_PORT\n\napp.config[\"MONGODB_DB\"] = DB_NAME\napp.config[\"MONGODB_HOST\"] = DB_HOST\napp.config[\"MONGODB_PORT\"] = DB_PORT\ndb.init_app(app)\n\n# GraphQL\nfrom flask_graphql import GraphQLView\nfrom app.api import schema\nfrom app.api.auth import user_identity_lookup, add_claims_to_access_token\n\napp.add_url_rule('/api', view_func=GraphQLView.as_view('api', schema=schema, graphiql=True))\n\n\n# URL\nfrom app.web import web\n\napp.register_blueprint(web)\n" }, { "alpha_fraction": 0.688524603843689, "alphanum_fraction": 0.7295082211494446, "avg_line_length": 14.25, "blob_id": "8d9e19027d12d92c26418cea0fa2c0ea1ecd27c9", "content_id": "7e04f1cfc27998d144408eb3a4b124d382c38959", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 122, "license_type": "no_license", "max_line_length": 41, "num_lines": 8, "path": "/app/models/__init__.py", "repo_name": "FunkyC0ders/answery-api", "src_encoding": "UTF-8", "text": "from flask_mongoengine import MongoEngine\n\n\ndb = MongoEngine()\n\nDB_HOST = \"localhost\"\nDB_PORT = 27017\nDB_NAME = \"answery\"\n" }, { "alpha_fraction": 0.6436781883239746, "alphanum_fraction": 0.6440489292144775, "avg_line_length": 29.134078979492188, "blob_id": "5a5aab2af512ec27470e08493da7a4d89f2cee52", "content_id": "c4c3114fe0c71a01bb3ba2b50218d07c1848d477", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5394, "license_type": "no_license", "max_line_length": 118, "num_lines": 179, "path": "/app/api/question.py", "repo_name": "FunkyC0ders/answery-api", "src_encoding": "UTF-8", "text": "from 
graphene import ObjectType, Mutation, InputObjectType, Interface, String, ID, Boolean, Int, DateTime, Field, List\nfrom graphql import GraphQLError\nfrom flask import url_for\nfrom flask_jwt_extended import jwt_required, get_jwt_identity\nfrom .user import User\nfrom .location import Location\nfrom .reaction import Reaction, ReactionInput\nfrom .answer import Answer\nfrom .category import Category\nfrom app.models.question import Question as QuestionModel\nfrom app.models.user import User as UserModel\nfrom app.models.category import Category as CategoryModel\nfrom app.models.location import City as CityModel\nfrom app.models.general import Reaction as ReactionModel\nfrom app.web import ROOT_PATH\nimport json\nimport os\n\n\nclass CommonAttributes(object):\n title = String(required=True)\n content = String()\n images = List(String)\n\n\nclass QuestionInterface(CommonAttributes, Interface):\n id = ID(required=True)\n created_by = Field(User, required=True)\n approved = Boolean(required=True)\n creation_date = DateTime(required=True)\n view_count = Int(required=True)\n location = Field(Location, required=True)\n category = Field(Category, required=True)\n reactions = List(Reaction)\n answers = List(Answer)\n\n\nclass Question(ObjectType):\n class Meta:\n name = \"Question\"\n description = \"...\"\n interfaces = (QuestionInterface,)\n\n\nclass NewQuestion(CommonAttributes, InputObjectType):\n location_id = ID(required=True)\n category_id = ID(required=True)\n\n\nclass CreateQuestion(Mutation):\n class Meta:\n name = \"CreateQuestion\"\n description = \"...\"\n\n class Arguments:\n question_data = NewQuestion(required=True)\n\n ok = Boolean(required=True)\n question = Field(lambda: Question, required=True)\n\n @staticmethod\n @jwt_required\n def mutate(root, info, question_data):\n errors = {}\n\n current_user = get_jwt_identity()\n user = UserModel.find_by_id(current_user[\"id\"])\n if not user:\n errors[\"user\"] = \"not found\"\n\n category = 
CategoryModel.find_by_id(question_data[\"category_id\"])\n if not category:\n errors[\"category\"] = \"not found\"\n\n location = CityModel.find_by_id(question_data[\"location_id\"])\n if not location:\n errors[\"location\"] = \"not found\"\n\n if errors:\n raise GraphQLError(json.dumps(errors))\n\n del question_data[\"category_id\"]\n del question_data[\"location_id\"]\n question = QuestionModel(created_by=user, category=category, location=location, **question_data)\n question.save()\n\n return CreateQuestion(question=question, ok=True)\n\n\nclass DeleteImg(Mutation):\n class Meta:\n name = \"DeleteImg\"\n description = \"...\"\n\n class Arguments:\n question_id = ID(required=True)\n file_name = String(required=True)\n\n ok = Boolean(required=True)\n question = Field(lambda: Question, required=True)\n\n @staticmethod\n @jwt_required\n def mutate(root, info, question_id, file_name):\n errors = {}\n\n current_user = get_jwt_identity()\n user = UserModel.find_by_id(current_user[\"id\"])\n if not user:\n errors[\"user\"] = \"not found\"\n\n question = QuestionModel.find_by_id(question_id)\n if not question:\n errors[\"question\"] = \"not found\"\n\n if user != question.created_by:\n errors[\"user\"] = \"do not have permission to edit this question\"\n\n if file_name not in question.images:\n errors[\"question\"] = \"does not have an image called {}\".format(file_name)\n\n if errors:\n raise GraphQLError(json.dumps(errors))\n\n file_path = ROOT_PATH + url_for(\"static\", filename=\"img/{}\".format(file_name))\n os.remove(file_path)\n\n question.images.remove(file_name)\n question.save()\n\n return DeleteImg(question=question, ok=True)\n\n\nclass ReactToQuestion(Mutation):\n class Meta:\n name = \"AddReaction\"\n description = \"...\"\n\n class Arguments:\n question_id = ID(required=True)\n reaction_data = ReactionInput(required=True)\n\n ok = Boolean(required=True)\n question = Field(lambda: Question, required=True)\n reactions = List(lambda: Reaction, 
required=True)\n\n @staticmethod\n @jwt_required\n def mutate(root, info, question_id, reaction_data):\n errors = {}\n\n current_user = get_jwt_identity()\n user = UserModel.find_by_id(current_user[\"id\"])\n if not user:\n errors[\"user\"] = \"not found\"\n\n question = QuestionModel.find_by_id(question_id)\n if not question:\n errors[\"question\"] = \"not found\"\n\n if errors:\n raise GraphQLError(json.dumps(errors))\n\n reaction = ReactionModel(user=user, **reaction_data)\n\n if not question.reactions:\n question.reactions = [reaction]\n\n else:\n old_reaction = list(filter(lambda r: r.user == reaction.user, question.reactions))\n if old_reaction:\n if old_reaction[0].reaction != reaction.reaction:\n question.reactions.remove(old_reaction[0])\n question.reactions.append(reaction)\n else:\n question.reactions.append(reaction)\n\n question.save()\n return ReactToQuestion(question=question, reactions=question.reactions, ok=True)\n" }, { "alpha_fraction": 0.6084787845611572, "alphanum_fraction": 0.6084787845611572, "avg_line_length": 24.0625, "blob_id": "6152bb80fc1dab5e6225c4af0c9889ee83a45d65", "content_id": "0f490fc66d868340c4c00b939081397af61413aa", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 802, "license_type": "no_license", "max_line_length": 46, "num_lines": 32, "path": "/README.md", "repo_name": "FunkyC0ders/answery-api", "src_encoding": "UTF-8", "text": "# answery-api\n- __Authentication__\n * [x] Signup\n * [x] Sign in\n * [x] Refresh access token\n * [x] Sign out\n- __User Related__\n * [x] Upload avatar\n * [x] Edit profile\n * [x] Delete avatar\n * [ ] Delete account\n- __Questions Related__\n * [x] Create question\n * [x] Upload images\n * [x] Delete images\n * [x] Like/Dislike a question\n * [ ] Undo like/dislike a question\n- __Answer Related__\n * [x] Answer a question\n * [x] Reply to an answer\n * [x] Like/Dislike a answer\n * [ ] Undo like/dislike a answer\n * [x] Like/Dislike a 
reply\n * [ ] Undo like/dislike a reply\n- __Filter Related__\n * [x] Add new category\n * [x] List all categories\n * [x] Add new country\n * [x] List all countries\n * [x] Add new city\n * [x] List all `cities` in a given `country`\n * [x] List all locations\n" }, { "alpha_fraction": 0.7552238702774048, "alphanum_fraction": 0.7552238702774048, "avg_line_length": 26.91666603088379, "blob_id": "f6c32f3c7fc71280ff012b7aa1d2a5f12bceebbd", "content_id": "09e8b85353743fd683698be2b814fdb951cc4faa", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 670, "license_type": "no_license", "max_line_length": 118, "num_lines": 24, "path": "/app/api/translation.py", "repo_name": "FunkyC0ders/answery-api", "src_encoding": "UTF-8", "text": "from graphene import ObjectType, Mutation, InputObjectType, Interface, String, ID, Boolean, Int, DateTime, Field, List\nfrom graphql import GraphQLError\nfrom flask_jwt_extended import jwt_required, get_jwt_identity\nfrom app.models.general import Translation as TranslationModel\n\n\nclass CommonAttributes(object):\n language = String(required=True)\n text = String(required=True)\n\n\nclass TranslationInterface(CommonAttributes, Interface):\n pass\n\n\nclass Translation(ObjectType):\n class Meta:\n name = \"Translation\"\n description = \"...\"\n interfaces = (TranslationInterface,)\n\n\nclass TranslationInput(CommonAttributes, InputObjectType):\n pass\n" }, { "alpha_fraction": 0.619439423084259, "alphanum_fraction": 0.6202108263969421, "avg_line_length": 23.770700454711914, "blob_id": "fc0c2a61a77c13a1ef55ef2882ffa6e90c18648c", "content_id": "ca0d283ea97310c2e81d397c7b1e1523d6ca14cb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3889, "license_type": "no_license", "max_line_length": 103, "num_lines": 157, "path": "/app/api/user.py", "repo_name": "FunkyC0ders/answery-api", "src_encoding": "UTF-8", "text": "from graphene import 
ObjectType, Mutation, InputObjectType, Interface, String, Boolean, Field, DateTime\nfrom graphql import GraphQLError\nfrom flask import url_for\nfrom flask_jwt_extended import jwt_required, get_jwt_identity\nfrom werkzeug.security import generate_password_hash\nfrom .auth import Token, create_tokens\nfrom app.models.user import User as UserModel\nfrom app.web import ROOT_PATH\nimport json\nimport os\n\n\nclass CommonAttributes(object):\n name = String(required=True)\n email = String(required=True)\n # avatar = String()\n\n\nclass UserInterface(CommonAttributes, Interface):\n creation_date = DateTime(required=True)\n verified = Boolean(required=True)\n avatar = String()\n\n\nclass User(ObjectType):\n class Meta:\n name = \"User\"\n description = \"...\"\n interfaces = (UserInterface,)\n\n\nclass SignIn(ObjectType):\n class Meta:\n name = \"SignIn\"\n description = \"...\"\n\n user = Field(User, required=True)\n token = Field(Token, required=True)\n\n\nclass SignInInput(InputObjectType):\n email = String(required=True)\n password = String(required=True)\n remember_my = Boolean()\n\n\nclass NewUser(CommonAttributes, InputObjectType):\n password = String(required=True)\n\n\nclass EditUser(InputObjectType):\n name = String()\n email = String()\n # avatar = String()\n\n\nclass Signup(Mutation):\n class Meta:\n name = \"Signup\"\n description = \"...\"\n\n class Arguments:\n user_data = NewUser(required=True)\n\n user = Field(lambda: User, required=True)\n token = Field(lambda: Token, required=True)\n\n @staticmethod\n def mutate(root, info, user_data):\n errors = {}\n\n email_check = UserModel.find_by_email(user_data.email)\n if email_check:\n errors[\"email\"] = \"This email already exists.\"\n\n if errors:\n raise GraphQLError(json.dumps(errors))\n\n user = UserModel(**user_data)\n user.password = generate_password_hash(user_data.password, method=\"sha256\")\n\n user.save()\n return Signup(user=user, token=create_tokens(user))\n\n\nclass UpdateUser(Mutation):\n class 
Meta:\n name = \"UpdateUser\"\n description = \"...\"\n\n class Arguments:\n user_data = EditUser(required=True)\n\n ok = Boolean(required=True)\n user = Field(lambda: User, required=True)\n\n @staticmethod\n @jwt_required\n def mutate(root, info, user_data):\n errors = {}\n\n current_user = get_jwt_identity()\n user = UserModel.find_by_id(current_user[\"id\"])\n if not user:\n errors[\"user\"] = \"not found\"\n\n if \"email\" in user_data.keys():\n print(user_data[\"email\"])\n email_check = UserModel.find_by_email(user_data[\"email\"])\n if email_check and email_check != user:\n errors[\"email\"] = \"already exists\"\n\n if errors:\n raise GraphQLError(json.dumps(errors))\n\n for key in user_data.keys():\n setattr(user, key, user_data[key])\n\n user.save()\n\n return UpdateUser(user=user, ok=True)\n\n\nclass DeleteAvatar(Mutation):\n class Meta:\n name = \"DeleteAvatar\"\n description = \"...\"\n\n class Arguments:\n pass\n\n ok = Boolean(required=True)\n user = Field(lambda: User, required=True)\n\n @staticmethod\n @jwt_required\n def mutate(root, info):\n errors = {}\n\n current_user = get_jwt_identity()\n user = UserModel.find_by_id(current_user[\"id\"])\n if not user:\n errors[\"user\"] = \"not found\"\n\n if not user.avatar:\n errors[\"avatar\"] = \"not found\"\n\n if errors:\n raise GraphQLError(json.dumps(errors))\n\n file_path = ROOT_PATH + url_for(\"static\", filename=\"avatar/{}\".format(user.avatar))\n os.remove(file_path)\n\n user.avatar = None\n user.save()\n\n return DeleteAvatar(user=user, ok=True)\n" }, { "alpha_fraction": 0.7292993664741516, "alphanum_fraction": 0.7292993664741516, "avg_line_length": 27.545454025268555, "blob_id": "fcc9244b0cb33830ab25dcab7cebb8121eec9b06", "content_id": "d95768816cd76961f42f9022a7576d1b66c0bca2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 628, "license_type": "no_license", "max_line_length": 76, "num_lines": 22, "path": "/app/models/answer.py", 
"repo_name": "FunkyC0ders/answery-api", "src_encoding": "UTF-8", "text": "from . import db\nfrom flask_mongoengine import Document\nfrom datetime import datetime\nfrom .user import User\nfrom .general import Reaction\n\n\nclass Reply(Document):\n meta = {\"collection\": \"answers\", \"allow_inheritance\": True}\n\n created_by = db.ReferenceField(User, required=True)\n content = db.StringField(required=True)\n creation_date = db.DateTimeField(required=True, default=datetime.utcnow)\n reactions = db.EmbeddedDocumentListField(Reaction)\n\n @classmethod\n def find_by_id(cls, _id):\n return cls.objects(id=_id).first()\n\n\nclass Answer(Reply):\n replies = db.ListField(db.ReferenceField(Reply))\n" }, { "alpha_fraction": 0.724609375, "alphanum_fraction": 0.724609375, "avg_line_length": 23.380952835083008, "blob_id": "8ebd958e131ecf267f96dd64287171ec6668fd53", "content_id": "79c6e5160b4963a0a11d8888d9e7193e6b7d19e7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 512, "license_type": "no_license", "max_line_length": 114, "num_lines": 21, "path": "/app/api/reaction.py", "repo_name": "FunkyC0ders/answery-api", "src_encoding": "UTF-8", "text": "from graphene import ObjectType, Mutation, InputObjectType, Interface, String, Boolean, Int, DateTime, Field, List\nfrom .user import User\n\n\nclass CommonAttributes(object):\n reaction = String(required=True)\n\n\nclass ReactionInterface(CommonAttributes, Interface):\n user = Field(User, required=True)\n\n\nclass Reaction(ObjectType):\n class Meta:\n name = \"Reaction\"\n description = \"...\"\n interfaces = (ReactionInterface,)\n\n\nclass ReactionInput(CommonAttributes, InputObjectType):\n pass\n" }, { "alpha_fraction": 0.6942558288574219, "alphanum_fraction": 0.6942558288574219, "avg_line_length": 32.02424240112305, "blob_id": "b38f8ae54befb59845e1973be8df5a9db6db87c8", "content_id": "c87f3439af80d8588de4614b77a7f2b99e666cec", "detected_licenses": [], "is_generated": false, 
"is_vendor": false, "language": "Python", "length_bytes": 5449, "license_type": "no_license", "max_line_length": 120, "num_lines": 165, "path": "/app/api/__init__.py", "repo_name": "FunkyC0ders/answery-api", "src_encoding": "UTF-8", "text": "from graphene import Schema, ObjectType, Field, String, Boolean, ID, List\nfrom graphql import GraphQLError\nfrom werkzeug.security import check_password_hash\nfrom flask_jwt_extended import jwt_required, jwt_refresh_token_required, get_raw_jwt, get_jwt_identity\nfrom .user import SignIn, Signup, User as UserType, SignInInput, UpdateUser, DeleteAvatar\nfrom .auth import Token, create_tokens, blacklist, refresh_access_token\nfrom .question import CreateQuestion, Question as QuestionType, DeleteImg, ReactToQuestion\nfrom .answer import AnswerQuestion, Answer as AnswerType, ReplyToAnswer, Reply as ReplyType, ReactToAnswer, ReactToReply\nfrom .category import Category as CategoryType, AddCategory\nfrom .location import Location as LocationType, AddCountry, AddCity, Country as CountryType\nfrom app.models.user import User as UserModel\nfrom app.models.question import Question as QuestionModel\nfrom app.models.answer import Answer as AnswerModel, Reply as ReplyModel\nfrom app.models.category import Category as CategoryModel\nfrom app.models.location import City as CityModel, Country as CountryModel\nimport json\n\n\nclass QueryType(ObjectType):\n class Meta:\n name = \"Query\"\n description = \"...\"\n\n # User\n sign_in = Field(SignIn, sign_in_data=SignInInput(required=True), required=True)\n sign_out = Boolean(required=True)\n refresh = Field(Token, required=True)\n avatar = String()\n\n @staticmethod\n def resolve_sign_in(root, info, sign_in_data):\n user = UserModel.find_by_email(sign_in_data[\"email\"])\n\n if user and check_password_hash(user.password, sign_in_data[\"password\"]):\n return SignIn(user=user, token=create_tokens(user, sign_in_data.get(\"remember_me\", False)))\n\n raise GraphQLError(\"email or password were 
incorrect\")\n\n @staticmethod\n @jwt_refresh_token_required\n def resolve_sign_out(root, info):\n jti = get_raw_jwt()['jti']\n blacklist.add(jti)\n return True\n\n @staticmethod\n @jwt_refresh_token_required\n def resolve_refresh(root, info):\n current_user = get_jwt_identity()\n user = UserModel.find_by_id(current_user[\"id\"])\n return refresh_access_token(user)\n\n @staticmethod\n @jwt_required\n def resolve_avatar(root, info):\n current_user = get_jwt_identity()\n user = UserModel.find_by_id(current_user[\"id\"])\n return user.avatar\n\n # Question\n question = Field(QuestionType, _id=ID(required=True), required=True)\n\n @staticmethod\n @jwt_required\n def resolve_question(root, info, _id):\n question = QuestionModel.find_by_id(_id)\n return question\n\n # Answer\n answer = Field(AnswerType, _id=ID(required=True), required=True)\n\n @staticmethod\n @jwt_required\n def resolve_answer(root, info, _id):\n answer = AnswerModel.find_by_id(_id)\n return answer\n\n # Reply\n reply = Field(ReplyType, _id=ID(required=True), required=True)\n\n @staticmethod\n @jwt_required\n def resolve_reply(root, info, _id):\n reply = ReplyModel.find_by_id(_id)\n return reply\n\n # Category\n category = Field(CategoryType, _id=ID(required=True), required=True)\n category_list = List(CategoryType, required=True)\n\n @staticmethod\n @jwt_required\n def resolve_category(root, info, _id):\n category = CategoryModel.find_by_id(_id)\n return category\n\n @staticmethod\n @jwt_required\n def resolve_category_list(root, info):\n category_list = CategoryModel.find_all()\n return category_list\n\n # Location\n location = Field(LocationType, _id=ID(required=True), required=True)\n location_list = List(LocationType, required=True)\n country_list = List(CountryType, required=True)\n location_by_country = List(LocationType, country_id=ID(required=True), required=True)\n\n @staticmethod\n @jwt_required\n def resolve_location(root, info, _id):\n city = CityModel.find_by_id(_id)\n return 
city.to_location()\n\n @staticmethod\n @jwt_required\n def resolve_location_list(root, info):\n city_list = CityModel.find_all()\n return [city.to_location() for city in city_list]\n\n @staticmethod\n @jwt_required\n def resolve_country_list(root, info):\n country_list = CountryModel.find_all()\n return country_list\n\n @staticmethod\n @jwt_required\n def resolve_location_by_country(root, info, country_id):\n city_list = CityModel.find_by_country(country_id)\n return [city.to_location() for city in city_list]\n\n\nclass MutationType(ObjectType):\n class Meta:\n name = \"Mutation\"\n description = \"...\"\n\n # User\n signup = Signup.Field(required=True)\n update_user = UpdateUser.Field(required=True)\n delete_avatar = DeleteAvatar.Field(required=True)\n\n # Question\n create_question = CreateQuestion.Field(required=True)\n react_to_question = ReactToQuestion.Field(required=True)\n delete_img = DeleteImg.Field(required=True)\n\n # Answer\n answer_question = AnswerQuestion.Field(required=True)\n react_to_answer = ReactToAnswer.Field(required=True)\n\n # Reply\n reply_to_answer = ReplyToAnswer.Field(required=True)\n react_to_reply = ReactToReply.Field(required=True)\n\n # Category\n add_category = AddCategory.Field(required=True)\n\n # Location\n add_country = AddCountry.Field(required=True)\n add_ciy = AddCity.Field(required=True)\n\n\nschema = Schema(query=QueryType, mutation=MutationType)\n" }, { "alpha_fraction": 0.7046005129814148, "alphanum_fraction": 0.7046005129814148, "avg_line_length": 25.36170196533203, "blob_id": "c5be39ec1d0a001e06fbecf3a56946d8bb46a0cf", "content_id": "89f5ccd57a0a2fb5f857951e5a43c9b8240f9aa9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1239, "license_type": "no_license", "max_line_length": 118, "num_lines": 47, "path": "/app/api/category.py", "repo_name": "FunkyC0ders/answery-api", "src_encoding": "UTF-8", "text": "from graphene import ObjectType, Mutation, InputObjectType, 
Interface, String, ID, Boolean, Int, DateTime, Field, List\nfrom graphql import GraphQLError\nfrom flask_jwt_extended import jwt_required, get_jwt_identity\nfrom .translation import Translation, TranslationInput\nfrom app.models.category import Category as CategoryModel\n\n\nclass CommonAttributes(object):\n pass\n\n\nclass CategoryInterface(CommonAttributes, Interface):\n id = ID(required=True)\n name = List(Translation, required=True)\n\n\nclass Category(ObjectType):\n class Meta:\n name = \"Category\"\n description = \"...\"\n interfaces = (CategoryInterface,)\n\n\nclass NewCategory(CommonAttributes, InputObjectType):\n name = List(TranslationInput, required=True)\n\n\nclass AddCategory(Mutation):\n class Meta:\n name = \"AddCategory\"\n description = \"...\"\n\n class Arguments:\n category_data = NewCategory(required=True)\n\n ok = Boolean(required=True)\n category = Field(lambda: Category, required=True)\n\n @staticmethod\n @jwt_required\n def mutate(root, info, category_data):\n errors = {}\n\n category = CategoryModel(**category_data)\n category.save()\n\n return AddCategory(category=category, ok=True)\n" } ]
20
bencord0/django-skel
https://github.com/bencord0/django-skel
6201750af3c94bb859e8199b56624604c384d195
9c156b3b87ba6dc81c15005b2faa81f4dc617a43
e8725afa00dc2a6c69f52dfff20239c9c684ea4b
refs/heads/master
2021-01-18T07:06:32.815948
2013-08-05T18:37:43
2013-08-05T18:37:43
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6866504549980164, "alphanum_fraction": 0.6897950768470764, "avg_line_length": 31.424341201782227, "blob_id": "dae9a52a7dc8ef1e5adb13bd7248a364f62f18f0", "content_id": "2be20f89ded67d5c0f46405b40c572ea12ec06f2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9858, "license_type": "no_license", "max_line_length": 117, "num_lines": 304, "path": "/project_name/settings/common.py", "repo_name": "bencord0/django-skel", "src_encoding": "UTF-8", "text": "\"\"\"Common settings and globals.\"\"\"\n\n\nfrom datetime import timedelta\nfrom os import environ\nfrom os.path import abspath, basename, dirname, join, normpath\nimport random\nfrom sys import path\n\nfrom djcelery import setup_loader\ntruthy = ['True', 'true', 'Y', 'y', '1']\n\nimport dj_database_url\nfrom memcacheify import memcacheify\n\n\n########## PATH CONFIGURATION\n# Absolute filesystem path to the Django project directory:\nDJANGO_ROOT = dirname(dirname(abspath(__file__)))\n\n# Absolute filesystem path to the top-level project folder:\nSITE_ROOT = dirname(DJANGO_ROOT)\n\n# Site name:\nSITE_NAME = basename(DJANGO_ROOT)\n\n# Add our project to our pythonpath, this way we don't need to type our project\n# name in our dotted import paths:\npath.append(DJANGO_ROOT)\n########## END PATH CONFIGURATION\n\n\n########## DEBUG CONFIGURATION\n# See: https://docs.djangoproject.com/en/dev/ref/settings/#debug\nDEBUG = environ.get('DEBUG', 'False') in truthy\n\n# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-debug\nTEMPLATE_DEBUG = DEBUG\n########## END DEBUG CONFIGURATION\n\n\n########## EMAIL CONFIGURATION\n# See: https://docs.djangoproject.com/en/dev/ref/settings/#email-backend\nEMAIL_VIA_CONSOLE = 'django.core.mail.backends.console.EmailBackend'\nEMAIL_VIA_SMTP = 'django.core.mail.backends.smtp.EmailBackend'\nEMAIL_BACKEND = EMAIL_VIA_CONSOLE if DEBUG else EMAIL_VIA_SMTP\n########## END EMAIL 
CONFIGURATION\n\n\n########## MANAGER CONFIGURATION\n# See: https://docs.djangoproject.com/en/dev/ref/settings/#admins\n# Set the environment ADMINS variable from a comma separated list of\n# name:<email@domain> pairs.\nADMINS = ((name_email.strip().split(':')) for name_email in environ.get('ADMINS', '').split(','))\n\n# See: https://docs.djangoproject.com/en/dev/ref/settings/#managers\nMANAGERS = ADMINS\n########## END MANAGER CONFIGURATION\n\n\n########## DATABASE CONFIGURATION\n# See: https://docs.djangoproject.com/en/dev/ref/settings/#databases\n# https://github.com/kennethreitz/dj-database-url\nDATABASES = {'default': dj_database_url.config(\n default='sqlite:///{}'.format(normpath(join(DJANGO_ROOT, 'default.db'))))}\n########## END DATABASE CONFIGURATION\n\n\n########## CACHE CONFIGURATION\n# See: https://docs.djangoproject.com/en/dev/ref/settings/#caches\n# https://github.com/rdegges/django-heroku-memcacheify\n# Will use local memory caching as a backup\nCACHES = memcacheify()\n########## END CACHE CONFIGURATION\n\n\n########## GENERAL CONFIGURATION\n# See: https://docs.djangoproject.com/en/dev/ref/settings/#time-zone\nTIME_ZONE = environ.get('TIME_ZONE', 'Europe/London')\n\n# See: https://docs.djangoproject.com/en/dev/ref/settings/#language-code\nLANGUAGE_CODE = environ.get('LANGUAGE_CODE', 'en-gb')\n\n# See: https://docs.djangoproject.com/en/dev/ref/settings/#site-id\nSITE_ID = int(environ.get('SITE_ID', '1'))\n\n# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-i18n\nUSE_I18N = environ.get('USE_I18N', 'True') in truthy\n\n# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-l10n\nUSE_L10N = environ.get('USE_L10N', 'True') in truthy\n########## END GENERAL CONFIGURATION\n\n\n########## MEDIA CONFIGURATION\n# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-root\nMEDIA_ROOT = normpath(join(DJANGO_ROOT, 'media'))\nMEDIA_ROOT = environ.get('MEDIA_ROOT', MEDIA_ROOT)\n\n# See: 
https://docs.djangoproject.com/en/dev/ref/settings/#media-url\nMEDIA_URL = environ.get('MEDIA_URL', '/media/')\n########## END MEDIA CONFIGURATION\n\n\n########## STATIC FILE CONFIGURATION\n# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-root\n# See: https://github.com/kennethreitz/dj-static/blob/master/README.rst\nSTATIC_ROOT = normpath(join(DJANGO_ROOT, 'static'))\nSTATIC_ROOT = environ.get('STATIC_ROOT', STATIC_ROOT)\n\n# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-url\nSTATIC_URL = environ.get('STATIC_URL', '/static/')\n\n# See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#std:setting-STATICFILES_DIRS\nSTATICFILES_DIRS = (\n# normpath(join(DJANGO_ROOT, 'assets')),\n)\n\n# See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#staticfiles-finders\nSTATICFILES_FINDERS = (\n 'django.contrib.staticfiles.finders.FileSystemFinder',\n 'django.contrib.staticfiles.finders.AppDirectoriesFinder',\n 'compressor.finders.CompressorFinder',\n)\n########## END STATIC FILE CONFIGURATION\n\n\n########## SECRET CONFIGURATION\n# See: https://docs.djangoproject.com/en/dev/ref/settings/#secret-key\nSECRET_KEY = environ.get('SECRET_KEY', \"\".join([random.choice(\n \"abcdefghijklmnopqrstuvwxyz0123456789!@#$%^&*(-_=+)\"\n ) for i in range(50)]))\nif not environ.get('SECRET_KEY'):\n print(\"SECRET_KEY is not set: Using a temporary value instead\")\n print(\"SECRET_KEY: {}\".format(SECRET_KEY))\n########## END SECRET CONFIGURATION\n\n\n########## FIXTURE CONFIGURATION\n# See: https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-FIXTURE_DIRS\nFIXTURE_DIRS = (\n normpath(join(DJANGO_ROOT, 'fixtures')),\n)\n########## END FIXTURE CONFIGURATION\n\n\n########## TEMPLATE CONFIGURATION\n# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-context-processors\nTEMPLATE_CONTEXT_PROCESSORS = (\n 'django.contrib.auth.context_processors.auth',\n 'django.core.context_processors.debug',\n 
'django.core.context_processors.i18n',\n 'django.core.context_processors.media',\n 'django.core.context_processors.static',\n 'django.core.context_processors.tz',\n 'django.contrib.messages.context_processors.messages',\n 'django.core.context_processors.request',\n)\n\n# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-loaders\nTEMPLATE_LOADERS = (\n 'django.template.loaders.filesystem.Loader',\n 'django.template.loaders.app_directories.Loader',\n)\n\n# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-dirs\nTEMPLATE_DIRS = (\n normpath(join(DJANGO_ROOT, 'templates')),\n)\n########## END TEMPLATE CONFIGURATION\n\n\n########## MIDDLEWARE CONFIGURATION\n# See: https://docs.djangoproject.com/en/dev/ref/settings/#middleware-classes\nMIDDLEWARE_CLASSES = (\n # Use GZip compression to reduce bandwidth.\n 'django.middleware.gzip.GZipMiddleware',\n\n # Default Django middleware.\n 'django.middleware.common.CommonMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n)\n########## END MIDDLEWARE CONFIGURATION\n\n\n########## URL CONFIGURATION\n# See: https://docs.djangoproject.com/en/dev/ref/settings/#root-urlconf\nROOT_URLCONF = '%s.urls' % SITE_NAME\n########## END URL CONFIGURATION\n\n\n########## APP CONFIGURATION\nDJANGO_APPS = (\n # Default Django apps:\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.sites',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n\n # Useful template tags:\n 'django.contrib.humanize',\n\n # Admin panel and documentation:\n 'django.contrib.admin',\n 'django.contrib.admindocs',\n)\n\nTHIRD_PARTY_APPS = (\n # Database migration helpers:\n 'south',\n\n # Static file management:\n 'compressor',\n\n # Asynchronous task queue:\n 'djcelery',\n)\n\nLOCAL_APPS = 
(\n)\n\n# See: https://docs.djangoproject.com/en/dev/ref/settings/#installed-apps\nINSTALLED_APPS = DJANGO_APPS + THIRD_PARTY_APPS + LOCAL_APPS\n########## END APP CONFIGURATION\n\n\n########## LOGGING CONFIGURATION\n# See: https://docs.djangoproject.com/en/dev/ref/settings/#logging\nLOGGING = {\n 'version': 1,\n 'disable_existing_loggers': False,\n 'filters': {\n 'require_debug_false': {\n '()': 'django.utils.log.RequireDebugFalse'\n }\n },\n 'handlers': {\n 'mail_admins': {\n 'level': 'ERROR',\n 'filters': ['require_debug_false'],\n 'class': 'django.utils.log.AdminEmailHandler'\n },\n 'console': {\n 'level': 'DEBUG',\n 'class': 'logging.StreamHandler'\n }\n },\n 'loggers': {\n 'django.request': {\n 'handlers': ['mail_admins', 'console'],\n 'level': 'ERROR',\n 'propagate': True,\n },\n '{{ project_name }}': {\n 'handlers': ['mail_admins', 'console'],\n 'level': 'DEBUG' if DEBUG else 'INFO',\n 'propagate': True,\n },\n }\n}\n########## END LOGGING CONFIGURATION\n\n\n########## CELERY CONFIGURATION\n# See: http://celery.readthedocs.org/en/latest/configuration.html#celery-task-result-expires\nCELERY_TASK_RESULT_EXPIRES = timedelta(minutes=30)\n# See: http://celery.github.com/celery/django/\nsetup_loader()\n# See: http://docs.celeryq.org/en/latest/configuration.html#celery-always-eager\nCELERY_ALWAYS_EAGER = DEBUG\n########## END CELERY CONFIGURATION\n\n\n########## WSGI CONFIGURATION\n# See: https://docs.djangoproject.com/en/dev/ref/settings/#wsgi-application\nWSGI_APPLICATION = 'wsgi.application'\n########## END WSGI CONFIGURATION\n\n\n########## COMPRESSION CONFIGURATION\n# See: http://django_compressor.readthedocs.org/en/latest/settings/#django.conf.settings.COMPRESS_ENABLED\nCOMPRESS_ENABLED = True\n\n# See: http://django_compressor.readthedocs.org/en/latest/settings/#django.conf.settings.COMPRESS_CSS_FILTERS\nCOMPRESS_CSS_FILTERS = [\n 'compressor.filters.template.TemplateFilter',\n]\n\n# See: 
http://django_compressor.readthedocs.org/en/latest/settings/#django.conf.settings.COMPRESS_JS_FILTERS\nCOMPRESS_JS_FILTERS = [\n 'compressor.filters.template.TemplateFilter',\n]\n########## END COMPRESSION CONFIGURATION\n\n\n########## ALLOWED HOSTS CONFIGURATION\n# See: https://docs.djangoproject.com/en/dev/ref/settings/#allowed-hosts\nALLOWED_HOSTS = [ allowed_host.strip() for allowed_host in environ.get('ALLOWED_HOSTS', '.herokuapp.com').split(',')]\n########## END ALLOWED HOST CONFIGURATION\n\n" }, { "alpha_fraction": 0.582317054271698, "alphanum_fraction": 0.582317054271698, "avg_line_length": 24.230770111083984, "blob_id": "020965be72d7b577b513b9085e6c67b156971a85", "content_id": "42f9afc2e4fe96d3654ab8854ce6d89fce5aea29", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 328, "license_type": "no_license", "max_line_length": 70, "num_lines": 13, "path": "/setup.py", "repo_name": "bencord0/django-skel", "src_encoding": "UTF-8", "text": "import os\nfrom setuptools import setup, find_packages\n\nos.chdir(os.path.dirname(os.path.abspath(os.path.normpath(__file__))))\n\nsetup(name='{{ project_name }}',\n packages=find_packages(),\n entry_points={\n 'console_scripts': [\n '{{ project_name }} = {{ project_name }}.wsgi:main'\n ]\n }\n)\n" }, { "alpha_fraction": 0.6810392737388611, "alphanum_fraction": 0.6865671873092651, "avg_line_length": 25.985074996948242, "blob_id": "354be4ad24b9068abce578017ff135db40cd8364", "content_id": "76ff5c7f85f8b1712647a7eb8e223669a6fdca0c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 1809, "license_type": "no_license", "max_line_length": 89, "num_lines": 67, "path": "/README.txt", "repo_name": "bencord0/django-skel", "src_encoding": "UTF-8", "text": "======\nREADME\n======\n\nUsage\n=====\n\n $ project=\"my_project_name\"\n $ virtualenv --distribute ${project} && cd ${project}\n $ source ./bin/activate\n (project)$ pip 
install django\n (project)$ django-admin.py startproject --template=<path to this repo> ${project} .\n (project)$ pip install -r requirements.txt\n (project)$ pip intsall -e .\n (project)$ ${project}\n\nThe server will now be listening on port 8000. Feel free to connect to it from a browser.\n\nTo login to the admin interface, you will need to create a super user.\n\n (project)$ python manage.py createsuperuser\n\nYou only need to do this once per deployment.\n\nConfiguration environment\n=========================\n\nSECRET_KEY\n----------\nWhile not strictly required, it is also recommended to do\n\n $ SECRET_KEY=$(python -c 'import random;\n print \"\".join([random.choice(\n \"abcdefghijklmnopqrstuvwxyz0123456789!@#$%^&*(-_=+)\"\n ) for i in range(50)])')\n $ heroku config:add SECRET_KEY=$SECRET_KEY\n\nThe production settings pull SECRET_KEY from environment but fallbacks\nto a value which is generated mainly for development environment.\n\nThis setup allows you to easily keep your site in a public repo if you so \nwish without causing opening a route to attack your Django passwords.\n\nDATABASE_URL\n------------\nIt is also a good idea to use a dedicated database.\n\n $ DATABASE_URL=postgres://username:password@host:port/db_name\n\nA local sqlite database will be used if unset.\n\nPORT\n----\nBy default, the application will bind to port 8000. This can be overwritten\nusing the PORT environmental variable. E.g. 
locally with foreman/honcho, or\non a PaaS like Heroku.\n\nDependencies\n============\n\nGentoo\n------\nRecompile python with USE=sqlite\nAlso install\n\tdev-python/virtualenv\n\tdev-db/postgresql-base\n\tdev-libs/libmemcached\n\n" }, { "alpha_fraction": 0.7026315927505493, "alphanum_fraction": 0.7105262875556946, "avg_line_length": 27.11111068725586, "blob_id": "a13be10030fcf43eed371044fe1a13e6d7bc268b", "content_id": "644f92d18720de4365f09746c97ad19652912a2d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 760, "license_type": "no_license", "max_line_length": 92, "num_lines": 27, "path": "/project_name/settings/dev.py", "repo_name": "bencord0/django-skel", "src_encoding": "UTF-8", "text": "\"\"\"Development settings and globals.\"\"\"\n\n\nfrom os.path import join, normpath\nfrom os import environ\n\n########## DEBUG CONFIGURATION\nenviron.setdefault(\"DEBUG\", \"True\")\n########## END DEBUG CONFIGURATION\n\nfrom common import *\n\n\n########## TOOLBAR CONFIGURATION\n# See: https://github.com/django-debug-toolbar/django-debug-toolbar#installation\nINSTALLED_APPS += (\n 'debug_toolbar',\n)\n\n# See: https://github.com/django-debug-toolbar/django-debug-toolbar#installation\nINTERNAL_IPS = [ iip.strip() for iip in environ.get('INTERNAL_IPS', '127.0.0.1').split(',')]\n\n# See: https://github.com/django-debug-toolbar/django-debug-toolbar#installation\nMIDDLEWARE_CLASSES += (\n 'debug_toolbar.middleware.DebugToolbarMiddleware',\n)\n########## END TOOLBAR CONFIGURATION\n\n" }, { "alpha_fraction": 0.7506879568099976, "alphanum_fraction": 0.7528893947601318, "avg_line_length": 36.081634521484375, "blob_id": "f3756b5c269aa853db0837ae3b4e26036c0e40ef", "content_id": "cbc16449815f34be05044050e2d88f492979eedc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1817, "license_type": "no_license", "max_line_length": 82, "num_lines": 49, "path": 
"/project_name/wsgi.py", "repo_name": "bencord0/django-skel", "src_encoding": "UTF-8", "text": "\"\"\"\nWSGI config for {{ project_name }} project.\n\nThis module contains the WSGI application used by Django's development server\nand any production WSGI deployments. It should expose a module-level variable\nnamed ``application``. Django's ``runserver`` and ``runfcgi`` commands discover\nthis application via the ``WSGI_APPLICATION`` setting.\n\nUsually you will have the standard Django WSGI application here, but it also\nmight make sense to replace the whole Django WSGI application with a custom one\nthat later delegates to the Django one. For example, you could introduce WSGI\nmiddleware here, or combine a Django application with an application of another\nframework.\n\n\"\"\"\nimport os\n\nos.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"{{ project_name }}.settings.dev\")\n\n# This application object is used by any WSGI server configured to use this\n# file. This includes Django's development server, if the WSGI_APPLICATION\n# setting points here.\nfrom django.core.wsgi import get_wsgi_application\napplication = get_wsgi_application()\n\n# Apply WSGI middleware here.\n# from helloworld.wsgi import HelloWorldApplication\n# application = HelloWorldApplication(application)\n\n# See: https://github.com/kennethreitz/dj-static/blob/master/README.rst\nfrom dj_static import Cling\napplication = Cling(application)\n\ndef main():\n from django.core import management\n from django.conf import settings\n\n PORT = os.environ.get('PORT', 8000)\n\n management.call_command('syncdb', interactive=False, migrate=True)\n management.call_command('collectstatic', interactive=False)\n\n if 'gunicorn' in settings.INSTALLED_APPS:\n management.call_command('run_gunicorn', '[::]:{PORT}'.format(PORT=PORT))\n else:\n management.call_command('runserver', '[::]:{PORT}'.format(PORT=PORT))\n\nif __name__ == '__main__':\n main()\n" } ]
5
azizahmedkhan/azizsdcn
https://github.com/azizahmedkhan/azizsdcn
762f450591eb12bf586f41ae6512929a64625515
137d7de9411cedb9e8e42c23b166f2fc41d75d4b
e55d50b856b49910b4b87abc622a5febf36652dd
refs/heads/master
2020-05-27T02:01:10.495553
2017-04-13T04:16:40
2017-04-13T04:16:40
82,513,716
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.6137163043022156, "alphanum_fraction": 0.6193825602531433, "avg_line_length": 49.67326736450195, "blob_id": "e441965d419cd2439a69005c33f65d513f99bf6a", "content_id": "5982e9524fedf167eb81e9266e9d6c70f6cd9153", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5118, "license_type": "no_license", "max_line_length": 164, "num_lines": 101, "path": "/TrafficSignClassifier/tscp/TrafficSignClassifier.py", "repo_name": "azizahmedkhan/azizsdcn", "src_encoding": "UTF-8", "text": "# Load pickled data\nimport pickle\nimport matplotlib.pyplot as plt\n\n# TODO: Fill this in based on where you saved the training and testing data\n\ntraining_file = \"traffic-signs-data/train.p\"\nvalidation_file = \"traffic-signs-data/valid.p\"\ntesting_file = \"traffic-signs-data/test.p\"\n\nwith open(training_file, mode='rb') as f:\n train = pickle.load(f)\nwith open(validation_file, mode='rb') as f:\n valid = pickle.load(f)\nwith open(testing_file, mode='rb') as f:\n test = pickle.load(f)\n##############################################\nX_train, y_train = train['features'], train['labels']\nX_valid, y_valid = valid['features'], valid['labels']\nX_test, y_test = test['features'], test['labels']\n\n### Replace each question mark with the appropriate value.\n### Use python, pandas or numpy methods rather than hard coding the results\n\n# TODO: Number of training examples\nn_train = len(X_train)\n\n# TODO: Number of testing examples.\nn_test = len(X_test)\n\n# TODO: What's the shape of an traffic sign image?\nimage_shape = X_test[0].shape\n\n# TODO: How many unique classes/labels there are in the dataset.\nn_classes = ?\n\nprint(\"Number of training examples =\", n_train)\nprint(\"Number of testing examples =\", n_test)\nprint(\"Image data shape =\", image_shape)\nprint(\"Number of classes =\", n_classes)\n############################################################################\n### Data exploration visualization 
code goes here.\n### Feel free to use as many code cells as needed.\n\n# Visualizations will be shown in the notebook.\n#%matplotlib inline\n\n#####################################################################################\n### Preprocess the data here. Preprocessing steps could include normalization, converting to grayscale, etc.\n### Feel free to use as many code cells as needed.\n\n#####################################################################################\n### Define your architecture here.\n### Feel free to use as many code cells as needed\n\n###############################################################################\n### Load the images and plot them here.\n### Feel free to use as many code cells as needed.\n\n##################################################################################\n### Run the predictions here and use the model to output the prediction for each image.\n### Make sure to pre-process the images with the same pre-pr\n### Feel free to use as many code cells as needed.\n\n#####################################################################################\n#6666 Calculate the accuracy for these 5 new images.\n### For example, if the model predicted 1 out of 5 signs correctly, it's 20% accurate on these new images\n\n###############################################################################################\n### Print out the top five softmax probabilities for the predictions on the German traffic sign images found on the web.\n### Feel free to use as many code cells as needed.\n\n###################################################################################\n# ### Visualize your network's feature maps here.\n### Feel free to use as many code cells as needed.\n\n# image_input: the test image being fed into the network to produce the feature maps\n# tf_activation: should be a tf variable name used during your training procedure that represents the calculated state of a specific weight layer\n# 
activation_min/max: can be used to view the activation contrast in more detail, by default matplot sets min and max to the actual min and max values of the output\n# plt_num: used to plot out multiple different weight feature map sets on the same block, just extend the plt number for each new feature map entry\n\ndef outputFeatureMap(image_input, tf_activation, activation_min=-1, activation_max=-1 ,plt_num=1):\n # Here make sure to preprocess your image_input in a way your network expects\n # with size, normalization, ect if needed\n # image_input =\n # Note: x should be the same name as your network's tensorflow data placeholder variable\n # If you get an error tf_activation is not defined it maybe having trouble accessing the variable from inside a function\n activation = tf_activation.eval(session=sess,feed_dict={x : image_input})\n featuremaps = activation.shape[3]\n plt.figure(plt_num, figsize=(15, 15))\n for featuremap in range(featuremaps):\n subplot(6, 8, featuremap + 1) # sets the number of feature maps to show on each row and column\n title('FeatureMap ' + str(featuremap)) # displays the feature map number\n if activation_min != -1 & activation_max != -1:\n imshow(activation[0, :, :, featuremap], interpolation=\"nearest\", vmin =activation_min, vmax=activation_max, cmap=\"gray\")\n elif activation_max != -1:\n imshow(activation[0, :, :, featuremap], interpolation=\"nearest\", vmax=activation_max, cmap=\"gray\")\n elif activation_min !=-1:\n imshow(activation[0, :, :, featuremap], interpolation=\"nearest\", vmin=activation_min, cmap=\"gray\")\n else:\n imshow(activation[0, :, :, featuremap], interpolation=\"nearest\", cmap=\"gray\")ocessing pipeline used earlier.\n" }, { "alpha_fraction": 0.6363377571105957, "alphanum_fraction": 0.6567362546920776, "avg_line_length": 33.217533111572266, "blob_id": "25ad9e686f3ea490ae66523dfb879d5aaac5b43c", "content_id": "fd62dfe3d7911d90f831345d85a54daf3589efd7", "detected_licenses": [], "is_generated": false, 
"is_vendor": false, "language": "Python", "length_bytes": 10540, "license_type": "no_license", "max_line_length": 118, "num_lines": 308, "path": "/TrafficSignClassifier/JGermanClassifier.py", "repo_name": "azizahmedkhan/azizsdcn", "src_encoding": "UTF-8", "text": "import pandas as pd\n\nimport numpy as np\nimport tensorflow as tf\nimport time\nimport matplotlib.pyplot as plt\nimport matplotlib.image as mpimg\nimport os\n\nfrom multiprocessing import Queue\n\nimport pickle\n\ntraining_file = \"./traffic-signs-data/train.p\"\nvalidation_file=\"./traffic-signs-data/valid.p\"\ntesting_file = \"./traffic-signs-data/test.p\"\n\nwith open(training_file, mode='rb') as f:\n train = pickle.load(f)\nwith open(testing_file, mode='rb') as f:\n test = pickle.load(f)\n\nX_train, y_train = train['features'], train['labels']\nX_test, y_test = test['features'], test['labels']\n\nn_train = len(X_train)\nn_test = len(X_test)\nimage_shape = X_train[0].shape\nn_classes = len(set(y_train))\n\nprint(\"Number of training examples =\", n_train)\nprint(\"Number of testing examples =\", n_test)\nprint(\"Image data shape =\", image_shape)\nprint(\"Number of classes =\", n_classes)\n\nprint('Sample images')\n#for i in range(4):\n# plt.subplot(2,2,i+1)\n# plt.imshow(X_train[i*1500+1])\n\n#plt.hist(y_train, bins=n_classes)\n#plt.title('Number of examples of each sign in the training set')\n#plt.xlabel('Sign')\n#plt.ylabel('Count')\n#plt.plot()\n\n#plt.imshow(X_train[17031])\n# Shuffle training examples\nfrom sklearn.utils import shuffle\nX_train, y_train = shuffle(X_train, y_train, random_state=42)\n\nX_train_orig = X_train\nX_test_orig = X_test\n\n# Normalise input (images still in colour)\nX_train = (X_train - X_train.mean()) / (np.max(X_train) - np.min(X_train))\nX_test = (X_test - X_test.mean()) / (np.max(X_test) - np.min(X_test))\n\ndef plot_norm_image(image_index):\n \"\"\"Plots original image on the left and normalised image on the right.\"\"\"\n plt.subplot(2,2,1)\n 
plt.imshow(X_train_orig[image_index])\n plt.subplot(2,2,2)\n plt.imshow(X_train[image_index])\n\n#plot_norm_image(20)\n\n\nfrom sklearn.model_selection import train_test_split\nX_train, X_val, y_train, y_val = train_test_split(X_train, y_train,\n test_size=0.2,\n random_state=42)\n\n# Network parameters\nn_input = 32 * 32 * 3\nnb_filters = 32\nkernel_size = (3, 3)\ninput_shape = (32, 32, 3)\nn_fc1 = 512\nn_fc2 = 128\nin_channels = 3\npool_size = 2 # i.e. (2,2)\n\ndropout_conv = 0.9\ndropout_fc = 0.9\n\nweights_stddev = 0.1\nweights_mean = 0.0\nbiases_mean = 0.0\n\npadding = 'VALID'\nif padding == 'SAME':\n conv_output_length = 6\nelif padding == 'VALID':\n conv_output_length = 5\n#else:\n# raiseException(\"Unknown padding.\")\n\n# tf Graph input\nx_unflattened = tf.placeholder(\"float\", [None, 32, 32, 3])\nx = x_unflattened\n\ny_rawlabels = tf.placeholder(\"int32\", [None])\ny = tf.one_hot(y_rawlabels, depth=43, on_value=1., off_value=0., axis=-1)\n\n\n## Create model\n\ndef conv2d(x, W, b, strides=3):\n print(\"v>>\",x, \" Weights >>\", W,\" Biases >>\", b)\n\n \"\"\"Conv2D wrapper, with bias and relu activation\"\"\"\n # strides = [batch, in_height, in_width, channels]\n x = tf.nn.conv2d(x, W, strides=[1, strides, strides, 1], padding='SAME')\n x = tf.nn.bias_add(x, b)\n return tf.nn.relu(x)\n\n\ndef maxpool2d(x, k=2, padding_setting='SAME'):\n \"\"\"MaxPool2D wrapper.\"\"\"\n return tf.nn.max_pool(x, ksize=[1, k, k, 1], strides=[1, k, k, 1],\n padding=padding_setting)\n\ndef conv_net(model_x, model_weights, model_biases, model_pool_size,\n model_dropout_conv, model_dropout_fc, padding='SAME'):\n \"\"\"Convolutional neural network model.\"\"\"\n # Convolution Layer 1\n print(\"model_x>>\", model_x)\n print (\"model_weights['conv1']\",model_weights['conv1'])\n conv1 = conv2d(model_x, model_weights['conv1'], model_biases['conv1'])\n print(\"conv1 after conv>>\", conv1)\n # Max Pooling (down-sampling)\n conv1 = maxpool2d(conv1, k=model_pool_size, 
padding_setting=padding)\n print(\"conv1 after max pool>>\", conv1)\n conv1 = tf.nn.dropout(conv1, model_dropout_conv)\n print(\"conv1 after dropout>>\",conv1)\n # Fully connected layer 1\n # Reshape conv1 output to fit fully connected layer input\n conv1_shape = conv1.get_shape().as_list()\n fc1 = tf.reshape(conv1, [-1, conv1_shape[1]*conv1_shape[2]*conv1_shape[3]])\n print(\"fc1 after reshape>>\", fc1)\n fc1 = tf.add(tf.matmul(fc1, model_weights['fc1']), model_biases['fc1'])\n print(\"fc1 after matmul>>\", fc1)\n fc1 = tf.nn.relu(fc1)\n print(\"fc1 after relu>>\", fc1)\n fc1 = tf.nn.dropout(fc1, model_dropout_fc)\n print(\"fc1 after dropout>>\", fc1)\n # Fully connected layer 2\n fc2 = tf.add(tf.matmul(fc1, model_weights['fc2']), model_biases['fc2'])\n print(\"fc2 after matmul>>\", fc2)\n fc2 = tf.nn.relu(fc2)\n print(\"fc2 after relu>>\", fc2)\n fc2 = tf.nn.dropout(fc2, model_dropout_fc)\n print(\"fc2 after dropout>>\", fc2)\n # Output layer\n output = tf.add(tf.matmul(fc2, model_weights['out']), model_biases['out'])\n print(\"output>>\", output)\n # Note: Softmax is outside the model\n return output\n\n\n## Store layers weight & bias\n\n# NEW: initialise neurons with slightly positive initial bias\n# to avoid dead neurons.\ndef weight_variable(shape, weight_mean, weight_stddev):\n initial = tf.truncated_normal(shape, stddev=weight_stddev, mean=weight_mean)\n # alt: tf.random_normal(shape)\n return tf.Variable(initial)\n\n\ndef bias_variable(shape, bias_mean):\n initial = tf.constant(bias_mean, shape=shape)\n return tf.Variable(initial)\n\n\nweights = {\n 'conv1': weight_variable([kernel_size[0], kernel_size[1], in_channels, nb_filters], weights_mean, weights_stddev),\n 'fc1': weight_variable([nb_filters * conv_output_length**2, n_fc1], weights_mean, weights_stddev),\n 'fc2': weight_variable([n_fc1, n_fc2], weights_mean, weights_stddev),\n 'out': weight_variable([n_fc2, n_classes], weights_mean, weights_stddev)\n}\n\nbiases = {\n 'conv1': 
bias_variable([nb_filters], biases_mean),\n 'fc1': bias_variable([n_fc1], biases_mean),\n 'fc2': bias_variable([n_fc2], biases_mean),\n 'out': bias_variable([n_classes], biases_mean)\n}\n\n# Training parameters\nlearning_rate = 0.001\ninitial_learning_rate = learning_rate\ntraining_epochs = 150\nbatch_size = 100\ndisplay_step = 1\nn_train = len(X_train)\n\nanneal_mod_frequency = 15\n# Annealing rate of 1: learning rate remains constant.\nannealing_rate = 1\n\nprint_accuracy_mod_frequency = 1\n\n# Construct model\npred = conv_net(x, weights, biases, pool_size, dropout_conv, dropout_fc, padding=padding)\npred_probs = tf.nn.softmax(pred)\n\n# Define loss and optimizer\ncost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits = pred, labels = y))\noptimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)\n\n# Function to initialise the variables\ninit = tf.initialize_all_variables()\n\n### RUN MODEL ###\n# Launch the graph\nsess = tf.Session()\n\n# Initialise variables\nsess.run(init)\n\n# Initialise time logs\ninit_time = time.time()\nepoch_time = init_time\n\nfive_epoch_moving_average = 0.\nepoch_accuracies = []\n\n# Training cycle\nfor epoch in range(training_epochs):\n if five_epoch_moving_average > 0.96:\n break\n\n avg_cost = 0.\n\n total_batch = int(n_train / batch_size)\n # Loop over all batches\n for i in range(total_batch):\n batch_x, batch_y = np.array(X_train[i * batch_size:(i + 1) * batch_size]), \\\n np.array(y_train[i * batch_size:(i + 1) * batch_size])\n # tf.train.batch([X_train, y_train], batch_size=100, enqueue_many=True)\n # Run optimization op (backprop) and cost op (to get loss value)\n _, c = sess.run([optimizer, cost], feed_dict={x_unflattened: batch_x, y_rawlabels: batch_y})\n # Compute average loss\n avg_cost += c / total_batch\n # print(avg_cost)\n # Display logs per epoch step\n if epoch % display_step == 0:\n print(\"Epoch:\", '%04d' % (epoch + 1), \"cost=\",\n \"{:.9f}\".format(avg_cost))\n last_epoch_time 
= epoch_time\n epoch_time = time.time()\n # print(\"Time since last epoch: \", epoch_time - last_epoch_time)\n # Anneal learning rate\n if (epoch + 1) % anneal_mod_frequency == 0:\n learning_rate *= annealing_rate\n print(\"New learning rate: \", learning_rate)\n\n if (epoch + 1) % print_accuracy_mod_frequency == 0:\n correct_prediction = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))\n accuracy = tf.reduce_mean(tf.cast(correct_prediction, \"float\"))\n # Line below needed only when not using `with tf.Session() as sess`\n with sess.as_default():\n epoch_accuracy = accuracy.eval({x_unflattened: X_val, y_rawlabels: y_val})\n # TODO: optimise five_epoch_moving_average, e.g. using a queue\n epoch_accuracies.append(epoch_accuracy)\n if epoch >= 4:\n five_epoch_moving_average = np.sum(epoch_accuracies[epoch - 5:epoch]) / 5\n print(\"Five epoch moving average: \", five_epoch_moving_average)\n print(\"Accuracy (validation):\", epoch_accuracy)\n\nprint(\"Optimization Finished!\")\n\n# Test model\ncorrect_prediction = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))\n# Calculate accuracy\n# accuracy_train = tf.reduce_mean(tf.cast(correct_prediction, \"float\"))\n# print(\"Accuracy (train):\", accuracy_train.eval({x_unflattened: X_train, y_rawlabels: y_train}))\ntrain_predict_time = time.time()\n# print(\"Time to calculate accuracy on training set: \", train_predict_time - epoch_time)\naccuracy = tf.reduce_mean(tf.cast(correct_prediction, \"float\"))\n# Line below needed only when not using `with tf.Session() as sess`\nwith sess.as_default():\n print(\"Accuracy (test):\", accuracy.eval({x_unflattened: X_test, y_rawlabels: y_test}))\ntest_predict_time = time.time()\nprint(\"Time to calculate accuracy on test set: \", test_predict_time - train_predict_time)\n\n# Print parameters for reference\nprint(\"\\nParameters:\")\nprint(\"Learning rate (initial): \", initial_learning_rate)\nprint(\"Anneal learning rate every \", anneal_mod_frequency, \" epochs by \", 1 - 
annealing_rate)\nprint(\"Learning rate (final): \", learning_rate)\nprint(\"Training epochs: \", training_epochs)\nprint(\"Batch size: \", batch_size)\nprint(\"Dropout (conv): \", dropout_conv)\nprint(\"Dropout (fc): \", dropout_fc)\nprint(\"Padding: \", padding)\nprint(\"weights_mean: \", weights_mean)\nprint(\"weights_stddev: \", weights_stddev)\nprint(\"biases_mean: \", biases_mean)\n\n\nepochs_results = pd.read_csv('logs/tsc-p2-submission-2-model-perf.csv')\nepochs_results.plot(x='epoch', y='validation_accuracy',\n title='Validation accuracy')\nepochs_results.plot(x='epoch', y='five_epoch_moving_average',\n title='Validation accuracy (five-epoch moving average)')\n\n" }, { "alpha_fraction": 0.6272875666618347, "alphanum_fraction": 0.6775326728820801, "avg_line_length": 43.03237533569336, "blob_id": "36ad7e0af49a7eb09efe7a69300e3a57c821a36b", "content_id": "55511261af9f9743a1895752a047916c2eb9400a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 12240, "license_type": "no_license", "max_line_length": 144, "num_lines": 278, "path": "/TrafficSignClassifier/TrafficSignClassifier.py", "repo_name": "azizahmedkhan/azizsdcn", "src_encoding": "UTF-8", "text": "# Load pickled data\nimport pickle\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport random\n\n#%matplotlib inline\n# TODO: Fill this in based on where you saved the training and testing data\n\ntraining_file = \"./traffic-signs-data/train.p\"\nvalidation_file=\"./traffic-signs-data/valid.p\"\ntesting_file = \"./traffic-signs-data/test.p\"\n\nwith open(training_file, mode='rb') as f:\n train = pickle.load(f)\nwith open(validation_file, mode='rb') as f:\n valid = pickle.load(f)\nwith open(testing_file, mode='rb') as f:\n test = pickle.load(f)\nx_train, y_train = train['features'], train['labels']\nx_valid, y_valid = valid['features'], valid['labels']\nx_test, y_test = test['features'], test['labels']\n\n### Replace each question mark with the 
appropriate value.\n### Use python, pandas or numpy methods rather than hard coding the results\n\n# TODO: Number of training examples\nn_train = len(x_train)\n\n# TODO: Number of testing examples.\nn_test = len(x_test)\n\n# TODO: What's the shape of an traffic sign image?\nimage_shape = x_test[0].shape\n\n# TODO: How many unique classes/labels there are in the dataset.\nn_classes = np.unique(y_train)\n\nprint(\"Number of training examples =\", n_train)\nprint(\"Number of testing examples =\", n_test)\nprint(\"Image data shape =\", image_shape)\nprint(\"Number of classes =\", len(n_classes))\nprint(\"Unique clases = \", n_classes)\n\n### Data exploration visualization code goes here.\n### Feel free to use as many code cells as needed.\nimport matplotlib.pyplot as plt\n# Visualizations will be shown in the notebook.\n#%matplotlib inline\n# I can visualize how much a lable appear in a dataset.\n#Also I can show how much label diversity in testing and validation sets are.\n#from numpy.random import beta\nimport matplotlib.pyplot as plt\nplt.style.use('bmh')\nplt.xlabel(\"Image Label\")\nplt.ylabel(\"number of times\")\nplt.hist(y_train, label= \"Train\", alpha=0.1, bins = n_classes)\nplt.hist(y_test, label= \"Test\", alpha=0.5, bins = n_classes)\nplt.hist(y_valid, label= \"Valid\", alpha=0.9, bins = n_classes)\n#plt.rcParams[\"figure.figsize\"] = [1.0, 2.0]\n#plt.figure(figsize=(20,10))\nplt.xticks(n_classes)\nplt.legend()\n#plt.show()\n\n# to view how the image look like\nplt.figure(figsize=(2,2))\nplt.imshow(x_train[0])\nprint(y_train[0])\nplt.imshow(x_train[21])\n#plt.show()\nprint(y_train)\n\n### Preprocess the data here. 
Preprocessing steps could include normalization, converting to grayscale, etc.\n### Feel free to use as many code cells as needed.\nfrom sklearn.utils import shuffle\nx_train,y_train = shuffle(x_train,y_train)\n\nimport tensorflow as tf\nfrom tensorflow.contrib.layers import flatten\nEPOCHS = 10\nBATCH_SIZE = 128\nfrom tensorflow.contrib.layers import flatten\n\n# Arguments used for tf.truncated_normal, randomly defines variables for the weights and biases for each layer\nmu = 0\nsigma = 0.1\ndef convolution(input, patchSize, numbeOfInputChannel, numberOfOutputChannel):\n conv_W = tf.Variable(tf.truncated_normal(shape=[patchSize, patchSize, numbeOfInputChannel, numberOfOutputChannel], mean=mu, stddev=sigma))\n conv_B = tf.Variable(tf.zeros(numberOfOutputChannel))\n#v>> Tensor(\"Placeholder:0\", shape=(?, 32, 32, 3), dtype=float32) Weights >> Tensor(\"Variable/read:0\", shape=(3, 3, 3, 32), dtype=float32)\n # Biases >> Tensor(\"Variable_4/read:0\", shape=(32,), dtype=float32)\n print(\"input>>\",input, \" Weights >>\", conv_W,\" Biases >>\", conv_B)\n#input >> Tensor(\"Placeholder:0\", shape=(?, 32, 32, 3), dtype = float32) Weights >> Tensor(\"Variable/read:0\",shape=(3, 3, 3, 32),dtype=float32)\n #Biases >> Tensor(\"Variable_1/read:0\", shape=(32,), dtype=float32)\n\n input = tf.nn.conv2d(input, conv_W, strides=[1, 3, 3, 1], padding='SAME')\n input = tf.nn.bias_add(input, conv_B)\n return tf.nn.relu(input)\n\ndef conv2d(x, W, b, strides=3):\n \"\"\"Conv2D wrapper, with bias and relu activation\"\"\"\n # strides = [batch, in_height, in_width, channels]\n\n x = tf.nn.conv2d(x, W, strides=[1, strides, strides, 1], padding='SAME')\n x = tf.nn.bias_add(x, b)\n return tf.nn.relu(x)\n\ndef reluLayer(input, shapeForNormalWeight, sizeOfBiasArray ):\n conv_W = tf.Variable(tf.truncated_normal(shape=shapeForNormalWeight, mean = mu, stddev = sigma))\n conv_b = tf.Variable(tf.zeros(sizeOfBiasArray))\n conv = tf.nn.conv2d(input, conv_W, strides=[1, 1, 1, 1], padding='SAME') + 
conv_b\n return tf.nn.relu(conv)\n\ndef xwplusb(inpputLayer, inputSize, outputSize):\n fc_W = tf.Variable(tf.truncated_normal(shape=(inputSize, outputSize), mean = mu, stddev = sigma))\n fc_b = tf.Variable(tf.zeros(outputSize))\n return tf.matmul(inpputLayer, fc_W) + fc_b\n\n### Define your architecture here.\n### Feel free to use as many code cells as needed.\nfrom tensorflow.contrib.layers import flatten\ndef flatten_layer(layer):\n # Get the shape of the input layer.\n layer_shape = layer.get_shape()\n num_features = layer_shape[1:4].num_elements()\n print (num_features)\n layer_flat = tf.reshape(layer, [-1, num_features])\n layer_flat.get_shape()\n return layer_flat, num_features\n\ndef trafficSignClassifier1(inputData):\n # Arguments used for tf.truncated_normal, randomly defines variables for the weights and biases for each layer\n mu = 0\n sigma = 0.1\n keepProb = tf.placeholder(tf.float32)\n layer1OutPut = reluLayer (inputData, (1,1,3,3), 3 )\n layer2OutPut = reluLayer (layer1OutPut, (5,5,3,32), 32 )\n layer3OutPut = reluLayer (layer2OutPut, (5,5,32,32), 32 )\n layer3OutPut = tf.nn.dropout(layer3OutPut, keepProb)\n #layer3OutPut = tf.nn.max_pool(layer3OutPut, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='VALID')\n layer4OutPut = reluLayer (layer3OutPut, (5,5,32,64), 64 )\n layer5OutPut = reluLayer (layer4OutPut, (5,5,64,64), 64 )\n #layer5OutPut = tf.nn.max_pool(layer5OutPut, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='VALID')\n layer5OutPut = tf.nn.dropout(layer5OutPut, keepProb)\n layer6OutPut = reluLayer (layer5OutPut, (5,5,64,128), 128 )\n layer7OutPut = reluLayer (layer6OutPut, (5,5,128,128), 128 )\n layer5OutPut = tf.nn.dropout(layer5OutPut, keepProb)\n #layer7OutPut = tf.nn.max_pool(layer7OutPut, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='VALID')\n\n # TODO: Layer 1: Convolutional. Input = 32x32x1. Output = 28x28x6.\n #layer1OutPut = reluLayer (x, (5,5,3,6), 6 )\n # TODO: Pooling. Input = 28x28x6. 
Output = 14x14x6.\n #layer1OutPut = tf.nn.max_pool(layer1OutPut, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='VALID')\n # TODO: Layer 2: Convolutional. Output = 10x10x16.\n #layer2OutPut = reluLayer (layer1OutPut, (5,5,6,16), 16)\n # TODO: Pooling. Input = 10x10x16. Output = 5x5x16.\n #layer2OutPut = tf.nn.max_pool(layer2OutPut, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='VALID')\n # TODO: Flatten. Input = 5x5x16. Output = 400.\n layer_flat3, num_fc_layers3 = flatten_layer(layer3OutPut)\n layer_flat5, num_fc_layers5 = flatten_layer(layer5OutPut)\n layer_flat7, num_fc_layers7 = flatten_layer(layer7OutPut)\n fc0 = flatten(layer7OutPut)\n # TODO: Layer 3: Fully Connected. Input = 400. Output = 120.\n # TODO: Activation.\n #fc1 =tf.nn.relu(xwplusb (fc0, 400, 120))\n # TODO: Layer 4: Fully Connected. Input = 120. Output = 84.\n # TODO: Activation.\n #fc2 =tf.nn.relu(xwplusb (fc1, 120, 84))\n # TODO: Layer 5: Fully Connected. Input = 84. Output = 10.\n #logits =xwplusb (fc2, 84, 10 )\n #return logits\n return fc0\n\ndef trafficSignClassifier(inputData):\n # Arguments used for tf.truncated_normal, randomly defines variables for the weights and biases for each layer\n mu = 0\n sigma = 0.1\n # TODO: Layer 1: Convolutional. Input = 32x32x1. Output = 28x28x6.\n print(\"inputData is >>>\",inputData)\n #layer1OutPut = convolution(input=inputData, patchSize=3, numbeOfInputChannel=3, numberOfOutputChannel=32)\n conv_W = tf.Variable(\n tf.truncated_normal(shape=[3, 3, 3, 32], mean=mu,\n stddev=sigma))\n conv_B = tf.Variable(tf.zeros(32))\n layer1OutPut = conv2d(inputData, conv_W, conv_B)\n print(\"conv1 after conv>>\", inputData)\n # TODO: Pooling. Input = 28x28x6. 
Output = 14x14x6.\n layer1OutPut = tf.nn.max_pool(layer1OutPut, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')\n print(\"conv1 after max pool>>\", layer1OutPut)\n layer1OutPut = tf.nn.dropout(layer1OutPut, 0.9)\n print(\"conv1 after dropout>>\", layer1OutPut)\n fc1 = tf.reshape(layer1OutPut,[-1,16*16*32])\n print(\"fc1 after reshape>>\", fc1)\n fc1 = xwplusb(fc1, 32*6**2, 512)\n print(\"fc1 after matmul>>\", fc1)\n # TODO: Layer 2: Convolutional. Output = 10x10x16.\n # TODO: Activation.\n #layer2OutPut = reluLayer(layer1OutPut, (5, 5, 6, 16), 16)\n # TODO: Pooling. Input = 10x10x16. Output = 5x5x16.\n #layer2OutPut = tf.nn.max_pool(layer2OutPut, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='VALID')\n # TODO: Flatten. Input = 5x5x16. Output = 400.\n #fc0 = flatten(layer2OutPut)\n # TODO: Layer 3: Fully Connected. Input = 400. Output = 120.\n # TODO: Activation.\n #fc1 = tf.nn.relu(xwplusb(fc0, 1024, 120))\n # TODO: Layer 4: Fully Connected. Input = 120. Output = 84.\n # TODO: Activation.\n #fc2 = tf.nn.relu(xwplusb(fc1, 120, 84))\n # TODO: Layer 5: Fully Connected. Input = 84. 
Output = 10.\n logits = xwplusb(fc1, 84, 10)\n return logits\n'''\nmodel_x>> Tensor(\"Placeholder:0\", shape=(?, 32, 32, 3), dtype=float32)\nconv1 after conv>> Tensor(\"Relu:0\", shape=(?, 11, 11, 32), dtype=float32)\nconv1 after max pool>> Tensor(\"MaxPool:0\", shape=(?, 5, 5, 32), dtype=float32)\nconv1 after dropout>> Tensor(\"dropout/mul:0\", shape=(?, 5, 5, 32), dtype=float32)\nfc1 after reshape>> Tensor(\"Reshape:0\", shape=(?, 800), dtype=float32)\nfc1 after matmul>> Tensor(\"Add:0\", shape=(?, 512), dtype=float32)\nfc1 after relu>> Tensor(\"Relu_1:0\", shape=(?, 512), dtype=float32)\nfc1 after dropout>> Tensor(\"dropout_1/mul:0\", shape=(?, 512), dtype=float32)\nfc2 after matmul>> Tensor(\"Add_1:0\", shape=(?, 128), dtype=float32)\n#fc2 after relu>> Tensor(\"Relu_2:0\", shape=(?, 128), dtype=float32)\n#fc2 after dropout>> Tensor(\"dropout_2/mul:0\", shape=(?, 128), dtype=float32)\n#output>> Tensor(\"Add_2:0\", shape=(?, 43), dtype=float32)\n'''\n\nx = tf.placeholder(tf.float32, (None, 32, 32, 3))\ny = tf.placeholder(tf.int32, (None))\none_hot_y = tf.one_hot(y, 10)\n### Train your model here.\nrate = 0.001\nlogits = trafficSignClassifier(x)\n\ncross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=one_hot_y)\nloss_operation = tf.reduce_mean(cross_entropy)\noptimizer = tf.train.AdamOptimizer(learning_rate=rate)\ntraining_operation = optimizer.minimize(loss_operation)\n### Calculate and report the accuracy on the training and validation set.\ncorrect_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(one_hot_y, 1))\naccuracy_operation = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\nsaver = tf.train.Saver()\n\n\ndef evaluate(X_data, y_data):\n num_examples = len(X_data)\n total_accuracy = 0\n sess = tf.get_default_session()\n for offset in range(0, num_examples, BATCH_SIZE):\n batch_x, batch_y = X_data[offset:offset + BATCH_SIZE], y_data[offset:offset + BATCH_SIZE]\n accuracy = sess.run(accuracy_operation, feed_dict={x: 
batch_x, y: batch_y})\n total_accuracy += (accuracy * len(batch_x))\n return total_accuracy / num_examples\n\n\n### Once a final model architecture is selected,\n### the accuracy on the test set should be calculated and reported as well.\n### Feel free to use as many code cells as needed.\nwith tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n num_examples = len(x_train)\n\n print(\"Training...\")\n print()\n for i in range(EPOCHS):\n x_train, y_train = shuffle(x_train, y_train)\n for offset in range(0, num_examples, BATCH_SIZE):\n end = offset + BATCH_SIZE\n batch_x, batch_y = x_train[offset:end], y_train[offset:end]\n sess.run(training_operation, feed_dict={x: batch_x, y: batch_y})\n\n validation_accuracy = evaluate(x_valid, y_valid)\n print(\"EPOCH {} ...\".format(i + 1))\n print(\"Validation Accuracy = {:.3f}\".format(validation_accuracy))\n print()\n\n saver.save(sess, './trafficsigns')\n print(\"Model saved\")" }, { "alpha_fraction": 0.7774725556373596, "alphanum_fraction": 0.7829670310020447, "avg_line_length": 44.375, "blob_id": "e9b94150cd0aae7a4b82a5dcefdb425621aca500", "content_id": "c5e692c63715ed9ac3675898d9510a9d1e12be18", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 364, "license_type": "no_license", "max_line_length": 234, "num_lines": 8, "path": "/README.md", "repo_name": "azizahmedkhan/azizsdcn", "src_encoding": "UTF-8", "text": "# azizsdcn\nMy assignments for Self Driving Car Nanodegree\n\nTerms I heard first time\nStochastic Gradient Descent\nEntropy\nSoftmax\nA Rectified linear unit (ReLU) is type of activation function that is defined as f(x) = max(0, x). The function returns 0 if x is negative, otherwise it returns x. TensorFlow provides the ReLU function as tf.nn.relu(), as shown below.\n\n" } ]
4
aryan2000sp/Django_REST
https://github.com/aryan2000sp/Django_REST
27e04766e8c8a85d9f510e62d6d974ab5b323b51
6de98198db3598eb5cc00bd50467f2863a9036da
804f4a3371368b80f8aaab1738ad9aa2026b71d8
refs/heads/main
2023-02-21T00:11:35.276736
2021-01-17T17:33:18
2021-01-17T17:33:18
327,529,505
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7677419185638428, "alphanum_fraction": 0.7693548202514648, "avg_line_length": 46.94736862182617, "blob_id": "18a756cd69480535ce1eee4fc3561873613fc1a9", "content_id": "c0376e56b6f73db7cf02679eb31766df4c30c4b6", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Dockerfile", "length_bytes": 1860, "license_type": "permissive", "max_line_length": 185, "num_lines": 38, "path": "/Dockerfile", "repo_name": "aryan2000sp/Django_REST", "src_encoding": "UTF-8", "text": "#First we are going to inherit the file python file that we going to use. This file will have all the dependencies that we need.\r\n#The FROM will locate which file we want to inherit from\r\n#Then we put the file name that is python and the version.\r\nFROM python:3.9-alpine\r\n\r\n#Now we add the maintainers name it can be your company's name.\r\nMAINTAINER Aryan Pandhare\r\n\r\n#Now we set the environment to the run the python code.\r\n#The environment here we use is PYTHONUNBUFFERED which is\r\n#the python running in unbuffered environment.\r\nENV PYTHONUNBUFFERED 1\r\n\r\n#Now we are going to copy the requirements.txt file in the local directory to dockers image requirement.txt file which will store all the file dependencies.\r\n#The requiremnt.txt file in docker file will have all the libraries that are required to build the project.\r\nCOPY ./requirements.txt /requirements.txt\r\n\r\n#Now we will download all the porject dependencies(all the library or frameworks) in the docker image to required to build this project.\r\n#pip install is the command to install the libraries. 
-r is the flag and reads the file file passed along with it and downloads all the libraries or frameworks in requirements.txt file.\r\nRUN pip install -r /requirements.txt\r\n\r\n#Now this run command will create the /app directory in docker image.\r\nRUN mkdir /app\r\n\r\n#This command will set the our /app as working directory.\r\n#In other words this will be our default directory.\r\nWORKDIR /app\r\n\r\n#Now we will copy all the files that are in the ./app local directory to docker /app directory\r\nCOPY ./app /app\r\n\r\n# Now we will create the user that is going to run the application using the docker\r\n# The -D will only allow user to run the application and make any changes to application.\r\n# The -D is for security purposes.\r\nRUN adduser -D user\r\n\r\n#Now we are going to switch to the user created above\r\nUSER user\r\n" }, { "alpha_fraction": 0.7692307829856873, "alphanum_fraction": 0.7692307829856873, "avg_line_length": 24, "blob_id": "a0627bb871406383afa313723871c20c89db3cb2", "content_id": "c14bbe5b49ae8b7b34e1481650990cbefebe7fa8", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 52, "license_type": "permissive", "max_line_length": 35, "num_lines": 2, "path": "/README.md", "repo_name": "aryan2000sp/Django_REST", "src_encoding": "UTF-8", "text": "# Django_REST\r\nCreating Django REST API for recipe\r\n" }, { "alpha_fraction": 0.7251908183097839, "alphanum_fraction": 0.7251908183097839, "avg_line_length": 42.33333206176758, "blob_id": "38eb9710081d491d80ad265403400d22e40b729f", "content_id": "eaa2b3ad03f92fa71a29f9fb36a6d0ab10f2c973", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 131, "license_type": "permissive", "max_line_length": 105, "num_lines": 3, "path": "/Explanation/Testing_Tutorial.md", "repo_name": "aryan2000sp/Django_REST", "src_encoding": "UTF-8", "text": "# Testing In Django\r\n---\r\n__[Click Here to 
learn more](https://developer.mozilla.org/en-US/docs/Learn/Server-side/Django/Testing)__" }, { "alpha_fraction": 0.7533632516860962, "alphanum_fraction": 0.7533632516860962, "avg_line_length": 26.125, "blob_id": "47c2494bc79103e946f6be156dfc07b22b4c389c", "content_id": "0b4735ac6c8c699e52c3ddb1418babfd7fd8c6aa", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 223, "license_type": "permissive", "max_line_length": 44, "num_lines": 8, "path": "/Explanation/BaseUserManager.md", "repo_name": "aryan2000sp/Django_REST", "src_encoding": "UTF-8", "text": "# What is BaseUserManager?\r\n---\r\n\r\nWhen customize the user model we need the \r\nthe manager to as well. The manager will \r\ninteract with the database and us(the code).\r\nSince we customized the user model we\r\nBaseUsermanager." }, { "alpha_fraction": 0.6058091521263123, "alphanum_fraction": 0.6058091521263123, "avg_line_length": 22.109588623046875, "blob_id": "b1416cfc793743ff11a0cfdc460b135422a58943", "content_id": "56380ed0fa5c79a6fc5a683d203c497f7d28f5ab", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1687, "license_type": "permissive", "max_line_length": 65, "num_lines": 73, "path": "/app/core/admin.py", "repo_name": "aryan2000sp/Django_REST", "src_encoding": "UTF-8", "text": "from django.contrib import admin\nfrom django.contrib.auth.admin import UserAdmin as BaseUserAdmin\nfrom core import models\nfrom django.utils.translation import gettext as _\n\n\"\"\"\nThis class we have extend\nthe default UserAdmin class\nby ordering the users according\nto their ids\n\"\"\"\n\n\nclass UserAdmin(BaseUserAdmin):\n \"\"\"\n This will order the users\n according to their ids\n \"\"\"\n ordering = ['id']\n\n \"\"\"\n This will display the users\n name and email\n \"\"\"\n list_display = ['name', 'email']\n\n \"\"\"\n Now we customize the admin\n page for user change page.\n This will 
include the changing\n the fields. _ will mark\n which fields are translated\n into language prefered by the\n user. We basically add field\n sets which will contain fields.\n \"\"\"\n fieldsets = (\n (None, {'fields': ('email', 'password')}),\n (_('Personal_Info'), {'fields': ('name',)}),\n (\n _('Permissions'),\n {'fields': ('is_active', 'is_staff', 'is_superuser')}\n ),\n (_('Important_Dates'), {'fields': ('last_login',)}),\n )\n\n \"\"\"\n You need to add your\n custom fields to fieldsets\n for fields to be used in\n editing users and to\n add_fieldsets for fields\n to be used when creating a user.\n The classes key sets any\n custom CSS classes we want\n to apply to the form section.\n \"\"\"\n add_fieldsets = (\n (None, {\n 'classes': ('wide',),\n 'fields': ('email', 'password1', 'password2',)\n }),\n )\n\n\n\"\"\"\nFinally we want to\nregister the user\nmodel to admin site\nin order to show the\ntable admin users.\n\"\"\"\nadmin.site.register(models.User, UserAdmin)\n" }, { "alpha_fraction": 0.7661972045898438, "alphanum_fraction": 0.7718309760093689, "avg_line_length": 57.5, "blob_id": "24f8a8a7159a0b95aa038838e574a3d1ae49a51f", "content_id": "075505b1c278aec8afb002ad697982b4d3120f72", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 363, "license_type": "permissive", "max_line_length": 210, "num_lines": 6, "path": "/Explanation/Model_vs_Manager.md", "repo_name": "aryan2000sp/Django_REST", "src_encoding": "UTF-8", "text": "# Models Methods VS Manager Methods\r\n---\r\n\r\nDefine custom methods on a model to add custom “row-level” functionality to your objects. 
Whereas Manager methods are intended to do “table-wide” things, model methods should act on a particular model instance.\r\n\r\n[Click Here to Learn More](https://docs.djangoproject.com/en/3.1/topics/db/models/#model-methods)" }, { "alpha_fraction": 0.7944915294647217, "alphanum_fraction": 0.7987288236618042, "avg_line_length": 77, "blob_id": "e9613d36ed5fdb9ac5d391cec120fa0b408b5bdd", "content_id": "88484e1794e5c0dc4858e97bafbcd245647ed8fc", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 476, "license_type": "permissive", "max_line_length": 297, "num_lines": 6, "path": "/Explanation/PermissionMixins.md", "repo_name": "aryan2000sp/Django_REST", "src_encoding": "UTF-8", "text": "# What are PermissionMixins?\r\n---\r\n\r\nTo make it easy to include Django’s permission framework into your own user class, Django provides PermissionsMixin. This is an abstract model you can include in the class hierarchy for your user model, giving you all the methods and database fields necessary to support Django’s permission model.\r\n\r\n[Click Here to Learn More](https://docs.djangoproject.com/en/3.1/topics/auth/customizing/#django.contrib.auth.models.PermissionsMixin)" }, { "alpha_fraction": 0.5851508378982544, "alphanum_fraction": 0.5851508378982544, "avg_line_length": 27.93055534362793, "blob_id": "591a3ee0fd7c14ac4e965b10bc1b9623c7cb8623", "content_id": "9d2f7692daef97fad65ee4ec08227f018fa228d6", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2155, "license_type": "permissive", "max_line_length": 72, "num_lines": 72, "path": "/app/core/test/test_model.py", "repo_name": "aryan2000sp/Django_REST", "src_encoding": "UTF-8", "text": "from django.test import TestCase\r\n\r\n# Imports the user model or the database table for storing the users\r\nfrom django.contrib.auth import get_user_model\r\n\r\n\r\nclass ModelTests(TestCase):\r\n def 
test_create_user_with_email(self):\r\n \"\"\"\r\n Tests the user creation with email\r\n \"\"\"\r\n email = \"someName@gmaal.com\"\r\n password = \"testPass12@A1\"\r\n\r\n # Create a dummy user to test if the user is created or not\r\n user = get_user_model().objects.create_user(\r\n email=email,\r\n password=password\r\n )\r\n\r\n \"\"\"\r\n Now we check for the user email has been set\r\n properply by calling the assertEqual()\r\n Since the password is incrypted we can not\r\n access the password so we call the assertTrue()\r\n for password.\r\n \"\"\"\r\n self.assertEqual(user.email, email)\r\n self.assertTrue(user.check_password(password))\r\n\r\n def test_new_user_email_normalized(self):\r\n \"\"\"\r\n This function will test if the\r\n user email is normalized or not.\r\n Normalization of email is just\r\n converting the domain name to\r\n lower case.\r\n \"\"\"\r\n email = \"test@SOME_DOMAIN.COM\"\r\n user = get_user_model().objects.create_user(email, \"somePassWord!a\")\r\n\r\n self.assertEqual(user.email, email.lower())\r\n\r\n \"\"\"\r\n This function will check if\r\n the empty email is passed\r\n then an error is raised or not\r\n \"\"\"\r\n\r\n def test_new_user_email_invalid(self):\r\n \"\"\"\r\n This function comes with the\r\n TestCase. 
Whatever is passed\r\n in this fuction that gives the\r\n error is passed as assertions\r\n \"\"\"\r\n with self.assertRaises(ValueError):\r\n get_user_model().objects.create_user(None, \"somePassWord!a\")\r\n\r\n \"\"\"\r\n This function will check if a new\r\n super user created using the CLI\r\n (Command Line Interface)\r\n \"\"\"\r\n\r\n def test_create_new_superuer(self):\r\n user = get_user_model().objects.create_superuser(\r\n \"test@somedomain.com\", \"testPassword!A1\"\r\n )\r\n\r\n self.assertTrue(user.is_superuser)\r\n self.assertTrue(user.is_staff)\r\n" }, { "alpha_fraction": 0.577349841594696, "alphanum_fraction": 0.5815926790237427, "avg_line_length": 26.635513305664062, "blob_id": "5a783ba8a588117acd3624d6621a3a3d5d172a4c", "content_id": "7d51bf57d6c2e2768fafd6a52089b0288f82e99b", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3064, "license_type": "permissive", "max_line_length": 85, "num_lines": 107, "path": "/app/core/test/test_admin.py", "repo_name": "aryan2000sp/Django_REST", "src_encoding": "UTF-8", "text": "from django.test import TestCase, Client\r\nfrom django.contrib.auth import get_user_model\r\n\r\n\"\"\"\r\nThis import will allow us\r\nto generate the urls to\r\ntest the admin page.\r\n\"\"\"\r\nfrom django.urls import reverse\r\n\r\n\r\nclass AdminSiteTests(TestCase):\r\n \"\"\"\r\n This will create a setup\r\n for testing the admin page.\r\n This function will called\r\n everytime the a test function\r\n runs.\r\n \"\"\"\r\n def setUp(self):\r\n # We variable will hold the Client() object\r\n self.client = Client()\r\n\r\n # Now we create a dummy superUser for testing\r\n self.admin_user = get_user_model().objects.create_superuser(\r\n email='admin@somedomain.com',\r\n password='somePassword!A'\r\n )\r\n\r\n # Now we forcefully login the superuser for testing\r\n self.client.force_login(self.admin_user)\r\n\r\n # Now we create a normal user\r\n self.user = 
get_user_model().objects.create_user(\r\n email='user@somedomain.com',\r\n password='somePassword!A',\r\n name='SomeName'\r\n )\r\n\r\n \"\"\"\r\n This function will test if\r\n the user was created and stored\r\n in the admin list.\r\n \"\"\"\r\n def test_users_listed(self):\r\n # First we get the urls where the users lists are loacted\r\n url = reverse('admin:core_user_changelist')\r\n\r\n # Now we create a response object which store repose recieved from url\r\n response = self.client.get(url)\r\n\r\n # Now we check if the reponse contains the user object\r\n self.assertContains(response, self.user.name)\r\n self.assertContains(response, self.user.email)\r\n\r\n \"\"\"\r\n This test will check if the\r\n user change page is redered\r\n correctly\r\n \"\"\"\r\n def test_change_user_page(self):\r\n \"\"\"\r\n First we get the url for the user change\r\n page. The args in reverse will assign a\r\n endpoint to the url and will produce a\r\n url like: admin/core/user/1\r\n where 1 is the args\r\n For informantion:-\r\n https://www.youtube.com/watch?v=JqbBGxDLQeU\r\n also use the following link\r\n to know the conventions for url names\r\n in django for admin:\r\n https://docs.djangoproject.com/en/3.1/ref/contrib/admin/#reversing-admin-urls\r\n \"\"\"\r\n url = reverse(\r\n 'admin:core_user_change',\r\n args=[self.user.id]\r\n )\r\n\r\n \"\"\"\r\n Now we will get pass the url\r\n as request to get user change\r\n page render information in\r\n the form of response object\r\n \"\"\"\r\n response = self.client.get(url)\r\n\r\n \"\"\"\r\n Now we check if the status code\r\n is 200 or not.\r\n \"\"\"\r\n self.assertEqual(response.status_code, 200)\r\n\r\n \"\"\"\r\n This function will test\r\n if the create user in\r\n admin page is rendered\r\n properly or not.\r\n \"\"\"\r\n def test_create_user_page(self):\r\n url = reverse(\r\n 'admin:core_user_add'\r\n )\r\n\r\n response = self.client.get(url)\r\n\r\n self.assertEqual(response.status_code, 200)\r\n" }, { 
"alpha_fraction": 0.7514018416404724, "alphanum_fraction": 0.7514018416404724, "avg_line_length": 65.125, "blob_id": "937e4f29fc8dc87b643ee15a5f460518eafab8ea", "content_id": "c51421d378982a308b17a1da366d7bcc243f53bc", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 535, "license_type": "permissive", "max_line_length": 193, "num_lines": 8, "path": "/Explanation/setUp()_vs_setUpTestData().md", "repo_name": "aryan2000sp/Django_REST", "src_encoding": "UTF-8", "text": "# setUp() VS setUpTestData()\r\n---\r\n\r\n* setUpTestData() is called once at the beginning of the test run for class-level setup. You'd use this to create objects that aren't going to be modified or changed in any of the test methods.\r\n\r\n* setUp() is called before every test function to set up any objects that may be modified by the test (every test function will get a \"fresh\" version of these objects).\r\n\r\n__[Click Here To Learn More](https://developer.mozilla.org/en-US/docs/Learn/Server-side/Django/Testing#test_structure_overview)__" }, { "alpha_fraction": 0.6136363744735718, "alphanum_fraction": 0.6136363744735718, "avg_line_length": 9.916666984558105, "blob_id": "4416f2741615da96b974e1942f89975a042e7ff3", "content_id": "7f5abc74f1e3ab31197bc44a997f210766eed57f", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 132, "license_type": "permissive", "max_line_length": 26, "num_lines": 12, "path": "/deploy.sh", "repo_name": "aryan2000sp/Django_REST", "src_encoding": "UTF-8", "text": "#!/bin/bash\nmessage=\"\"\nfor i in \"$@\"\ndo\n\tmessage=\"${message} ${i}\"\ndone\n\ngit add .\n\ngit commit -m \"$message\"\n\ngit push origin main\n\n" }, { "alpha_fraction": 0.7644230723381042, "alphanum_fraction": 0.7676281929016113, "avg_line_length": 76.25, "blob_id": "171f1e828278f9e6d19691605981a0631e2dcdf3", "content_id": "f1b765fa3a6c2da79bcb8e0813986de5eab811b9", 
"detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 636, "license_type": "permissive", "max_line_length": 255, "num_lines": 8, "path": "/Explanation/Managers.md", "repo_name": "aryan2000sp/Django_REST", "src_encoding": "UTF-8", "text": "# What are managers?\r\n---\r\n\r\nA Manager is the interface through which database query operations are provided to Django models. At least one Manager exists for every model in a Django application. __By default, Django adds a Manager with the name objects to every Django model class.__\r\n\r\nAdding extra Manager methods is the preferred way to add “table-level” functionality to your models. (For “row-level” functionality – i.e., functions that act on a single instance of a model object – use Model methods, not custom Manager methods.)\r\n\r\n[Click Here To Learn More](https://docs.djangoproject.com/en/3.1/topics/db/managers/)" }, { "alpha_fraction": 0.7121211886405945, "alphanum_fraction": 0.7121211886405945, "avg_line_length": 21, "blob_id": "0bc92ae9a4ae887601035cf67feac232ffd84a5a", "content_id": "29e79dbf3f3d07db7e43288404f5edb22637c000", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 66, "license_type": "permissive", "max_line_length": 52, "num_lines": 3, "path": "/test.sh", "repo_name": "aryan2000sp/Django_REST", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\ndocker-compose run app sh -c \"python manage.py test\"\n" }, { "alpha_fraction": 0.6583301424980164, "alphanum_fraction": 0.660638689994812, "avg_line_length": 23.990385055541992, "blob_id": "b487a90d871e52f322d77e97d8326dc3be6458b6", "content_id": "1866bef6788153a18464ffc2b6a5bde728229660", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2599, "license_type": "permissive", "max_line_length": 76, "num_lines": 104, "path": "/app/core/models.py", "repo_name": 
"aryan2000sp/Django_REST", "src_encoding": "UTF-8", "text": "from django.db import models\n\n# These import will help us to extend the user model\nfrom django.contrib.auth.models import (\n AbstractBaseUser,\n BaseUserManager,\n PermissionsMixin,\n)\n\n\"\"\"\nThis class will provide the\nhelper function for creating\nand maintaining the user and\nsuperuser.\n\"\"\"\n\n\nclass UserManager(BaseUserManager):\n \"\"\"\n If in the future we want to extend the\n model then we can add them as\n extra_fields.\n This function will create a\n a new user and saves(commit) it\n to the database.\n \"\"\"\n\n def create_user(self, email, password=None, **extra_fields):\n\n if not email:\n raise ValueError(\"User Should Have Email\")\n\n # Now we add the user's email to models\n user = self.model(email=self.normalize_email(email), **extra_fields)\n\n \"\"\"\n Now we add the user's password to models.\n We want the user password to be not\n exposed so we use set_password() which comes\n in build with BaseUserManager class\n \"\"\"\n user.set_password(password)\n\n # Now we save the user. If in case we use a different database.\n user.save(using=self._db)\n\n return user\n\n \"\"\"\n This function create a new\n super user.\n \"\"\"\n\n def create_superuser(self, email, password):\n # We create a normal user first.\n user = self.create_user(email, password)\n\n # Now we give the user some spacial permissions\n user.is_staff = True\n user.is_superuser = True\n\n # Now we save the user\n user.save(using=self._db)\n\n return user\n\n\n\"\"\"\nSince we are writing the user model from\nsratch we are extending the AbstractBaseUser.\nAbstractBaseUser allows us to customize the\nuser model.\nThis will also define the structure of\nthe user model. 
In this class we will\ndefine the fields in user model.\n\"\"\"\n\n\nclass User(AbstractBaseUser, PermissionsMixin):\n email = models.EmailField(max_length=255, unique=True)\n name = models.CharField(max_length=250)\n\n \"\"\"\n Allows us to know if the user is active in the system\n \"\"\"\n is_active = models.BooleanField(default=True)\n\n \"\"\"\n Now set the staff user activation to false\n This restricts the user accesing the\n admin page.\n \"\"\"\n is_staff = models.BooleanField(default=False)\n\n \"\"\"\n Since we customise the user model we\n need the customized user manager to\n handle the backend management of data.\n Hence we set the object to UserManager() class.\n NOTE:-\n Ojects are Model managers.\n \"\"\"\n objects = UserManager()\n USERNAME_FIELD = \"email\"\n" }, { "alpha_fraction": 0.7654321193695068, "alphanum_fraction": 0.7654321193695068, "avg_line_length": 63, "blob_id": "bf2c134b277f80f7e896604974feaf418b2022b7", "content_id": "ef4e9f8d956e1e90926295a33fbe642a8cade855", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 648, "license_type": "permissive", "max_line_length": 243, "num_lines": 10, "path": "/Explanation/AbstractBaseUser.md", "repo_name": "aryan2000sp/Django_REST", "src_encoding": "UTF-8", "text": "# AbstractUser vs AbstractBaseUser\r\n---\r\n\r\nThe default User model in Django uses a username to uniquely identify a user during authentication. 
If you'd rather use an email address, you'll need to create a custom User model by either subclassing __AbstractUser__ or __AbstractBaseUser__.\r\n\r\nOptions:\r\n* __AbstractUser__: Use this option if you are happy with the existing fields on the User model and just want to remove the username field..\r\n* __AbstractBaseUser__: Use this option if you want to start from scratch by creating your own, completely new User model.\r\n\r\n[click Here To Learn More](https://testdriven.io/blog/django-custom-user-model/)" }, { "alpha_fraction": 0.356589138507843, "alphanum_fraction": 0.6279069781303406, "avg_line_length": 24.200000762939453, "blob_id": "30048703f6ca7c45160f86a54018f1069d886c94", "content_id": "dbbdeb6efce725e9ea10d63fcbfda5e9ff3d0580", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 129, "license_type": "permissive", "max_line_length": 35, "num_lines": 5, "path": "/requirements.txt", "repo_name": "aryan2000sp/Django_REST", "src_encoding": "UTF-8", "text": "django>=3.1.4,<3.2.0\r\ndjangorestframework>=3.12.1,<3.13.0\r\nflake8>=3.8.4,<3.9.0\r\nautopep8>=1.5.4,<1.6.0\r\npep8radius>=0.9.2,<1.0.0" } ]
16
sachinthakur9614/Data-Structure-Using-Python
https://github.com/sachinthakur9614/Data-Structure-Using-Python
9c65c2dea615f656a3416dcc76d46871b90561da
7efa3a13afd569419fd4d63a6c89f8b4d2c6d421
0484df16f0da56e15cf224409a6d89bf5224d5ed
refs/heads/master
2020-12-22T11:18:59.960239
2020-02-28T19:22:50
2020-02-28T19:22:50
236,763,899
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5265888571739197, "alphanum_fraction": 0.5590142607688904, "avg_line_length": 25.586206436157227, "blob_id": "3bb7f0739c036e275e3a68582b51b7062755028d", "content_id": "e5b5f12b1dce2c0715cfea20d2f470bf0e486dc1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 771, "license_type": "no_license", "max_line_length": 55, "num_lines": 29, "path": "/sorting_algorithms/merge_sort.py", "repo_name": "sachinthakur9614/Data-Structure-Using-Python", "src_encoding": "UTF-8", "text": "\ndef merge_sort(unlist):\n if len(unlist) <= 1:\n return unlist\n mid = len(unlist) // 2\n left_lst = unlist[:mid]\n right_lst = unlist[mid:]\n left_lst = merge_sort(left_lst)\n right_lst = merge_sort(right_lst)\n return list(merge(left_lst,right_lst))\n\n\ndef merge(left_half, right_half):\n res = []\n while len(left_half) != 0 and len(right_half) != 0:\n if left_half[0] < right_half[0]:\n res.append(left_half[0])\n left_half.remove(left_half[0])\n else:\n res.append(right_half[0])\n right_half.remove(right_half[0])\n \n if len(left_half) ==0:\n res = res+ right_half\n else:\n res= res +left_half\n return res\n\nunlst = [64,34,25,12,22,90,80]\nprint(merge_sort(unlst))" }, { "alpha_fraction": 0.4760563373565674, "alphanum_fraction": 0.47887325286865234, "avg_line_length": 25.393617630004883, "blob_id": "4da60ab7e36bf249869b19ae119fb7a6a148973c", "content_id": "4251e5cdaf55dfdf90485bb07a1884b51618c5f5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2485, "license_type": "no_license", "max_line_length": 63, "num_lines": 94, "path": "/tree_data_structure/binary_tree.py", "repo_name": "sachinthakur9614/Data-Structure-Using-Python", "src_encoding": "UTF-8", "text": "\n\nclass Node:\n def __init__(self,data):\n self.left = None\n self.right = None\n self.data = data\n \n def insert(self,data):\n if self.data:\n if data <self.data:\n if self.left is None:\n self.left 
= Node(data)\n else:\n self.left.insert(data)\n elif data >self.data:\n if self.right is None:\n self.right = Node(data)\n else:\n self.right.insert(data)\n \n else:\n self.data = data\n \n def delete_node(self,root,key):\n if not root:\n return root\n if root.data >key:\n root.left = self.delete_node(root.left,key)\n elif root.data < key:\n root.right = self.delete_node(root.right,key)\n else:\n if not root.right:\n return root.left\n if not root.left:\n return root.right\n \n temp_val = root.right\n mini_val = temp_val.data\n while temp_val.left:\n temp_val = temp_val.left\n mini_val = temp_val.data\n root.data = mini_val\n\n root.right = self.delete_node(root.right,root.data)\n return root\n\n \n def find_val(self,key_val):\n if key_val<self.data:\n if self.left is None:\n return str(key_val) +\"not found!\"\n return self.left.find_val(key_val)\n elif key_val >self.data:\n if self.right is None:\n return str(key_val) +\"not found!\"\n return self.right.find_val(key_val)\n else:\n print(str(self.data)+'is found!')\n\n def PrintTree(self):\n if self.left:\n self.left.PrintTree()\n print(self.data)\n if self.right:\n self.right.PrintTree()\n \n def post_order(self,root):\n if root:\n self.post_order(root.left)\n self.post_order(root.right)\n print(root.data)\n \n def in_order(self,root):\n if root:\n self.in_order(root.left)\n print(root.data)\n self.in_order(root.right)\n\n def pre_order(self,root):\n if root:\n print(root.data)\n self.pre_order(self.left)\n self.pre_order(self.right)\n\n\n\nroot = Node(12)\nroot.insert(6)\nroot.insert(10)\nroot.insert(13)\n \n \n \n\nroot.PrintTree()\n\n\n" }, { "alpha_fraction": 0.4923076927661896, "alphanum_fraction": 0.5538461804389954, "avg_line_length": 26.785715103149414, "blob_id": "3b48217b8dbf79c00890465e1fc4999327c19504", "content_id": "bd209c1089ea74fabb5b83367215f94d1235c9b7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 390, "license_type": "no_license", 
"max_line_length": 46, "num_lines": 14, "path": "/sorting_algorithms/selection_sort.py", "repo_name": "sachinthakur9614/Data-Structure-Using-Python", "src_encoding": "UTF-8", "text": "\n\ndef selection_sort(nlist):\n for item in range(len(nlist)-1,0,-1):\n max_pos = 0\n for location in range(1,item+1):\n if nlist[location]>nlist[max_pos]:\n max_pos = location\n \n temp = nlist[item]\n nlist[item] = nlist[max_pos]\n nlist[max_pos] = temp\n\nnlist = [14,46,43,27,57,41,45,21,70]\nselection_sort(nlist)\nprint(nlist)" }, { "alpha_fraction": 0.7494577169418335, "alphanum_fraction": 0.7516269087791443, "avg_line_length": 53.29411697387695, "blob_id": "8a4f7db682fe39e84ebf151b20cd25f856223174", "content_id": "936a695e7c018f37da975e83ec6c54d76011b90b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 932, "license_type": "no_license", "max_line_length": 601, "num_lines": 17, "path": "/linkedlist/translate.py", "repo_name": "sachinthakur9614/Data-Structure-Using-Python", "src_encoding": "UTF-8", "text": "from translation import google, ConnectError\n\n# print(google('This is Sachin Here', dst = 'hi'))\n# Tkinter import *\nimport Tkinter\nde_blob = TextBlob(u\"How are you reading this post right now? It might be on desktop, on mobile, maybe a tablet, but whatever device you’re using, it’s most definitely connected to the internet.An internet connection is a wonderful thing, it give us all sorts of benefits that just weren’t possible before. If you’re old enough, think of your cellphone before it was a smartphone. You could call and you could text sure, but now you can read any book, watch any movie, or listen to any song all in the palm of your hand. 
And that’s just to name a few of the incredible things your smartphone can do.\")\n\n# print(de_blob.translate(to='hi'))\n# print(de_blob)\nwindow = Tk()\nwindow.title(\"Welcome to LikeGeeks app\")\n \nlbl = Label(window, text=fig)\n \nlbl.grid(column=0, row=0)\n \nwindow.mainloop()" }, { "alpha_fraction": 0.5320945978164673, "alphanum_fraction": 0.5540540814399719, "avg_line_length": 17.3125, "blob_id": "007f6205144cf26c45228876804a073aa8c41e4d", "content_id": "d399b7affa129e1f5268158c7a2ade8493aba923", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 592, "license_type": "no_license", "max_line_length": 43, "num_lines": 32, "path": "/graphs/depth_first_search.py", "repo_name": "sachinthakur9614/Data-Structure-Using-Python", "src_encoding": "UTF-8", "text": "\n\nfrom collections import defaultdict\n\n\nclass Graph:\n def __init__(self):\n self.graph = defaultdict(list)\n \n def addEdge(self,u,v):\n self.graph[u].append(v)\n \n def DFSUtil(self,v,visited):\n visited[v] = True\n print(v,end=\" \")\n\n for i in self.graph[v]:\n if visited[i]==False:\n self.DFSUtil(i,visited)\n\n def DFS(self,v):\n visited = [False]*(len(self.graph))\n self.DFSUtil(v,visited)\n\n\n\ng = Graph()\ng.addEdge(0,1)\ng.addEdge(0,2)\ng.addEdge(1,2)\ng.addEdge(2,0)\ng.addEdge(2,3)\ng.addEdge(3,3)\ng.DFS(1)\n\n\n\n\n" }, { "alpha_fraction": 0.5103550553321838, "alphanum_fraction": 0.514053225517273, "avg_line_length": 20.774192810058594, "blob_id": "3b5543eae0797d6c94af345a102fa35a83f56988", "content_id": "0f448453b8ed13dd16bf552444c0d3d31f3f7e9a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1352, "license_type": "no_license", "max_line_length": 43, "num_lines": 62, "path": "/linkedlist/double_linked_list.py", "repo_name": "sachinthakur9614/Data-Structure-Using-Python", "src_encoding": "UTF-8", "text": "\n\nclass Node:\n def __init__(self,data):\n self.data = data\n 
self.previous = None\n self.next = None\n\n \n\nclass DoublyLinkedList:\n def __init__(self):\n self.head = None\n self.tail = None\n\n def addNode(self, data):\n newNode = Node(data)\n if self.head ==None:\n self.head = self.tail = newNode\n self.head.previous = None\n self.tail.next = None\n else:\n self.tail.next = newNode\n newNode.previous = self.tail\n self.tail = newNode\n self.tail.next = None\n\n \n def display(self):\n current = self.head\n if self.head ==None:\n print(\"list is empty\")\n return\n while(current!= None):\n \n print(current.data)\n current = current.next\n\n def append(self,data):\n new_node = Node(data)\n new_node.next = None\n if self.head is None:\n new_node.previous = None\n self.head = new_node\n return\n current = self.head\n while current.next is not None:\n current = current.next\n current.next = new_node\n new_node.previous = current\n return\n\n\n\n\n\nd = DoublyLinkedList()\nd.addNode(1)\nd.addNode(2)\nd.addNode(3)\nd.addNode(4)\nd.addNode(5)\n\nd.display()\n" }, { "alpha_fraction": 0.46809256076812744, "alphanum_fraction": 0.46809256076812744, "avg_line_length": 30.0108699798584, "blob_id": "17c238c283da3c220bd880d39ac8c3d00b96eaac", "content_id": "af02d866a579610bc592dc8e7a4f95d24df7ecc9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2852, "license_type": "no_license", "max_line_length": 82, "num_lines": 92, "path": "/linkedlist/circular_linked_list.py", "repo_name": "sachinthakur9614/Data-Structure-Using-Python", "src_encoding": "UTF-8", "text": "class Node(object):\n def __init__(self,key,next = None):\n self.key = key\n self.next = None\n \n def __str__(self):\n return str(self.key)\n \n def __repr__(self):\n return str(self.key)\n\n\n\nclass CircularLinkedList:\n def __init__(self,head=None):\n self.head = head\n \n def append(self,key):\n new_node = Node(key)\n\n if self.head is None:\n self.head = new_node\n new_node.next = self.head\n else:\n current_node = 
self.head\n while current_node.next!= self.head:\n current_node = current_node.next\n current_node.next= new_node\n new_node.next= self.head\n \n def prepend(self,key):\n new_node = Node(key)\n\n current_node = self.head\n new_node.next = self.head\n\n if self.head is None:\n new_node.next = new_node\n else:\n while current_node.next!= self.head:\n current_node = current_node.next\n \n current_node.next = new_node\n self.head = new_node\n\n\n def insert_after_key_node(self,next_key,key):\n currrent_node = self.head\n while current_node:\n if currrent_node.next == self.head and currrent_node.key == next_key:\n self.append(key)\n return\n \n elif currrent_node.key == next_key:\n new_node = Node(key)\n new_node = currrent_node.next\n currrent_node.next = new_node\n new_node.next = new_node\n\n else:\n if currrent_node.next == self.head:\n break\n currrent_node = currrent_node.next\n\n \n def delete(self,delete_key):\n currrent_node = self.head\n pre_node = None\n while current_node:\n \n if currrent_node.key == delete_key and currrent_node == self.head:\n if currrent_node.new_node == self.head:\n currrent_node = None\n self.head = None\n\n else:\n while currrent_node.next!= self.head:\n currrent_node = currrent_node.next\n \n currrent_node.next = self.head.next\n self.head = self.head.next\n currrent_node = None\n return\n elif currrent_node.key == delete_key:\n pre_node.next = currrent_node.next\n currrent_node = None\n return\n else: \n if currrent_node.next ==self.head:\n break\n pre_node = currrent_node\n currrent_node = currrent_node.next" }, { "alpha_fraction": 0.4033333361148834, "alphanum_fraction": 0.4866666793823242, "avg_line_length": 25.909090042114258, "blob_id": "110a35e99de252c461901c5d4de112b6d43d5d54", "content_id": "98dfd1bffdf09641f812486672777a9d5f74a887", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 300, "license_type": "no_license", "max_line_length": 43, "num_lines": 11, "path": 
"/sorting_algorithms/bubble_sort.py", "repo_name": "sachinthakur9614/Data-Structure-Using-Python", "src_encoding": "UTF-8", "text": "\n\n\ndef bubblesort(lst):\n for item_num in range(len(lst)-1,0,-1):\n for idx in range(item_num):\n if lst[idx]>lst[idx+1]:\n temp = lst[idx]\n lst[idx] = lst[idx+1]\n lst[idx+1] = temp\n\n\nlst = [10,12,9,15,16,70,80,42,34,90]\nbubblesort(lst)\n\n" }, { "alpha_fraction": 0.5045122504234314, "alphanum_fraction": 0.5126772522926331, "avg_line_length": 21.152381896972656, "blob_id": "9938b7164eb728ee61c2b9436a096696e3a5387b", "content_id": "4ef6fbb07f836e60331196765305ed34caddaddf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2327, "license_type": "no_license", "max_line_length": 52, "num_lines": 105, "path": "/linkedlist/single_linked_list.py", "repo_name": "sachinthakur9614/Data-Structure-Using-Python", "src_encoding": "UTF-8", "text": "class Node(object):\n\n def __init__(self,data = None,next_node = None):\n self.data = data\n self.next_node = next_node\n\n def get_data(self):\n return self.data\n def get_next(self):\n return self.next_node\n def set_next(self,new_next):\n self.next_node = new_next\n\n \n\n\n\n\n\n\nclass LinkedList(object):\n def __init__(self,head = None):\n self.head = head\n\n def __repr__(self):\n nodes = []\n\n current = self.head\n while current:\n nodes.append(repr(current.get_data()))\n current = current.next_node\n return '[' + ','.join(nodes) + ']'\n \n\n def insert(self,data):\n new_node = Node(data)\n new_node.set_next(self.head)\n self.head = new_node\n\n \n def size(self):\n current = self.head\n count =0\n while current:\n count +=1\n current = current.get_next()\n\n return count\n\n def search(self,data):\n current = self.head\n found = False\n while current and found is False:\n if current.get_data() == data:\n found = True\n return current.get_data()\n\n else:\n current = current.get_next()\n \n if current is None:\n raise 
ValueError(\"Data not in list\")\n return current.get_data()\n \n def delete(self,data):\n current = self.head\n previous = None\n found = False\n while current and found is False:\n if current.get_data()== data:\n found = True\n else:\n previous = current\n current = current.get_next\n if current is None:\n raise ValueError(\"Data not in list\")\n if previous is None:\n raise ValueError(\"Data not in list\")\n\n else:\n previous.set_next(current.get_next)\n\n\n\n def reverse(self):\n current = self.head\n pre_node = None\n next_node = None\n while current:\n next_node = current.get_next()\n current.set_next = pre_node\n pre_node = current\n current.set_next = next_node\n self.head = pre_node\n\nf1 = LinkedList()\n\nf1.insert(10)\nf1.insert(20)\nf1.insert(30)\nf1.insert(40)\nf1.insert(50)\n\n\nprint(f1)\n\n" } ]
9
SU-3IN025-fev2020/projet-kolkata-lila
https://github.com/SU-3IN025-fev2020/projet-kolkata-lila
255fb531c8cef69229aa344704202ff67009fb08
073d11a77c0e150e6322dc0efbc1bb9ac45a79f7
bfd24f1b7ea5abafe5fde54f3cf33cb5539b131d
refs/heads/master
2021-04-14T22:05:18.138342
2020-05-15T15:53:27
2020-05-15T15:53:27
249,270,911
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5257844924926758, "alphanum_fraction": 0.5366366505622864, "avg_line_length": 27.01886749267578, "blob_id": "10ea05e10e0d19c8a42a46de0ad1928e44c88e2c", "content_id": "974c4d3d91a2c9e5eb0bb15d248abf04e874dc96", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 11950, "license_type": "no_license", "max_line_length": 301, "num_lines": 424, "path": "/kolkata-restaurant/kalkota_restaurants.py", "repo_name": "SU-3IN025-fev2020/projet-kolkata-lila", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\n# Nicolas, 2020-03-20\n\nfrom __future__ import absolute_import, print_function, unicode_literals\nfrom gameclass import Game,check_init_game_done\nfrom spritebuilder import SpriteBuilder\nfrom players import Player\nfrom sprite import MovingSprite\nfrom ontology import Ontology\nfrom itertools import chain\nimport pygame\nimport glo\n\nimport random \nimport numpy as np\nimport sys\n\nimport heapq \ntaille= 20\nfrom collections import deque\n\nclass Case():\n def __init__(self,g,pere,cor):\n self.cor=cor\n self.g=g\n self.pere = pere\n \n def estBut(self,buts):\n return self.cor == buts \n def __lt__(self,other):\n return self.g > other.g\n \n\ndef h(case, but):\n return abs(case[0]-but[0]) + abs(case[1]-but[1])\n \ndef A_star(initStates ,goalStates , wallStates, dim ):\n frontiere =[]\n reserve = np.zeros(dim)\n\n caseInit = Case(0,None ,initStates) #case initiale \n frontiere = [ (caseInit.g+h(initStates,goalStates), caseInit) ] \n #frontiere qui est la case init , je calcuyle son heuristique \n\n bCase = caseInit \n\n while frontiere !=[] and not bCase.estBut(goalStates):\n f,bCase = heapq.heappop(frontiere)\n \n\n \"\"\" \n étendre les cases \n \"\"\"\n i,j = bCase.cor \n reserve[i,j] =1 \n #etendre les cases \n if j+1<taille and not (i,j+1) in wallStates and reserve[i,j+1]==0 :\n case = Case(bCase.g+1, bCase, (i,j+1))\n heapq.heappush(frontiere,(h(case.cor,goalStates)+case.g, 
case))\n \n if j-1>0 and not (i,j-1) in wallStates and reserve[i,j-1]==0 :\n case = Case(bCase.g+1, bCase, (i,j-1))\n heapq.heappush(frontiere,(h(case.cor,goalStates)+case.g, case))\n \n if i+1 <taille and not (i+1,j) in wallStates and reserve[i+1,j]==0 :\n case = Case(bCase.g+1, bCase, (i+1,j))\n heapq.heappush(frontiere,(h(case.cor,goalStates)+case.g, case))\n \n if i-1>0 and not (i-1,j) in wallStates and reserve[i-1,j]==0 :\n case = Case(bCase.g+1, bCase, (i-1,j))\n heapq.heappush(frontiere,(h(case.cor,goalStates)+case.g, case))\n \n\n\n currcase = bCase \n chemin = deque()\n while currcase.cor !=caseInit.cor:\n chemin.appendleft(currcase.cor)\n currcase= currcase.pere\n\n\n return chemin\n\n \n# ---- ---- ---- ---- ---- ----\n# ---- Main ----\n# ---- ---- ---- ---- ---- ----\n\ngame = Game()\n\ndef init(_boardname=None):\n global player,game\n # pathfindingWorld_MultiPlayer4\n name = _boardname if _boardname is not None else 'kolkata_6_10'\n game = Game('Cartes/' + name + '.json', SpriteBuilder)\n game.O = Ontology(True, 'SpriteSheet-32x32/tiny_spritesheet_ontology.csv')\n game.populate_sprite_names(game.O)\n game.fps = 5 # frames per second\n game.mainiteration()\n game.mask.allow_overlaping_players = True\n #player = game.player\n \ndef main():\n\n #for arg in sys.argv:\n iterations = 20 # default\n if len(sys.argv) == 2:\n iterations = int(sys.argv[1])\n print (\"Iterations: \")\n print (iterations)\n\n init()\n \n \n \n\n \n #-------------------------------\n # Initialisation\n #-------------------------------\n nbLignes = game.spriteBuilder.rowsize\n nbColonnes = game.spriteBuilder.colsize\n print(\"lignes\", nbLignes)\n print(\"colonnes\", nbColonnes)\n \n \n players = [o for o in game.layers['joueur']]\n nbPlayers = len(players)\n \n \n # on localise tous les états initiaux (loc du joueur)\n initStates = [o.get_rowcol() for o in game.layers['joueur']]\n print (\"Init states:\", initStates)\n \n \n # on localise tous les objets ramassables (les 
restaurants)\n goalStates = [o.get_rowcol() for o in game.layers['ramassable']]\n print (\"Goal states:\", goalStates)\n global nbRestaus \n nbRestaus = len(goalStates)\n \n # on localise tous les murs\n wallStates = [w.get_rowcol() for w in game.layers['obstacle']]\n #print (\"Wall states:\", wallStates)\n \n # on liste toutes les positions permises\n allowedStates = [(x,y) for x in range(nbLignes) for y in range(nbColonnes)\\\n if (x,y) not in wallStates or goalStates] \n \n #-------------------------------\n # Placement aleatoire des joueurs, en évitant les obstacles\n #-------------------------------\n \n posPlayers = initStates\n\n \n for j in range(nbPlayers):\n x,y = random.choice(allowedStates)\n players[j].set_rowcol(x,y)\n game.mainiteration()\n posPlayers[j]=(x,y)\n\n\n \n \n \n #-------------------------------\n # chaque joueur choisit un restaurant\n #-------------------------------\n # STRATEGIE DU CHOIX DU RESTAURANT \n\n restau=[0]*nbPlayers\n\n #-------------------------------\n # Boucle principale de déplacements \n #-------------------------------\n \n \n # bon ici on fait juste plusieurs random walker pour exemple...\n #MOI JE LE REMPLACE PAR A*\n \n \n #historique = {0:[0],1:[0],2:[0],3:[0],4:[0],5:[0]}\n historique = [[0] for i in range(nbRestaus)]\n \n gain = [0]* nbPlayers \n \n \n global it\n \"\"\"\n La stratégie du choix du restau \n \"\"\"\n global strategies\n strategies = { 0 :alea , 1 : tetu , 2 : most , 3 : less ,4 : derMax , 5 : derMin, 6 : iteration }\n global nbStrategie \n nbStrategie = len(strategies)\n \n description = {0 :\"Choix aléatoire\" , 1 : \"Stratégie Tetu \" , 2 : \"Le restau le plus fréquenté\" , 3 : \"Le restau le moins fréquenté \" ,4 : \"Le restau le plus fréquenté à la dernière itération \", 5 : \"Le restau le moins fréquenté à la dernière itération\" , 6 : \"Choix séquentiel des restaurants\" }\n \n \n #\"\"\"\n for i in range(iterations):\n it = i \n \n #Permet de savoir pour chaque itération quelle sont les 
jouerus qui ont choisi un restau donnée \n \n choix = [[] for i in range(nbRestaus)]\n \n \n #élargir l'historique pour chaqie itération \n \n if i !=0 :\n for h in historique:\n h.append(0)\n \n \n for j in range(nbPlayers):\n x,y = random.choice(allowedStates)\n players[j].set_rowcol(x,y)\n game.mainiteration()\n posPlayers[j]=(x,y)\n \n \n for j in range(nbPlayers):\n restau[j] = strategies[j%nbStrategie](historique ,nbPlayers)\n \n \n for j in range(nbPlayers):\n rest = restau[j]\n row,col = posPlayers[j]\n restaurantP_pos = goalStates[rest]\n \n \n #Obtenir le chemin avec A*\n \n chemin = A_star((row,col) ,restaurantP_pos, wallStates, (taille, taille) )\n \n for ch in chemin :\n col=ch[1]\n row=ch[0]\n players[j].set_rowcol(row , col)\n print (\"pos :\", j,row,col)\n game.mainiteration()\n posPlayers[j]=(row,col)\n \n choix[rest].append(j) #on ajoute le joueur \n \n historique[rest][i]+=1 \n \n \n \n #Le choix aléatoire d'un joueur qui s'est présenté a un restaurant \n \n for r in choix :\n if len(r)>0 :\n g = random.randint(0,len(r)-1)\n joueur = r[g]\n gain[joueur]+=1 \n print(historique)\n print(gain)\n index =[idx for idx,e in enumerate(gain) if e == max(gain)]\n \n print(\"les tratégies qui ont donneles meilleurs gains sont :\") \n for s in index :\n print(description[s%nbStrategie])\n \n \n pygame.quit()\n \n \n \n \n \n \n \"\"\"\n #VEUILLEZ DECOMMENTER CETTE PARTIE ET COMMENTER LA PARTIE EN DESSUS POUR FAIRE UNE SIMULATION DE DEUX STRATEGIES DIFFERENTES \n #VEILLEZ FAIRE LE CHOIX DE DEUX STRATEGIES .. 
\n #ICI PAR DEFAUT C'EST : RESTAU MOINS FREQUENTE ET RESTAU PLUS FREQUENTE \n \n for i in range(iterations):\n it = i \n \n #Permet de savoir pour chaque itération quelle sont les jouerus qui ont choisi un restau donnée \n \n choix = [[] for i in range(nbRestaus)]\n \n \n #élargir l'historique pour chaqie itération \n \n if i !=0 :\n for h in historique:\n h.append(0)\n \n \n for j in range(nbPlayers):\n x,y = random.choice(allowedStates)\n players[j].set_rowcol(x,y)\n game.mainiteration()\n posPlayers[j]=(x,y)\n \n \n for j in range(nbPlayers):\n if j %2 == 0 :\n restau[j] =less (historique ,nbPlayers)\n else :\n \n restau[j] = most(historique ,nbPlayers)\n \n \n for j in range(nbPlayers):\n rest = restau[j]\n row,col = posPlayers[j]\n restaurantP_pos = goalStates[rest]\n \n #Obtenir le chemin avec A*\n \n chemin = A_star((row,col) ,restaurantP_pos, wallStates, (taille, taille) )\n \n for ch in chemin :\n col=ch[1]\n row=ch[0]\n players[j].set_rowcol(row , col)\n print (\"pos :\", j,row,col)\n game.mainiteration()\n posPlayers[j]=(row,col)\n \n choix[rest].append(j) #on ajoute le joueur \n \n historique[rest][i]+=1 \n \n \n \n #Le choix aléatoire d'un joueur qui s'est présenté a un restaurant \n \n for r in choix :\n if len(r)>0 :\n g = random.randint(0,len(r)-1)\n joueur = r[g]\n gain[joueur]+=1 \n print(historique)\n print(gain)\n index =[idx for idx,e in enumerate(gain) if e == max(gain)]\n \n \n print(\"la stratégie qui ont donne le meilleur gain est :\") \n s1 =0 \n s2=0\n \n for j in range(int(nbPlayers/2)):\n s1 += gain[j] \n s2 +=gain[j+1]\n \n if (s1==s2) :\n print(\"les deux stratégies ont donnée les mêmes gains\")\n if ( s1> s2):\n print(\"Le restau le moins fréquenté \")\n else :\n print(\"Le restau le plus fréquenté\")\n \n \n \n pygame.quit()\n \n \"\"\"\n \n \n \n\"\"\"\nChoix aléatoire \n\"\"\"\ndef alea(historique ,nbJoueur ):\n return random.randint(0,nbRestaus-1)\n \n\"\"\"\nLe jouerur choisit toujours le meme restaurant \n\"\"\"\ndef 
tetu(historique , nbJoueur ):\n return nbJoueur % nbRestaus \n\n\"\"\"\nLe joueur choisit le restaurant le plus fréquenté pour toute les itérations\n\"\"\"\ndef most(historique , nbJoueur ):\n l = [sum(h) for h in historique]\n r =[idx for idx,e in enumerate(l) if e == max(l)]\n return r[random.randint(0,len(r)-1)]\n \n\"\"\"\nLe joueur choisit le restaurant le moins fréquenté pour toute les itérations\n\"\"\" \ndef less(historique , nbJoueur ):\n l = [sum(h) for h in historique]\n r =[idx for idx,e in enumerate(l) if e == min(l)]\n return r[random.randint(0,len(r)-1)]\n\n\"\"\"\nLe joueur choisit le restaurant le moins fréquenté à la dernière itération \n\"\"\" \ndef derMin(historique , nbJoueur):\n l= [h[-1] for h in historique] \n r =[idx for idx,e in enumerate(l) if e == min(l)]\n return r[random.randint(0,len(r)-1)]\n \n\"\"\"\nLe joueur choisit le restaurant le plus fréquenté à la dernière itération \n\"\"\" \ndef derMax(historique , nbJoueur ):\n l= [h[-1] for h in historique] \n r =[idx for idx,e in enumerate(l) if e == max(l)]\n return r[random.randint(0,len(r)-1)]\n\n\"\"\"\nLe joueur choisit les restaurants séquentiellement \n\"\"\" \ndef iteration(historique , nbJoueur ):\n return it % nbRestaus \n\n\n\n\n\nif __name__ == '__main__':\n main()\n \n\n\n" } ]
1
gsemet/zwave-js-server-python
https://github.com/gsemet/zwave-js-server-python
7ae666e53767381c719426faf10f4e53c1216f6b
a5094e822ba82ba6768b690b0268179505f661c0
a39c8a19ff7ec6142e5027c160d1ce6e5d5cd390
refs/heads/master
2023-03-11T14:42:02.269838
2021-03-02T21:22:18
2021-03-02T21:22:18
344,403,606
0
0
Apache-2.0
2021-03-04T08:31:33
2021-03-04T08:31:34
2021-03-04T08:32:43
null
[ { "alpha_fraction": 0.6882622241973877, "alphanum_fraction": 0.6882622241973877, "avg_line_length": 27.83516502380371, "blob_id": "29b5abf027dd8cf2265e098bb863224273dff1e0", "content_id": "0ef18c92d1f168227be5acac536a07e3fee34e97", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2624, "license_type": "permissive", "max_line_length": 84, "num_lines": 91, "path": "/zwave_js_server/exceptions.py", "repo_name": "gsemet/zwave-js-server-python", "src_encoding": "UTF-8", "text": "\"\"\"Exceptions for zwave-js-server.\"\"\"\n\n\nfrom typing import Optional\n\n\nclass BaseZwaveJSServerError(Exception):\n \"\"\"Base Zwave JS Server exception.\"\"\"\n\n\nclass TransportError(BaseZwaveJSServerError):\n \"\"\"Exception raised to represent transport errors.\"\"\"\n\n def __init__(self, message: str, error: Optional[Exception] = None) -> None:\n \"\"\"Initialize a transport error.\"\"\"\n super().__init__(message)\n self.error = error\n\n\nclass ConnectionClosed(TransportError):\n \"\"\"Exception raised when the connection is closed.\"\"\"\n\n\nclass CannotConnect(TransportError):\n \"\"\"Exception raised when failed to connect the client.\"\"\"\n\n def __init__(self, error: Exception) -> None:\n \"\"\"Initialize a cannot connect error.\"\"\"\n super().__init__(f\"{error}\", error)\n\n\nclass ConnectionFailed(TransportError):\n \"\"\"Exception raised when an established connection fails.\"\"\"\n\n def __init__(self, error: Optional[Exception] = None) -> None:\n \"\"\"Initialize a connection failed error.\"\"\"\n if error is None:\n super().__init__(\"Connection failed.\")\n return\n super().__init__(f\"{error}\", error)\n\n\nclass NotFoundError(BaseZwaveJSServerError):\n \"\"\"Exception that is raised when an entity can't be found.\"\"\"\n\n\nclass NotConnected(BaseZwaveJSServerError):\n \"\"\"Exception raised when not connected to client.\"\"\"\n\n\nclass InvalidState(BaseZwaveJSServerError):\n 
\"\"\"Exception raised when data gets in invalid state.\"\"\"\n\n\nclass InvalidMessage(BaseZwaveJSServerError):\n \"\"\"Exception raised when an invalid message is received.\"\"\"\n\n\nclass InvalidServerVersion(BaseZwaveJSServerError):\n \"\"\"Exception raised when connected to server with incompatible version.\"\"\"\n\n\nclass FailedCommand(BaseZwaveJSServerError):\n \"\"\"When a command has failed.\"\"\"\n\n def __init__(self, message_id: str, error_code: str):\n \"\"\"Initialize a failed command error.\"\"\"\n super().__init__(f\"Command failed: {error_code}\")\n self.message_id = message_id\n self.error_code = error_code\n\n\nclass UnparseableValue(BaseZwaveJSServerError):\n \"\"\"Exception raised when a value can't be parsed.\"\"\"\n\n\nclass UnwriteableValue(BaseZwaveJSServerError):\n \"\"\"Exception raised when trying to change a read only Value.\"\"\"\n\n\nclass InvalidNewValue(BaseZwaveJSServerError):\n \"\"\"Exception raised when target new value is invalid based on Value metadata.\"\"\"\n\n\nclass SetValueFailed(BaseZwaveJSServerError):\n \"\"\"\n Exception raise when setting a value fails.\n\n Refer to https://zwave-js.github.io/node-zwave-js/#/api/node?id=setvalue for\n possible reasons.\n \"\"\"\n" }, { "alpha_fraction": 0.519249439239502, "alphanum_fraction": 0.5253962874412537, "avg_line_length": 26.35398292541504, "blob_id": "4198970f9be881eed5cf456b95cb42255517951f", "content_id": "7ba66f56f2138b83a464df71b443b529fdf5fda4", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3091, "license_type": "permissive", "max_line_length": 84, "num_lines": 113, "path": "/test/model/test_driver.py", "repo_name": "gsemet/zwave-js-server-python", "src_encoding": "UTF-8", "text": "\"\"\"Test the driver model.\"\"\"\nimport json\n\nfrom zwave_js_server.const import LogLevel\nfrom zwave_js_server.event import Event\nfrom zwave_js_server.model import driver as driver_pkg, log_config as 
log_config_pkg\n\nfrom .. import load_fixture\n\n\ndef test_from_state():\n \"\"\"Test from_state method.\"\"\"\n ws_msgs = load_fixture(\"basic_dump.txt\").strip().split(\"\\n\")\n\n driver = driver_pkg.Driver(None, json.loads(ws_msgs[0])[\"state\"])\n\n for msg in ws_msgs[1:]:\n msg = json.loads(msg)\n assert msg[\"type\"] == \"event\"\n event = Event(type=msg[\"event\"][\"event\"], data=msg[\"event\"])\n driver.receive_event(event)\n\n assert len(driver.controller.nodes) == 8\n\n\nasync def test_update_log_config(driver, uuid4, mock_command):\n \"\"\"Test update log config.\"\"\"\n # Update log level\n ack_commands = mock_command(\n {\"command\": \"update_log_config\", \"config\": {\"level\": 0}},\n {\"success\": True},\n )\n\n assert (\n await driver.async_update_log_config(\n log_config_pkg.LogConfig(level=LogLevel.ERROR)\n )\n is None\n )\n\n assert len(ack_commands) == 1\n assert ack_commands[0] == {\n \"command\": \"update_log_config\",\n \"config\": {\"level\": 0},\n \"messageId\": uuid4,\n }\n\n # Update all parameters\n ack_commands = mock_command(\n {\n \"command\": \"update_log_config\",\n \"config\": {\n \"enabled\": True,\n \"level\": 0,\n \"logToFile\": True,\n \"filename\": \"/test.txt\",\n \"forceConsole\": True,\n },\n },\n {\"success\": True},\n )\n assert (\n await driver.async_update_log_config(\n log_config_pkg.LogConfig(\n enabled=True,\n level=LogLevel.ERROR,\n log_to_file=True,\n filename=\"/test.txt\",\n force_console=True,\n )\n )\n is None\n )\n\n assert len(ack_commands) == 2\n assert ack_commands[1] == {\n \"command\": \"update_log_config\",\n \"config\": {\n \"enabled\": True,\n \"level\": 0,\n \"logToFile\": True,\n \"filename\": \"/test.txt\",\n \"forceConsole\": True,\n },\n \"messageId\": uuid4,\n }\n\n\nasync def test_get_log_config(driver, uuid4, mock_command):\n \"\"\"Test set value.\"\"\"\n ack_commands = mock_command(\n {\"command\": \"get_log_config\"},\n {\n \"success\": True,\n \"config\": {\n \"enabled\": True,\n 
\"level\": 0,\n \"logToFile\": False,\n \"filename\": \"/test.txt\",\n \"forceConsole\": False,\n },\n },\n )\n log_config = await driver.async_get_log_config()\n\n assert len(ack_commands) == 1\n assert ack_commands[0] == {\"command\": \"get_log_config\", \"messageId\": uuid4}\n\n assert log_config.enabled\n assert log_config.level == LogLevel.ERROR\n assert log_config.log_to_file is False\n assert log_config.filename == \"/test.txt\"\n assert log_config.force_console is False\n" }, { "alpha_fraction": 0.6686182618141174, "alphanum_fraction": 0.6686182618141174, "avg_line_length": 24.878787994384766, "blob_id": "4079aa8e8f6241a4adca01a50f27fc203e311cbd", "content_id": "d8e7d21a65a53e9f70582147c7ba0881a6f8027e", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1708, "license_type": "permissive", "max_line_length": 66, "num_lines": 66, "path": "/zwave_js_server/model/device_class.py", "repo_name": "gsemet/zwave-js-server-python", "src_encoding": "UTF-8", "text": "\"\"\"\nModel for a Zwave Node's device class.\n\nhttps://zwave-js.github.io/node-zwave-js/#/api/node?id=deviceclass\n\"\"\"\n\nfrom dataclasses import dataclass\nfrom typing import List, TypedDict\n\n\nclass DeviceClassItemDataType(TypedDict):\n \"\"\"Represent a device class data dict type.\"\"\"\n\n key: int\n label: str\n\n\nclass DeviceClassDataType(TypedDict):\n \"\"\"Represent a device class data dict type.\"\"\"\n\n basic: DeviceClassItemDataType\n generic: DeviceClassItemDataType\n specific: DeviceClassItemDataType\n mandatorySupportedCCs: List[int]\n mandatoryControlledCCs: List[int]\n\n\n@dataclass\nclass DeviceClassItem:\n \"\"\"Model for a DeviceClass item (e.g. 
basic or generic).\"\"\"\n\n key: int\n label: str\n\n\nclass DeviceClass:\n \"\"\"Model for a Zwave Node's device class.\"\"\"\n\n def __init__(self, data: DeviceClassDataType) -> None:\n \"\"\"Initialize.\"\"\"\n self.data = data\n\n @property\n def basic(self) -> DeviceClassItem:\n \"\"\"Return basic DeviceClass.\"\"\"\n return DeviceClassItem(**self.data[\"basic\"])\n\n @property\n def generic(self) -> DeviceClassItem:\n \"\"\"Return generic DeviceClass.\"\"\"\n return DeviceClassItem(**self.data[\"generic\"])\n\n @property\n def specific(self) -> DeviceClassItem:\n \"\"\"Return specific DeviceClass.\"\"\"\n return DeviceClassItem(**self.data[\"specific\"])\n\n @property\n def mandatory_supported_ccs(self) -> List[int]:\n \"\"\"Return list of mandatory Supported CC id's.\"\"\"\n return self.data[\"mandatorySupportedCCs\"]\n\n @property\n def mandatory_controlled_ccs(self) -> List[int]:\n \"\"\"Return list of mandatory Controlled CC id's.\"\"\"\n return self.data[\"mandatoryControlledCCs\"]\n" }, { "alpha_fraction": 0.6143577098846436, "alphanum_fraction": 0.6146095991134644, "avg_line_length": 35.09090805053711, "blob_id": "9dc7b8c77e48343483bc36f0aadc4ede0e21aed5", "content_id": "8b217bcec1e29d7e587d4e4b10eb9b203c1b87f7", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3970, "license_type": "permissive", "max_line_length": 88, "num_lines": 110, "path": "/zwave_js_server/util/node.py", "repo_name": "gsemet/zwave-js-server-python", "src_encoding": "UTF-8", "text": "\"\"\"Utility functions for Z-Wave JS nodes.\"\"\"\nimport json\nfrom typing import Optional, Union\n\nfrom ..const import CommandClass, ConfigurationValueType\nfrom ..exceptions import InvalidNewValue, NotFoundError, SetValueFailed\nfrom ..model.node import Node\nfrom ..model.value import ConfigurationValue, get_value_id\n\n\nasync def async_set_config_parameter(\n node: Node,\n new_value: Union[int, str],\n 
property_or_property_name: Union[int, str],\n property_key: Optional[Union[int, str]] = None,\n) -> ConfigurationValue:\n \"\"\"\n Set a value for a config parameter on this node.\n\n new_value and property_ can be provided as labels, so we need to resolve them to\n the appropriate key\n \"\"\"\n config_values = node.get_configuration_values()\n\n # If a property name is provided, we have to search for the correct value since\n # we can't use value ID\n if isinstance(property_or_property_name, str):\n try:\n zwave_value = next(\n config_value\n for config_value in config_values.values()\n if config_value.property_name == property_or_property_name\n )\n except StopIteration:\n raise NotFoundError(\n \"Configuration parameter with parameter name \"\n f\"{property_or_property_name} could not be found\"\n ) from None\n else:\n value_id = get_value_id(\n node,\n CommandClass.CONFIGURATION,\n property_or_property_name,\n endpoint=0,\n property_key=property_key,\n )\n\n try:\n zwave_value = config_values[value_id]\n except KeyError:\n raise NotFoundError(\n f\"Configuration parameter with value ID {value_id} could not be \"\n \"found\"\n ) from None\n\n # Validate that new value for enumerated configuration parameter is a valid state\n # key or label\n if (\n zwave_value.configuration_value_type == ConfigurationValueType.ENUMERATED\n and str(new_value)\n not in [\n *zwave_value.metadata.states,\n *zwave_value.metadata.states.values(),\n ]\n ):\n raise InvalidNewValue(\n \"Must provide a value that represents a valid state key or label from \"\n f\"{json.dumps(zwave_value.metadata.states)}\"\n )\n\n # If needed, convert a state label to its key. 
We know the state exists because\n # of the validation above.\n if isinstance(new_value, str):\n new_value = int(\n next(\n key\n for key, label in zwave_value.metadata.states.items()\n if label == new_value\n )\n )\n\n if zwave_value.configuration_value_type == ConfigurationValueType.UNDEFINED:\n # We need to use the Configuration CC API to set the value for this type\n raise NotImplementedError(\"Configuration values of undefined type can't be set\")\n\n # Validate that new value for range configuration parameter is within bounds\n max_ = zwave_value.metadata.max\n min_ = zwave_value.metadata.min\n if zwave_value.configuration_value_type == ConfigurationValueType.RANGE and (\n (max_ is not None and new_value > max_)\n or (min_ is not None and new_value < min_)\n ):\n bounds = []\n if min_ is not None:\n bounds.append(f\"Min: {min_}\")\n if max_ is not None:\n bounds.append(f\"Max: {max_}\")\n raise InvalidNewValue(\n f\"Must provide a value within the target range ({', '.join(bounds)})\"\n )\n\n # Finally attempt to set the value and return the Value object if successful\n if not await node.async_set_value(zwave_value, new_value, wait_for_result=True):\n raise SetValueFailed(\n \"Unable to set value, refer to \"\n \"https://zwave-js.github.io/node-zwave-js/#/api/node?id=setvalue for \"\n \"possible reasons\"\n )\n\n return zwave_value\n" }, { "alpha_fraction": 0.4342857003211975, "alphanum_fraction": 0.4595237970352173, "avg_line_length": 21.580644607543945, "blob_id": "39dbe8131a7b17c3028ac69b6623bf117a7d0e2d", "content_id": "8766839d455ac37c2458ba0d648ac714e99039b4", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4200, "license_type": "permissive", "max_line_length": 87, "num_lines": 186, "path": "/test/util/const.py", "repo_name": "gsemet/zwave-js-server-python", "src_encoding": "UTF-8", "text": "\"\"\"Constants for utility tests.\"\"\"\n\nfrom zwave_js_server.const import ATTR_CODE_SLOT, 
ATTR_IN_USE, ATTR_NAME, ATTR_USERCODE\n\nCODE_SLOTS = [\n {\n ATTR_CODE_SLOT: 1,\n ATTR_IN_USE: True,\n ATTR_NAME: \"User Code (1)\",\n ATTR_USERCODE: \"**********\",\n },\n {\n ATTR_CODE_SLOT: 2,\n ATTR_IN_USE: True,\n ATTR_NAME: \"User Code (2)\",\n ATTR_USERCODE: \"**********\",\n },\n {\n ATTR_CODE_SLOT: 3,\n ATTR_IN_USE: True,\n ATTR_NAME: \"User Code (3)\",\n ATTR_USERCODE: \"**********\",\n },\n {\n ATTR_CODE_SLOT: 4,\n ATTR_IN_USE: True,\n ATTR_NAME: \"User Code (4)\",\n ATTR_USERCODE: \"7030\\n\\r\",\n },\n {\n ATTR_CODE_SLOT: 5,\n ATTR_IN_USE: False,\n ATTR_NAME: \"User Code (5)\",\n ATTR_USERCODE: None,\n },\n {\n ATTR_CODE_SLOT: 6,\n ATTR_IN_USE: False,\n ATTR_NAME: \"User Code (6)\",\n ATTR_USERCODE: None,\n },\n {\n ATTR_CODE_SLOT: 7,\n ATTR_IN_USE: False,\n ATTR_NAME: \"User Code (7)\",\n ATTR_USERCODE: None,\n },\n {\n ATTR_CODE_SLOT: 8,\n ATTR_IN_USE: False,\n ATTR_NAME: \"User Code (8)\",\n ATTR_USERCODE: None,\n },\n {\n ATTR_CODE_SLOT: 9,\n ATTR_IN_USE: False,\n ATTR_NAME: \"User Code (9)\",\n ATTR_USERCODE: None,\n },\n {\n ATTR_CODE_SLOT: 10,\n ATTR_IN_USE: False,\n ATTR_NAME: \"User Code (10)\",\n ATTR_USERCODE: None,\n },\n {\n ATTR_CODE_SLOT: 11,\n ATTR_IN_USE: False,\n ATTR_NAME: \"User Code (11)\",\n ATTR_USERCODE: None,\n },\n {\n ATTR_CODE_SLOT: 12,\n ATTR_IN_USE: False,\n ATTR_NAME: \"User Code (12)\",\n ATTR_USERCODE: None,\n },\n {\n ATTR_CODE_SLOT: 13,\n ATTR_IN_USE: False,\n ATTR_NAME: \"User Code (13)\",\n ATTR_USERCODE: None,\n },\n {\n ATTR_CODE_SLOT: 14,\n ATTR_IN_USE: False,\n ATTR_NAME: \"User Code (14)\",\n ATTR_USERCODE: None,\n },\n {\n ATTR_CODE_SLOT: 15,\n ATTR_IN_USE: False,\n ATTR_NAME: \"User Code (15)\",\n ATTR_USERCODE: None,\n },\n {\n ATTR_CODE_SLOT: 16,\n ATTR_IN_USE: False,\n ATTR_NAME: \"User Code (16)\",\n ATTR_USERCODE: None,\n },\n {\n ATTR_CODE_SLOT: 17,\n ATTR_IN_USE: False,\n ATTR_NAME: \"User Code (17)\",\n ATTR_USERCODE: None,\n },\n {\n ATTR_CODE_SLOT: 18,\n ATTR_IN_USE: False,\n ATTR_NAME: \"User Code 
(18)\",\n ATTR_USERCODE: None,\n },\n {\n ATTR_CODE_SLOT: 19,\n ATTR_IN_USE: False,\n ATTR_NAME: \"User Code (19)\",\n ATTR_USERCODE: None,\n },\n {\n ATTR_CODE_SLOT: 20,\n ATTR_IN_USE: False,\n ATTR_NAME: \"User Code (20)\",\n ATTR_USERCODE: None,\n },\n {\n ATTR_CODE_SLOT: 21,\n ATTR_IN_USE: False,\n ATTR_NAME: \"User Code (21)\",\n ATTR_USERCODE: None,\n },\n {\n ATTR_CODE_SLOT: 22,\n ATTR_IN_USE: False,\n ATTR_NAME: \"User Code (22)\",\n ATTR_USERCODE: None,\n },\n {\n ATTR_CODE_SLOT: 23,\n ATTR_IN_USE: False,\n ATTR_NAME: \"User Code (23)\",\n ATTR_USERCODE: None,\n },\n {\n ATTR_CODE_SLOT: 24,\n ATTR_IN_USE: False,\n ATTR_NAME: \"User Code (24)\",\n ATTR_USERCODE: None,\n },\n {\n ATTR_CODE_SLOT: 25,\n ATTR_IN_USE: False,\n ATTR_NAME: \"User Code (25)\",\n ATTR_USERCODE: None,\n },\n {\n ATTR_CODE_SLOT: 26,\n ATTR_IN_USE: False,\n ATTR_NAME: \"User Code (26)\",\n ATTR_USERCODE: None,\n },\n {\n ATTR_CODE_SLOT: 27,\n ATTR_IN_USE: False,\n ATTR_NAME: \"User Code (27)\",\n ATTR_USERCODE: None,\n },\n {\n ATTR_CODE_SLOT: 28,\n ATTR_IN_USE: False,\n ATTR_NAME: \"User Code (28)\",\n ATTR_USERCODE: None,\n },\n {\n ATTR_CODE_SLOT: 29,\n ATTR_IN_USE: False,\n ATTR_NAME: \"User Code (29)\",\n ATTR_USERCODE: None,\n },\n {\n ATTR_CODE_SLOT: 30,\n ATTR_IN_USE: False,\n ATTR_NAME: \"User Code (30)\",\n ATTR_USERCODE: None,\n },\n]\n" }, { "alpha_fraction": 0.5430508852005005, "alphanum_fraction": 0.5701767802238464, "avg_line_length": 31.08802032470703, "blob_id": "1a1397dc649d846a7e3be012a5afcf155b76bbc0", "content_id": "1c95294c3efa9140dcc54ea77999c00f29e9c612", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 13126, "license_type": "permissive", "max_line_length": 88, "num_lines": 409, "path": "/test/model/test_node.py", "repo_name": "gsemet/zwave-js-server-python", "src_encoding": "UTF-8", "text": "\"\"\"Test the node model.\"\"\"\nimport json\n\nimport pytest\n\nfrom zwave_js_server.const import 
CommandClass\nfrom zwave_js_server.event import Event\nfrom zwave_js_server.exceptions import UnwriteableValue\nfrom zwave_js_server.model import node as node_pkg\nfrom zwave_js_server.model.node import Node, NodeStatus\n\nfrom .. import load_fixture\n\nDEVICE_CONFIG_FIXTURE = {\n \"manufacturer_id\": 134,\n \"manufacturer\": \"AEON Labs\",\n \"label\": \"ZW090\",\n \"description\": \"Z‐Stick Gen5 USB Controller\",\n \"devices\": [\n {\"productType\": \"0x0001\", \"productId\": \"0x005a\"},\n {\"productType\": \"0x0101\", \"productId\": \"0x005a\"},\n {\"productType\": \"0x0201\", \"productId\": \"0x005a\"},\n ],\n \"firmware_version\": {\"min\": \"0.0\", \"max\": \"255.255\"},\n \"associations\": {},\n \"param_information\": {\"_map\": {}},\n}\n\n\ndef test_from_state():\n \"\"\"Test from_state method.\"\"\"\n state = json.loads(load_fixture(\"basic_dump.txt\").split(\"\\n\")[0])[\"state\"]\n\n node = node_pkg.Node(None, state[\"nodes\"][0])\n\n assert node.node_id == 1\n assert node.index == 0\n assert node.status == 4\n assert node.ready is True\n assert node.device_class.basic.key == 2\n assert node.device_class.generic.label == \"Static Controller\"\n assert node.device_class.mandatory_supported_ccs == []\n assert node.device_class.mandatory_controlled_ccs == [32]\n\n assert node.is_listening is True\n assert node.is_frequent_listening is False\n assert node.is_routing is False\n assert node.max_baud_rate == 40000\n assert node.is_secure is False\n assert node.version == 4\n assert node.is_beaming is True\n assert node.manufacturer_id == 134\n assert node.product_id == 90\n assert node.product_type == 257\n for attr, value in DEVICE_CONFIG_FIXTURE.items():\n assert getattr(node.device_config, attr) == value\n assert node.label == \"ZW090\"\n assert node.neighbors == [31, 32, 33, 36, 37, 39, 52]\n assert node.interview_attempts == 1\n assert len(node.endpoints) == 1\n assert node.endpoints[0].index == 0\n\n\nasync def 
test_unknown_values(cover_qubino_shutter):\n \"\"\"Test that values that are unknown return as None.\"\"\"\n node = cover_qubino_shutter\n assert (\n \"5-38-0-currentValue-00-00\" in node.values\n and node.values[\"5-38-0-currentValue-00-00\"].value is None\n )\n assert (\n \"5-37-0-currentValue-00-00\" in node.values\n and node.values[\"5-37-0-currentValue-00-00\"].value is None\n )\n\n\nasync def test_values_without_property_key_name(multisensor_6):\n \"\"\"Test that values with property key and without property key name can be found.\"\"\"\n node = multisensor_6\n assert \"52-112-0-101-1-00\" in node.values\n assert \"52-112-0-101-16-00\" in node.values\n\n\nasync def test_hash(climate_radio_thermostat_ct100_plus):\n \"\"\"Test node hash.\"\"\"\n node = climate_radio_thermostat_ct100_plus\n assert hash(node) == hash((node.client.driver, node.node_id))\n\n\nasync def test_command_class_values(climate_radio_thermostat_ct100_plus):\n \"\"\"Test node methods to get command class values.\"\"\"\n node = climate_radio_thermostat_ct100_plus\n assert node.node_id == 13\n switch_values = node.get_command_class_values(CommandClass.SENSOR_MULTILEVEL)\n assert len(switch_values) == 2\n\n with pytest.raises(UnwriteableValue):\n await node.async_set_value(\"13-112-0-2-00-00\", 1)\n\n\nasync def test_set_value(multisensor_6, uuid4, mock_command):\n \"\"\"Test set value.\"\"\"\n node = multisensor_6\n ack_commands = mock_command(\n {\"command\": \"node.set_value\", \"nodeId\": node.node_id},\n {\"success\": True},\n )\n value_id = \"52-32-0-targetValue-00-00\"\n value = node.values[value_id]\n assert await node.async_set_value(value_id, 42) is None\n\n assert len(ack_commands) == 1\n assert ack_commands[0] == {\n \"command\": \"node.set_value\",\n \"nodeId\": node.node_id,\n \"valueId\": value.data,\n \"value\": 42,\n \"messageId\": uuid4,\n }\n\n\nasync def test_poll_value(multisensor_6, uuid4, mock_command):\n \"\"\"Test poll value.\"\"\"\n node = multisensor_6\n ack_commands 
= mock_command(\n {\"command\": \"node.poll_value\", \"nodeId\": node.node_id},\n {\"result\": \"something\"},\n )\n value_id = \"52-32-0-currentValue-00-00\"\n value = node.values[value_id]\n assert await node.async_poll_value(value_id) is None\n\n assert len(ack_commands) == 1\n assert ack_commands[0] == {\n \"command\": \"node.poll_value\",\n \"nodeId\": node.node_id,\n \"valueId\": value.data,\n \"messageId\": uuid4,\n }\n\n\nasync def test_refresh_info(multisensor_6, uuid4, mock_command):\n \"\"\"Test refresh info.\"\"\"\n node = multisensor_6\n ack_commands = mock_command(\n {\"command\": \"node.refresh_info\", \"nodeId\": node.node_id},\n {},\n )\n assert await node.async_refresh_info() is None\n\n assert len(ack_commands) == 1\n assert ack_commands[0] == {\n \"command\": \"node.refresh_info\",\n \"nodeId\": node.node_id,\n \"messageId\": uuid4,\n }\n\n\nasync def test_get_defined_value_ids(multisensor_6, uuid4, mock_command):\n \"\"\"Test get defined value ids.\"\"\"\n node = multisensor_6\n ack_commands = mock_command(\n {\"command\": \"node.get_defined_value_ids\", \"nodeId\": node.node_id},\n {\n \"valueIds\": [\n {\n \"commandClassName\": \"Wake Up\",\n \"commandClass\": 132,\n \"endpoint\": 0,\n \"property\": \"wakeUpInterval\",\n \"propertyName\": \"wakeUpInterval\",\n },\n {\n \"commandClassName\": \"Wake Up\",\n \"commandClass\": 132,\n \"endpoint\": 0,\n \"property\": \"controllerNodeId\",\n \"propertyName\": \"controllerNodeId\",\n },\n ]\n },\n )\n result = await node.async_get_defined_value_ids()\n\n assert len(result) == 2\n\n assert result[0].command_class_name == \"Wake Up\"\n assert result[0].command_class == 132\n assert result[0].endpoint == 0\n assert result[0].property_ == \"wakeUpInterval\"\n assert result[0].property_name == \"wakeUpInterval\"\n\n assert result[1].command_class_name == \"Wake Up\"\n assert result[1].command_class == 132\n assert result[1].endpoint == 0\n assert result[1].property_ == \"controllerNodeId\"\n assert 
result[1].property_name == \"controllerNodeId\"\n\n assert len(ack_commands) == 1\n assert ack_commands[0] == {\n \"command\": \"node.get_defined_value_ids\",\n \"nodeId\": node.node_id,\n \"messageId\": uuid4,\n }\n\n\nasync def test_get_value_metadata(multisensor_6, uuid4, mock_command):\n \"\"\"Test get value metadata.\"\"\"\n node = multisensor_6\n ack_commands = mock_command(\n {\"command\": \"node.get_value_metadata\", \"nodeId\": node.node_id},\n {\n \"type\": \"any\",\n \"readable\": True,\n \"writeable\": False,\n \"label\": \"Node ID of the controller\",\n \"description\": \"Description of the value metadata\",\n },\n )\n\n value_id = \"52-32-0-targetValue-00-00\"\n value = node.values[value_id]\n result = await node.async_get_value_metadata(value)\n\n assert result.type == \"any\"\n assert result.readable is True\n assert result.writeable is False\n assert result.label == \"Node ID of the controller\"\n assert result.description == \"Description of the value metadata\"\n\n assert len(ack_commands) == 1\n assert ack_commands[0] == {\n \"command\": \"node.get_value_metadata\",\n \"nodeId\": node.node_id,\n \"valueId\": value.data,\n \"messageId\": uuid4,\n }\n\n\nasync def test_abort_firmware_update(multisensor_6, uuid4, mock_command):\n \"\"\"Test abort firmware update.\"\"\"\n node = multisensor_6\n ack_commands = mock_command(\n {\"command\": \"node.abort_firmware_update\", \"nodeId\": node.node_id},\n {},\n )\n\n assert await node.async_abort_firmware_update() is None\n\n assert len(ack_commands) == 1\n assert ack_commands[0] == {\n \"command\": \"node.abort_firmware_update\",\n \"nodeId\": node.node_id,\n \"messageId\": uuid4,\n }\n\n\ndef test_node_inclusion():\n \"\"\"Emulate a node being added.\"\"\"\n # when a node node is added, it has minimal info first\n node = node_pkg.Node(\n None, {\"nodeId\": 52, \"status\": 1, \"ready\": False, \"values\": []}\n )\n assert node.node_id == 52\n assert node.status == 1\n assert not node.ready\n assert 
len(node.values) == 0\n # the ready event contains a full (and complete) dump of the node, including values\n state = json.loads(load_fixture(\"multisensor_6_state.json\"))\n event = Event(\"ready\", {\"nodeState\": state})\n node.receive_event(event)\n assert len(node.values) > 0\n\n\nasync def test_node_status_events(multisensor_6):\n \"\"\"Test Node status events.\"\"\"\n node = multisensor_6\n assert node.status == NodeStatus.ASLEEP\n # mock node wake up event\n event = Event(type=\"wake up\")\n node.handle_wake_up(event)\n assert node.status == NodeStatus.AWAKE\n # mock node dead event\n event = Event(type=\"dead\")\n node.handle_dead(event)\n assert node.status == NodeStatus.DEAD\n # mock node alive event\n event = Event(type=\"alive\")\n node.handle_alive(event)\n assert node.status == NodeStatus.ALIVE\n # mock node sleep event\n event = Event(type=\"sleep\")\n node.handle_sleep(event)\n assert node.status == NodeStatus.ASLEEP\n\n\nasync def test_value_notification(wallmote_central_scene: Node):\n \"\"\"Test value notification events.\"\"\"\n node = wallmote_central_scene\n\n # Validate that metadata gets added to notification when it's not included\n event = Event(\n type=\"value notification\",\n data={\n \"source\": \"node\",\n \"event\": \"value notification\",\n \"nodeId\": 35,\n \"args\": {\n \"endpoint\": 0,\n \"commandClass\": 91,\n \"commandClassName\": \"Central Scene\",\n \"property\": \"scene\",\n \"propertyKey\": \"002\",\n \"propertyName\": \"scene\",\n \"propertyKeyName\": \"002\",\n \"ccVersion\": 2,\n },\n },\n )\n\n node.handle_value_notification(event)\n assert event.data[\"value_notification\"].metadata.states\n\n # Validate that a value notification event for an unknown value gets returned as is\n\n event = Event(\n type=\"value notification\",\n data={\n \"source\": \"node\",\n \"event\": \"value notification\",\n \"nodeId\": 35,\n \"args\": {\n \"endpoint\": 0,\n \"commandClass\": 91,\n \"commandClassName\": \"Central Scene\",\n 
\"property\": \"scene\",\n \"propertyKey\": \"005\",\n \"propertyName\": \"scene\",\n \"propertyKeyName\": \"005\",\n \"ccVersion\": 2,\n },\n },\n )\n\n node.handle_value_notification(event)\n assert event.data[\"value_notification\"].data == {\n \"endpoint\": 0,\n \"commandClass\": 91,\n \"commandClassName\": \"Central Scene\",\n \"property\": \"scene\",\n \"propertyKey\": \"005\",\n \"propertyName\": \"scene\",\n \"propertyKeyName\": \"005\",\n \"ccVersion\": 2,\n }\n\n\nasync def test_metadata_updated(climate_radio_thermostat_ct100_plus: Node):\n \"\"\"Test metadata updated events.\"\"\"\n node = climate_radio_thermostat_ct100_plus\n\n value = node.values[\"13-135-1-value-00-00\"]\n\n assert not value.metadata.states\n\n # Validate that states becomes available on a value that doesn't have a state when\n # a metadata updated event with states is received\n event = Event(\n type=\"value notification\",\n data={\n \"source\": \"node\",\n \"event\": \"metadata updated\",\n \"nodeId\": 13,\n \"args\": {\n \"commandClassName\": \"Indicator\",\n \"commandClass\": 135,\n \"endpoint\": 1,\n \"property\": \"value\",\n \"propertyName\": \"value\",\n \"metadata\": {\n \"type\": \"number\",\n \"readable\": True,\n \"writeable\": True,\n \"min\": 0,\n \"max\": 255,\n \"label\": \"Indicator value\",\n \"ccSpecific\": {\"indicatorId\": 0},\n \"states\": {\n \"0\": \"Idle\",\n \"1\": \"Heating\",\n \"2\": \"Cooling\",\n \"3\": \"Fan Only\",\n \"4\": \"Pending Heat\",\n \"5\": \"Pending Cool\",\n \"6\": \"Vent/Economizer\",\n \"7\": \"Aux Heating\",\n \"8\": \"2nd Stage Heating\",\n \"9\": \"2nd Stage Cooling\",\n \"10\": \"2nd Stage Aux Heat\",\n \"11\": \"3rd Stage Aux Heat\",\n },\n },\n \"value\": 0,\n },\n },\n )\n\n node.handle_metadata_updated(event)\n assert value.metadata.states\n" }, { "alpha_fraction": 0.6029983162879944, "alphanum_fraction": 0.6029983162879944, "avg_line_length": 31.160715103149414, "blob_id": "e62082fd4a7e521e49b8b4c4f5107f83a686bdbd", 
"content_id": "0bec847cb26cabe94288fd2a84d1d4b72c7b5690", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1801, "license_type": "permissive", "max_line_length": 84, "num_lines": 56, "path": "/zwave_js_server/model/driver.py", "repo_name": "gsemet/zwave-js-server-python", "src_encoding": "UTF-8", "text": "\"\"\"Provide a model for the Z-Wave JS Driver.\"\"\"\nfrom typing import TYPE_CHECKING\nfrom zwave_js_server.model.log_config import LogConfig\n\nfrom ..event import Event, EventBase\nfrom .controller import Controller\n\nif TYPE_CHECKING:\n from ..client import Client\n\n\nclass Driver(EventBase):\n \"\"\"Represent a Z-Wave JS driver.\"\"\"\n\n def __init__(self, client: \"Client\", state: dict) -> None:\n \"\"\"Initialize driver.\"\"\"\n super().__init__()\n self.client = client\n self.controller = Controller(client, state)\n\n def __hash__(self) -> int:\n \"\"\"Return the hash.\"\"\"\n return hash(self.controller)\n\n def __eq__(self, other: object) -> bool:\n \"\"\"Return whether this instance equals another.\"\"\"\n if not isinstance(other, Driver):\n return False\n return self.controller == other.controller\n\n def receive_event(self, event: Event) -> None:\n \"\"\"Receive an event.\"\"\"\n if event.data[\"source\"] != \"driver\":\n self.controller.receive_event(event)\n return\n\n self._handle_event_protocol(event)\n\n self.emit(event.type, event.data)\n\n def handle_all_nodes_ready(self, event: Event) -> None:\n \"\"\"Process a driver all nodes ready event.\"\"\"\n\n async def async_update_log_config(self, log_config: LogConfig) -> None:\n \"\"\"Update log config for driver.\"\"\"\n await self.client.async_send_command(\n {\n \"command\": \"update_log_config\",\n \"config\": log_config.to_dict(),\n }\n )\n\n async def async_get_log_config(self) -> LogConfig:\n \"\"\"Return current log config for driver.\"\"\"\n result = await self.client.async_send_command({\"command\": 
\"get_log_config\"})\n return LogConfig.from_dict(result[\"config\"])\n" }, { "alpha_fraction": 0.6888889074325562, "alphanum_fraction": 0.6888889074325562, "avg_line_length": 44, "blob_id": "7906b1e45b2159ded73311a962c6f1886d53b512", "content_id": "404c5928c0b8a19d256b1eac811af27cbbcf1da6", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 45, "license_type": "permissive", "max_line_length": 44, "num_lines": 1, "path": "/zwave_js_server/__init__.py", "repo_name": "gsemet/zwave-js-server-python", "src_encoding": "UTF-8", "text": "\"\"\"Provide a package for zwave-js-server.\"\"\"\n" }, { "alpha_fraction": 0.6459770202636719, "alphanum_fraction": 0.6816092133522034, "avg_line_length": 31.22222137451172, "blob_id": "4d3572840aa9dc2a0e690130846996678edd86f5", "content_id": "5f2cd97fdb847aaa2499ad53791c7ba6ff97ea27", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 872, "license_type": "permissive", "max_line_length": 72, "num_lines": 27, "path": "/test/model/test_value.py", "repo_name": "gsemet/zwave-js-server-python", "src_encoding": "UTF-8", "text": "\"\"\"Test value model.\"\"\"\nfrom zwave_js_server.model.node import Node\nfrom zwave_js_server.model.value import get_value_id\n\n\ndef test_buffer_dict(client, idl_101_lock_state):\n \"\"\"Test that we handle buffer dictionary correctly.\"\"\"\n node = Node(client, idl_101_lock_state)\n\n value_id = get_value_id(node, 99, \"userCode\", 0, 3, \"3\")\n\n assert value_id == \"26-99-0-userCode-3-3\"\n\n zwave_value = node.values[value_id]\n\n assert zwave_value.metadata.type == \"string\"\n assert zwave_value.value == \"¤\\x0eªV\"\n\n\ndef test_unparseable_value(client, unparseable_json_string_value_state):\n \"\"\"Test that we handle string value with unparseable format.\"\"\"\n node = Node(client, unparseable_json_string_value_state)\n\n value_id = get_value_id(node, 99, 
\"userCode\", 0, 4, \"4\")\n\n assert value_id == \"20-99-0-userCode-4-4\"\n assert value_id not in node.values\n" }, { "alpha_fraction": 0.6666666865348816, "alphanum_fraction": 0.6666666865348816, "avg_line_length": 33.344825744628906, "blob_id": "ef93e6567b969cf0573ffda7a1286c8636eb9820", "content_id": "00840841e87ae2a3c0cbadf67d58dfe8482c4908", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 996, "license_type": "permissive", "max_line_length": 83, "num_lines": 29, "path": "/zwave_js_server/util/helpers.py", "repo_name": "gsemet/zwave-js-server-python", "src_encoding": "UTF-8", "text": "\"\"\"Generic Utility helper functions.\"\"\"\n\nimport json\nfrom typing import Any, Dict\n\nfrom ..exceptions import UnparseableValue\n\n\ndef is_json_string(value: Any) -> bool:\n \"\"\"Check if the provided string looks like json.\"\"\"\n # NOTE: we do not use json.loads here as it is not strict enough\n return isinstance(value, str) and value.startswith(\"{\") and value.endswith(\"}\")\n\n\ndef parse_buffer(value: Dict[str, Any]) -> str:\n \"\"\"Parse value dictionary from a buffer data type.\"\"\"\n if value.get(\"type\") != \"Buffer\" or \"data\" not in value:\n raise UnparseableValue(f\"Unparseable value: {value}\") from ValueError(\n \"JSON does not match expected schema\"\n )\n return \"\".join([chr(x) for x in value[\"data\"]])\n\n\ndef parse_buffer_from_json(value: str) -> str:\n \"\"\"Parse value string from a buffer data type.\"\"\"\n try:\n return parse_buffer(json.loads(value))\n except ValueError as err:\n raise UnparseableValue(f\"Unparseable value: {value}\") from err\n" }, { "alpha_fraction": 0.6418322324752808, "alphanum_fraction": 0.6432119011878967, "avg_line_length": 26.044776916503906, "blob_id": "fb7ea64a06f10e84de625a4121d2585cba89fa32", "content_id": "2e31a738df7b704d82d1d94d595203c90f536530", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, 
"language": "Python", "length_bytes": 3624, "license_type": "permissive", "max_line_length": 87, "num_lines": 134, "path": "/zwave_js_server/__main__.py", "repo_name": "gsemet/zwave-js-server-python", "src_encoding": "UTF-8", "text": "\"\"\"Basic CLI to test Z-Wave JS server.\"\"\"\nimport argparse\nimport asyncio\nimport logging\nimport sys\n\nimport aiohttp\n\nfrom .client import Client\nfrom .dump import dump_msgs\nfrom .version import get_server_version\n\nlogger = logging.getLogger(__package__)\n\n\ndef get_arguments() -> argparse.Namespace:\n \"\"\"Get parsed passed in arguments.\"\"\"\n\n parser = argparse.ArgumentParser(description=\"Z-Wave JS Server Python\")\n parser.add_argument(\"--debug\", action=\"store_true\", help=\"Log with debug level\")\n parser.add_argument(\n \"--server-version\", action=\"store_true\", help=\"Print the version of the server\"\n )\n parser.add_argument(\n \"--dump-state\", action=\"store_true\", help=\"Dump the driver state\"\n )\n parser.add_argument(\n \"--event-timeout\",\n help=\"How long to listen for events when dumping state\",\n )\n parser.add_argument(\n \"url\",\n type=str,\n help=\"URL of server, ie ws://localhost:3000\",\n )\n\n arguments = parser.parse_args()\n\n return arguments\n\n\nasync def start_cli() -> None:\n \"\"\"Run main.\"\"\"\n args = get_arguments()\n level = logging.DEBUG if args.debug else logging.INFO\n logging.basicConfig(level=level)\n\n async with aiohttp.ClientSession() as session:\n if args.server_version:\n await print_version(args, session)\n elif args.dump_state:\n await handle_dump_state(args, session)\n else:\n await connect(args, session)\n\n\nasync def print_version(\n args: argparse.Namespace, session: aiohttp.ClientSession\n) -> None:\n \"\"\"Print the version of the server.\"\"\"\n logger.setLevel(logging.WARNING)\n version = await get_server_version(args.url, session)\n print(\"Driver:\", version.driver_version)\n print(\"Server:\", version.server_version)\n print(\"Home ID:\", 
version.home_id)\n\n\nasync def handle_dump_state(\n args: argparse.Namespace, session: aiohttp.ClientSession\n) -> None:\n \"\"\"Dump the state of the server.\"\"\"\n timeout = None if args.event_timeout is None else float(args.event_timeout)\n msgs = await dump_msgs(args.url, session, timeout=timeout)\n for msg in msgs:\n print(msg)\n\n\nasync def connect(args: argparse.Namespace, session: aiohttp.ClientSession) -> None:\n \"\"\"Connect to the server.\"\"\"\n async with Client(args.url, session) as client:\n\n driver_ready = asyncio.Event()\n asyncio.create_task(on_driver_ready(client, driver_ready))\n\n await client.listen(driver_ready)\n\n\nasync def on_driver_ready(client: Client, driver_ready: asyncio.Event) -> None:\n \"\"\"Act on driver ready.\"\"\"\n await driver_ready.wait()\n\n assert client.driver\n # Set up listeners on new nodes\n client.driver.controller.on(\n \"node added\",\n lambda event: event[\"node\"].on(\"value updated\", log_value_updated),\n )\n\n # Set up listeners on existing nodes\n for node in client.driver.controller.nodes.values():\n node.on(\"value updated\", log_value_updated)\n\n\ndef log_value_updated(event: dict) -> None:\n \"\"\"Log node value changes.\"\"\"\n node = event[\"node\"]\n value = event[\"value\"]\n\n if node.device_config:\n description = node.device_config.description\n else:\n description = f\"{node.device_class.generic} (missing device config)\"\n\n logger.info(\n \"Node %s %s (%s) changed to %s\",\n description,\n value.property_name or \"\",\n value.value_id,\n value.value,\n )\n\n\ndef main() -> None:\n \"\"\"Run main.\"\"\"\n try:\n asyncio.run(start_cli())\n except KeyboardInterrupt:\n pass\n\n sys.exit(0)\n\n\nif __name__ == \"__main__\":\n main()\n" }, { "alpha_fraction": 0.5198282599449158, "alphanum_fraction": 0.5438241958618164, "avg_line_length": 26.685314178466797, "blob_id": "0045b48150a131d0a2023192dd9e08de679ce000", "content_id": "ea0693a45ceae99bf8ff3b53e5b34c2dbdbb3473", "detected_licenses": [ 
"Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3959, "license_type": "permissive", "max_line_length": 71, "num_lines": 143, "path": "/test/util/test_lock.py", "repo_name": "gsemet/zwave-js-server-python", "src_encoding": "UTF-8", "text": "\"\"\"Test lock utility functions.\"\"\"\nimport pytest\n\nfrom zwave_js_server.const import ATTR_USERCODE\nfrom zwave_js_server.exceptions import NotFoundError\nfrom zwave_js_server.util.lock import (\n clear_usercode,\n get_code_slots,\n get_usercode,\n get_usercodes,\n set_usercode,\n)\n\nfrom .const import CODE_SLOTS\n\n\ndef test_get_code_slots(lock_schlage_be469):\n \"\"\"Test get_code_slots utility function.\"\"\"\n node = lock_schlage_be469\n assert get_code_slots(node) == [\n {k: v for k, v in code_slot.items() if k != ATTR_USERCODE}\n for code_slot in CODE_SLOTS\n ]\n\n\ndef test_get_usercode(lock_schlage_be469):\n \"\"\"Test get_usercode utility function.\"\"\"\n node = lock_schlage_be469\n\n # Test in use slot\n user_code = get_usercode(node, 1)\n assert all(char == \"*\" for char in user_code)\n\n # Test unused slot\n assert get_usercode(node, 30) is None\n\n # Test invalid slot\n with pytest.raises(NotFoundError):\n get_usercode(node, 100)\n\n\ndef test_get_usercodes(lock_schlage_be469):\n \"\"\"Test get_usercodes utility function.\"\"\"\n node = lock_schlage_be469\n assert get_usercodes(node) == CODE_SLOTS\n\n\nasync def test_set_usercode(lock_schlage_be469, mock_command, uuid4):\n \"\"\"Test set_usercode utility function.\"\"\"\n node = lock_schlage_be469\n ack_commands = mock_command(\n {\"command\": \"node.set_value\", \"nodeId\": node.node_id},\n {\"success\": True},\n )\n\n # Test valid code\n await set_usercode(node, 1, \"1234\")\n assert len(ack_commands) == 1\n assert ack_commands[0] == {\n \"command\": \"node.set_value\",\n \"nodeId\": 20,\n \"messageId\": uuid4,\n \"valueId\": {\n \"commandClassName\": \"User Code\",\n \"commandClass\": 99,\n \"endpoint\": 
0,\n \"property\": \"userCode\",\n \"propertyName\": \"userCode\",\n \"propertyKey\": 1,\n \"propertyKeyName\": \"1\",\n \"metadata\": {\n \"type\": \"string\",\n \"readable\": True,\n \"writeable\": True,\n \"minLength\": 4,\n \"maxLength\": 10,\n \"label\": \"User Code (1)\",\n },\n \"value\": \"**********\",\n },\n \"value\": \"1234\",\n }\n\n # Test invalid code slot\n with pytest.raises(NotFoundError):\n await set_usercode(node, 100, \"1234\")\n\n # assert no new command calls\n assert len(ack_commands) == 1\n\n # Test invalid code length\n with pytest.raises(ValueError):\n await set_usercode(node, 1, \"123\")\n\n # assert no new command calls\n assert len(ack_commands) == 1\n\n\nasync def test_clear_usercode(lock_schlage_be469, mock_command, uuid4):\n \"\"\"Test clear_usercode utility function.\"\"\"\n node = lock_schlage_be469\n ack_commands = mock_command(\n {\"command\": \"node.set_value\", \"nodeId\": node.node_id},\n {\"success\": True},\n )\n\n # Test valid code\n await clear_usercode(node, 1)\n assert len(ack_commands) == 1\n assert ack_commands[0] == {\n \"command\": \"node.set_value\",\n \"nodeId\": 20,\n \"messageId\": uuid4,\n \"valueId\": {\n \"commandClassName\": \"User Code\",\n \"commandClass\": 99,\n \"endpoint\": 0,\n \"property\": \"userIdStatus\",\n \"propertyName\": \"userIdStatus\",\n \"propertyKey\": 1,\n \"propertyKeyName\": \"1\",\n \"metadata\": {\n \"type\": \"number\",\n \"readable\": True,\n \"writeable\": True,\n \"label\": \"User ID status (1)\",\n \"states\": {\n \"0\": \"Available\",\n \"1\": \"Enabled\",\n \"2\": \"Disabled\",\n },\n },\n \"value\": 1,\n },\n \"value\": 0,\n }\n\n # Test invalid code slot\n with pytest.raises(NotFoundError):\n await clear_usercode(node, 100)\n\n # assert no new command calls\n assert len(ack_commands) == 1\n" }, { "alpha_fraction": 0.6448087692260742, "alphanum_fraction": 0.6448087692260742, "avg_line_length": 25.14285659790039, "blob_id": "90190d474567792d7e60c6eb156358cc5347a0a3", 
"content_id": "149d0e05d200c60f8c00f028c296744393213edf", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 183, "license_type": "permissive", "max_line_length": 74, "num_lines": 7, "path": "/test/__init__.py", "repo_name": "gsemet/zwave-js-server-python", "src_encoding": "UTF-8", "text": "\"\"\"Provide tests for zwave-js-server.\"\"\"\nimport pathlib\n\n\ndef load_fixture(name):\n \"\"\"Load a fixture.\"\"\"\n return (pathlib.Path(__file__).parent / \"fixtures\" / name).read_text()\n" }, { "alpha_fraction": 0.6038851141929626, "alphanum_fraction": 0.6038851141929626, "avg_line_length": 23.163265228271484, "blob_id": "1fe038c3aca43fa9a50f1c315c18cd3505d541de", "content_id": "5142f07abc57f489f38e9f1eceb64da88c5870e2", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1184, "license_type": "permissive", "max_line_length": 71, "num_lines": 49, "path": "/zwave_js_server/dump.py", "repo_name": "gsemet/zwave-js-server-python", "src_encoding": "UTF-8", "text": "\"\"\"Dump helper.\"\"\"\nimport asyncio\nfrom typing import List, Optional\n\nimport aiohttp\n\nfrom .const import MAX_SERVER_SCHEMA_VERSION\n\n\nasync def dump_msgs(\n url: str,\n session: aiohttp.ClientSession,\n timeout: Optional[float] = None,\n) -> List[dict]:\n \"\"\"Dump server state.\"\"\"\n client = await session.ws_connect(url)\n msgs = []\n\n version = await client.receive_json()\n msgs.append(version)\n\n for to_send in (\n {\n \"command\": \"set_api_schema\",\n \"messageId\": \"api-schema-id\",\n \"schemaVersion\": MAX_SERVER_SCHEMA_VERSION,\n },\n {\"command\": \"start_listening\", \"messageId\": \"listen-id\"},\n ):\n await client.send_json(to_send)\n msgs.append(await client.receive_json())\n\n if timeout is None:\n await client.close()\n return msgs\n\n current_task = asyncio.current_task()\n assert current_task is not None\n 
asyncio.get_running_loop().call_later(timeout, current_task.cancel)\n\n while True:\n try:\n msg = await client.receive_json()\n msgs.append(msg)\n except asyncio.CancelledError:\n break\n\n await client.close()\n return msgs\n" }, { "alpha_fraction": 0.5454545617103577, "alphanum_fraction": 0.5454545617103577, "avg_line_length": 21, "blob_id": "725cf03951d8ee32e8f1fba5b88b7a18a485428d", "content_id": "0e1b20067ce123ab2df2289957b564fe2a08804a", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 22, "license_type": "permissive", "max_line_length": 21, "num_lines": 1, "path": "/test/model/__init__.py", "repo_name": "gsemet/zwave-js-server-python", "src_encoding": "UTF-8", "text": "\"\"\"Test the model.\"\"\"\n" }, { "alpha_fraction": 0.6950549483299255, "alphanum_fraction": 0.6950549483299255, "avg_line_length": 27, "blob_id": "e7679fa4e223e567b617c9729622f682f341a0c7", "content_id": "815be3c2ad3d9e634e34fe7114ff83fb672c1283", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 364, "license_type": "permissive", "max_line_length": 86, "num_lines": 13, "path": "/zwave_js_server/version.py", "repo_name": "gsemet/zwave-js-server-python", "src_encoding": "UTF-8", "text": "\"\"\"Version helper.\"\"\"\nimport aiohttp\n\nfrom .model.version import VersionInfo\n\n\nasync def get_server_version(url: str, session: aiohttp.ClientSession) -> VersionInfo:\n \"\"\"Return a server version.\"\"\"\n client = await session.ws_connect(url)\n try:\n return VersionInfo.from_message(await client.receive_json())\n finally:\n await client.close()\n" }, { "alpha_fraction": 0.6904761791229248, "alphanum_fraction": 0.6904761791229248, "avg_line_length": 41, "blob_id": "3539e8d7d0e371df58d8b6500e4e2313c5ef30cb", "content_id": "c991425aa1800b652ef9f9a02a97ad7fef6ee631", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, 
"language": "Python", "length_bytes": 42, "license_type": "permissive", "max_line_length": 41, "num_lines": 1, "path": "/zwave_js_server/util/__init__.py", "repo_name": "gsemet/zwave-js-server-python", "src_encoding": "UTF-8", "text": "\"\"\"Utility module for zwave-js-server.\"\"\"\n" }, { "alpha_fraction": 0.6459209322929382, "alphanum_fraction": 0.6459209322929382, "avg_line_length": 26.022727966308594, "blob_id": "27349ec2f09d304c1aee02eb4e5906511c57011e", "content_id": "ad2114cf5c220551c4c2d42967ea3adec91fd250", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1189, "license_type": "permissive", "max_line_length": 75, "num_lines": 44, "path": "/zwave_js_server/model/notification.py", "repo_name": "gsemet/zwave-js-server-python", "src_encoding": "UTF-8", "text": "\"\"\"\nModel for a Zwave Node's Notification Event.\n\nhttps://zwave-js.github.io/node-zwave-js/#/api/node?id=quotnotificationquot\n\"\"\"\n\nfrom typing import Literal, TYPE_CHECKING, Any, Dict, TypedDict\n\nif TYPE_CHECKING:\n from .node import Node\n\n\nclass NotificationDataType(TypedDict, total=False):\n \"\"\"Represent a notification event data dict type.\"\"\"\n\n source: Literal[\"node\"] # required\n event: Literal[\"notification\"] # required\n nodeId: int # required\n notificationLabel: str # required\n parameters: Dict[str, Any]\n\n\nclass Notification:\n \"\"\"Model for a Zwave Node's notification event.\"\"\"\n\n def __init__(self, node: \"Node\", data: NotificationDataType) -> None:\n \"\"\"Initialize.\"\"\"\n self.node = node\n self.data = data\n\n @property\n def node_id(self) -> int:\n \"\"\"Return node ID property.\"\"\"\n return self.data[\"nodeId\"]\n\n @property\n def notification_label(self) -> str:\n \"\"\"Return notification label property.\"\"\"\n return self.data[\"notificationLabel\"]\n\n @property\n def parameters(self) -> Dict[str, Any]:\n \"\"\"Return installer icon property.\"\"\"\n return 
self.data.get(\"parameters\", {})\n" }, { "alpha_fraction": 0.64014732837677, "alphanum_fraction": 0.6405156254768372, "avg_line_length": 30.569766998291016, "blob_id": "a57e8c0eb6379c15c4378589217c33481819dd86", "content_id": "7ad9e24a8f80ae8590d91391425053cee970d4d4", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2715, "license_type": "permissive", "max_line_length": 95, "num_lines": 86, "path": "/zwave_js_server/model/device_config.py", "repo_name": "gsemet/zwave-js-server-python", "src_encoding": "UTF-8", "text": "\"\"\"\nModel for a Zwave Node's device config.\n\nhttps://zwave-js.github.io/node-zwave-js/#/api/node?id=deviceconfig\n\"\"\"\n\nfrom typing import Dict, List, Optional, TypedDict\n\n\nclass DeviceConfigDataType(TypedDict, total=False):\n \"\"\"Represent a device config data dict type.\"\"\"\n\n manufacturer: str\n manufacturerId: str\n label: str\n description: str\n devices: List[Dict[str, str]]\n firmwareVersion: Dict[str, str]\n associations: Dict[str, dict]\n supportsZWavePlus: bool\n proprietary: dict\n paramInformation: Dict[str, dict]\n compat: Dict[str, dict]\n\n\nclass DeviceConfig:\n \"\"\"Model for a Zwave Node's device config.\"\"\"\n\n def __init__(self, data: DeviceConfigDataType) -> None:\n \"\"\"Initialize.\"\"\"\n self.data = data\n\n @property\n def manufacturer(self) -> Optional[str]:\n \"\"\"Return name of the manufacturer.\"\"\"\n return self.data.get(\"manufacturer\")\n\n @property\n def manufacturer_id(self) -> Optional[str]: # TODO: In the dump this is an int.\n \"\"\"Return manufacturer id (as defined in the specs) as a 4-digit hexadecimal string.\"\"\"\n return self.data.get(\"manufacturerId\")\n\n @property\n def label(self) -> Optional[str]:\n \"\"\"Return short label for the device.\"\"\"\n return self.data.get(\"label\")\n\n @property\n def description(self) -> Optional[str]:\n \"\"\"Return longer description of the device, usually the full 
name.\"\"\"\n return self.data.get(\"description\")\n\n @property\n def devices(self) -> List[Dict[str, str]]:\n \"\"\"Return list of product type and product ID combinations.\"\"\"\n return self.data.get(\"devices\", [])\n\n @property\n def firmware_version(self) -> Dict[str, str]:\n \"\"\"Return firmware version range this config is valid for.\"\"\"\n return self.data.get(\"firmwareVersion\", {})\n\n @property\n def associations(self) -> Dict[str, dict]:\n \"\"\"Return association groups the device supports.\"\"\"\n return self.data.get(\"associations\", {})\n\n @property\n def supports_zwave_plus(self) -> Optional[bool]:\n \"\"\"Return if the device complies with the Z-Wave+ standard.\"\"\"\n return self.data.get(\"supportsZWavePlus\")\n\n @property\n def proprietary(self) -> dict:\n \"\"\"Return dictionary of settings for the proprietary CC.\"\"\"\n return self.data.get(\"proprietary\", {})\n\n @property\n def param_information(self) -> Dict[str, dict]:\n \"\"\"Return dictionary of the configuration parameters the device supports.\"\"\"\n return self.data.get(\"paramInformation\", {})\n\n @property\n def compat(self) -> Dict[str, dict]:\n \"\"\"Return compatibility flags.\"\"\"\n return self.data.get(\"compat\", {})\n" }, { "alpha_fraction": 0.6818181872367859, "alphanum_fraction": 0.7272727489471436, "avg_line_length": 13.666666984558105, "blob_id": "88b34253d4fc5a8ff52649ff2828cc575c5c51e9", "content_id": "f11f178f520e11947e71218b349f0e00e8c55b91", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 88, "license_type": "permissive", "max_line_length": 24, "num_lines": 6, "path": "/requirements_dev.txt", "repo_name": "gsemet/zwave-js-server-python", "src_encoding": "UTF-8", "text": "-r requirements.txt\n-r requirements_lint.txt\n-r requirements_test.txt\ntox==3.22.0\n\n-e .\n" }, { "alpha_fraction": 0.6040268540382385, "alphanum_fraction": 0.6140939593315125, "avg_line_length": 
21.923076629638672, "blob_id": "26803cb529bc3d3b53d0a3a8949c193727587041", "content_id": "73a216ca86fa0cb2b9e82307e2771c2f236201e0", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 298, "license_type": "permissive", "max_line_length": 42, "num_lines": 13, "path": "/test/test_event.py", "repo_name": "gsemet/zwave-js-server-python", "src_encoding": "UTF-8", "text": "\"\"\"Test event helpers.\"\"\"\n\nfrom zwave_js_server import event\n\n\ndef test_once():\n \"\"\"Test once listens to event once.\"\"\"\n mock = event.EventBase()\n calls = []\n mock.once(\"test-event\", calls.append)\n mock.emit(\"test-event\", 1)\n mock.emit(\"test-event\", 2)\n assert len(calls) == 1\n" }, { "alpha_fraction": 0.7345454692840576, "alphanum_fraction": 0.7367272973060608, "avg_line_length": 40.66666793823242, "blob_id": "a655c232d5f71a21d8cff2315510bc0291c5f147", "content_id": "3a5f95f1066289cd154f5fe752d295db2a6002e1", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1375, "license_type": "permissive", "max_line_length": 80, "num_lines": 33, "path": "/test/test_version.py", "repo_name": "gsemet/zwave-js-server-python", "src_encoding": "UTF-8", "text": "\"\"\"Test the server version helper.\"\"\"\nfrom unittest.mock import call\n\nfrom zwave_js_server.version import get_server_version\n\n\nasync def test_get_server_version(client_session, ws_client, url, version_data):\n \"\"\"Test the get server version helper.\"\"\"\n ws_client.receive_json.return_value = version_data\n\n version_info = await get_server_version(url, client_session)\n\n assert client_session.ws_connect.called\n assert client_session.ws_connect.call_args == call(url)\n assert version_info.driver_version == version_data[\"driverVersion\"]\n assert version_info.server_version == version_data[\"serverVersion\"]\n assert version_info.home_id == version_data[\"homeId\"]\n assert 
version_info.min_schema_version == version_data[\"minSchemaVersion\"]\n assert version_info.max_schema_version == version_data[\"maxSchemaVersion\"]\n assert ws_client.close.called\n\n\nasync def test_missing_server_schema_version(\n client_session, ws_client, url, version_data\n):\n \"\"\"test missing schema version processed as schema version 0.\"\"\"\n del version_data[\"minSchemaVersion\"]\n del version_data[\"maxSchemaVersion\"]\n ws_client.receive_json.return_value = version_data\n version_info = await get_server_version(url, client_session)\n assert version_info.min_schema_version == 0\n assert version_info.max_schema_version == 0\n assert ws_client.close.called\n" }, { "alpha_fraction": 0.625, "alphanum_fraction": 0.6299999952316284, "avg_line_length": 33.482757568359375, "blob_id": "25baaf5b08797ed745dc089896d9524fc65fa0ab", "content_id": "0746ecb03c0f07b7e206225ecf6f5ba5962adcfc", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1000, "license_type": "permissive", "max_line_length": 79, "num_lines": 29, "path": "/zwave_js_server/model/version.py", "repo_name": "gsemet/zwave-js-server-python", "src_encoding": "UTF-8", "text": "\"\"\"Represents the version from the server.\"\"\"\n\nfrom dataclasses import dataclass\n\n\n@dataclass\nclass VersionInfo:\n \"\"\"Version info of the server.\"\"\"\n\n driver_version: str\n server_version: str\n home_id: int\n min_schema_version: int\n max_schema_version: int\n\n @classmethod\n def from_message(cls, msg: dict) -> \"VersionInfo\":\n \"\"\"Create a version info from a version message.\"\"\"\n return cls(\n driver_version=msg[\"driverVersion\"],\n server_version=msg[\"serverVersion\"],\n home_id=msg[\"homeId\"],\n # schema versions are sent in the response from schema version 1+\n # this info not present means the server is at schema version 0\n # at some point in time (when we stop supporting schema version 0),\n # we could adjust this code and 
assume the keys are there.\n min_schema_version=msg.get(\"minSchemaVersion\", 0),\n max_schema_version=msg.get(\"maxSchemaVersion\", 0),\n )\n" }, { "alpha_fraction": 0.682519793510437, "alphanum_fraction": 0.7012932896614075, "avg_line_length": 36.453125, "blob_id": "f87aa9d9d0e2ae42d1a7d1a65b5b6243992bef12", "content_id": "63149cbe405dd5c32be3fca73e9979b1e9a0bf33", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2397, "license_type": "permissive", "max_line_length": 86, "num_lines": 64, "path": "/test/util/test_node.py", "repo_name": "gsemet/zwave-js-server-python", "src_encoding": "UTF-8", "text": "\"\"\"Test node utility functions.\"\"\"\nimport pytest\n\nfrom zwave_js_server.exceptions import InvalidNewValue, NotFoundError\nfrom zwave_js_server.model.node import Node\nfrom zwave_js_server.model.value import ConfigurationValue\nfrom zwave_js_server.util.node import async_set_config_parameter\n\n\nasync def test_configuration_parameter_values(\n climate_radio_thermostat_ct100_plus, uuid4, mock_command\n):\n \"\"\"Test node methods to get and set configuration parameter values.\"\"\"\n node: Node = climate_radio_thermostat_ct100_plus\n ack_commands = mock_command(\n {\"command\": \"node.set_value\", \"nodeId\": node.node_id},\n {\"success\": True},\n )\n\n assert node.node_id == 13\n config_values = node.get_configuration_values()\n assert len(config_values) == 12\n\n for value in config_values.values():\n assert isinstance(value, ConfigurationValue)\n\n # Test setting a configuration parameter that has no metadata\n with pytest.raises(NotImplementedError):\n await async_set_config_parameter(node, 1, 2)\n\n # Test setting an enumerated configuration parameter with an invalid value\n with pytest.raises(InvalidNewValue):\n await async_set_config_parameter(node, 5, 1)\n\n # Test setting a range configuration parameter with an out of bounds value\n with pytest.raises(InvalidNewValue):\n await 
async_set_config_parameter(node, 200, 10)\n\n # Test configuration parameter not found when using an invalid property name\n with pytest.raises(NotFoundError):\n await async_set_config_parameter(node, 5, \"fake configuration parameter name\")\n\n # Test using an invalid state label to set a value\n with pytest.raises(InvalidNewValue):\n await async_set_config_parameter(node, \"fake state label\", 1)\n\n # Test configuration parameter not found when property key is invalid\n with pytest.raises(NotFoundError):\n await async_set_config_parameter(node, 1, 1, property_key=1)\n\n # Test setting a configuration parameter by state label and property name\n await async_set_config_parameter(\n node, \"2.0\\u00b0 F\", \"Temperature Reporting Threshold\"\n )\n\n value = node.values[\"13-112-0-1-00-00\"]\n assert len(ack_commands) == 1\n assert ack_commands[0] == {\n \"command\": \"node.set_value\",\n \"nodeId\": node.node_id,\n \"valueId\": value.data,\n \"value\": 4,\n \"messageId\": uuid4,\n }\n" }, { "alpha_fraction": 0.6991869807243347, "alphanum_fraction": 0.7154471278190613, "avg_line_length": 29.75, "blob_id": "dd903dd8e18a0d5e4fa20913764739ff2cfbfaaf", "content_id": "e8459e716a82908a4a28059b713230d129d08cfc", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 123, "license_type": "permissive", "max_line_length": 78, "num_lines": 4, "path": "/zwave_js_server/model/__init__.py", "repo_name": "gsemet/zwave-js-server-python", "src_encoding": "UTF-8", "text": "\"\"\"Provide a model based on Z-Wave JS.\n\nThe model pieces here should map 1:1 with the model of Z-Wave JS upstream API.\n\"\"\"\n" }, { "alpha_fraction": 0.5324675440788269, "alphanum_fraction": 0.701298713684082, "avg_line_length": 18.25, "blob_id": "85b8bf4630dfe32ac2a01de140416e78b3c9764d", "content_id": "bfd2f8ce8c5786dfef2ce161daef194182714025", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": 
"Text", "length_bytes": 77, "license_type": "permissive", "max_line_length": 21, "num_lines": 4, "path": "/requirements_test.txt", "repo_name": "gsemet/zwave-js-server-python", "src_encoding": "UTF-8", "text": "pytest==6.2.2\npytest-aiohttp==0.3.0\npytest-cov==2.11.1\npytest-timeout==1.4.2\n" } ]
26
wangbq18/kesic_army_rc
https://github.com/wangbq18/kesic_army_rc
1c9d9f3c0cb9f8e1a12f930fc9c88412957f94f5
2d64a4c7b06bf241036097ec037ab7a22b5776fd
d86ae08ce83cc01436707d8fce99a410fd77a459
refs/heads/master
2020-08-11T10:58:25.091771
2019-10-07T09:58:11
2019-10-07T09:58:11
214,554,124
2
0
MIT
2019-10-12T01:11:28
2019-10-07T09:58:14
2019-10-07T09:58:12
null
[ { "alpha_fraction": 0.5704600214958191, "alphanum_fraction": 0.5849878787994385, "avg_line_length": 30.227272033691406, "blob_id": "b3c52d272cab5800c1a4e653035866632299fda0", "content_id": "8589a3945f56747e063cd2573b1bdd0e2105ca81", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2111, "license_type": "permissive", "max_line_length": 122, "num_lines": 66, "path": "/code/treat_res.py", "repo_name": "wangbq18/kesic_army_rc", "src_encoding": "UTF-8", "text": "\nimport numpy as np \nfrom utils import * \ndatas = np.load('test_res.npy', allow_pickle=True).tolist()\nq_final_res_dict = {}\nfor q_id in datas.keys():\n q_final_res_dict.setdefault(q_id, '未知')\n q_cnt_dict = datas[q_id]['q_cnt']\n answer_dict = datas[q_id]['answer']\n q_cnt = 1 \n if q_cnt_dict:\n q_cnt_after_sort = sorted(q_cnt_dict.items(), key=lambda info: info[1], reverse=True)\n q_cnt = q_cnt_after_sort[0][0]\n else:\n q_cnt = 1\n\n # print(q_cnt)\n # q_cnt_after_sort = sorted(q_cnt.items(), key=lambda info: info[1], reverse=True)\n # q_cnt = q_cnt_after_sort[0][0]\n if answer_dict:\n answer_after_sort = sorted(answer_dict.items(), key=lambda info: info[1], reverse=True)\n print(answer_after_sort)\n q_final_res_dict[q_id] = answer_after_sort[0][0]\n\n \n if len(answer_after_sort) > 1: \n q_final_res_dict[q_id] += answer_after_sort[1][0]\n if len(answer_after_sort) > 2 and answer_after_sort[2][0]: \n q_final_res_dict[q_id] += answer_after_sort[2][0]\n \n\n else:\n q_final_res_dict[q_id] = '未知'\n\n \nimport time \nimport re \n# 读取测试数据,然后生成答案文件\nch_good_regex = re.compile(r'[\\u4e00-\\u9fa5a-zA-Z0-9]')\ntest_path = '../data/test_data_r0.csv'\ndef get_test_dataset_submit(test_path):\n test_data_json = [] \n test_data = pd.read_csv(test_path)\n submit = {'question_id': [], 'answer': []}\n for d in test_data.iterrows():\n question_id = d[1]['question_id']\n submit['question_id'].append(question_id)\n print(question_id)\n # assert question_id in 
q_final_res_dict\n answer = q_final_res_dict.get(question_id, '未知')\n ch_find = ch_good_regex.findall(answer)\n if len(ch_find) == 0:\n answer = '未知'\n answer = str(answer.replace(',', ' '))\n submit['answer'].append(answer)\n write_csv(submit, './testsubmit{}.method7.csv'.format(time.strftime('%Y-%m-%d %H:%M:%S',time.localtime(time.time()))))\n\n\n\n\n\n# get_test_dataset(test_path)\nget_test_dataset_submit(test_path)\n\n\n\n# \n\n\n" }, { "alpha_fraction": 0.5136821269989014, "alphanum_fraction": 0.5383981466293335, "avg_line_length": 31.61612319946289, "blob_id": "68a19782dc5b0e594fe85726c8aba7732b6d4c73", "content_id": "5bc8504ea4d6bcece5c347dba84eaa456182765d", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 17705, "license_type": "permissive", "max_line_length": 83, "num_lines": 521, "path": "/code/dgcnn.py", "repo_name": "wangbq18/kesic_army_rc", "src_encoding": "UTF-8", "text": "#! -*- coding:utf-8 -*-\n\nimport json\nimport numpy as np\nfrom tqdm import tqdm\nimport jieba_fast as jieba\nfrom gensim.models import Word2Vec\nimport re, os\nimport codecs\nimport editdistance\nimport warnings\nwarnings.filterwarnings(\"ignore\") # 忽略keras带来的满屏警告\n\nmode = 0\nchar_size = 128\nmaxlen = 256\nmin_count = 16\n\nword2vec = Word2Vec.load('data/word2vec_baike')\n\nid2word = {i + 1: j for i, j in enumerate(word2vec.wv.index2word)}\nword2id = {j: i for i, j in id2word.items()}\nword2vec = word2vec.wv.syn0\nword_size = word2vec.shape[1]\nword2vec = np.concatenate([np.zeros((1, word_size)), word2vec])\n\nfor w in word2id:\n if w not in jieba.dt.FREQ:\n jieba.add_word(w)\n\n\ndef tokenize(s):\n return jieba.lcut(s, HMM=False)\n\n\ndef sent2vec(S):\n \"\"\"S格式:[[w1, w2]]\n \"\"\"\n V = []\n for s in S:\n V.append([])\n for w in s:\n for _ in w:\n V[-1].append(word2id.get(w, 0))\n V = seq_padding(V)\n V = word2vec[V]\n return V\n\n\nwebqa_data = json.load(open('data/WebQA.json'))\nsogou_data = 
json.load(open('data/SogouQA.json'))\n\nif not os.path.exists('./dgcnn_config.json'):\n chars = {}\n for D in [webqa_data, sogou_data]:\n for d in tqdm(iter(D)):\n for c in d['question']:\n chars[c] = chars.get(c, 0) + 1\n for p in d['passages']:\n for c in p['passage']:\n chars[c] = chars.get(c, 0) + 1\n chars = {i: j for i, j in chars.items() if j >= min_count}\n id2char = {i + 2: j for i, j in enumerate(chars)} # 0: mask, 1: padding\n char2id = {j: i for i, j in id2char.items()}\n json.dump([id2char, char2id], open('./dgcnn_config.json', 'w'))\nelse:\n id2char, char2id = json.load(open('./dgcnn_config.json'))\n\nif not os.path.exists('./random_order.json'):\n random_order = [x for x in range(len(sogou_data))]\n np.random.shuffle(random_order)\n json.dump(random_order, open('./random_order.json', 'w'), indent=4)\nelse:\n random_order = json.load(open('./random_order.json'))\n\ntrain_data = [\n sogou_data[j] for i, j in enumerate(random_order) if i % 3 != mode\n]\ndev_data = [sogou_data[j] for i, j in enumerate(random_order) if i % 3 == mode]\ntrain_data.extend(train_data)\ntrain_data.extend(webqa_data) # 将SogouQA和WebQA按2:1的比例混合\n\n\ndef seq_padding(X, padding=0):\n L = [len(x) for x in X]\n ML = max(L)\n return np.array([\n np.concatenate([x, [padding] * (ML - len(x))]) if len(x) < ML else x\n for x in X\n ])\n\n\nclass data_generator(object):\n def __init__(self, data, batch_size=128):\n self.data = data\n self.batch_size = batch_size\n self.steps = len(self.data) // self.batch_size\n if len(self.data) % self.batch_size != 0:\n self.steps += 1\n\n def random_generate(self, s):\n l = maxlen // 2 + maxlen % 2\n if len(s) > l:\n p = np.random.random()\n if p > 0.5:\n i = np.random.randint(len(s) - l + 1)\n j = np.random.randint(l + i, min(len(s), i + maxlen) + 1)\n return s[i:j]\n else:\n return s[:maxlen]\n else:\n return s\n\n def __len__(self):\n return self.steps\n\n def __iter__(self):\n while True:\n idxs = [x for x in range(len(self.data))]\n 
np.random.shuffle(idxs)\n Q1, Q2, P1, P2, A1, A2 = [], [], [], [], [], []\n for i in idxs:\n d = self.data[i]\n # 问题\n q_text = d['question']\n q_text_words = tokenize(q_text)\n q_text = ''.join(q_text_words)\n qid = [char2id.get(c, 1) for c in q_text]\n # 篇章\n pi = np.random.choice(len(d['passages']))\n p = d['passages'][pi]\n p_text = self.random_generate(p['passage'])\n p_text_words = tokenize(p_text)\n p_text = ''.join(p_text_words)\n pid = [char2id.get(c, 1) for c in p_text]\n # 答案\n a1, a2 = np.zeros(len(p_text)), np.zeros(len(p_text))\n if p['answer']:\n for j in re.finditer(re.escape(p['answer']), p_text):\n a1[j.start()] = 1\n a2[j.end() - 1] = 1\n # 组合\n Q1.append(qid)\n Q2.append(q_text_words)\n P1.append(pid)\n P2.append(p_text_words)\n A1.append(a1)\n A2.append(a2)\n if len(Q1) == self.batch_size or i == idxs[-1]:\n Q1 = seq_padding(Q1)\n Q2 = sent2vec(Q2)\n P1 = seq_padding(P1)\n P2 = sent2vec(P2)\n A1 = seq_padding(A1)\n A2 = seq_padding(A2)\n yield [Q1, Q2, P1, P2, A1, A2], None\n Q1, Q2, P1, P2, A1, A2 = [], [], [], [], [], []\n\n\nfrom keras.layers import *\nfrom keras.models import Model\nimport keras.backend as K\nfrom keras.callbacks import Callback\nfrom radam import RAdam\n\n\nclass OurLayer(Layer):\n \"\"\"定义新的Layer,增加reuse方法,允许在定义Layer时调用现成的层\n \"\"\"\n def reuse(self, layer, *args, **kwargs):\n if not layer.built:\n if len(args) > 0:\n inputs = args[0]\n else:\n inputs = kwargs['inputs']\n if isinstance(inputs, list):\n input_shape = [K.int_shape(x) for x in inputs]\n else:\n input_shape = K.int_shape(inputs)\n layer.build(input_shape)\n outputs = layer.call(*args, **kwargs)\n for w in layer.trainable_weights:\n if w not in self._trainable_weights:\n self._trainable_weights.append(w)\n for w in layer.non_trainable_weights:\n if w not in self._non_trainable_weights:\n self._non_trainable_weights.append(w)\n return outputs\n\n\nclass AttentionPooling1D(OurLayer):\n \"\"\"通过加性Attention,将向量序列融合为一个定长向量\n \"\"\"\n def __init__(self, h_dim=None, 
**kwargs):\n super(AttentionPooling1D, self).__init__(**kwargs)\n self.h_dim = h_dim\n\n def build(self, input_shape):\n super(AttentionPooling1D, self).build(input_shape)\n if self.h_dim is None:\n self.h_dim = input_shape[0][-1]\n self.k_dense = Dense(self.h_dim, use_bias=False, activation='tanh')\n self.o_dense = Dense(1, use_bias=False)\n\n def call(self, inputs):\n xo, mask = inputs\n x = xo\n x = self.reuse(self.k_dense, x)\n x = self.reuse(self.o_dense, x)\n x = x - (1 - mask) * 1e12\n x = K.softmax(x, 1)\n return K.sum(x * xo, 1)\n\n def compute_output_shape(self, input_shape):\n return (None, input_shape[0][-1])\n\n\nclass DilatedGatedConv1D(OurLayer):\n \"\"\"膨胀门卷积(DGCNN)\n \"\"\"\n def __init__(self,\n o_dim=None,\n k_size=3,\n rate=1,\n skip_connect=True,\n drop_gate=None,\n **kwargs):\n super(DilatedGatedConv1D, self).__init__(**kwargs)\n self.o_dim = o_dim\n self.k_size = k_size\n self.rate = rate\n self.skip_connect = skip_connect\n self.drop_gate = drop_gate\n\n def build(self, input_shape):\n super(DilatedGatedConv1D, self).build(input_shape)\n if self.o_dim is None:\n self.o_dim = input_shape[0][-1]\n self.conv1d = Conv1D(self.o_dim * 2,\n self.k_size,\n dilation_rate=self.rate,\n padding='same')\n if self.skip_connect and self.o_dim != input_shape[0][-1]:\n self.conv1d_1x1 = Conv1D(self.o_dim, 1)\n\n def call(self, inputs):\n xo, mask = inputs\n x = xo * mask\n x = self.reuse(self.conv1d, x)\n x, g = x[..., :self.o_dim], x[..., self.o_dim:]\n if self.drop_gate is not None:\n g = K.in_train_phase(K.dropout(g, self.drop_gate), g)\n g = K.sigmoid(g)\n if self.skip_connect:\n if self.o_dim != K.int_shape(xo)[-1]:\n xo = self.reuse(self.conv1d_1x1, xo)\n return (xo * (1 - g) + x * g) * mask\n else:\n return x * g * mask\n\n def compute_output_shape(self, input_shape):\n return input_shape[0][:-1] + (self.o_dim, )\n\n\nclass MixEmbedding(OurLayer):\n \"\"\"混合Embedding\n 输入字id、词embedding,然后字id自动转字embedding,\n 词embedding做一个dense,再加上字embedding,并且\n 
加上位置embedding。\n \"\"\"\n def __init__(self, i_dim, o_dim, **kwargs):\n super(MixEmbedding, self).__init__(**kwargs)\n self.i_dim = i_dim\n self.o_dim = o_dim\n\n def build(self, input_shape):\n super(MixEmbedding, self).build(input_shape)\n self.char_embeddings = Embedding(self.i_dim, self.o_dim)\n self.word_dense = Dense(self.o_dim, use_bias=False)\n\n def call(self, inputs):\n x1, x2 = inputs\n x1 = self.reuse(self.char_embeddings, x1)\n x2 = self.reuse(self.word_dense, x2)\n return x1 + x2\n\n def compute_output_shape(self, input_shape):\n return input_shape[0] + (self.o_dim, )\n\n\ndef seq_and_vec(x):\n x, v = x\n v = K.expand_dims(v, 1)\n v = K.tile(v, [1, K.shape(x)[1], 1])\n return K.concatenate([x, v], 2)\n\n\nq1_in = Input(shape=(None, )) # 问题字id输入\nq2_in = Input(shape=(None, word_size)) # 问题词向量输入\np1_in = Input(shape=(None, )) # 篇章字id输入\np2_in = Input(shape=(None, word_size)) # 篇章词向量输入\na1_in = Input(shape=(None, )) # 答案左边界输入\na2_in = Input(shape=(None, )) # 答案右边界输入\n\nq1, q2, p1, p2, a1, a2 = q1_in, q2_in, p1_in, p2_in, a1_in, a2_in\nq_mask = Lambda(\n lambda x: K.cast(K.greater(K.expand_dims(x, 2), 0), 'float32'))(q1)\np_mask = Lambda(\n lambda x: K.cast(K.greater(K.expand_dims(x, 2), 0), 'float32'))(p1)\n\nembeddings = MixEmbedding(len(char2id) + 2, char_size)\n\nq = embeddings([q1, q2])\nq = Dropout(0.1)(q)\np = embeddings([p1, p2])\np = Dropout(0.1)(p)\n\nq = DilatedGatedConv1D(rate=1, drop_gate=0.1)([q, q_mask])\nq = DilatedGatedConv1D(rate=2, drop_gate=0.1)([q, q_mask])\nq = DilatedGatedConv1D(rate=1, drop_gate=0.1)([q, q_mask])\nqv = AttentionPooling1D()([q, q_mask])\n\np = Lambda(seq_and_vec)([p, qv])\np = Dense(char_size, use_bias=False)(p)\np = DilatedGatedConv1D(rate=1, drop_gate=0.1)([p, p_mask])\np = DilatedGatedConv1D(rate=2, drop_gate=0.1)([p, p_mask])\np = DilatedGatedConv1D(rate=4, drop_gate=0.1)([p, p_mask])\np = DilatedGatedConv1D(rate=8, drop_gate=0.1)([p, p_mask])\np = DilatedGatedConv1D(rate=16, drop_gate=0.1)([p, p_mask])\np = 
DilatedGatedConv1D(rate=1, drop_gate=0.1)([p, p_mask])\np = Lambda(seq_and_vec)([p, qv])\npv = AttentionPooling1D()([p, p_mask])\n\npa = Dense(1, activation='sigmoid')(pv)\npa1 = Dense(1, activation='sigmoid')(p)\npa2 = Dense(1, activation='sigmoid')(p)\npa1 = Lambda(lambda x: x[0] * x[1][..., 0])([pa, pa1])\npa2 = Lambda(lambda x: x[0] * x[1][..., 0])([pa, pa2])\n\nmodel = Model([q1_in, q2_in, p1_in, p2_in], [pa1, pa2])\nmodel.summary()\n\ntrain_model = Model([q1_in, q2_in, p1_in, p2_in, a1_in, a2_in], [pa1, pa2])\n\n\ndef focal_loss(y_true, y_pred):\n alpha, gamma = 0.25, 2\n y_pred = K.clip(y_pred, 1e-8, 1 - 1e-8)\n return - alpha * y_true * K.log(y_pred) * (1 - y_pred)**gamma\\\n - (1 - alpha) * (1 - y_true) * K.log(1 - y_pred) * y_pred**gamma\n\n\nloss1 = focal_loss(a1_in, pa1)\nloss1 = K.sum(loss1 * p_mask[..., 0]) / K.sum(p_mask)\nloss2 = focal_loss(a2_in, pa2)\nloss2 = K.sum(loss2 * p_mask[..., 0]) / K.sum(p_mask)\nloss = (loss1 + loss2) * 100 # 放大100倍,可读性好些,不影响Adam的优化\n\ntrain_model.add_loss(loss)\ntrain_model.compile(optimizer=RAdam(1e-3))\n\n\nclass ExponentialMovingAverage:\n \"\"\"对模型权重进行指数滑动平均。\n 用法:在model.compile之后、第一次训练之前使用;\n 先初始化对象,然后执行inject方法。\n \"\"\"\n def __init__(self, model, momentum=0.9999):\n self.momentum = momentum\n self.model = model\n self.ema_weights = [K.zeros(K.shape(w)) for w in model.weights]\n\n def inject(self):\n \"\"\"添加更新算子到model.metrics_updates。\n \"\"\"\n self.initialize()\n for w1, w2 in zip(self.ema_weights, self.model.weights):\n op = K.moving_average_update(w1, w2, self.momentum)\n self.model.metrics_updates.append(op)\n\n def initialize(self):\n \"\"\"ema_weights初始化跟原模型初始化一致。\n \"\"\"\n self.old_weights = K.batch_get_value(self.model.weights)\n K.batch_set_value(zip(self.ema_weights, self.old_weights))\n\n def apply_ema_weights(self):\n \"\"\"备份原模型权重,然后将平均权重应用到模型上去。\n \"\"\"\n self.old_weights = K.batch_get_value(self.model.weights)\n ema_weights = K.batch_get_value(self.ema_weights)\n 
K.batch_set_value(zip(self.model.weights, ema_weights))\n\n def reset_old_weights(self):\n \"\"\"恢复模型到旧权重。\n \"\"\"\n K.batch_set_value(zip(self.model.weights, self.old_weights))\n\n\nEMAer = ExponentialMovingAverage(train_model)\nEMAer.inject()\n\n\ndef extract_answer(q_text, p_texts, maxlen=12, threshold=0.1):\n \"\"\"q_text为问题,p_texts为篇章集合(list)\n 最终输出一个dict,dict的key为候选答案,而value为对应的分数。\n \"\"\"\n Q1, Q2, P1, P2 = [], [], [], []\n # 问题\n q_text_words = tokenize(q_text)\n q_text = ''.join(q_text_words)\n qid = [char2id.get(c, 1) for c in q_text]\n for i, p_text in enumerate(p_texts):\n # 篇章\n p_text_words = tokenize(p_text)\n p_text = ''.join(p_text_words)\n pid = [char2id.get(c, 1) for c in p_text]\n Q1.append(qid)\n Q2.append(q_text_words)\n P1.append(pid)\n P2.append(p_text_words)\n # 给出结果序列\n Q1 = seq_padding(Q1)\n Q2 = sent2vec(Q2)\n P1 = seq_padding(P1)\n P2 = sent2vec(P2)\n A1, A2 = model.predict([Q1, Q2, P1, P2])\n # 输出每个篇章的答案\n Result = []\n for a1, a2, p in zip(A1, A2, p_texts):\n a1, a2 = a1[:len(p)], a2[:len(p)]\n l_idxs = np.where(a1 > threshold)[0]\n r_idxs = np.where(a2 > threshold)[0]\n result = {}\n for i in l_idxs:\n cond = (r_idxs >= i) & (r_idxs < i + maxlen)\n for j in r_idxs[cond]:\n k = p[i:j + 1]\n result[k] = max(result.get(k, 0), a1[i] * a2[j])\n if result:\n Result.append(result)\n # 综合所有答案\n R = {}\n for result in Result:\n for k, v in result.items():\n R[k] = R.get(k, []) + [v]\n R = {k: (np.array(v)**2).sum() / (sum(v) + 1) for k, v in R.items()}\n return R\n\n\ndef max_in_dict(d):\n if d:\n return sorted(d.items(), key=lambda s: -s[1])[0][0]\n\n\ndef predict(data, filename, threshold=0.1):\n with codecs.open(filename, 'w', encoding='utf-8') as f:\n for d in tqdm(iter(data)):\n q_text = d['question']\n p_texts = [p['passage'] for p in d['passages']]\n a = extract_answer(q_text, p_texts, threshold=threshold)\n a = max_in_dict(a)\n if a:\n s = u'%s\\t%s\\n' % (d['id'], a)\n else:\n s = u'%s\\t\\n' % (d['id'])\n f.write(s)\n\n\nclass 
Evaluate(Callback):\n def __init__(self):\n self.metrics = []\n self.best = 0.\n self.stage = 0\n\n def on_epoch_end(self, epoch, logs=None):\n EMAer.apply_ema_weights()\n acc, f1, final = self.evaluate()\n self.metrics.append((epoch, acc, f1, final))\n json.dump(self.metrics, open('train.log', 'w'), indent=4)\n if final > self.best:\n self.best = final\n train_model.save_weights('best_model.weights')\n print('learning rate: %s' % (K.eval(self.model.optimizer.lr)))\n print('acc: %.4f, f1: %.4f, final: %.4f, best final: %.4f\\n' %\n (acc, f1, final, self.best))\n EMAer.reset_old_weights()\n if epoch + 1 == 30 or (self.stage == 0 and epoch > 15 and\n (final < 0.5 or np.argmax(self.metrics, 0)[3] <\n len(self.metrics) - 5)):\n \"\"\"达到30个epoch,或者final开始下降到0.5以下(开始发散),\n 或者连续5个epoch都没提升,就降低学习率。\n \"\"\"\n self.stage = 1\n train_model.load_weights('best_model.weights')\n EMAer.initialize()\n K.set_value(self.model.optimizer.lr, 1e-4)\n K.set_value(self.model.optimizer.iterations, 0)\n opt_weights = K.batch_get_value(self.model.optimizer.weights)\n opt_weights = [w * 0. 
for w in opt_weights]\n K.batch_set_value(zip(self.model.optimizer.weights, opt_weights))\n\n def evaluate(self, threshold=0.1):\n predict(dev_data, 'tmp_result.txt', threshold=threshold)\n acc, f1, final = json.loads(\n os.popen(\n 'python ../evaluate_tool/evaluate.py tmp_result.txt tmp_output.txt'\n ).read().strip())\n return acc, f1, final\n\n\ntrain_D = data_generator(train_data)\nevaluator = Evaluate()\n\nif __name__ == '__main__':\n train_model.fit_generator(train_D.__iter__(),\n steps_per_epoch=len(train_D),\n epochs=120,\n callbacks=[evaluator])\nelse:\n train_model.load_weights('best_model.weights')\n" }, { "alpha_fraction": 0.5122777223587036, "alphanum_fraction": 0.5363713502883911, "avg_line_length": 28.93317985534668, "blob_id": "3013bc9c026f1fc83eeaae0bb32c9aae9617a79e", "content_id": "0cb6c8778708b7fd1e0067bf429dd36e830c5771", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 13209, "license_type": "permissive", "max_line_length": 140, "num_lines": 434, "path": "/code/method2.py", "repo_name": "wangbq18/kesic_army_rc", "src_encoding": "UTF-8", "text": "#! 
-*- coding:utf-8 -*-\n\nimport json\nimport numpy as np\nimport pandas as pd\nfrom random import choice\nfrom keras_bert import load_trained_model_from_checkpoint, Tokenizer\nimport re, os\nimport codecs\nfrom keras.callbacks import Callback\nimport tqdm \nmaxlen = 510 # 450 is ok \nimport sys \nfrom utils import write_csv\n\n\nimport tensorflow as tf \n\n\n\nbase_path = \"/media/yinshuai/d8644f6c-5a97-4e12-909b-b61d2271b61c/nlp-datasets\"\nconfig_path = os.path.join(base_path, 'chinese-bert_chinese_wwm_L-12_H-768_A-12/bert_config.json')\ncheckpoint_path = os.path.join(base_path, 'chinese-bert_chinese_wwm_L-12_H-768_A-12/bert_model.ckpt')\ndict_path = os.path.join(base_path, 'chinese-bert_chinese_wwm_L-12_H-768_A-12/vocab.txt')\n\ndata_path = '/media/yinshuai/d8644f6c-5a97-4e12-909b-b61d2271b61c/nlp-datasets/senti_dataset'\nmodel_path = '/media/yinshuai/d8644f6c-5a97-4e12-909b-b61d2271b61c/nlp-datasets'\nweight_save_path = os.path.join(model_path, 'kesicnl2sql_finetune_method2.weights') # kesicnl2sql_finetune.weight is good \ntoken_dict = {}\n\nwith codecs.open(dict_path, 'r', 'utf8') as reader:\n for line in reader:\n token = line.strip()\n token_dict[token] = len(token_dict)\n\n\nclass OurTokenizer(Tokenizer):\n def _tokenize(self, text):\n R = []\n for c in text:\n if c in self._token_dict:\n R.append(c)\n elif self._is_space(c):\n R.append('[unused1]') # space类用未经训练的[unused1]表示\n else:\n R.append('[UNK]') # 剩余的字符是[UNK]\n return R\n\ntokenizer = OurTokenizer(token_dict)\n\n\n\n\nmode = 'train'\n\nif mode == 'train':\n datas = np.load('kesic_new.npy', allow_pickle=True).tolist()\n data = [] \n for q in datas:\n d = datas[q]\n #print(d)\n data.append((d['para'] + d['question'], d['answer_mark'], len(d['answer_mark'])))\n\n # for d in datas:\n # if d['answer'] == '':\n # data.append((d['passage'] + ' ' + d['question'], d['answer'], 0))\n # if len(data) > 8 * lendata:\n # break \nelif mode == 'test': \n datas = np.load('kesic_test.npy', allow_pickle=True).tolist()\n 
test_data = [] \n for d in datas:\n test_data.append((d['passage'] + d['question'], d['question_id']))\n\n#data = data[:100]\n#print(data)\n\n# 按照9:1的比例划分训练集和验证集\n'''\nrandom_order = [x for x in range(len(data))]\nnp.random.shuffle(random_order)\ntrain_data = [data[j] for i, j in enumerate(random_order) if i % 10 != 0]\nvalid_data = [data[j] for i, j in enumerate(random_order) if i % 10 == 0]\n'''\n# 评估数据集就别是那种打撒的了\nif mode == 'train':\n train_data = data[: int(len(data) * 0.85)]\n valid_data = data[int(len(data) * 0.85):]\n\n random_order = [x for x in range(len(train_data))]\n np.random.shuffle(random_order)\n train_data = [train_data[idx] for idx in random_order]\n\n print(len(train_data))\n\n print(len(valid_data))\n\ndef seq_padding(X, padding=0):\n L = [len(x) for x in X]\n ML = max(L)\n return np.array([\n np.concatenate([x, [padding] * (ML - len(x))]) if len(x) < ML else x for x in X\n ])\n\n\nclass data_generator:\n def __init__(self, data, batch_size=6):\n self.data = data\n self.batch_size = batch_size\n self.steps = len(self.data) // self.batch_size\n if len(self.data) % self.batch_size != 0:\n self.steps += 1\n def __len__(self):\n return self.steps\n def __iter__(self):\n while True:\n idxs = [id for id in range(len(self.data))]\n np.random.shuffle(idxs)\n X1, X2, H, Y, ANSWER_POS, PASSAGE_MASK = [], [], [],[], [], []\n #X1, X2,Y = [], [], []\n for i in idxs:\n d = self.data[i]\n #text = d[0][:maxlen] ## 切割了\n text = d[0]\n # print(text)\n x1, x2 = tokenizer.encode(first=text)\n passage_mask = [0] + [1] * len(text) + [0]\n answer_pos = np.zeros(len(text) + 2 , dtype='int32')\n h = [len(text) + 2]\n\n un_good = False \n for ans_info in d[1]:\n if ans_info['answer'] not in text: \n un_good = True \n print(text)\n continue\n idx = text.index(ans_info['answer']) \n # ans_start_pos[idx + 1] = 1 \n # ans_end_pos[idx + len(ans_info['answer'])] = 1 \n answer_pos[idx + 1: idx + 1 + len(ans_info['answer'])] = 1 \n if un_good == True: continue \n y = d[2]\n #y = 
0\n X1.append(x1)\n X2.append(x2)\n # ANS_START_POS.append(ans_start_pos)\n # ANS_END_POS.append(ans_end_pos)\n PASSAGE_MASK.append(passage_mask)\n ANSWER_POS.append(answer_pos)\n H.append(h)\n \n Y.append([y])\n if len(X1) == self.batch_size or i == idxs[-1]:\n X1 = seq_padding(X1)\n X2 = seq_padding(X2)\n Y = seq_padding(Y)\n \n # ANS_START_POS = seq_padding(ANS_START_POS)\n # ANS_END_POS = seq_padding(ANS_END_POS)\n PASSAGE_MASK = seq_padding(PASSAGE_MASK)\n ANSWER_POS = seq_padding(ANSWER_POS)\n H = seq_padding(H)\n \n\n\n yield [X1, X2, H, Y, ANSWER_POS, PASSAGE_MASK], None \n X1, X2, H, Y, ANSWER_POS, PASSAGE_MASK= [], [], [],[], [], []\n #yield [X1, X2, Y], None \n #X1, X2, Y = [], [], []\n\n\nfrom keras.layers import *\nfrom keras.models import Model\nimport keras.backend as K\nfrom keras.optimizers import Adam\n\n\ndef seq_gather(x):\n \"\"\"seq是[None, seq_len, s_size]的格式,\n idxs是[None, n]的格式,在seq的第i个序列中选出第idxs[i]个向量,\n 最终输出[None, n, s_size]的向量。\n seq_gather[x, h]\n \"\"\"\n seq, idxs = x\n idxs = K.cast(idxs, 'int32') # must int 32 \n return K.tf.batch_gather(seq, idxs)\n\n\n\nbert_model = load_trained_model_from_checkpoint(config_path, checkpoint_path, seq_len=None)\n\nfor l in bert_model.layers:\n l.trainable = True\n\nx1_in = Input(shape=(None,))\nx2_in = Input(shape=(None,))\nh_in = Input(shape=(None,))\ny_in = Input(shape=(None,))\n\n\nanswer_pos_in = Input(shape=(None,), dtype='int32')\npassage_mask_in = Input(shape=(None,))\n\n\nx = bert_model([x1_in, x2_in])\n\n#x = Dense(units=128, activation='relu')(x) \n# question前面的cls呢\n#q_cls_idx = 256\n\n\n\nq_cls = Lambda(seq_gather)([x, h_in]) # header [cls] is selected [batch_size, header_step, hidden_size]\n\nq_cls = Lambda(lambda x: x[:, 0])(q_cls) \n\nx_cls = Lambda(lambda x: x[:, 0])(x)\n\n\ncls_info = concatenate([q_cls, x_cls], axis=-1) \n\n\n\n\np = Dense(10, activation='softmax')(cls_info)\n\n\n\n# x = Dropout(0.1)(x)\ncls_info_dense = Dense(768, activation='relu')(cls_info)\n\ncls_info_dense = 
Lambda(lambda x: K.expand_dims(x, 1))(cls_info_dense) \nx_answer_pos = add([x, cls_info_dense])\n\nanswer_pos = Dense(2, activation='softmax')(x_answer_pos)\npassage_mask = passage_mask_in\n\n\ntrain_model = Model([x1_in, x2_in, h_in, y_in, answer_pos_in, passage_mask_in], [p, answer_pos])\nmodel = Model([x1_in, x2_in, h_in], [p, answer_pos])\n#model = Model([x1_in, x2_in, y_in], [p])\n\nloss_p = K.sparse_categorical_crossentropy(y_in, p) \nloss_p = K.mean(loss_p)\n\np_ans_pos_loss = K.sparse_categorical_crossentropy(answer_pos_in, answer_pos)\np_ans_pos_loss = K.sum(p_ans_pos_loss * passage_mask) / K.sum(passage_mask)\n\n\nloss = loss_p + p_ans_pos_loss \n\n#loss = loss_p \n\ntrain_model.add_loss(loss)\ntrain_model.compile(\n optimizer=Adam(10e-5), # 用足够小的学习率\n metrics=['accuracy']\n)\ntrain_model.summary()\n\nif mode == 'train':\n train_D = data_generator(train_data)\n\n # for d in train_D:\n # pass \n valid_D = data_generator(valid_data)\n\n# for d in train_D:\n# pass \n\n#for d in train_D:\n# for i in range(len(d[0])):\n# print(d[0][i].shape)\n# print('-' * 10)\n# # print(d[0][4].shape)\n\n# learning_rate = 5e-5\nlearning_rate = 3e-5# from 15 to 8 \nmin_learning_rate = 9e-6\n\n#model.load_weights(weight_save_path)\n\ndef test(test_data):\n model.load_weights(weight_save_path)\n # csv_f = open(\"testsubmit.csv\", \"w\") \n # writer = csv.writer(csv_f)\n right = 0 \n # submit = [['question_id', 'answer']]\n submit = {'question_id': [], 'answer': []}\n for val in test_data:\n text, q_id = val[0] , val[1]\n \n x1, x2 = tokenizer.encode(text)\n p, ans_start, ans_end = model.predict([np.array([x1]), np.array([x2])])\n startpos = ans_start[0].argmax(1).tolist()\n \n endpos = ans_end[0].argmax(1).tolist()\n submit['question_id'].append(q_id)\n #print('*' * 10)\n if 1 in startpos and 1 in endpos:\n #print(text)\n\n #print(q_id, p[0], startpos.index(1), endpos.index(1), text[startpos.index(1) - 1 : endpos.index(1)])\n ans = text[startpos.index(1) - 1 : endpos.index(1)]\n 
ans = str(ans.replace(',', ' '))\n # submit.append([q_id, ans.replace(',', ' ')])\n\n submit['answer'].append(ans)\n\n elif 1 in startpos:\n ans_may = text[startpos.index(1): startpos.index(1) + 12]\n ans_may = str(ans_may.replace(',', ' '))\n submit['answer'].append(ans_may)\n \n elif 1 in endpos:\n ans_may = text[endpos.index(1) - 12: endpos.index(1)]\n ans_may = str(ans_may.replace(',', ' '))\n \n submit['answer'].append(ans_may)\n\n\n else: \n print(q_id)\n print(text)\n # submit.append([q_id, ''])\n submit['answer'].append('未知')\n # writer.writerows(submit)\n # print(submit)\n write_csv(submit, './testsubmit.csv')\n \n\n\nif mode == 'test':\n test(test_data)\n import sys \n sys.exit(0)\n\ndef evaluate(valid_data):\n valid_len = len(valid_data)\n return 0 \n model.load_weights(weight_save_path)\n right = 0 \n right_start_may = 0 \n right_end_may = 0 \n for val in valid_data:\n text, ans, ans_cnt = val[0][:maxlen] , val[1], val[2]\n \n x1, x2 = tokenizer.encode(text)\n p_ans_cnt, ans_pos = model.predict([np.array([x1]), np.array([x2])])\n ans_pos = ans_pos[0].argmax(1).tolist()\n print(p_ans_cnt)\n print(ans_pos)\n continue \n #print('*' * 10)\n #print(text)\n pp = p_ans_cnt[0].tolist()\n #if startpos.count(1) > 1:\n # print(startpos)\n # print(endpos)\n\n\n \n if 1 in startpos and 1 in endpos: #\n #print('*' * 10)\n #print(text)\n # print(pp.index(max(pp)), startpos.index(1), endpos.index(1), text[startpos.index(1) - 1 : endpos.index(1)], '-------', ans )\n # if text[startpos.index(1) - 1 : endpos.index(1)] == ans[0]['answer']:\n # pass \n right += 1 \n elif 1 in startpos:\n print(pp.index(max(pp)), 'start_has', startpos.index(1), text[startpos.index(1) :startpos.index(1) + 8], '-------', ans )\n right_start_may += 1 \n elif 1 in endpos:\n print(pp.index(max(pp)), 'end has', endpos.index(1), text[endpos.index(1) - 8 : endpos.index(1)], '-------', ans )\n \n right_end_may += 1 \n else: \n #print(pp, ans)\n # print('* wrong*' * 3)\n pass \n #if ans == '' :right 
+= 1\n \n\n return right / valid_len\n\n#if mode == 'train':\n# evaluate(valid_data)\n# import sys \n# sys.exit(0)\n\nclass Evaluate(Callback):\n def __init__(self):\n self.accs = []\n self.best = 0\n self.passed = 0\n self.stage = 0\n\n def on_batch_begin(self, batch, logs=None):\n \"\"\"\n 第一个epoch用来warmup,第二个epoch把学习率降到最低\n \"\"\"\n if self.passed < self.params['steps']:\n lr = (self.passed + 1.) / self.params['steps'] * learning_rate\n K.set_value(self.model.optimizer.lr, lr)\n self.passed += 1\n elif self.params['steps'] <= self.passed < self.params['steps'] * 2:\n lr = (2 - (self.passed + 1.) / self.params['steps']) * (learning_rate - min_learning_rate)\n lr += min_learning_rate\n K.set_value(self.model.optimizer.lr, lr)\n self.passed += 1\n\n def on_epoch_end(self, epoch, logs=None):\n \n acc = self.evaluate()\n self.accs.append(acc)\n \n if acc >= self.best:\n self.best = acc\n train_model.save_weights(weight_save_path)\n print('acc: %.5f, best acc: %.5f\\n' % (acc, self.best))\n def evaluate(self):\n\n return evaluate(valid_data)\n\n\nevaluator = Evaluate()\ntrain_model.fit_generator(\n train_D.__iter__(),\n steps_per_epoch=len(train_D),\n epochs=300,\n validation_data=valid_D.__iter__(),\n validation_steps=len(valid_D),\n callbacks=[evaluator]\n)\n" }, { "alpha_fraction": 0.45381253957748413, "alphanum_fraction": 0.47532567381858826, "avg_line_length": 38.018211364746094, "blob_id": "3e06dc40d95b6cbdb88bfebfe73669de41d84148", "content_id": "125c2db6bd0a7d55b7f6efb3c08b5f6bb727ed63", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 49986, "license_type": "permissive", "max_line_length": 179, "num_lines": 1208, "path": "/code/main.py", "repo_name": "wangbq18/kesic_army_rc", "src_encoding": "UTF-8", "text": "#! 
-*- coding: utf-8 -*-\n\nimport json\nimport os\n\nimport numpy as np\nimport tensorflow as tf\nimport random as rn\nnp.random.seed(42)\nrn.seed(12345)\ntf.set_random_seed(1234)\n\nos.environ['PYTHONHASHSEED'] = '0'\n\nfrom keras_bert import load_trained_model_from_checkpoint, Tokenizer\nimport codecs\nfrom keras.layers import *\nfrom keras.models import Model\nimport keras.backend as K\nimport time \nfrom keras.optimizers import Adam\nfrom keras.callbacks import Callback\nfrom tqdm import tqdm\nimport jieba\nimport editdistance\nimport re\nimport numpy as np \n\nimport sys\nfrom dbengine import DBEngine\nfrom calc_acc import * \nfrom check_input_feature import * \nfrom post_treat import * \n# from mark_acc_ensure import * \nfrom new_mark_acc_ensure import * \nfrom question_prepro import * \nfrom exceptdata import * \n\nimport argparse\nparser = argparse.ArgumentParser()\nparser.add_argument('--mode', type=str, default='', help='execute mode, eg: train/test/evaluate')\nargs = parser.parse_args()\nif args.mode not in set(['train', 'test', 'evaluate']): \n raise ValueError('Please input correct execute mode')\nmode = args.mode \n\n\nmaxlen = 160\nnum_agg = 7 # agg_sql_dict = {0:\"\", 1:\"AVG\", 2:\"MAX\", 3:\"MIN\", 4:\"COUNT\", 5:\"SUM\", 6:\"不被select\"}\nnum_op = 5 # {0:\">\", 1:\"<\", 2:\"==\", 3:\"!=\", 4:\"不被select\"}\nnum_cond_conn_op = 3 # conn_sql_dict = {0:\"\", 1:\"and\", 2:\"or\"}\ncsel_num = 20 # col cnt 最大为20 \n\n# learning_rate = 5e-5\nlearning_rate = 1e-5 # from 15 to 8 \nmin_learning_rate = 1e-5\nPY2 = sys.version_info[0] == 2\nPY3 = sys.version_info[0] == 3\ntoy = False \ntoy_data_cnt = 200\nexcept_cnt = 0 \n\n\n\nconfig_path = os.path.join(model_bert_wwm_path, 'chinese-bert_chinese_wwm_L-12_H-768_A-12/bert_config.json')\ncheckpoint_path = os.path.join(model_bert_wwm_path, 'chinese-bert_chinese_wwm_L-12_H-768_A-12/bert_model.ckpt')\ndict_path = os.path.join(model_bert_wwm_path, 'chinese-bert_chinese_wwm_L-12_H-768_A-12/vocab.txt')\nweight_save_path = 
os.path.join(model_path, 'weights/nl2sql_finetune_add_div2.weights')\n\n# weight_save_path = os.path.join(model_path, 'weights/nl2sql_finetune_add_div_goodest_0.804.weights')\n\n\n\ndef read_data(data_file, table_file):\n data, tables = [], {}\n with open(data_file) as f:\n for l in f:\n data.append(json.loads(l))\n with open(table_file) as f:\n for l in f:\n l = json.loads(l)\n d = {}\n d['headers'] = l['header']\n d['header2id'] = {j: i for i, j in enumerate(d['headers'])}\n d['content'] = {}\n d['keywords'] = {}\n d['all_values'] = set()\n d['types'] = l['types']\n d['title'] = l['title']\n rows = np.array(l['rows'])\n for i, h in enumerate(d['headers']):\n d['content'][h] = set(rows[:, i])\n if d['types'][i] == 'text':\n d['keywords'][i] = ''\n # get_key_words(d['content'][h])\n else:\n d['keywords'][i] = ''\n\n d['all_values'].update(d['content'][h])\n # print(d['keywords'])\n d['all_values'] = set([i for i in d['all_values'] if hasattr(i, '__len__')])\n tables[l['id']] = d\n if toy:\n data = data[:toy_data_cnt]\n return data, tables\n\nif mode != 'test':\n train_data, train_tables = read_data(\n os.path.join(train_data_path, 'train.json'),\n os.path.join(train_data_path, 'train.tables.json')\n ) # 41522 5013\n\n\nvalid_data, valid_tables = read_data(\n os.path.join(valid_data_path, 'val.json'),\n os.path.join(valid_data_path, 'val.tables.json')\n) # 4396 1197\ntest_data, test_tables = read_data(\n os.path.join(test_file_path, 'final_test.json'),\n os.path.join(test_file_path, 'final_test.tables.json')\n)\n\n\ntoken_dict = {}\n\nwith codecs.open(dict_path, 'r', 'utf8') as reader:\n for line in reader:\n token = line.strip()\n token_dict[token] = len(token_dict)\n\n\nclass OurTokenizer(Tokenizer):\n def _tokenize(self, text):\n R = []\n for c in text:\n if c in self._token_dict:\n R.append(c)\n elif self._is_space(c):\n R.append('[unused1]')\n else:\n R.append('[UNK]') \n return R\n\ntokenizer = OurTokenizer(token_dict)\n\n\ndef seq_padding(X, padding=0, 
maxlen=None):\n if maxlen is None:\n L = [len(x) for x in X]\n ML = max(L)\n else:\n ML = maxlen\n return np.array([\n np.concatenate([x[:ML], [padding] * (ML - len(x))]) if len(x[:ML]) < ML else x for x in X\n ])\n\n\n\nclass data_generator:\n\n def __init__(self, data, tables, batch_size=32): # 32 to 256 for cpu , 32 for gpu \n self.data = data\n self.tables = tables\n self.batch_size = batch_size\n self.steps = len(self.data) // self.batch_size\n if len(self.data) % self.batch_size != 0:\n self.steps += 1\n \n\n def __len__(self):\n return self.steps\n\n def __iter__(self):\n while True:\n if PY2:\n idxs = range(len(self.data))\n elif PY3:\n idxs = [x for x in range(len(self.data))]\n np.random.shuffle(idxs)\n X1, X2, XM, H, HM, SEL, CONN, CSEL0, COP = [], [], [], [], [], [], [], [], []\n CSEL1, CSEL2 = [], [] \n CDIV = [] \n\n for i in idxs:\n d = self.data[i]\n ori_q = d['question']\n\n d['question'] = trans_question_acc(d['question'])\n t = self.tables[d['table_id']]['headers']\n dtypes = self.tables[d['table_id']]['types']\n x1, x2 = tokenizer.encode(d['question']) \n '''\n 这里的xm的左侧和右侧的mask值为0, len(d['question']) 这个长度被标记为1, \n mask其实就是像一个盖子一样,把有用的东西盖起来,或者说标记出来对后续有用的东西。 \n 为什么xm的左侧这个[cls]被标记为0了?因为这个位置铁打不动,长度固定就是1\n 同理xm右侧这个[seq]被标记为0了,因为这个[sep]没啥用这里.\n ''' \n xm = [0] + [1] * len(d['question']) + [0]\n h = []\n for j in t:\n _x1, _x2 = tokenizer.encode(j)\n h.append(len(x1))\n x1.extend(_x1)\n x2.extend(_x2)\n '''\n batch中的每一个hm其实就是告诉你,前len(h)这个长度是有效的,后面都是为了凑 header_max_cnt 而pandding出来的0.\n padding出来的东西对loss无任何意义.\n '''\n hm = [1] * len(h) \n sel = []\n for j in range(len(h)):\n if j in d['sql']['sel']:\n j = d['sql']['sel'].index(j)\n sel.append(d['sql']['agg'][j])\n else:\n sel.append(num_agg - 1) \n\n conn = [d['sql']['cond_conn_op']]\n csel0 = np.zeros(len(d['question']) + 2, dtype='int32') # 这里的0既表示padding,表示当前位置不对应任何表中的列\n csel1 = np.zeros(len(d['question']) + 2, dtype='int32')\n csel2 = np.zeros(len(d['question']) + 2, dtype='int32')\n\n cop = 
np.zeros(len(d['question']) + 2, dtype='int32') + num_op - 1 \n\n\n cdiv = np.zeros(len(d['question']) + 2, dtype='int32')\n\n is_wrong_q = False \n\n\n for cond in d['sql']['conds']:\n # 重新在这里面弄\n if d['question'] in correct_q_set:\n # print(d['question'])\n if dtypes[cond[0]] == 'real':\n _, start_pos, end_pos = check_num_exactly_match(cond[2], d['question'])\n\n # print(start_pos, end_pos, d['question'][start_pos: end_pos + 1])\n\n csel0[start_pos + 1: end_pos + 1 + 1] = cond[0] + 1 \n cop[start_pos + 1: end_pos + 1 + 1] = cond[1] \n else:\n start_pos = d['question'].index(cond[2]) if cond[2] in d['question'] else \\\n d['question'].index(most_similar_2(cond[2], d['question']))\n # print(start_pos, start_pos + len(cond[2]), d['question'][start_pos: start_pos + len(cond[2])])\n csel0[start_pos + 1: start_pos + 1 + len(cond[2])] = cond[0] + 1 \n cop[start_pos + 1: start_pos + 1 + len(cond[2])] = cond[1]\n\n elif d['question'] in no_num_similar_set:\n # print('cond val is{}'.format(cond[2]))\n # print('cond val is {}, q is {} and sim is {}'.format(cond[2], most_similar_2(cond[2], d['question'])))\n\n sim = cond[2] if cond[2] in d['question'] else most_similar_2(cond[2], d['question'])\n start_pos = d['question'].index(sim)\n # print(d['question'])\n # print(start_pos, start_pos + len(sim), d['question'][start_pos: start_pos + len(sim)])\n csel0[start_pos + 1: start_pos + len(sim) + 1] = cond[0] + 1 \n cop[start_pos + 1: start_pos + len(sim) + 1] = cond[1]\n elif d['question'] in q_one_vs_more_col_set:\n # print(d['question'])\n if check_num_exactly_match(cond[2], d['question'])[0] == 1: \n _, start_pos, end_pos = check_num_exactly_match(cond[2], d['question'])\n elif check_num_exactly_match_zero_case(cond[2], d['question'])[0] == 1:\n _, start_pos, end_pos = check_num_exactly_match_zero_case(cond[2], d['question'])\n else:\n raise ValueError('value error')\n if max(csel0[start_pos + 1: end_pos + 1 + 1]) != 0:\n if max(csel1[start_pos + 1: end_pos + 1 + 1]) != 0: \n 
csel2[start_pos + 1: end_pos + 1 + 1] = cond[0] + 1 \n else:\n csel1[start_pos + 1: end_pos + 1 + 1] = cond[0] + 1 \n else:\n csel0[start_pos + 1: end_pos + 1 + 1] = cond[0] + 1 \n\n cop[start_pos + 1: end_pos + 1 + 1] = cond[1] \n\n # print(start_pos, end_pos, d['question'][start_pos: end_pos + 1])\n\n\n\n elif d['question'] in q_need_exactly_match_set:\n _, start_pos, end_pos = check_num_exactly_match(cond[2], d['question'])\n # print(d['question'])\n # print(start_pos, end_pos, d['question'][start_pos: end_pos + 1])\n csel0[start_pos + 1: end_pos + 1 + 1] = cond[0] + 1 \n cop[start_pos + 1: end_pos + 1 + 1] = cond[1] \n elif d['question'] in q_need_exactly_match_more_strinct_set:\n _, start_pos, end_pos = check_num_exactly_match_zero_case(cond[2], d['question'])\n # print(d['question'])\n # print(start_pos, end_pos, d['question'][start_pos: end_pos + 1])\n csel0[start_pos + 1: end_pos + 1 + 1] = cond[0] + 1 \n cop[start_pos + 1: end_pos + 1 + 1] = cond[1] \n\n elif d['question'] in q_text_contain_similar_set:\n if dtypes[cond[0]] == 'real': # 如果是数字的话,通过另外的方法判断\n # print(d['question'])\n find_cnt, start_pos, end_pos = check_num_exactly_match_zero_case(cond[2], d['question'])\n if find_cnt == 1:\n \n # print(start_pos, end_pos, d['question'][start_pos: end_pos + 1])\n csel0[start_pos + 1: end_pos + 1 + 1] = cond[0] + 1 \n cop[start_pos + 1: end_pos + 1 + 1] = cond[1] \n \n elif find_cnt == 0: \n val = most_similar_2(cond[2], d['question'])\n start_pos = d['question'].index(val)\n # print(start_pos, start_pos + len(sim), d['question'][start_pos: start_pos + len(sim)])\n csel0[start_pos + 1: start_pos + len(val) + 1] = cond[0] + 1 \n cop[start_pos + 1: start_pos + len(val) + 1] = cond[1]\n\n else: # 文本\n val = most_similar_2(cond[2], d['question'])\n start_pos = d['question'].index(val)\n # print(start_pos, start_pos + len(sim), d['question'][start_pos: start_pos + len(sim)])\n csel0[start_pos + 1: start_pos + len(val) + 1] = cond[0] + 1 \n cop[start_pos + 1: 
start_pos + len(val) + 1] = cond[1]\n\n elif d['question'] in q_need_col_similar_set:\n header_name = t[cond[0]]\n start_pos, end_pos, match_val = alap_an_cn_mark(d['question'], header_name, cond[2])\n csel0[start_pos + 1: end_pos + 1] = cond[0] + 1 \n cop[start_pos + 1: end_pos + 1] = cond[1] \n\n # print(d['question'])\n # print(start_pos, end_pos, d['question'][start_pos: end_pos])\n else:\n is_wrong_q = True \n\n ab = True \n if ab:\n for idx in range(1, len(csel0) - 1):\n if csel0[idx] != csel0[idx - 1] and csel0[idx - 1] != 0 and csel0[idx] != 0:\n # print(d['question'])\n cdiv[idx] = 1 \n # print(cdiv)\n \n if len(x1) > maxlen or is_wrong_q :\n continue\n X1.append(x1) # bert的输入\n X2.append(x2) # bert的输入\n XM.append(xm) # 输入序列的mask\n H.append(h) # 列名所在位置\n HM.append(hm) # 列名mask\n SEL.append(sel) # 被select的列\n CONN.append(conn) # 连接类型\n CSEL0.append(csel0) # 条件中的列\n CSEL1.append(csel1) # 条件中的列\n CSEL2.append(csel2) # 条件中的列\n COP.append(cop) # 条件中的运算符(同时也是值的标记)\n CDIV.append(cdiv) # \n if len(X1) == self.batch_size:\n X1 = seq_padding(X1)\n X2 = seq_padding(X2)\n XM = seq_padding(XM, maxlen=X1.shape[1])\n H = seq_padding(H)\n HM = seq_padding(HM)\n SEL = seq_padding(SEL)\n CONN = seq_padding(CONN)\n CSEL0 = seq_padding(CSEL0, maxlen=X1.shape[1])\n CSEL1 = seq_padding(CSEL1, maxlen=X1.shape[1])\n CSEL2 = seq_padding(CSEL2, maxlen=X1.shape[1])\n CDIV = seq_padding(CDIV, maxlen=X1.shape[1])\n\n COP = seq_padding(COP, maxlen=X1.shape[1])\n yield [X1, X2, XM, H, HM, SEL, CONN, CSEL0, CSEL1, CSEL2, COP, CDIV], None\n X1, X2, XM, H, HM, SEL, CONN, CSEL0, COP = [], [], [], [], [], [], [], [], []\n CSEL1, CSEL2 = [], []\n CDIV = []\n else:\n pass \n\n\n\n\n\n\ndef seq_gather(x):\n \"\"\"seq是[None, seq_len, s_size]的格式,\n idxs是[None, n]的格式,在seq的第i个序列中选出第idxs[i]个向量,\n 最终输出[None, n, s_size]的向量。\n\n seq_gather[x, h]\n \"\"\"\n seq, idxs = x\n idxs = K.cast(idxs, 'int32') # must int 32 \n return K.tf.batch_gather(seq, idxs)\n\n\ntrain_D = data_generator(train_data, 
train_tables) #get Train data \n#valid_D = data_generator(valid_data, valid_tables) #get Train data \n\n\n\nbert_model = load_trained_model_from_checkpoint(config_path, checkpoint_path, seq_len=None)\n\nfor l in bert_model.layers:\n l.trainable = True\n\n\nx1_in = Input(shape=(None,), dtype='int32')\nx2_in = Input(shape=(None,))\nxm_in = Input(shape=(None,))\nh_in = Input(shape=(None,), dtype='int32')\nhm_in = Input(shape=(None,))\nsel_in = Input(shape=(None,), dtype='int32')\nconn_in = Input(shape=(1,), dtype='int32')\ncsel0_in = Input(shape=(None,), dtype='int32')\ncsel1_in = Input(shape=(None,), dtype='int32')\ncsel2_in = Input(shape=(None,), dtype='int32')\ncop_in = Input(shape=(None,), dtype='int32')\ncdiv_in = Input(shape=(None,), dtype='int32')\n\nx1, x2, xm, h, hm, sel, conn, csel0, csel1, csel2, cop, cdiv = (\n x1_in, x2_in, xm_in, h_in, hm_in, sel_in, conn_in, csel0_in, csel1_in, csel2_in, cop_in, cdiv_in\n)\n\nhm = Lambda(lambda x: K.expand_dims(x, 1))(hm) # header的mask.shape=(None, 1, h_len)\n\nx = bert_model([x1_in, x2_in]) # shape x [?, ?, 768] [batch_size, n_step(n_input_step), hidden_size]\nx4conn = Lambda(lambda x: x[:, 0])(x) #x[:, 0]是获取每个训练样本的输入的第0个step,也就是用来判断条件之间关联运算符 [None, hidden_size]\npconn = Dense(num_cond_conn_op, activation='softmax')(x4conn) # [None, num_cond_conn_op]\n\n# h记录各个header[cls]所在的位置, 这个seq_gather的作用就是把input_x中的header[cls]搞出来。 \nx4h = Lambda(seq_gather)([x, h]) # header [cls] is selected [batch_size, header_step, hidden_size] \npsel = Dense(num_agg, activation='softmax')(x4h) # [bs, header_step, num_agg]\n\npcop = Dense(num_op, activation='softmax')(x) # [bs, q_step, num_op]\n\n\nx_ori = x\nx = Lambda(lambda x: K.expand_dims(x, 2))(x) # shape [batch_size, n_step, 1, hidden_size]\nx4h_ori = x4h\nx4h = Lambda(lambda x: K.expand_dims(x, 1))(x4h) # header cls selected in x4h [None, 1, header_n_step, hidden_size ]\n\n\npcsel_1 = Dense(1)(x) # [None, q_n_step, 1 ,1]\npcsel_2 = Dense(1)(x4h) # [None, 1, h_n_step, 1 ]\npcsel_att = 
Lambda(lambda x: x[0] * x[1])([pcsel_1, pcsel_2]) # [None, q_n_step, h_n_step,1]\n\npcsel_att = Lambda(lambda x: x[..., 0])(pcsel_att) # [None,q_n_step,h_n_step]\n\n#x4h_ori [None, h_n_step,hidden_size】\npcsel_h_part = Lambda(lambda x: K.batch_dot(x[0], x[1]))([pcsel_att, x4h_ori]) # [None, q_n_step, hidden_siz] \n\n\n#pcsel = Lambda(lambda x: K.batch_dot(x[0], x[1]))([att_val, x4h_ori]) # [None, q_n_step, hidden_size]\n\n\n#pcsel = Lambda(lambda x: x[0] + x[1])([x_ori, pcsel_h_part]) # [None, q_n_step, hidden]\n\n\npcsel = concatenate([x_ori, pcsel_h_part], axis=-1) \n\n# pcsel = Dropout(0.2)(pcsel)\n\n# 支持 cdiv \n\n\n# new add \npcsel = Dense(1200, activation='relu')(pcsel) #[None, q_n_step, 1200] 看看这个对于缓解 loss 作用大不大 梯度爆炸\npcdiv = Dense(2, activation='softmax')(pcsel)\n\n# # ## add Drop out layer \n# # pcsel = Dense(1200, activation='relu')(pcsel) #[None, q_n_step, 1200]\n# pcsel = Lambda(lambda x: x, output_shape=lambda s:s)(pcsel)\n# pcsel = Reshape((-1, 3, 400))(pcsel)\n# #pcsel = Lambda(lambda x: K.reshape(x, (-1, 3, 400)))(pcsel) # header的mask.shape=(None, 1, h_len)\n\n\n\n\n\n\n\n# pcsel = concatenate([x_ori, pcsel_h_part], axis=-1) \n\n\npcsel0 = Dense(csel_num, activation='softmax')(pcsel) # [bs, q_step, 3, num_op]\npcsel1 = Dense(csel_num, activation='softmax')(pcsel) # [bs, q_step, 3, num_op]\npcsel2 = Dense(csel_num, activation='softmax')(pcsel) # [bs, q_step, 3, num_op]\n\n\n\n\n\n# Model的参数 def __init__(self, inputs, outputs, name=None): \nmodel = Model(\n [x1_in, x2_in, h_in, hm_in], # inputs \n [psel, pconn, pcop, pcsel0, pcsel1, pcsel2, pcdiv] # outputs \n)\n\n# shuai 这里看看是不是要加载一个已经训练出来的模型??\n\ntrain_model = Model(\n [x1_in, x2_in, xm_in, h_in, hm_in, sel_in, conn_in, csel0_in, csel1_in, csel2_in, cop_in, cdiv_in],\n [psel, pconn, pcop, pcsel0, pcsel1, pcsel2, pcdiv]\n)\n\n'''\nmask存在的意义是什么? 为什么被mask的位置要用1,而padding要用0??? 
\n(1) mask的位置说明这个位置的数据是有效的,我们后续会专门把为1的部分拿出来,比如计算损失等\n(2) 在训练过程中,数据会以batch的形式进行组织,例如batch_size=16, 在这16组训练数据中,以question长度为例,这16组数据的question长度是不同的,那么一般会这样做:\n 获取这16个question中的长度最大值,然后question中的有效部分会通过mask 1值来记录,而question中未达到max_len的部分会通过padding 0标记.\n 所以其实这个mask并没有很神奇,只不过是为了标记一下哪些位置是\"有用的\",而且后续计算损失等会\"保留\"的.\n'''\nxm = xm # question的mask.shape=(None, x_len) \n\nhm = hm[:, 0] # header的mask.shape=(None, h_len) # 这个操作就是去掉1,torch中有squeeze压紧方法做这个事情\n\n# condition mask, [1, 0, 1, 1,1 ] 如果元素为1,说明当前位置的op不是空操作\ncm = K.cast(K.not_equal(cop, num_op - 1), 'float32') # conds的mask.shape=(None, x_len)\n\n# 注意hm & xm 用在啥地方了\n\n'''\n例子1:\n 以psel_loss = K.sum(psel_loss * hm) / K.sum(hm) 为例介绍这里的hm的用处.\n首先 hm作为header mask, 里面存储的都是header[cls]的位置\n hm的shape为[bs, header_cnt], header_cnt这个维度里面的值有一些padding的0 \n psel_loss 的shape为 [bs, header_cnt] ? \n psel_loss * hm 的shape为 [bs, header_cnt] 相乘之后即只关注有效header的损失,那些padding出来的不要加入损失计算\n k.sum(psel_loss * hm) keras.backend中的sum和普通的sum不同,他会将矩阵中的所有值相加,最后结果为一个值\n\n几乎所有参数都是带有batch_size维度的,因为计算损失是在整个batch上计算的\n''' \n\nprint(pcsel.get_shape()[2])\nprint(type(pcsel))\n#pcsel0 = pcsel[32, int(pcsel.get_shape()[2]), 1, 20]\n\n#print(pcsel0.get_shape())\n\npsel_loss = K.sparse_categorical_crossentropy(sel_in, psel)\npsel_loss = K.sum(psel_loss * hm) / K.sum(hm) # case: test10 padding位置的header不纳入损失计算\npconn_loss = K.sparse_categorical_crossentropy(conn_in, pconn)\npconn_loss = K.mean(pconn_loss) # 取均值,是为了算在整个batch中的损失\npcop_loss = K.sparse_categorical_crossentropy(cop_in, pcop) \npcop_loss = K.sum(pcop_loss * xm) / K.sum(xm)\n\n\n\n\npcsel0_loss = K.sparse_categorical_crossentropy(csel0_in, pcsel0)\npcsel0_loss = K.sum(pcsel0_loss * xm * cm) / K.sum(xm * cm)\n\npcsel1_loss = K.sparse_categorical_crossentropy(csel1_in, pcsel1)\npcsel1_loss = K.sum(pcsel1_loss * xm * cm) / K.sum(xm * cm)\n\n\npcsel2_loss = K.sparse_categorical_crossentropy(csel2_in, pcsel2)\npcsel2_loss = K.sum(pcsel2_loss * xm * cm) / K.sum(xm * cm)\n\npcdiv_loss = 
K.sparse_categorical_crossentropy(cdiv_in, pcdiv)\npcdiv_loss = K.sum(pcdiv_loss * xm * cm) / K.sum(xm * cm)\n\n\nloss = psel_loss + pconn_loss + pcop_loss + pcsel0_loss + pcsel1_loss + pcsel2_loss + pcdiv_loss\n\ntrain_model.add_loss(loss)\ntrain_model.compile(optimizer=Adam(learning_rate))\n# train_model.summary()\n\nmodel.load_weights(weight_save_path) # \n\nexcept_tr_cnt = 0 \n\n\nlog_file = open('./asser_log.log', 'w') \n\nassert_wrong_log_file = open('./asser_wrong_log.log', 'w') \n\ndef nl2sql(question, table):\n \"\"\"输入question和headers,转SQL\n \"\"\"\n try:\n question = trans_question_acc(question) \n question = trans_question_short_year(question)\n\n # shuai \n\n question = question.replace('负数', '小于0')\n question = question.replace('负值', '小于0')\n question = question.replace('为负', '小于0')\n question = question.replace('正数', '大于0')\n question = question.replace('正值', '大于0')\n question = question.replace('为正', '大于0')\n question = question.replace('没什么要求', '不限')\n question = question.replace('没要求', '不限')\n\n\n except: \n pass \n #raise ValueError\n \n x1, x2 = tokenizer.encode(question)\n if question in set([\n '总收入高于500万或者座位数大于5000个的场馆有多少个啊',\n '有多少个场馆的座位超过了5000个或者收入超过了500万'\n ]):\n print(question)\n print(question)\n h = []\n for i in table['headers']:\n _x1, _x2 = tokenizer.encode(i)\n # h这里记录了每个header的[cls]所在位置\n h.append(len(x1))\n x1.extend(_x1)\n x2.extend(_x2)\n hm = [1] * len(h) # hm为header在[cls]位置的mask, hm的长度,正好是header的个数,当然header的个数和[cls]的个数是一致的\n\n psel, pconn, pcop, pcsel0, pcsel1, pcsel2, pcdiv = model.predict([\n np.array([x1]),\n np.array([x2]),\n np.array([h]),\n np.array([hm])\n ])\n\n\n if max(pcsel2[0][1:len(question) + 1].argmax(1)) > 0:\n pass \n # print('pcsel is > 0 with q \\n {}\\nand pcsel is :{}\\n'.format(question, pcsel2[0][1:len(question) + 1].argmax(1)))\n \n\n pcsel_ori = pcsel\n # pcsel0 = np.squeeze(pcsel[:,:,:1,:], axis=(2,)) # numpy 不能是tensor哈 ,返回是numpy格式数据\n\n # test \n if max(pcdiv[0][0:len(question) + 1].argmax(1)) > 0: 
\n pass \n \n # print(question)\n # print(pcdiv[0][0:len(question) + 1].argmax(1))\n # print(pcop[0][0:len(question) + 1].argmax(1))\n # print(pcsel0[0][0:len(question) + 1].argmax(1)) \n\n # print('\\ncop and csel is ---------\\n')\n # pcsel1 = np.squeeze(pcsel[:,:,1:2,:], axis=(2,))\n\n '''\n if max(pcsel1[0][1:len(question) + 1].argmax(1)) > 0: \n print('\\nmul col ---------\\n')\n print(question)\n print(pcop[0][0:len(question) + 1].argmax(1))\n print(pcsel0[0][0:len(question) + 1].argmax(1))\n print(pcsel1[0][0:len(question) + 1].argmax(1))\n else: \n print('\\single op and col ---------\\n')\n print(question)\n print(pcop[0][0:len(question) + 1].argmax(1))\n print(pcsel0[0][0:len(question) + 1].argmax(1))\n \n print('\\pcdiv is ---------\\n')\n print(pcdiv[0][0:len(question) + 1].argmax(1))\n print('\\pconn is ---------\\n')\n print(pconn[0, 1:].argmax())\n '''\n\n\n\n\n\n R = {'agg': [], 'sel': []}\n # psel是对header的[CLS]位置做处理的出来的,各个header的聚合或者是否被select的概率。\n # psel shape [1, 9, 7] => [None, header_col_cnt, op]\n for i, j in enumerate(psel[0].argmax(1)): \n if j != num_agg - 1: # num_agg-1类是不被select的意思\n # 7中状态拆分成下面两种,1种是是否被选择,另外一种是agg operation\n R['sel'].append(i)\n R['agg'].append(j)\n conds = []\n v_op = -1\n\n\n # pcop: shape [bs, seq_len(n_step), num_op] 下面截取了:len(question) + 1 \n # 截取之后shape: [bs, question_len] \n # 在这里的bs=1, 貌似是对每一个样例做的处理.\n # pcop (1, 103, 5) => [None, question+header_len, op_len]\n # 这里的pcop的第二个维度为105\n # 105 = 32(question len) + 2(question cls&sep)+ 53(all header col len) +18(header cls+sep,total 9 column in table)\n # 下面取的时候只取了 [0:33]\n\n unit_first_list = [] \n unit_second_list = []\n for i, j in enumerate(pcop[0, :len(question) + 1].argmax(1)): #[,,op_cnt]\n # 这里结合标注和分类来预测条件\n if i == 0: continue # start 0 is of no use \n if j != num_op - 1: # num_op: {0:\">\", 1:\"<\", 2:\"==\", 3:\"!=\", 4:\"不被select\"}\n if v_op != j:\n if v_op != -1:\n v_end = v_start + len(v_str)\n v_start_idx, v_end_idx, smooth_val = smooth_numeric(v_start 
- 1, v_end - 1, question)\n unit_first, unit_second = get_append_unit(v_start - 1, v_end - 1, question) # unit_firt: 亿 unit_second: 元\n \n # 添加div中的信息\n # print(max(pcdiv[0][v_start: v_end].argmax(1)))\n if max(pcdiv[0][v_start: v_end].argmax(1)) > 0:# 0 \n # print(smooth_val)\n if 1 in pcdiv[0][v_start: v_end].argmax(1):\n\n entity_start_pos_list = [v_start + 1 + idx for idx, mark in enumerate(pcdiv[0][v_start + 1: v_end].argmax(1)) if mark == 1]\n if entity_start_pos_list[0] != v_start:\n entity_start_pos_list.insert(0, v_start)\n if entity_start_pos_list[-1] != v_end:\n entity_start_pos_list.append(v_end)\n\n for idx in range(len(entity_start_pos_list) - 1):\n new_s = entity_start_pos_list[idx]\n new_e = entity_start_pos_list[idx + 1]\n # print(question[new_s - 1: new_e - 1])\n\n csel = pcsel0[0][new_s: new_e].mean(0).argmax() - 1 \n v_str1 = question[new_s - 1: new_e - 1] \n\n\n if v_str1 is not None and csel >= 0:\n \n unit_first_list.append(unit_first)\n unit_second_list.append(unit_second)\n conds.append((csel, v_op, v_str1)) \n\n \n # print(conds)\n else: \n csel = pcsel0[0][v_start: v_end].mean(0).argmax() - 1 \n\n if v_str is not None: \n v_str = smooth_val\n unit_first_list.append(unit_first)\n unit_second_list.append(unit_second)\n conds.append((csel, v_op, v_str))\n\n # print('here--')\n # print(pcsel1[0][v_start: v_end].argmax(1))\n\n if pcsel1[0][v_start: v_end].mean(0).argmax() - 1 >= 0: # add \n csel = pcsel1[0][v_start: v_end].mean(0).argmax() - 1 \n if csel >= 0: \n '''\n print('warnings_ok ---- ')\n print(question)\n print((csel, v_op, v_str))\n '''\n\n if v_str is not None: \n v_str = smooth_val\n unit_first_list.append(unit_first)\n unit_second_list.append(unit_second)\n conds.append((csel, v_op, v_str))\n \n if pcsel2[0][v_start: v_end].mean(0).argmax() - 1 > 0: # add \n csel = pcsel2[0][v_start: v_end].mean(0).argmax() - 1 \n '''\n print('warnings_ok ---- ')\n print(question)\n print((csel, v_op, v_str))\n '''\n\n if v_str is not None: \n 
v_str = smooth_val\n unit_first_list.append(unit_first)\n unit_second_list.append(unit_second)\n conds.append((csel, v_op, v_str))\n \n \n \n v_start = i \n v_op = j # v_op在这里第一次赋值\n v_str = question[i - 1] # v_str在这里第一次赋值\n\n\n \n else: # 在 这里 j == num_op-1,说明当前运算符在question上延续着\n v_str += question[i - 1]\n\n\n\n\n if i == len(question):\n v_end = v_start + len(v_str)\n\n v_start_idx, v_end_idx, smooth_val = smooth_numeric(v_start - 1, v_end - 1, question)\n unit_first, unit_second = get_append_unit(v_start - 1, v_end - 1, question) # unit_firt: 亿 unit_second: 元\n \n # 添加div中的信息\n # print(max(pcdiv[0][v_start: v_end].argmax(1)))\n if max(pcdiv[0][v_start: v_end].argmax(1)) > 0:# 0 \n # print(smooth_val)\n if 1 in pcdiv[0][v_start: v_end].argmax(1):\n\n entity_start_pos_list = [v_start + 1 + idx for idx, mark in enumerate(pcdiv[0][v_start + 1: v_end].argmax(1)) if mark == 1]\n if entity_start_pos_list[0] != v_start:\n entity_start_pos_list.insert(0, v_start)\n if entity_start_pos_list[-1] != v_end:\n entity_start_pos_list.append(v_end)\n\n for idx in range(len(entity_start_pos_list) - 1):\n new_s = entity_start_pos_list[idx]\n new_e = entity_start_pos_list[idx + 1]\n # print(question[new_s - 1: new_e - 1])\n\n csel = pcsel0[0][new_s: new_e].mean(0).argmax() - 1 \n v_str1 = question[new_s - 1: new_e - 1] \n\n if v_str1 is not None and csel >= 0:\n \n unit_first_list.append(unit_first)\n unit_second_list.append(unit_second)\n conds.append((csel, v_op, v_str1)) \n # print(conds)\n else:\n\n # print('here_break--')\n # print(pcsel1[0][v_start: v_end].argmax(1))\n\n if pcsel1[0][v_start: v_end].mean(0).argmax() - 1 >= 0: # add \n csel = pcsel1[0][v_start: v_end].mean(0).argmax() - 1 \n if csel >= 0: \n '''\n print('warnings_ok ---- ')\n print(question)\n print((csel, v_op, v_str))\n '''\n\n if v_str is not None: \n v_str = smooth_val\n unit_first_list.append(unit_first)\n unit_second_list.append(unit_second)\n conds.append((csel, v_op, v_str))\n \n if pcsel2[0][v_start: 
v_end].mean(0).argmax() - 1 >= 0: # add \n csel = pcsel2[0][v_start: v_end].mean(0).argmax() - 1 \n '''\n print('warnings_ok ---- ')\n print(question)\n print((csel, v_op, v_str))\n\n if v_str is not None: \n v_str = smooth_val\n unit_first_list.append(unit_first)\n unit_second_list.append(unit_second)\n conds.append((csel, v_op, v_str))\n '''\n \n\n\n csel = pcsel0[0][v_start: v_end].mean(0).argmax() - 1 \n if v_str is not None:\n\n v_str = smooth_val\n\n unit_first_list.append(unit_first)\n unit_second_list.append(unit_second)\n conds.append((csel, v_op, v_str))\n # print(conds)\n break \n\n\n\n elif v_op != -1: # 遇到了\"not selected\" 了\n v_end = v_start + len(v_str)\n # pcsel (1, 105, 9) => (None, q_len+h_len, col_cnt ) \n # 第二个维度为105, 105 = 32(question len)+2(question cls&sep)+ 53(all header col len) +18(header cls+sep,total 9 column in table\n # pcsel的作用是定位question中的每个字段对应header中的哪个列,所以最后一维为9,当前测试样例的表有9列\n\n v_start_idx, v_end_idx, smooth_val = smooth_numeric(v_start - 1, v_end - 1, question)\n unit_first, unit_second = get_append_unit(v_start - 1, v_end - 1, question) # unit_firt: 亿 unit_second: 元\n \n\n\n # 添加div中的信息\n # print(max(pcdiv[0][v_start: v_end].argmax(1)))\n if max(pcdiv[0][v_start: v_end].argmax(1)) > 0: \n '''\n print(question)\n print(smooth_val)\n print(v_start)\n print(v_end)\n print(pcdiv[0][v_start: v_end].argmax(1))\n '''\n \n if 1 in pcdiv[0][v_start: v_end].argmax(1) and pcdiv[0][v_start].argmax() != 1 :\n\n entity_start_pos_list = [v_start + 1 + idx for idx, mark in enumerate(pcdiv[0][v_start + 1: v_end].argmax(1)) if mark == 1]\n # print(entity_start_pos_list)\n if entity_start_pos_list[0] != v_start:\n entity_start_pos_list.insert(0, v_start)\n if entity_start_pos_list[-1] != v_end:\n entity_start_pos_list.append(v_end)\n\n for idx in range(len(entity_start_pos_list) - 1):\n new_s = entity_start_pos_list[idx]\n new_e = entity_start_pos_list[idx + 1]\n # print(question[new_s - 1: new_e - 1])\n\n csel = pcsel0[0][new_s: 
new_e].mean(0).argmax() - 1 \n v_str1 = question[new_s - 1: new_e - 1] \n\n\n\n\n\n\n if v_str1 is not None and csel >= 0:\n \n unit_first_list.append(unit_first)\n unit_second_list.append(unit_second)\n conds.append((csel, v_op, v_str1)) \n # print(conds)\n else:\n\n csel = pcsel0[0][v_start: v_end].mean(0).argmax() - 1 \n\n\n # 做一些处理\n v_e = v_end - 1 \n untreat_unit = ''\n if v_e < len(question) and len(re.findall(regex_tail, question[v_e])) > 0:\n untreat_unit = re.findall(regex_tail, question[v_e])[0]\n # print('unit is{}, first{}'.format(untreat_unit))\n if v_e + 1 < len(question) and len(re.findall(regex_tail, question[v_e + 1])) > 0:\n untreat_unit += re.findall(regex_tail, question[v_e + 1])[0]\n if untreat_unit != '':\n pass \n # print('untreat_unit is not null and is{} and q is {}\\n and v_str is {} \\n '.format(untreat_unit, question, v_str))\n \n\n\n if v_str is not None:\n v_str = smooth_val\n unit_first_list.append(unit_first)\n unit_second_list.append(unit_second)\n conds.append((csel, v_op, v_str))\n\n # print('here--')\n # print(pcsel1[0][v_start: v_end].argmax(1))\n\n\n\n\n\n if pcsel1[0][v_start: v_end].mean(0).argmax() - 1 >= 0: # add \n csel = pcsel1[0][v_start: v_end].mean(0).argmax() - 1 \n if csel >= 0:\n # print('warnings_ok ---- ')\n # print(question)\n # print((csel, v_op, v_str))\n\n if v_str is not None: \n v_str = smooth_val\n unit_first_list.append(unit_first)\n unit_second_list.append(unit_second)\n conds.append((csel, v_op, v_str))\n \n if pcsel2[0][v_start: v_end].mean(0).argmax() - 1 >= 0: # add \n csel = pcsel2[0][v_start: v_end].mean(0).argmax() - 1 \n # print('warnings_ok ---- ')\n # print(question)\n # print((csel, v_op, v_str))\n\n if v_str is not None: \n v_str = smooth_val\n unit_first_list.append(unit_first)\n unit_second_list.append(unit_second)\n conds.append((csel, v_op, v_str))\n \n \n\n\n v_op = -1\n R['conds'] = set() # 集合自己去重\n\n idx = 0 \n\n for i, j, k in conds:\n if re.findall('[^\\d\\-\\.\\%]', k): # 找到非数字\n j 
= 2 # 非数字只能用等号,,\n # and len(re.findall('[\\d\\-\\.\\%]', k)) != len(k)\n # re.findall('[^\\d\\-\\.\\%]', k)\n\n\n # 如果目标列的所有值里面没有除数字之外的东西,那么就不要模糊匹配了\n # try:\n # col_table_vals = table['content'][table['headers'][i]]\n # except: col_table_vals = {'--------'}\n # no_other_text = True \n # print(col_table_vals)\n # for val in col_table_vals:\n # print('val is {}'.format(val))\n # if re.findall('[^\\d\\-\\.\\%]', str(val)): no_other_text = False \n\n \n\n\n if j == 2 : # 这里判定出条件运算符是等号哦 等号也有可能是数字的\n\n\n ori_k = k \n before_treat = 'before of i,j, k is({},{},{})\\n'.format(i, j, k)\n before_ikj = (i, j, k)\n \n # sqlite3 text where col1='2' 与 col2='2.0'执行结果不一致\n if k not in table['all_values']:\n k_sim = most_similar_new(k, list(table['all_values'])) \n if k_sim is not None: k = k_sim \n idx_except = False \n try:\n h = table['headers'][i]\n except:\n global except_cnt\n except_cnt += 1 \n idx_except = True \n h = table['headers'][0]\n # 然后检查值对应的列是否正确,如果不正确,直接修正列名\n if k not in table['content'][h]: # 发现标记出来的值不在预测出来所属的列不一致。 \n for r, v in table['content'].items():\n if k in v:\n i = table['header2id'][r] \n break\n\n # if not re.findall('[^\\d\\-\\.\\%]', ori_k):\n # # 字符串还是要精准匹配的,比如 2015跟2015.0\n # after_ijk = (i, j, k)\n # if before_ikj != after_ijk:\n # print(before_treat)\n # print(types[i])\n # print('after of i,j, k is({},{},{})\\n'.format(i, j, k))\n\n\n unit_first = None if unit_first_list[idx] == '' else unit_first_list[idx]\n unit_second = None if unit_second_list[idx] == '' else unit_second_list[idx]\n idx += 1 \n if not re.findall('[^\\d]', str(k)) and i <= len(table['headers']) - 1: ## 需要改 不要用 isnumeric() 并要处理百分号\n # 添加一个预处理,预测的数字左右缺失的话 \n ori_k = k \n try:\n right_str = \"\\n{},'{}','{}',format_of_number={}, format_desc={}\\n\".format(k, table['headers'][i], table['title'], unit_first, unit_second)\n\n assert_str = \"assert number_trans({},'{}','{}',format_of_number='{}', format_desc='{}') ==\".format(k, table['headers'][i], table['title'], unit_first, 
unit_second)\n \n k = number_trans(int(k), table['headers'][i], table['title'], format_of_number=unit_first, format_desc=unit_second)\n # print('number_trans right {} \\n right param is \\n{}\\n'.format(question, right_str))\n\n max_col_val = None \n have_text_in_col = False \n for col_val in table['content'][table['headers'][i]]:\n if not re.findall('[^\\d\\.]', str(col_val)): \n if max_col_val is None: \n max_col_val = float(col_val) \n else: \n max_col_val = max(max_col_val, float(col_val))\n else: \n have_text_in_col = True \n break \n \n \n if '万平' in question:\n if max_col_val is not None and not re.findall('[^\\d\\.]', str(k)) and float(k) > max_col_val * 10:\n k = ori_k \n print('--- warining -- \\n use ori {}'.format(k))\n print(question)\n \n print(table['content'][table['headers'][i]])\n print('max is {}'.format(max_col_val))\n assert_str = assert_str + '{}'.format(k)\n print(assert_str)\n\n # log_file.write(assert_str)\n\n \n except:\n # print('number_trans error {} \\n'.format(question))\n # print(\"\\n{},'{}','{}',format_of_number={}, format_desc={}\\n\".format(k, table['headers'][i], table['title'], unit_first, unit_second))\n # raise \n assert_str = assert_str + '{}\\n'.format(k)\n # assert_wrong_log_file.write(assert_str)\n\n \n #这里如果k是 数字的话,包含百分数,那么去掉后面的百分号\n if not re.findall('[^\\d\\-\\.\\%]', str(k)) and '%' in str(k):\n # print('\\n get percent val {} '.format(k))\n k = str(k)[:-1]\n # print('percent after treate {}\\n'.format(k))\n R['conds'].add((i, j, k))\n\n if max(pcsel2[0][1:len(question) + 1].argmax(1)) > 0:\n pass \n # print('pcsel cond is \\n {}\\n'.format(R['conds']))\n \n R['conds'] = list(R['conds'])\n if len(R['conds']) <= 1: # 条件数少于等于1时,条件连接符直接为0\n R['cond_conn_op'] = 0\n else:\n # pconn (1,3) => (None, con_conn_cnt)\n R['cond_conn_op'] = 1 + pconn[0, 1:].argmax() # 不能是0\n return R\n\n\ndef evaluate(data, tables):\n pbar = tqdm()\n F = open('../data/logs/evaluate_pred.json', 'w')\n pred_sql_list = []\n gd_sql_list = []\n 
tables_list = [] \n\n for i, d in enumerate(data):\n question = d['question']\n table = tables[d['table_id']]\n R = nl2sql(question, table) # \n # print(\"predicted is {}\\n\".format(R))\n gd_sql_list.append(d['sql'])\n pred_sql_list.append(R)\n tables_list.append(d['table_id'])\n pbar.update(1)\n d['sql_pred'] = R\n \n if PY2:\n s = json.dumps(d, ensure_ascii=False) # add str \n F.write(s.encode('utf-8') + '\\n')\n elif PY3:\n s = json.dumps(eval(str(R)), ensure_ascii=False) \n F.write(s + '\\n') # \n F.close()\n acc = check_part_acc(pred_sql_list, gd_sql_list, tables_list, data)\n print(' Acc in evaluate data set is {}'.format(1 - acc[-1])) # data here is valid data\n pbar.close()\n return 1 - acc[-1]\n\nif mode == 'evaluate':\n evaluate(valid_data, valid_tables)\n import sys\n sys.exit(0) \n\n\ndef test(data, tables, submit_path):\n pbar = tqdm()\n F = open(submit_path, 'w')\n for i, d in enumerate(data):\n question = d['question']\n table = tables[d['table_id']]\n\n\n R = nl2sql(question, table)\n pbar.update(1)\n if PY2:\n s = json.dumps(R, ensure_ascii=False)\n F.write(s.encode('utf-8') + '\\n')\n elif PY3:\n sql_pred = eval(str(R))\n F.writelines(json.dumps(sql_pred, ensure_ascii=False) + '\\n') \n F.close()\n pbar.close()\n\nif mode == 'test':\n print(\"Start create test result ....\")\n # submit_path = '../submit/submit-{}.json'.format(time.strftime('%Y-%m-%d_%H:%M:%S', time.localtime(time.time())))\n test(test_data, test_tables, test_submit_path) # add by shuai should used !!!!!\n print('Finish create test result and saved in {}'.format(test_submit_path))\n import sys\n sys.exit(0)\n\n\nclass Evaluate(Callback):\n def __init__(self):\n self.accs = []\n self.best = 0\n self.passed = 0\n self.stage = 0\n\n def on_batch_begin(self, batch, logs=None):\n \"\"\"\n 第一个epoch用来warmup,第二个epoch把学习率降到最低\n \"\"\"\n if self.passed < self.params['steps']:\n lr = (self.passed + 1.) 
/ self.params['steps'] * learning_rate\n K.set_value(self.model.optimizer.lr, lr)\n self.passed += 1\n elif self.params['steps'] <= self.passed < self.params['steps'] * 2:\n lr = (2 - (self.passed + 1.) / self.params['steps']) * (learning_rate - min_learning_rate)\n lr += min_learning_rate\n K.set_value(self.model.optimizer.lr, lr)\n self.passed += 1\n\n def on_epoch_end(self, epoch, logs=None):\n \n acc = self.evaluate()\n self.accs.append(acc)\n \n if acc >= self.best:\n self.best = acc\n train_model.save_weights(weight_save_path)\n print('acc: %.5f, best acc: %.5f\\n' % (acc, self.best))\n def evaluate(self):\n return evaluate(valid_data, valid_tables)\n\n\n\nevaluator = Evaluate()\n\nif __name__ == '__main__':\n train_model.fit_generator(\n train_D.__iter__(),\n steps_per_epoch=len(train_D),\n epochs=40,\n callbacks=[evaluator]\n )\n\nelse:\n train_model.load_weights(weight_save_path)\n" }, { "alpha_fraction": 0.5207118391990662, "alphanum_fraction": 0.5462497472763062, "avg_line_length": 37.98725509643555, "blob_id": "3fe5e24e21981e14b291a565bc79e63a0675c2d2", "content_id": "64ccf2d62336c08341a132d7443c6b7abc0faa46", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 42885, "license_type": "permissive", "max_line_length": 190, "num_lines": 1020, "path": "/code/data_treat.py", "repo_name": "wangbq18/kesic_army_rc", "src_encoding": "UTF-8", "text": "import random \n\nimport re \nimport sys \ndef mail_test():\n \"\"\"\n 一个解析出邮箱的小测试\n # 括号是用来涵盖输出的值要哪些的\n # @是否转义都可以\n \"\"\"\n str1 = 'aaf ssa@ss.net asdf asdb@163.com.cn asdf ss-a@ss.net asdf asdd.cba@163.com afdsaf'\n reg_str1 = r'([\\w-]+(\\.[\\w-]+)*@[\\w-]+(\\.[\\w-]+)+)'\n mod = re.compile(reg_str1)\n items = mod.findall(str1)\n for item in items:\n print(item)\n\nimport os \nimport sys \nimport pandas as pd \nimport jieba \nimport editdistance \nimport numpy as np \n\ndef get_stop_words():\n file_name = './stopwords.txt'\n stop_word_set = set([])\n 
with open(file_name, 'r') as f:\n for line in f.readlines():\n stop_word_set.add(line.strip())\n # print(stop_word_set)\n return stop_word_set\n\n\n\n\n\nstop_words_set = get_stop_words()\n# sys.exit(0)\n\n\nre_ans = r'content([1-5])@(.*?)@content([1-5])+'\n\nregex_ans = re.compile(re_ans)\n\nregex_supp = regex_ans\ntest_path = '../data/test_data_r0.csv'\ntrain_path = '../data/train_round_0.csv'\n\nret = regex_supp.findall('@content1@中国@content1@@content2@美国@content2@')\nret = regex_supp.findall('@content1@中国@content1@')\nprint(ret)\n\n# sys.exit(0)\ndef text_prepro(text):\n return re.sub('\\s', '' ,text)\n\ndef most_similar(source, target_list):\n \"\"\" \n  这个只能针对于文本进行匹配的\n 从词表中找最相近的词(当无法全匹配的时候)\n \"\"\"\n score_list = [editdistance.eval(source, t) for t in target_list]\n\n return \n if len(target_list) == 0:\n return None \n s_set = set([item for item in source])\n contain_score = []\n un_contain_score = []# target当中相比于source多出来的部分\n for target in target_list:\n t_set = set([t for t in target])\n contain_score.append(len(s_set & t_set))\n # un_contain_score.append(len(t_set.difference(s_set))) #\n un_contain_score.append(0) # 先不扣分了...\n char_match_score = [contain_score[idx] for idx in range(len(target_list))]\n\n # 如果最高匹配分数为0,说明一个匹配的都没有,,那么返回None \n if max(char_match_score) == 0: return None \n\n # 下面计算编辑距离分数\n e_d_score = [ len(source) - editdistance.eval(source, t) for t in target_list] \n\n final_score = [char_match_score[idx] + e_d_score[idx] for idx in range(len(target_list))]\n\n return target_list[final_score.index(max(final_score))]\n\ndef most_similar_2(w, s):\n \"\"\"从句子s中找与w最相近的片段,\n 借助分词工具和ngram的方式尽量精确地确定边界。\n w : cond value \n s: question \n 输入和输出的相似度函数不应该相同\n 对于输入来说: 进行自动标注的时候,是按照相邻原则来标记的,所以输入采用的相似度方法是n-gram \n 对于输出来说: xxx\n \"\"\"\n sw = jieba.lcut(s)\n sl = [x for x in list(sw)]\n sl.extend([char for char in s])\n sl.extend([''.join(i) for i in zip(sw, sw[1:])]) # 2-gram \n sl.extend([''.join(i) for i in zip(sw, sw[1:], sw[2:])]) # 3-gram \n 
sl.extend([''.join(i) for i in zip(sw, sw[1:], sw[2:], sw[3:])]) # 4-grarm \n sl.extend([''.join(i) for i in zip(sw, sw[1:], sw[2:], sw[3:], sw[4:])]) # 5-gram\n sl.extend([''.join(i) for i in zip(sw, sw[1:], sw[2:], sw[3:], sw[4:], sw[5:])]) # 6-gram\n sl.extend([''.join(i) for i in zip(sw, sw[1:], sw[2:], sw[3:], sw[4:], sw[5:], sw[6:])]) # 7-gram\n sl.extend([''.join(i) for i in zip(sw, sw[1:], sw[2:], sw[3:], sw[4:], sw[5:], sw[6:], sw[7:])]) # 8-gram \n sl.extend([''.join(i) for i in zip(sw, sw[1:], sw[2:], sw[3:], sw[4:], sw[5:], sw[6:], sw[7:], sw[8:])]) # 9-gram \n sl.extend([''.join(i) for i in zip(sw, sw[1:], sw[2:], sw[3:], sw[4:], sw[5:], sw[6:], sw[7:], sw[8:], sw[9:])]) # 10-gram \n sl.extend([''.join(i) for i in zip(sw, sw[1:], sw[2:], sw[3:], sw[4:], sw[5:], sw[6:], sw[7:], sw[8:], sw[9:], sw[10:])]) # 11-gram \n sl.extend([''.join(i) for i in zip(sw, sw[1:], sw[2:], sw[3:], sw[4:], sw[5:], sw[6:], sw[7:], sw[8:], sw[9:], sw[10:], sw[11:])]) # 12-gram \n sl.extend([''.join(i) for i in zip(sw, sw[1:], sw[2:], sw[3:], sw[4:], sw[5:], sw[6:], sw[7:], sw[8:], sw[9:], sw[10:], sw[11:], sw[12:])]) # 13-gram \n sl.extend([''.join(i) for i in zip(sw, sw[1:], sw[2:], sw[3:], sw[4:], sw[5:], sw[6:], sw[7:], sw[8:], sw[9:], sw[10:], sw[11:], sw[12:], sw[13:])]) # 14-gram \n sl.extend([''.join(i) for i in zip(sw, sw[1:], sw[2:], sw[3:], sw[4:], sw[5:], sw[6:], sw[7:], sw[8:], sw[9:], sw[10:], sw[11:], sw[12:], sw[13:], sw[14:])]) # 15-gram \n sl.extend([''.join(i) for i in zip(sw, sw[1:], sw[2:], sw[3:], sw[4:], sw[5:], sw[6:], sw[7:], sw[8:], sw[9:], sw[10:], sw[11:], sw[12:], sw[13:], sw[14:], sw[15:])]) # 16-gram \n sl.extend([''.join(i) for i in zip(sw, sw[1:], sw[2:], sw[3:], sw[4:], sw[5:], sw[6:], sw[7:], sw[8:], sw[9:], sw[10:], sw[11:], sw[12:], sw[13:], sw[14:], sw[15:], sw[16:])]) # 12-gram \n return most_similar(w, sl) \n\n\n\n\ndef get_passages_n_gram(question, input_passage, max_len=180):\n q_len = len(question)\n sentens = 
input_passage.split('。')\n sentens = [senten for senten in sentens if len(senten.strip()) > 0]\n passages = []\n passages.extend([senten for senten in sentens[:max_len - q_len]])\n # passages.extend(['。'.join(senten) for senten in zip(sentens, sentens[1:])])\n # passages.extend(['。'.join(senten) for senten in zip(sentens, sentens[1:], sentens[2:])])\n allowed_passage = [passage for passage in passages if len(passage) + q_len <= max_len and len(passage.strip()) >=1] \n return allowed_passage\n\n\n\n\nch_regex = re.compile(r'[^\\u4e00-\\u9fa5a-zA-Z0-9]')\ndef get_all_pun_stat():\n '''\n 获取所有标点的统计信息\n '''\n train_data = pd.read_csv(train_path)\n pun_d = {}\n for d in train_data.iterrows():\n # if ids == 100: break \n # print(d[1]['keyword']) \n bridging_entity, keyword = d[1]['bridging_entity'], d[1]['keyword']\n content1, content2, content3, content4, content5 = text_prepro(d[1]['content1']), text_prepro(d[1]['content2']), \\\n text_prepro(d[1]['content3']), text_prepro(d[1]['content4']), text_prepro(d[1]['content5'])\n question, question_id = text_prepro(d[1]['question']), d[1]['question_id']\n supp_ori = text_prepro(d[1]['supporting_paragraph'])\n answer_ori = text_prepro(d[1]['answer'])\n title1, title2, title3, title4, title5 = d[1]['title1'], d[1]['title2'],d[1]['title3'],d[1]['title4'],d[1]['title5']\n content_list = [content1, content2, content3, content4, content5]\n \n for content in content_list:\n paras = content.split('。')\n for para in paras:\n pun_list = ch_regex.findall(para)\n for pun in pun_list:\n pun_d.setdefault(pun, 0)\n pun_d[pun] += 1 \n # print(biaodian)\n print(pun_d)\n\n \n\n pun_after_sort = sorted(pun_d.items(), key=lambda pun:pun[1], reverse=True)\n print(pun_after_sort)\n\n\n# get_all_pun_stat()\n\ndef get_all_content_contain():\n \"\"\"\n 获取包含xxx报道的数据\n \"\"\"\n train_data = pd.read_csv(train_path)\n\n for d in train_data.iterrows():\n ids += 1 \n # if ids == 100: break \n # print(d[1]['keyword']) \n bridging_entity, keyword = 
d[1]['bridging_entity'], d[1]['keyword']\n content1, content2, content3, content4, content5 = text_prepro(d[1]['content1']), text_prepro(d[1]['content2']), \\\n text_prepro(d[1]['content3']), text_prepro(d[1]['content4']), text_prepro(d[1]['content5'])\n question, question_id = text_prepro(d[1]['question']), d[1]['question_id']\n supp_ori = text_prepro(d[1]['supporting_paragraph'])\n answer_ori = text_prepro(d[1]['answer'])\n title1, title2, title3, title4, title5 = d[1]['title1'], d[1]['title2'],d[1]['title3'],d[1]['title4'],d[1]['title5']\n content_list = [content1, content2, content3, content4, content5]\n pun_d = {}\n for content in content_list:\n paras = content.split('。')\n for para in paras:\n if '报道' in para:\n print(para) \n\n# sys.exit(0)\n\n\ndef get_word_freq():\n \"\"\"\n 获取词频数据,进而对文章进行进一步清洗\n \"\"\"\n train_data = pd.read_csv(train_path)\n word_dict = {}\n\n for d in train_data.iterrows():\n bridging_entity, keyword = d[1]['bridging_entity'], d[1]['keyword']\n content1, content2, content3, content4, content5 = text_prepro(d[1]['content1']), text_prepro(d[1]['content2']), \\\n text_prepro(d[1]['content3']), text_prepro(d[1]['content4']), text_prepro(d[1]['content5'])\n question, question_id = text_prepro(d[1]['question']), d[1]['question_id']\n supp_ori = text_prepro(d[1]['supporting_paragraph'])\n answer_ori = text_prepro(d[1]['answer'])\n title1, title2, title3, title4, title5 = d[1]['title1'], d[1]['title2'],d[1]['title3'],d[1]['title4'],d[1]['title5']\n content_list = [content1, content2, content3, content4, content5]\n \n for content in content_list:\n paras = content.split('。')\n \n for para in paras:\n words = [w for w in jieba.cut(para)]\n for word in words:\n word_dict.setdefault(word, 0)\n word_dict[word] += 1\n print(word_dict)\n\n# get_word_freq()\n# sys.exit(0)\n\n\n# 几种匹配模式\n# 据xxx报道\n# match1_re = re.compile(r'[\\u4e00-\\u9fa5a-zA-Z0-9“]{0,3}据(.*?)报道$')\n# 报道称\nmatch2_re = 
re.compile(r'[\\u4e00-\\u9fa5a-zA-Z0-9]{0,20}报道[\\u4e00-\\u9fa5a-zA-Z0-9]{0,4}$')\n# 匹配 【】\nmatch3_re = re.compile(r'【(.*?)】')\n# (图片来源于网络)\n# (图中中国军队士兵正在操作82迫) 含有图的\nmatch4_re = re.compile(r'\\(记者(.*?)\\)')\npara_regex = re.compile(r'[\\u4e00-\\u9fa5a-zA-Z0-9,。!?,.!?、/\\-;()%()~~]')\ndef text_pre_treat(text):\n \"\"\"\n 文本预处理\n \"\"\"\n para = text \n para = para.replace('[', '【')\n para = para.replace(']', '】')\n para = para.replace('(', '(')\n para = para.replace(')', ')')# 中文括号统一转英文括号\n para = para.replace('”', '') # 中文双引号\n para = para.replace('“', '')\n para = para.replace('(图片来源于网络)', '')\n para = para.replace('(上图)', '')\n\n para = para.replace('2', '2')\n para = para.replace('5', '5')\n para = para.replace('%', '%')\n para = para.replace('1', '1')\n para = para.replace('6', '6')\n para = para.replace('Слава', 'cnbba')\n\n \n\n # para = para.replace('《', '')\n # para = para.replace('》', '')\n #新增 对para做预处理\n para_first = re.split(',|,|;|;', para)[0]\n if match2_re.findall(para_first):\n # para = para.replace(para_first, '')\n regex = '{}[,|,|;|;]'.format(para_first)\n try:\n para = re.sub(regex, '', para)\n except:\n pass \n # print('excep ----')\n # print(para)\n # raise \n # print('-' * 10) \n # print(para)\n # print(match3_re.findall(para))\n if match3_re.findall(para):\n para_back = para\n for item in match3_re.findall(para_back):\n # print(item)\n regex = '【{}】'.format(item)\n # print('regex {}'.format(regex))\n para = para.replace(regex, '')\n if match4_re.findall(para):\n for m in match4_re.findall(para):\n # print(m)\n para = para.replace('(记者{})'.format(m), '')\n \n para = ''.join(para_regex.findall(para)) \n return para \n \n\n\n\ndef get_latent_para(question, content_list, max_len=510, mat_supps=None):\n \"\"\"\n 从content_list中获取潜在的可能包含答案的段落, \n 要保证content顺序性服从\n ret: \n 用于进行训练的文本数据, 默认截断长度为510 \n \n \"\"\"\n\n all_paras_sim_stat = {}\n question = text_pre_treat(question)\n question_set = set([word for word in jieba.cut(question)])\n question_set 
-= stop_words_set\n print('-' * 10)\n\n\n for c_idx, content in enumerate(content_list):\n paras = content.split('。') # 有没有必要再通过感叹号来进行段落划分\n # 标点符号没有参与的意义\n for pid, para in enumerate(paras):\n # print(para)\n para = text_pre_treat(para)\n \n # 新增停止\n para_set = set([word for word in jieba.cut(para)]) - stop_words_set\n # all_paras_sim_stat[para] = '{}:{}'.format(idx, len(question_set & para_set)) \n all_paras_sim_stat[para] = [len(question_set & para_set), pid, c_idx]\n # all_paras_sim_stat = sorted(all_paras_sim_stat.items(), key=lambda item: item[1])\n \n para_after_sort = sorted(all_paras_sim_stat.items(), key=lambda all_paras_sim_stat:all_paras_sim_stat[1][0], reverse=True)\n import_para = '。'.join([para[0] for para in para_after_sort])[:max_len - len(question)]\n\n # 看看和重点语句的交集: \n # import_set_and = set(import_para.split('。')) & mat_supps\n # print('and and is {}'.format(len(import_set_and)))\n # print(import_para)\n return import_para, para_after_sort # 如果不需要反转\n\n # 是否有必要反转? \n reserverd_para = para_after_sort[:len(import_para.split('。'))]\n reserverd_para_sort = sorted(reserverd_para, key=lambda item:str(item[1][2]) + ':' + str(item[1][1]), \n reverse=False)\n\n import_para = '。'.join([para[0] for para in reserverd_para_sort])[:max_len - len(question)]\n return import_para, question\n # print(import_para)\n # final_list = [] \n # for content in content_list:\n # paras = content.split('。') # 有没有必要再通过感叹号来进行段落划分\n # # 标点符号没有参与的意义\n # for para in paras:\n # para = text_pre_treat(para)\n # for import_para in import_para.split('。'):\n # if para == import_para:\n # final_list.append(para)\n # # elif import_para in para:\n # # final_list.append(para)\n # print('。'.join(final_list))\n # return '。'.join(final_list)\n # print('-' * 10)\n # print(question)\n # print(para_after_sort) \n # para_after_sort = set([para[0] for para in para_after_sort])\n\n ## 还原最初的顺序\n # print(import_para)\n # print(question)\n # print(len(import_para) + len(question))\n return 
import_para, para_after_sort\n\ndef check_train_support_and_brid():\n '''现在需要的就是 passage , question, answer 单独搞出来,\n 另外一种方式, 长度控制在256,其中开头由支撑段落预处理后填充,然后后面用通过算法搞出来的 相似句子进行填充\n\n 要有正例, 有反例, 训练的时候,如何制造反例??不含有支撑段落的,但是却很相似的\n '''\n train_data = pd.read_csv(train_path)\n data_json = []\n idx = 0\n passages_train = {}\n\n content_supp_cnt = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n for d in train_data.iterrows():\n bridging_entity, keyword = d[1]['bridging_entity'], d[1]['keyword']\n content1, content2, content3, content4, content5 = text_prepro(d[1]['content1']), text_prepro(d[1]['content2']), \\\n text_prepro(d[1]['content3']), text_prepro(d[1]['content4']), text_prepro(d[1]['content5'])\n question, question_id = text_pre_treat(d[1]['question']), d[1]['question_id']\n supp_ori = text_prepro(d[1]['supporting_paragraph'])\n answer_ori = text_prepro(d[1]['answer'])\n title1, title2, title3, title4, title5 = d[1]['title1'], d[1]['title2'],d[1]['title3'],d[1]['title4'],d[1]['title5']\n\n # answer info extract \n ans_mat = regex_ans.findall(answer_ori)\n ans_mat = [ text_pre_treat(ans[1]) for ans in ans_mat]\n # print('*' * 10)\n # print(question)\n # print(ans_mat)\n \n assert len(ans_mat) != 0\n\n content_list = [content1, content2, content3, content4, content5]\n # import_para = get_latent_para_ver2(question, content_list) # 获取所有材料中的重点语句\n # 答案和对应的支撑段落匹配起来,重点要看看是否可以找到蛛丝马迹,即问题在支撑段落中是否出现\n # 看看supporting graph中有多少包含我们所要的答案,首先解析出来\n content_supp_cnt[supp_ori.count('@content')] += 1 \n mat_supps = regex_supp.findall(supp_ori)\n mat_supps = [text_pre_treat(mat_supp[1]) for mat_supp in mat_supps]\n print(mat_supps)\n\n\n# check_train_support_and_brid()\n# sys.exit(0)\n\ndef get_latent_para_ver2(question, content_list):\n \"\"\"\n 从content_list中获取潜在的可能包含答案的段落, 添加一个递归把候选项目放到问题集合中的方式\n ret: \n 用于进行训练的文本数据, 默认截断长度为1024\n \"\"\"\n import_para, para_after_sort = get_latent_para(question, content_list)\n import_para, para_after_sort = get_latent_para(question + para_after_sort[0][0] + 
para_after_sort[1][0], content_list)\n\n return import_para \n\n\n\n\nstat_info = [0, 0]\nch_good_regex = re.compile(r'[\\u4e00-\\u9fa5a-zA-Z0-9\\-“)]')\n\n\ndef mark_answer_in_para(answer_list, para):\n \"\"\"\n 在段落中标记出答案的位置\n input: \n answer: type: List \n para: 重点段落\n \n ret: start_pos: the start pos of answer \n end_pos: the end pos of answer \n \"\"\"\n # print(answer_list)\n para = ''.join(ch_good_regex.findall(para))\n for answer in answer_list: \n ans = ''.join(ch_good_regex.findall(answer[1]))\n if ans in para:\n stat_info[1] += 1 \n else: \n print('-' * 10)\n print(para)\n print(ans)\n stat_info[0] += 1 \n print(stat_info)\n \n # 可以看看,有多少answer,是在我们的question中的. \n\n\ndef remove_unwant(answer):\n ans_treat = ''\n start, end = 0, len(answer) \n for word in answer: \n if not ch_good_regex.findall(word): start += 1 \n else: break \n for word in answer[::-1]:\n if not ch_good_regex.findall(word): end -= 1 \n else: break \n return answer[start:end]\n\n\ndef mark_answer_in_para_new(answer_list, mat_supps, para):\n \"\"\"\n 在段落中标记出答案的位置\n input: \n answer_list: type: List \n mat_supps: 关联的重点段落\n para: 重点段落\n \n ret: start_pos: the start pos of answer \n end_pos: the end pos of answer \n\n [{'answer': xx, 'start_pos':, 'end_pos': xx}]\n \"\"\"\n # print(answer_list)\n # para = ''.join(ch_good_regex.findall(para))\n ret_list = []\n for answer in answer_list: \n if len(answer.strip()) == 0: \n print(answer_list)\n raise \n # ans = ''.join(ch_good_regex.findall(answer[1]))\n # ans = remove_unwant(answer[1])\n answer = answer.replace('。', ',')# 改下标点\n if len(answer.strip()) != 0 and answer in para: # answer可能出现中间包含句号的情况, answer作为一个整体 \n # para.index from which idx ??? 
\n supp_para_start = None \n for mat_supp in mat_supps:\n # mat_supp可能还可以拆分\n # for mat_item in mat_supp.split('。'):\n mat_supp = remove_unwant(mat_supp)\n if len(mat_supp.strip()) == 0: continue\n answer = remove_unwant(answer)\n if answer in mat_supp and mat_supp in para:\n supp_para_start = para.index(mat_supp)\n break \n \n\n\n start_idx = para.index(answer, supp_para_start) # \n end_idx = start_idx + len(answer)\n \n ret_item = {'answer': answer, 'start_pos': start_idx, 'end_pos': end_idx - 1}\n ret_list.append(ret_item)\n return ret_list \n # stat_info[1] += 1 \n # else: \n # print('-' * 10)\n # print(para)\n # print(ans)\n # stat_info[0] += 1 \n # print(stat_info)\n\n\n\ndef get_test_dataset(test_path):\n test_data_json = [] \n test_data = pd.read_csv(test_path)\n for d in test_data.iterrows():\n content1, content2, content3, content4, content5 = text_prepro(d[1]['content1']), text_prepro(d[1]['content2']), \\\n text_prepro(d[1]['content3']), text_prepro(d[1]['content4']), text_prepro(d[1]['content5'])\n question, question_id = text_pre_treat(d[1]['question']), d[1]['question_id']\n title1, title2, title3, title4, title5 = d[1]['title1'], d[1]['title2'],d[1]['title3'],d[1]['title4'],d[1]['title5']\n # test_data_json.append({\"question_id\": question_id, \"question\": question, })\n content_list = [content1, content2, content3, content4, content5]\n import_para, _ = get_latent_para(question, content_list, max_len=2000, mat_supps=None) # 从这3000个当中找出值来补充长度\n import_para_list = import_para.split('。')\n\n # 前几个句子是最重要的了\n for i in range(30):\n import_para_list_tmp = import_para_list\n random.shuffle(import_para_list_tmp)\n \n data_item = {'question': question, 'para': '。'.join(import_para_list_tmp)[:300 - len(question_id)]}\n\n print(data_item)\n test_data_json.append(data_item)\n np.save('kesic_test_new_method_300.npy', test_data_json)\n\n\n# get_test_dataset(test_path)\n\n# sys.\n\ndef get_test_dataset_new(test_path, max_len=508, pool_str_len=3500, 
per_sample=20):\n \"\"\"\n max_len: 问题+段落的最大长度\n pool_str_len:候选字符串长度\n per_sample :\n\n \"\"\"\n test_data_json = [] \n test_data = pd.read_csv(test_path)\n for d in test_data.iterrows():\n content1, content2, content3, content4, content5 = text_prepro(d[1]['content1']), text_prepro(d[1]['content2']), \\\n text_prepro(d[1]['content3']), text_prepro(d[1]['content4']), text_prepro(d[1]['content5'])\n question, question_id = text_pre_treat(d[1]['question']), d[1]['question_id']\n title1, title2, title3, title4, title5 = d[1]['title1'], d[1]['title2'],d[1]['title3'],d[1]['title4'],d[1]['title5']\n # test_data_json.append({\"question_id\": question_id, \"question\": question, })\n content_list = [content1, content2, content3, content4, content5]\n import_para, _ = get_latent_para(question, content_list, max_len=pool_str_len, mat_supps=None) # 从这3000个当中找出值来补充长度\n import_para_list = [ para for para in import_para.split('。') if len(para.strip()) != 0]\n\n # 第一个句子万万是要留着的\n first_import_para = import_para_list[0]\n\n other_import_para = import_para_list[1:]\n\n # 前几个句子是最重要的了\n for i in range(per_sample):\n test_input_item = [first_import_para]\n random.shuffle(other_import_para) \n test_input_item.extend(other_import_para)\n\n # 再按照句号切分一次,然后再shuffle,为了避免第一个重要句子总是出现在句子首\n # print(test_input_item)\n # print('-' *10)\n para_input_contain_first = '。'.join(test_input_item)[:max_len - len(question)] # 最重要的句子在句首 \n para_input_list = [ para for para in para_input_contain_first.split('。') if len(para.strip()) != 0]\n random.shuffle(para_input_list)\n find_test_para = '。'.join(para_input_list)\n \n # print(len(question) , len(find_test_para))\n find_test_para += '。' * (max_len - len(question) - len(find_test_para)) \n # print(find_test_para)\n # print(len(question) + len(find_test_para))\n assert len(question) + len(find_test_para) == max_len \n # print(find_test_para)\n data_item = {'question': question, 'para': find_test_para, 'question_id': question_id}\n 
test_data_json.append(data_item)\n np.save('kesic_test_new_method_samp{}_of_{}_len{}.npy'.format(per_sample, pool_str_len, max_len), test_data_json)\n\n# print('start create')\n\nget_test_dataset_new(test_path, max_len=380, pool_str_len=2250, per_sample=12)\n\nsys.exit(0)\n\n\ndef check_train_data():\n data_path = './new_train_method_300.npy'\n datas = np.load(data_path, allow_pickle=True).tolist()\n no_answer_cnt = 1 \n has_answer_cnt = 1 \n q_cnt_info = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n for d in datas:\n print(d)\n # print(d['q_cnt'])\n if d['answer_mark']:\n has_answer_cnt += 1 \n else:\n no_answer_cnt += 1 \n q_cnt_info[d['q_cnt']] += 1 \n if d['q_cnt'] == 2 and d['question'].count('?') == 1:\n # print(d['question'])\n pass \n\n # print(q_cnt_info)\n # print(has_answer_cnt, no_answer_cnt) # 73483 74530\n\n\n# check_train_data()\n# sys.exit(0)\n\n\nimport random \ndef get_train_dataset_new(max_len=380):\n \"\"\"\n 1:1 混合正例与反例 要知道,句子当中可能是没有答案的\n 混合的时候,直接使用支撑文案,加一些相似属性,作为我们的训练语语料,长度可以用250-300??? 如何再进一步预处理下, 可以设置三次滑动窗口,这样可以滑动到700左右个字符串\n 当然如何滑动是个问题???毕竟前面的几个句子数据量是最大的. 
可以将中间的100多替换掉咋的,多尝试,最大搜索长度为1000 \n\n 支撑段落必须要散落一点\n \"\"\"\n train_data = pd.read_csv(train_path)\n data_json = []\n idx = 0\n passages_train = {}\n\n content_supp_cnt = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n\n common_cnt = [0] * 80\n ids = 0 \n import_in_cnt, import_in_cnt_new = 0, 0 \n supp_in_content_cnt = 0 \n un_cnt = 0 \n for d in train_data.iterrows():\n ids += 1 \n # if ids == 100: break \n # print(d[1]['keyword'])\n \n bridging_entity, keyword = d[1]['bridging_entity'], d[1]['keyword']\n content1, content2, content3, content4, content5 = text_prepro(d[1]['content1']), text_prepro(d[1]['content2']), \\\n text_prepro(d[1]['content3']), text_prepro(d[1]['content4']), text_prepro(d[1]['content5'])\n question, question_id = text_pre_treat(d[1]['question']), d[1]['question_id']\n supp_ori = text_prepro(d[1]['supporting_paragraph'])\n answer_ori = text_prepro(d[1]['answer'])\n title1, title2, title3, title4, title5 = d[1]['title1'], d[1]['title2'],d[1]['title3'],d[1]['title4'],d[1]['title5']\n content_list = [content1, content2, content3, content4, content5]\n\n # 看看支撑段落是否可以在训练样本中找到\n '''\n mat_supps = regex_supp.findall(supp_ori)\n for mat_supp in mat_supps:\n assert mat_supp[0] == mat_supp[2]\n cid, supp_text_treat, supp_text = mat_supp[0], text_pre_treat(mat_supp[1]), mat_supp[1]\n if remove_unwant(supp_text_treat) not in text_pre_treat(content_list[int(cid) - 1]): #remove unwanted很重要\n print('-' * 10)\n print(supp_text_treat)\n print(supp_text)\n print(cid)\n print(text_pre_treat(content_list[int(cid) - 1]))\n un_cnt +=1 \n # print(un_cnt)\n # mat_supps = [text_pre_treat(mat_supp[1]) for mat_supp in mat_supps]\n continue \n '''\n # answer info extract \n ans_mat_ori = regex_ans.findall(answer_ori)\n # print(ans_mat)\n ans_mat = [ remove_unwant(text_pre_treat(ans[1])) for ans in ans_mat_ori]\n # 获取ans_mat对应的supp_mat,按照idx对齐\n for ans in ans_mat:\n # print(ans_mat)\n if len(ans.strip()) == 0: \n print(ans_mat)\n print(answer_ori)\n raise \n\n assert 
len(ans_mat) != 0\n content_list = [content1, content2, content3, content4, content5] \n # 答案和对应的支撑段落匹配起来,重点要看看是否可以找到蛛丝马迹,即问题在支撑段落中是否出现\n # 看看supporting graph中有多少包含我们所要的答案,首先解析出来\n content_supp_cnt[supp_ori.count('@content')] += 1 \n mat_supps_ori = regex_supp.findall(supp_ori)\n mat_supps = [remove_unwant(text_pre_treat(mat_supp[1])) for mat_supp in mat_supps_ori]\n \n import_para, _ = get_latent_para(question, content_list, max_len=3000, mat_supps = mat_supps) # 从这3000个当中找出值来补充长度\n import_para_ori = import_para\n for mat_supp in mat_supps:\n for item in mat_supp.split('。'): # 防止一个支撑段落中含有多个句子\n import_para = import_para.replace(item, '')\n\n # 制作2个反例\n q_id = question_id\n for i in range(2):\n question_id = '{}_neg_{}'.format(q_id, i)\n import_para_list = [ para for para in import_para.split('。') if len(remove_unwant(para.strip())) != 0]\n random.shuffle(import_para_list)\n import_para_input = '。'.join([para for para in import_para_list if len(remove_unwant(para)) > 0])[:max_len - len(question)]\n # print(mat_supps)\n q_cnt = 1 if len(ans_mat) > 2 else len(ans_mat)\n print(len(import_para_input) + len(question))\n if len(import_para_input) + len(question) < 380:\n import_para_input = import_para_input + '。' * (380 - len(import_para_input) - len(question))\n assert len(import_para_input) + len(question) <= 380 and len(import_para_input) + len(question) >= 378\n assert len(import_para_input) + len(question) == 380\n # print('neg')\n # print(import_para_input)\n passages_train[question_id] = {'question': question, 'para': import_para_input, 'answer_mark': None, 'ans_cnt': 0, 'q_cnt': q_cnt}\n # print(passages_train[question_id])\n for i in range(2): # 二个正例样本\n question_id = '{}_pos_{}'.format(q_id, i)\n # print(question_id)\n import_para_list = [ para for para in import_para.split('。') if len(para.strip()) != 0]\n random.shuffle(import_para_list)\n supp_para_len, supp_para_concat = 0, ''\n \n for mat_supp in mat_supps:\n mat_supp = mat_supp.replace('。', ',') 
#句号改成逗号,让多个句子构成的段落成为整体\n supp_para_len += len(mat_supp)\n supp_para_concat += mat_supp + '。'\n left_part_max_len = 0 if max_len - supp_para_len - len(question) < 0 else max_len - supp_para_len - len(question) - 1 \n\n import_para_con = '。'.join([para for para in import_para_list if len(remove_unwant(para))> 0])[:left_part_max_len] + '。' + supp_para_concat[:max_len - len(question)]\n import_para_con_list = [para_con for para_con in import_para_con.split('。') if len(para_con.strip()) !=0]\n random.shuffle(import_para_con_list)\n\n import_para_input = remove_unwant('。'.join(import_para_con_list))[:max_len - len(question)]\n # print('pos')\n # print(supp_para_concat)\n # print(import_para_input)\n ans_ret_item = mark_answer_in_para_new(ans_mat, mat_supps, import_para_input) # ans也有可能是个数组\n if len(ans_mat) != len(ans_ret_item):\n continue \n # ans_ret_item = mark_answer_in_para_new(ans_mat, mat_supps, import_para_input)\n # print('-' * 10)\n # print(ans_mat)\n # print(ans_ret_item)\n # print(mat_supps)\n # print(import_para_input)\n # raise \n # print(mat_supps)\n q_cnt = 1 if len(ans_mat) > 2 else len(ans_mat)\n print(len(import_para_input) + len(question))\n print(import_para_input)\n if len(import_para_input) + len(question) < 380:\n import_para_input = import_para_input + '。' * (380 - len(import_para_input) - len(question))\n assert len(import_para_input) + len(question) <= 380 and len(import_para_input) + len(question) >= 378 # 10 <= 380 11> 375\n assert len(import_para_input) + len(question) == 380\n passages_train[question_id] = {'question': question, 'para': import_para_input, 'answer_mark': ans_ret_item, 'ans_cnt': len(ans_ret_item), 'q_cnt': q_cnt}\n # print(passages_train[question_id])\n # print(len(passages_train))\n\n # 查看这些标记数据 是否包含在里面呢\n\n # import_para_set = set(import_para.split('。'))\n mode = 'check_supps_in_content_un'\n \n if mode == 'check_supps_in_content': # 查看支撑段落是否在内容集合里面\n mat_supps_single = []\n for mat_supp in mat_supps:\n 
mat_supps_single.extend(mat_supp.split('。'))\n mat_supps_set = set(mat_supps_single) \n if '' in mat_supps_set:\n mat_supps_set.remove('')\n for m in mat_supps_set:\n if len(m.strip()) == 0 :raise \n all_para_set = set([]) \n for c_idx, content in enumerate(content_list):\n paras = content.split('。') # 有没有必要再通过感叹号来进行段落划分\n for pid, para in enumerate(paras):\n # print(para)\n para = text_pre_treat(para)\n all_para_set.add(para)\n if len(all_para_set & mat_supps_set) == len(mat_supps_set):\n supp_in_content_cnt += 1 \n else:\n print('*' * 10)\n print(mat_supps_set)\n # print(all_para_set & mat_supps_set)\n print(all_para_set)\n\n print('In cnt is {}'.format(supp_in_content_cnt)) # 19021\n # 虽然这个19021距离所有样本差距很多,但是注意一点,这个支撑段落可能只是一个句子中的一部分,所以会出现这么多匹配不上的\n elif mode == 'check_ans_in_supp': # 查看答案是否在支撑段落中可以找到\n pass \n # print(len(passages_train))\n passages_train_list = [passages_train[k] for k in passages_train.keys()]\n np.save('./new_train_method_380.npy', passages_train_list)\n\n\nget_train_dataset_new()\n\n\nsys.exit(0)\ndef get_train_dataset(treat_mode = 'bert', max_len=384):\n ## 现在需要的就是 passage , question, answer 单独搞出来\n train_data = pd.read_csv(train_path)\n data_json = []\n idx = 0\n passages_train = {}\n\n content_supp_cnt = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n\n common_cnt = [0] * 80\n ids = 0 \n for d in train_data.iterrows():\n ids += 1 \n # if ids == 100: break \n # print(d[1]['keyword'])\n \n bridging_entity, keyword = d[1]['bridging_entity'], d[1]['keyword']\n content1, content2, content3, content4, content5 = text_prepro(d[1]['content1']), text_prepro(d[1]['content2']), \\\n text_prepro(d[1]['content3']), text_prepro(d[1]['content4']), text_prepro(d[1]['content5'])\n question, question_id = text_pre_treat(d[1]['question']), d[1]['question_id']\n supp_ori = text_prepro(d[1]['supporting_paragraph'])\n answer_ori = text_prepro(d[1]['answer'])\n title1, title2, title3, title4, title5 = d[1]['title1'], 
d[1]['title2'],d[1]['title3'],d[1]['title4'],d[1]['title5']\n\n # answer info extract \n ans_mat = regex_ans.findall(answer_ori)\n print(ans_mat)\n for ans in ans_mat:\n if len(ans[1].strip()) == 0: \n print(ans_mat)\n print(answer_ori)\n raise \n ans_mat = [ text_pre_treat(ans[1]) for ans in ans_mat]\n \n for ans in ans_mat:\n print(ans_mat)\n if len(ans.strip()) == 0: \n print(ans_mat)\n print(answer_ori)\n raise \n\n assert len(ans_mat) != 0\n\n content_list = [content1, content2, content3, content4, content5]\n # import_para = get_latent_para_ver2(question, content_list) # 获取所有材料中的重点语句\n\n\n '''\n ans_content_id = int(ans_mat.group(1))\n answer = text_prepro(ans_mat.group(2))\n\n punctuation = ['。', '?', ',', '.', '、', '”', '.', '“', \":\", ',', ':', '!']\n answer = answer.lstrip(''.join(punctuation))\n answer = answer.rstrip(''.join(punctuation))\n '''\n \n # 答案和对应的支撑段落匹配起来,重点要看看是否可以找到蛛丝马迹,即问题在支撑段落中是否出现\n # print('-' * 10) \n # print(question)\n # print(ans_mat)\n # 看看supporting graph中有多少包含我们所要的答案,首先解析出来\n content_supp_cnt[supp_ori.count('@content')] += 1 \n mat_supps = regex_supp.findall(supp_ori)\n mat_supps = [text_pre_treat(mat_supp[1]) for mat_supp in mat_supps]\n # continue \n # 看看 支撑段落 和 问题之间 交集\n '''\n question_set = set([word for word in jieba.cut(question)])\n question_set -= stop_words_set\n for mat_supp in mat_supps:\n # print(mat_supp[1])\n word_sets = set([word for word in jieba.cut(mat_supp[1])])\n para_set = word_sets - stop_words_set\n if len(question_set & para_set) == 0:\n print('-' * 10)\n print(question)\n print(mat_supp[1])\n print(mat_supps)\n # print(len(question_set & para_set))\n common_cnt[len(question_set & para_set)] += 1 \n '''\n\n '''\n for content in content_list:\n paragraphs = content.split('。')\n for para in paragraphs:\n word_sets = set([word for word in jieba.cut(para)])\n print(word_sets - stop_words_set)\n '''\n if treat_mode == 'bert':\n max_len = 506\n import_para, answer = get_latent_para(question, content_list, 
max_len=max_len, mat_supps = mat_supps) # from 320 to 506 \n\n ret_item = mark_answer_in_para_new(ans_mat, import_para) # ans也有可能是个数组\n # print(question)\n # print(ret_item)\n if ret_item:\n passages_train[question_id] = {'question': question, 'para': import_para, 'answer_mark': ret_item}\n # print(ret_item)\n #print(passages_train_bert[question_id] )\n else: \n print('-' * 10)\n print(question_id)\n print(question)\n print(ans_mat)\n # print(import_para)\n new_import_para = '。'.join(mat_supps) + import_para\n new_import_para = new_import_para[:max_len - len(question)]\n\n # print(new_import_para)\n ret_item = mark_answer_in_para_new(ans_mat, new_import_para)\n if ret_item:\n passages_train[question_id] = {'question': question, 'para': new_import_para, 'answer_mark': ret_item}\n print(passages_train[question_id])\n assert len(new_import_para) + len(question) == max_len\n print(len(passages_train))\n '''\n\n for idx, content in enumerate(content_list): \n clean_passages = get_passages_n_gram(question, content_list[idx], max_len=1024) \n for clean_passage in clean_passages:\n train_answer = answer if idx == ans_content_id - 1 and answer in clean_passage else '' \n\n passages_train_bert.append({\"answer\": train_answer, 'passage': clean_passage, 'question': question, 'id': question_id})\n '''\n\n elif treat_mode == 'dgcnn_old':\n # 按照dgcnn格式处理数据\n passages_train = [] \n for idx, content in enumerate(content_list): \n clean_passages = get_passages_n_gram(question, content_list[idx]) \n # judege supporting_paragraph in the clean passages \n print(clean_passages)\n print('\\n' * 3)\n continue \n # \n for clean_passage in clean_passages:\n train_answer = answer if idx == ans_content_id - 1 and answer in clean_passage else '' \n passages_train.append({\"answer\": train_answer, 'passage': clean_passage})\n if len(passages_train) == 0: \n print(content_list)\n print('-----')\n sys.exit(0)\n data_json.append({'question': question, 'id': question_id, 'passages': 
passages_train})\n\n elif treat_mode == 'dgcnn_new':\n passages_train = [] \n clean_passage = get_latent_para(question, content_list, max_len=320)[0]\n if answer in clean_passage:\n train_answer = answer \n else: \n continue \n passages_train.append({\"answer\": train_answer, 'passage': clean_passage})\n if len(passages_train) == 0: \n print(content_list)\n print('-----')\n sys.exit(0)\n data_json.append({'question': question, 'id': question_id, 'passages': passages_train})\n\n\n # find_set = set([])\n # all_set = set([])\n # for passage in passages_train:\n # all_set.add(passage['id'])\n # if passage['answer'] != '':\n # find_set.add(passage['id'])\n # print(len(all_set - find_set))\n\n if treat_mode == 'bert':\n import numpy as np \n # print(passages_train)\n \n np.save('kesic_508.npy', passages_train)\n elif treat_mode == 'dgcnn':\n pass \n sys.exit(0)\n\n\n find_set = set([])\n all_set = set([])\n for passage in passages_train_bert:\n all_set.add(passage['id'])\n if passage['answer'] != '':\n find_set.add(passage['id'])\n print(len(all_set - find_set))\n sys.exit(0)\n\n\n\n has_answer_pass = [passage for passage in passages_train_bert if passage['answer'] != '']\n print(len(passages_train_bert))\n print(len(has_answer_pass))\n # print(passages_train_bert[:100])\n import numpy as np \n # np.save('kesic_bert.npy', passages_train_bert)\n\n sys.exit(0)\n\n\n np.save('kesic.npy', data_json) \n\n\n \n \nif __name__ == \"__main__\":\n \n get_train_dataset() \n \n\n\n\n \n\n" }, { "alpha_fraction": 0.7303370833396912, "alphanum_fraction": 0.7752808928489685, "avg_line_length": 16.799999237060547, "blob_id": "e670a2be06cc661e564031abe25faf1fe9ceba39", "content_id": "ac8417ae10420a8f4efe3aa1c6c993be72927c68", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 211, "license_type": "permissive", "max_line_length": 37, "num_lines": 5, "path": "/README.md", "repo_name": "wangbq18/kesic_army_rc", "src_encoding": 
"UTF-8", "text": "# kesic_army_rc\n莱斯杯:全国第二届“军事智能机器阅读”挑战赛解决方案,持续维护中\n\n\n探索**长句机器阅读理解**, 预计代码10月10日整理好,目前代码有点乱\n" }, { "alpha_fraction": 0.7330785989761353, "alphanum_fraction": 0.7478166222572327, "avg_line_length": 31.73214340209961, "blob_id": "1844c37179b39f58e01ac8407570737c4e0ac1bb", "content_id": "e95e4c830ca3a51a348406a31c0d6a50d68fa3c0", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1832, "license_type": "permissive", "max_line_length": 92, "num_lines": 56, "path": "/code/other.py", "repo_name": "wangbq18/kesic_army_rc", "src_encoding": "UTF-8", "text": "import os\nimport re\nimport sys\nimport pandas as pd\nimport numpy as np\nimport gensim\nfrom gensim.models import Word2Vec\nfrom gensim.models.word2vec import LineSentence\nimport tensorflow as tf\nimport keras\nfrom keras.layers import *\nfrom keras.models import *\nfrom keras.optimizers import *\nfrom keras.callbacks import *\nfrom keras.preprocessing import text, sequence\nfrom keras.utils import to_categorical\nfrom sklearn.preprocessing import LabelEncoder\nfrom keras.utils import np_utils\nimport matplotlib.pyplot as plt\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.preprocessing import LabelEncoder\nimport warnings\nwarnings.filterwarnings('ignore')\nimport os\nimport gc\nimport random\nfrom keras.engine.topology import Layer\n\ndef bi_gru_model(sent_length, embeddings_weight,class_num):\n print(\"get_text_gru3\")\n content = Input(shape=(sent_length,), dtype='int32')\n embedding = Embedding(\n name=\"word_embedding\",\n input_dim=embeddings_weight.shape[0],\n weights=[embeddings_weight],\n output_dim=embeddings_weight.shape[1],\n trainable=False)\n\n x = SpatialDropout1D(0.2)(embedding(content))\n\n x = Bidirectional(CuDNNGRU(200, return_sequences=True))(x)\n x = Bidirectional(CuDNNGRU(200, return_sequences=True))(x)\n\n avg_pool = GlobalAveragePooling1D()(x)\n 
max_pool = GlobalMaxPooling1D()(x)\n\n conc = concatenate([avg_pool, max_pool])\n\n x = Dropout(0.2)(Activation(activation=\"relu\")(BatchNormalization()(Dense(1000)(conc))))\n x = Activation(activation=\"relu\")(BatchNormalization()(Dense(500)(x)))\n output = Dense(class_num, activation=\"softmax\")(x)\n\n model = Model(inputs=content, outputs=output)\n model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])\n return model" }, { "alpha_fraction": 0.5267605781555176, "alphanum_fraction": 0.5380281805992126, "avg_line_length": 20.484848022460938, "blob_id": "ed94af53aa7af8fc5e59b7e8ec50f37029c6a1e9", "content_id": "05c2d793142c04af2aaab44782658c25587c6c79", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 750, "license_type": "permissive", "max_line_length": 88, "num_lines": 33, "path": "/code/utils.py", "repo_name": "wangbq18/kesic_army_rc", "src_encoding": "UTF-8", "text": "# \nimport pandas as pd \n\ndef write_csv(data_json, target_file):\n \"\"\"\n input: data_json,\n eg: {'question_id': ['q1','q2'], 'answer': ['中国,美国', '巴基斯坦,印度']}\n \"\"\"\n header_name = [k for k in data_json]\n df = pd.DataFrame(data_json, columns=header_name)\n df.to_csv(target_file, index=False)\n\n\n\ndef write_csv_test():\n data_json = {'question_id': ['q1','q2', 'q3'], 'answer': ['中国,美国', '巴基斯坦,印度', 1.23]}\n write_csv(data_json, './test.csv')\n\n\n\ndef read_csv(csv_file):\n \"\"\"\n \"\"\"\n df = pd.read_csv(csv_file)\n answer = df['answer']\n print(answer)\n new = answer.str.split(',')\n print(new)\n\n\nif __name__ == \"__main__\":\n # write_csv_test()\n read_csv('./test.csv')\n\n" }, { "alpha_fraction": 0.5635033249855042, "alphanum_fraction": 0.5909367799758911, "avg_line_length": 25.32085609436035, "blob_id": "f0f6259443b19fcd79ab1d8f9ff0e355b50a9518", "content_id": "514eb0b52e32d6f9e8c1f25a3ab08b71385a1d8f", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, 
"language": "Python", "length_bytes": 5017, "license_type": "permissive", "max_line_length": 125, "num_lines": 187, "path": "/code/bert_finetune_methoddebug.py", "repo_name": "wangbq18/kesic_army_rc", "src_encoding": "UTF-8", "text": "#! -*- coding:utf-8 -*-\n\nimport json\nimport numpy as np\nimport pandas as pd\nfrom random import choice\nfrom keras_bert import load_trained_model_from_checkpoint, Tokenizer\nimport re, os\nimport codecs\nimport sys \nfrom keras.layers import *\nfrom keras.models import Model\nimport keras.backend as K\nfrom keras.optimizers import Adam\n\nmaxlen = 100\n\nbase_path = \"/media/yinshuai/d8644f6c-5a97-4e12-909b-b61d2271b61c/nlp-datasets\"\nconfig_path = os.path.join(base_path, 'chinese-bert_chinese_wwm_L-12_H-768_A-12/bert_config.json')\ncheckpoint_path = os.path.join(base_path, 'chinese-bert_chinese_wwm_L-12_H-768_A-12/bert_model.ckpt')\ndict_path = os.path.join(base_path, 'chinese-bert_chinese_wwm_L-12_H-768_A-12/vocab.txt')\n\ndata_path = '/media/yinshuai/d8644f6c-5a97-4e12-909b-b61d2271b61c/nlp-datasets/senti_dataset'\n\n\n\ntoken_dict = {}\n\nwith codecs.open(dict_path, 'r', 'utf8') as reader:\n for line in reader:\n token = line.strip()\n token_dict[token] = len(token_dict)\n\n\nclass OurTokenizer(Tokenizer):\n def _tokenize(self, text):\n R = []\n for c in text:\n if c in self._token_dict:\n R.append(c)\n elif self._is_space(c):\n R.append('[unused1]') # space类用未经训练的[unused1]表示\n else:\n R.append('[UNK]') # 剩余的字符是[UNK]\n return R\n\ntokenizer = OurTokenizer(token_dict)\n\n\n\n\ndata = np.load('kesic_bert.npy', allow_pickle=True).tolist()\n\n\n\n# 按照9:1的比例划分训练集和验证集\nrandom_order = [x for x in range(len(data))]\nnp.random.shuffle(random_order)\ntrain_data = [data[j] for i, j in enumerate(random_order) if i % 10 != 0]\nvalid_data = [data[j] for i, j in enumerate(random_order) if i % 10 == 0]\n\n\n\n\ndef seq_padding(X, padding=0, maxlen=None):\n if maxlen is None:\n L = [len(x) for x in X]\n ML = max(L)\n else:\n ML = maxlen\n 
return np.array([\n np.concatenate([x[:ML], [padding] * (ML - len(x))]) if len(x[:ML]) < ML else x for x in X\n ])\n\n\n\n\nclass data_generator:\n def __init__(self, data, batch_size=16):\n self.data = data\n self.batch_size = batch_size\n self.steps = len(self.data) // self.batch_size\n if len(self.data) % self.batch_size != 0:\n self.steps += 1\n def __len__(self):\n return self.steps\n def __iter__(self):\n while True:\n \n idxs = [id for id in range(len(self.data))]\n np.random.shuffle(idxs)\n X1, X2, ANS_START_POS, ANS_END_POS, PASSAGE_MASK, HAS_ANS = [], [], [], [], [], []\n for i in idxs:\n d = self.data[i]\n x1, x2 = tokenizer.encode(first=d['passage'])\n _x1, _x2 = tokenizer.encode(first=d['question'])\n x1.extend(_x1)\n x2.extend(_x2)\n\n has_ans = [0]\n if d['answer'] != '':\n has_ans = [1]\n\n\n X1.append(x1)\n X2.append(x2)\n HAS_ANS.append(has_ans)\n \n if len(X1) == self.batch_size or i == idxs[-1]:\n X1 = seq_padding(X1)\n X2 = seq_padding(X2)\n HAS_ANS = seq_padding(HAS_ANS)\n yield [X1, X2], HAS_ANS \n X1, X2, HAS_ANS = [], [], []\n\n\n\n\ntrain_D = data_generator(train_data)\nvalid_D = data_generator(valid_data)\n\n\n\n\n\nbert_model = load_trained_model_from_checkpoint(config_path, checkpoint_path, seq_len=None)\n\nfor l in bert_model.layers:\n l.trainable = True\n\nx1_in = Input(shape=(None,), dtype='int32')\nx2_in = Input(shape=(None,))\n\n\n\nx = bert_model([x1_in, x2_in])\n\nx_cls = Lambda(lambda x: x[:, 0])(x)\n\np_has_ans = Dense(1, activation='sigmoid')(x_cls) # 确定该文档是否含有答案\n\n'''\nx = Dense(units=128, activation='relu')(x) \nx = Dropout(0.1)(x)\nans_start = Dense(units=1, activation='sigmoid')(x)\nans_end = Dense(units=1, activation='sigmoid')(x)\n\npassage_mask = passage_mask_in\n\n\np_has_ans_loss = K.sparse_categorical_crossentropy(has_ans_in, p_has_ans)\np_has_ans_loss = K.mean(p_has_ans_loss)\n\np_ans_start_loss = K.sparse_categorical_crossentropy(ans_start_pos_in, ans_start)\np_ans_start_loss = K.sum(p_ans_start_loss * 
passage_mask) / K.sum(passage_mask) \np_ans_end_loss = K.sparse_categorical_crossentropy(ans_end_pos_in, ans_end)\np_ans_end_loss = K.sum(p_ans_end_loss * passage_mask) / K.sum(passage_mask) \n'''\n\n#loss = p_has_ans_loss + p_ans_start_loss + p_ans_end_loss\n\n\n\n\nlearning_rate = 1e-6\n\n'''\nmodel = Model([x1_in, x2_in, ans_start_pos_in, ans_end_pos_in, passage_mask_in, has_ans_in], [p_has_ans, ans_start, ans_end])\n'''\nmodel = Model([x1_in, x2_in], p_has_ans)\nmodel.compile(\n loss='binary_crossentropy',\n optimizer=Adam(1e-5), # 用足够小的学习率\n metrics=['accuracy']\n)\nmodel.summary()\n\n\n\n\nmodel.fit_generator(\n train_D.__iter__(),\n steps_per_epoch=len(train_D),\n epochs=5,\n validation_data=valid_D.__iter__(),\n validation_steps=len(valid_D)\n)" }, { "alpha_fraction": 0.5275775790214539, "alphanum_fraction": 0.5534637570381165, "avg_line_length": 27.69198226928711, "blob_id": "4f12bba697a6b728d823a357de7f4e88e72f179f", "content_id": "400cb16512e8b0ce78844ae6f330367a97b73ea0", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6879, "license_type": "permissive", "max_line_length": 110, "num_lines": 237, "path": "/code/fintoercsel.py", "repo_name": "wangbq18/kesic_army_rc", "src_encoding": "UTF-8", "text": "#! 
-*- coding:utf-8 -*-\n\nimport json\nimport numpy as np\nimport pandas as pd\nfrom random import choice\nfrom keras_bert import load_trained_model_from_checkpoint, Tokenizer\nimport re, os\nimport codecs\n\n\nmaxlen = 130\n\n\n\n\nbase_path = \"/media/yinshuai/d8644f6c-5a97-4e12-909b-b61d2271b61c/nlp-datasets\"\nconfig_path = os.path.join(base_path, 'chinese-bert_chinese_wwm_L-12_H-768_A-12/bert_config.json')\ncheckpoint_path = os.path.join(base_path, 'chinese-bert_chinese_wwm_L-12_H-768_A-12/bert_model.ckpt')\ndict_path = os.path.join(base_path, 'chinese-bert_chinese_wwm_L-12_H-768_A-12/vocab.txt')\n\ndata_path = '/media/yinshuai/d8644f6c-5a97-4e12-909b-b61d2271b61c/nlp-datasets/senti_dataset'\n\n\ntoken_dict = {}\n\nwith codecs.open(dict_path, 'r', 'utf8') as reader:\n for line in reader:\n token = line.strip()\n token_dict[token] = len(token_dict)\n\n\nclass OurTokenizer(Tokenizer):\n def _tokenize(self, text):\n R = []\n for c in text:\n if c in self._token_dict:\n R.append(c)\n elif self._is_space(c):\n R.append('[unused1]') # space类用未经训练的[unused1]表示\n else:\n R.append('[UNK]') # 剩余的字符是[UNK]\n return R\n\ntokenizer = OurTokenizer(token_dict)\n\ndatas = np.load('kesic_bert.npy', allow_pickle=True).tolist()\ndata = [] \nfor d in datas:\n if d['answer'] != '':\n data.append((d['passage'] + ' ' + d['question'], d['answer'], 1))\n else:\n data.append((d['passage'] + ' ' + d['question'], d['answer'], 0))\n \n \n\n\n\n'''\nneg = pd.read_excel(os.path.join(data_path, 'neg.xls'), header=None)\npos = pd.read_excel(os.path.join(data_path, 'neg.xls'), header=None)\n\ndata = []\nprint(type(neg))\n\nfor d in neg[0]:\n data.append((d, 0))\n\nfor d in pos[0]:\n data.append((d, 1))\n'''\n\n# 按照9:1的比例划分训练集和验证集\nrandom_order = [x for x in range(len(data))]\nnp.random.shuffle(random_order)\ntrain_data = [data[j] for i, j in enumerate(random_order) if i % 10 != 0]\n\n\n\nvalid_data = [data[j] for i, j in enumerate(random_order) if i % 10 == 0]\n\n\ndef seq_padding(X, padding=0):\n 
L = [len(x) for x in X]\n ML = max(L)\n return np.array([\n np.concatenate([x, [padding] * (ML - len(x))]) if len(x) < ML else x for x in X\n ])\n\n\nclass data_generator:\n def __init__(self, data, batch_size=48):\n self.data = data\n self.batch_size = batch_size\n self.steps = len(self.data) // self.batch_size\n if len(self.data) % self.batch_size != 0:\n self.steps += 1\n def __len__(self):\n return self.steps\n def __iter__(self):\n while True:\n idxs = [id for id in range(len(self.data))]\n np.random.shuffle(idxs)\n #X1, X2,Y, ANS_START_POS, ANS_END_POS,PASSAGE_MASK = [], [], [],[], [], []\n X1, X2,Y = [], [], []\n TEST = [] \n for i in idxs:\n d = self.data[i]\n text = d[0][:maxlen] ## 切割了\n x1, x2 = tokenizer.encode(first=text)\n passage_mask = [0] + [1] * len(text) + [0]\n ans_start_pos = np.zeros(len(text) + 2 , dtype='int32')\n ans_end_pos = np.zeros(len(text) + 2 , dtype='int32')\n\n test = np.zeros(len(text) + 2 , dtype='int32')\n\n if d[1] != '' and d[1] in text:\n idx = text.index(d[1])\n ans_start_pos[idx + 1] = 1 \n ans_end_pos[idx + len(d[1])] = 1 \n else: continue \n test[1:8] = 1 \n\n\n y = d[2]\n\n X1.append(x1)\n X2.append(x2)\n '''\n ANS_START_POS.append(ans_start_pos)\n ANS_END_POS.append(ans_end_pos)\n PASSAGE_MASK.append(passage_mask)\n '''\n Y.append([y])\n TEST.append(test)\n if len(X1) == self.batch_size or i == idxs[-1]:\n X1 = seq_padding(X1)\n X2 = seq_padding(X2)\n Y = seq_padding(Y)\n TEST = seq_padding(TEST)\n\n '''\n ANS_START_POS = seq_padding(ANS_START_POS)\n ANS_END_POS = seq_padding(ANS_END_POS)\n PASSAGE_MASK = seq_padding(PASSAGE_MASK)\n '''\n\n\n #yield [X1, X2, Y, ANS_START_POS, ANS_END_POS, PASSAGE_MASK], None \n #X1, X2, Y, ANS_START_POS, ANS_END_POS, PASSAGE_MASK = [], [], [],[], [], []\n yield [X1, X2, Y, TEST], None \n X1, X2, Y = [], [], []\n TEST = [] \n\n\nfrom keras.layers import *\nfrom keras.models import Model\nimport keras.backend as K\nfrom keras.optimizers import Adam\n\n\nbert_model = 
load_trained_model_from_checkpoint(config_path, checkpoint_path, seq_len=None)\n\nfor l in bert_model.layers:\n l.trainable = True\n\nx1_in = Input(shape=(None,))\nx2_in = Input(shape=(None,))\ny_in = Input(shape=(1,))\ntest_in = Input(shape=(None,))\n\nans_start_pos_in = Input(shape=(None,), dtype='int32' )\nans_end_pos_in = Input(shape=(None,), dtype='int32')\npassage_mask_in = Input(shape=(None,))\n\n\nx = bert_model([x1_in, x2_in])\nx_cls = Lambda(lambda x: x[:, 0])(x)\np = Dense(1, activation='sigmoid')(x_cls)\n\n\nx = Dense(units=128, activation='relu')(x) \nx = Dropout(0.1)(x)\np_test = Dense(2, activation='softmax')(x)\n\n\n'''\nx = Dense(units=128, activation='relu')(x) \nx = Dropout(0.1)(x)\nans_start = Dense(1, activation='sigmoid')(x)\nans_end = Dense(1, activation='sigmoid')(x)\npassage_mask = passage_mask_in\n'''\n\n#model = Model([x1_in, x2_in, y_in,ans_start_pos_in,ans_end_pos_in, passage_mask_in], [p, ans_start, ans_end])\nmodel = Model([x1_in, x2_in, y_in, test_in], [p])\n\nloss_p = K.binary_crossentropy(y_in, p) \nloss_p = K.mean(loss_p)\n\ntest_loss = K.sparse_categorical_crossentropy(test_in, p_test)\ntest_loss = K.mean(test_loss)\n\n'''\np_ans_start_loss = K.sparse_categorical_crossentropy(ans_start_pos_in, ans_start)\np_ans_start_loss = K.sum(p_ans_start_loss * passage_mask) / K.sum(passage_mask)\np_ans_end_loss = K.sparse_categorical_crossentropy(ans_end_pos_in, ans_end)\np_ans_end_loss = K.sum(p_ans_end_loss * passage_mask) / K.sum(passage_mask) \n\nloss = loss_p + p_ans_start_loss + p_ans_end_loss \n'''\nloss = loss_p + test_loss\n\nmodel.add_loss(loss)\nmodel.compile(\n optimizer=Adam(1e-6), # 用足够小的学习率\n metrics=['accuracy']\n)\nmodel.summary()\n\n\ntrain_D = data_generator(train_data)\n\n# for d in train_D:\n# for i in range(len(d[0])):\n# print(d[0][i].shape)\n# print('-' * 10)\n# # print(d[0][4].shape)\nvalid_D = data_generator(valid_data)\n\nmodel.fit_generator(\n train_D.__iter__(),\n steps_per_epoch=len(train_D),\n epochs=5,\n 
validation_data=valid_D.__iter__(),\n validation_steps=len(valid_D)\n)" }, { "alpha_fraction": 0.5629476308822632, "alphanum_fraction": 0.5876173973083496, "avg_line_length": 29.06220054626465, "blob_id": "6a4c7e3b0b89b0786e8c593874967a0a04b32f9d", "content_id": "cd2390e14fedad7126ab6049732b09cff1f2c3f7", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6285, "license_type": "permissive", "max_line_length": 143, "num_lines": 209, "path": "/code/data_util.py", "repo_name": "wangbq18/kesic_army_rc", "src_encoding": "UTF-8", "text": "import os \nimport sys \nimport pandas as pd \n\ntest_path = '../data/test_data_r0.csv'\ntrain_path = '../data/train_round_0.csv'\n\n\ntrain_data = pd.read_csv(train_path)\n\n\n\n\nprint(train_data.columns)\n\n'''\nIndex(['answer', 'bridging_entity', 'content1', 'content2', 'content3',\n 'content4', 'content5', 'keyword', 'question', 'supporting_paragraph',\n 'title1', 'title2', 'title3', 'title4', 'title5', 'question_id'],\n dtype='object')\n'''\n \nprint(train_data.head(3))\nfrom collections import Counter\ntrain_data['content1_sen'] = train_data['content1'].str.split('。')\nsen_len_stat = [] \nsen_cnt = 0 \nfor sen_list in train_data['content1_sen']:\n for sen in sen_list:\n sen_cnt += 1\n sen_len_stat.append(len(sen))\n\nprint(Counter(sen_len_stat).most_common(200))\nprint(sen_cnt)\ntrain_data['question_len'] = train_data['question'].str.len()\nprint(train_data['question_len'].describe())\nprint(\"question len of 99% is {}\".format(train_data['question_len'].quantile(.99)))\n\n\nsys.exit(0)\nimport matplotlib.pyplot as plt\nxdim = [] \nydim = []\nfor couter in Counter(sen_len_stat).most_common(200):\n xdim.append(couter[0])\n ydim.append(couter[1])\nplt.bar(xdim, ydim)\n#plt.show()\n\n\n\ntrain_data['content1_len'] = train_data['content1'].str.len()\ntrain_data['content2_len'] = train_data['content2'].str.len()\ntrain_data['content3_len'] = 
train_data['content3'].str.len()\ntrain_data['content4_len'] = train_data['content4'].str.len()\ntrain_data['content5_len'] = train_data['content5'].str.len()\n\ntrain_data['title1_len'] = train_data['title1'].str.len()\ntrain_data['title2_len'] = train_data['title2'].str.len()\ntrain_data['title3_len'] = train_data['title3'].str.len()\ntrain_data['title4_len'] = train_data['title4'].str.len()\ntrain_data['title5_len'] = train_data['title5'].str.len()\n\n\nprint(train_data['content1_len'].describe())\nprint(train_data['content2_len'].describe())\nprint(train_data['content3_len'].describe())\nprint(train_data['content4_len'].describe())\nprint(train_data['content5_len'].describe())\n\n\n\n\nprint(train_data['title1_len'].describe())\nprint(train_data['title2_len'].describe())\nprint(train_data['title3_len'].describe())\nprint(train_data['title4_len'].describe())\nprint(train_data['title5_len'].describe())\n\nanswer = train_data['answer']\nre_ans = r'^\\@content([1-5])\\@(.*?)\\@content([1-5])\\@'\nimport re\nregex_ans = re.compile(re_ans)\nregex_supp = regex_ans\n\n\nfor d in train_data.iterrows():\n print(d[1]['keyword'])\n \n \n bridging_entity, keyword = d[1]['bridging_entity'], d[1]['keyword']\n content1, content2, content3, content4, content5 = d[1]['content1'], d[1]['content2'], d[1]['content3'], d[1]['content4'], d[1]['content5']\n question, question_id = d[1]['question'], d[1]['question_id']\n supp_ori = d[1]['supporting_paragraph']\n answer_ori = d[1]['answer']\n title1, title2, title3, title4, title5 = d[1]['title1'], d[1]['title2'],d[1]['title3'],d[1]['title4'],d[1]['title5']\n\n # answer info extract \n ans_mat = regex_ans.match(answer_ori)\n assert ans_mat is not None \n assert ans_mat.group(1) == ans_mat.group(3)\n ans_content_id = ans_mat.group(1)\n answer = ans_mat.group(2)\n print('-' * 10)\n print(ans_content_id)\n print('question is {}\\n'.format(question))\n print(answer)\n\n content = ''\n if ans_content_id == '1': content = content1 \n elif 
ans_content_id == '2': content = content2 \n elif ans_content_id == '3': content = content3\n elif ans_content_id == '4': content = content4\n elif ans_content_id == '5': content = content5\n else: raise ValueError\n print(content)\n\n assert answer in content \n \n\n\n # supporting_paragraph info extract\n supp_mat = regex_supp.match(supp_ori)\n assert supp_mat is not None \n assert supp_mat.group(1) == supp_mat.group(3)\n supp_content_id = supp_mat.group(1)\n supp = supp_mat.group(2)\n\n print(supp_content_id)\n print(supp)\n \n #assert answer in supp\n\n\n \n \n\n\n\nsys.exit(0)\n\n\ndata['content_len'] = pd_data['content'].str.len()\n\n\nprint(\"\\ntitle len desc is:\")\nprint(pd_data['title_len'].describe())\nprint(\"\\ncontent len desc is:\")\nprint(pd_data['content_len'].describe())\n\n\nprint(\"content len of 90% is {}\".format(pd_data['content_len'].quantile(.9)))\nprint(\"title len of 90% is {}\".format(pd_data['title_len'].quantile(.9)))\n\n\nprint(\"content len of 99% is {}\".format(pd_data['content_len'].quantile(.99)))\nprint(\"title len of 99% is {}\".format(pd_data['title_len'].quantile(.99)))\n\n\n\n\ndef read_data(data_file, table_file):\n data, tables = [], {}\n with open(data_file) as f:\n for l in f:\n data.append(json.loads(l))\n with open(table_file) as f:\n for l in f:\n l = json.loads(l)\n d = {}\n d['headers'] = l['header']\n d['header2id'] = {j: i for i, j in enumerate(d['headers'])}\n d['content'] = {}\n d['keywords'] = {}\n d['all_values'] = set()\n d['types'] = l['types']\n d['title'] = l['title']\n rows = np.array(l['rows'])\n for i, h in enumerate(d['headers']):\n d['content'][h] = set(rows[:, i])\n if d['types'][i] == 'text':\n d['keywords'][i] = ''\n # get_key_words(d['content'][h])\n else:\n d['keywords'][i] = ''\n\n d['all_values'].update(d['content'][h])\n # print(d['keywords'])\n d['all_values'] = set([i for i in d['all_values'] if hasattr(i, '__len__')])\n tables[l['id']] = d\n if toy:\n data = data[:toy_data_cnt]\n return 
data, tables\n\nif mode != 'test':\n train_data, train_tables = read_data(\n os.path.join(train_data_path, 'train.json'),\n os.path.join(train_data_path, 'train.tables.json')\n ) # 41522 5013\n\n\nvalid_data, valid_tables = read_data(\n os.path.join(valid_data_path, 'val.json'),\n os.path.join(valid_data_path, 'val.tables.json')\n) # 4396 1197\ntest_data, test_tables = read_data(\n os.path.join(test_file_path, 'final_test.json'),\n os.path.join(test_file_path, 'final_test.tables.json')\n)\n" }, { "alpha_fraction": 0.5043886303901672, "alphanum_fraction": 0.5354093909263611, "avg_line_length": 29.693727493286133, "blob_id": "dd67307b8c2cf518bf9d0f650b594a5fa948c0ef", "content_id": "d49e1fc38faa4e314c8cda55d8473efba145d5cd", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8509, "license_type": "permissive", "max_line_length": 118, "num_lines": 271, "path": "/code/commercial_entity.py", "repo_name": "wangbq18/kesic_army_rc", "src_encoding": "UTF-8", "text": "#! 
-*- coding: utf-8 -*-\n\nimport json\nfrom tqdm import tqdm\nimport os, re\nimport numpy as np\nimport pandas as pd\nfrom keras_bert import load_trained_model_from_checkpoint, Tokenizer\nimport codecs\n\n\nmode = 0\nmaxlen = 128\nlearning_rate = 5e-5\nmin_learning_rate = 1e-5\n\n\nconfig_path = '../../kg/bert/chinese_L-12_H-768_A-12/bert_config.json'\ncheckpoint_path = '../../kg/bert/chinese_L-12_H-768_A-12/bert_model.ckpt'\ndict_path = '../../kg/bert/chinese_L-12_H-768_A-12/vocab.txt'\n\n\ntoken_dict = {}\n\nwith codecs.open(dict_path, 'r', 'utf8') as reader:\n for line in reader:\n token = line.strip()\n token_dict[token] = len(token_dict)\n\n\nclass OurTokenizer(Tokenizer):\n def _tokenize(self, text):\n R = []\n for c in text:\n if c in self._token_dict:\n R.append(c)\n elif self._is_space(c):\n R.append('[unused1]') # space类用未经训练的[unused1]表示\n else:\n R.append('[UNK]') # 剩余的字符是[UNK]\n return R\n\ntokenizer = OurTokenizer(token_dict)\n\n\nD = pd.read_csv('../ccks2019_event_entity_extract/event_type_entity_extract_train.csv', encoding='utf-8', header=None)\nD = D[D[2] != u'其他']\nclasses = set(D[2].unique())\n\n\ntrain_data = []\nfor t,c,n in zip(D[1], D[2], D[3]):\n train_data.append((t, c, n))\n\n\nif not os.path.exists('../random_order_train.json'):\n random_order = range(len(train_data))\n np.random.shuffle(random_order)\n json.dump(\n random_order,\n open('../random_order_train.json', 'w'),\n indent=4\n )\nelse:\n random_order = json.load(open('../random_order_train.json'))\n\n\ndev_data = [train_data[j] for i, j in enumerate(random_order) if i % 9 == mode]\ntrain_data = [train_data[j] for i, j in enumerate(random_order) if i % 9 != mode]\nadditional_chars = set()\nfor d in train_data + dev_data:\n additional_chars.update(re.findall(u'[^\\u4e00-\\u9fa5a-zA-Z0-9\\*]', d[2]))\n\nadditional_chars.remove(u',')\n\n\nD = pd.read_csv('../ccks2019_event_entity_extract/event_type_entity_extract_eval.csv', encoding='utf-8', header=None)\ntest_data = []\nfor id,t,c in 
zip(D[0], D[1], D[2]):\n test_data.append((id, t, c))\n\n\ndef seq_padding(X, padding=0):\n L = [len(x) for x in X]\n ML = max(L)\n return np.array([\n np.concatenate([x, [padding] * (ML - len(x))]) if len(x) < ML else x for x in X\n ])\n\n\ndef list_find(list1, list2):\n \"\"\"在list1中寻找子串list2,如果找到,返回第一个下标;\n 如果找不到,返回-1。\n \"\"\"\n n_list2 = len(list2)\n for i in range(len(list1)):\n if list1[i: i+n_list2] == list2:\n return i\n return -1\n\n\nclass data_generator:\n def __init__(self, data, batch_size=32):\n self.data = data\n self.batch_size = batch_size\n self.steps = len(self.data) // self.batch_size\n if len(self.data) % self.batch_size != 0:\n self.steps += 1\n def __len__(self):\n return self.steps\n def __iter__(self):\n while True:\n idxs = range(len(self.data))\n np.random.shuffle(idxs)\n X1, X2, S1, S2 = [], [], [], []\n for i in idxs:\n d = self.data[i]\n text, c = d[0][:maxlen], d[1]\n text = u'___%s___%s' % (c, text)\n tokens = tokenizer.tokenize(text)\n e = d[2]\n e_tokens = tokenizer.tokenize(e)[1:-1]\n s1, s2 = np.zeros(len(tokens)), np.zeros(len(tokens))\n start = list_find(tokens, e_tokens)\n if start != -1:\n end = start + len(e_tokens) - 1\n s1[start] = 1\n s2[end] = 1\n x1, x2 = tokenizer.encode(first=text)\n X1.append(x1)\n X2.append(x2)\n S1.append(s1)\n S2.append(s2)\n if len(X1) == self.batch_size or i == idxs[-1]:\n X1 = seq_padding(X1)\n X2 = seq_padding(X2)\n S1 = seq_padding(S1)\n S2 = seq_padding(S2)\n yield [X1, X2, S1, S2], None\n X1, X2, S1, S2 = [], [], [], []\n\n\nfrom keras.layers import *\nfrom keras.models import Model\nimport keras.backend as K\nfrom keras.callbacks import Callback\nfrom keras.optimizers import Adam\n\n\nbert_model = load_trained_model_from_checkpoint(config_path, checkpoint_path, seq_len=None)\n\nfor l in bert_model.layers:\n l.trainable = True\n\n\nx1_in = Input(shape=(None,)) # 待识别句子输入\nx2_in = Input(shape=(None,)) # 待识别句子输入\ns1_in = Input(shape=(None,)) # 实体左边界(标签)\ns2_in = Input(shape=(None,)) # 
实体右边界(标签)\n\nx1, x2, s1, s2 = x1_in, x2_in, s1_in, s2_in\nx_mask = Lambda(lambda x: K.cast(K.greater(K.expand_dims(x, 2), 0), 'float32'))(x1)\n\nx = bert_model([x1, x2])\nps1 = Dense(1, use_bias=False)(x)\nps1 = Lambda(lambda x: x[0][..., 0] - (1 - x[1][..., 0]) * 1e10)([ps1, x_mask])\nps2 = Dense(1, use_bias=False)(x)\nps2 = Lambda(lambda x: x[0][..., 0] - (1 - x[1][..., 0]) * 1e10)([ps2, x_mask])\n\nmodel = Model([x1_in, x2_in], [ps1, ps2])\n\n\ntrain_model = Model([x1_in, x2_in, s1_in, s2_in], [ps1, ps2])\n\nloss1 = K.mean(K.categorical_crossentropy(s1_in, ps1, from_logits=True))\nps2 -= (1 - K.cumsum(s1, 1)) * 1e10\nloss2 = K.mean(K.categorical_crossentropy(s2_in, ps2, from_logits=True))\nloss = loss1 + loss2\n\ntrain_model.add_loss(loss)\ntrain_model.compile(optimizer=Adam(learning_rate))\ntrain_model.summary()\n\n\ndef softmax(x):\n x = x - np.max(x)\n x = np.exp(x)\n return x / np.sum(x)\n\n\ndef extract_entity(text_in, c_in):\n if c_in not in classes:\n return 'NaN'\n text_in = u'___%s___%s' % (c_in, text_in)\n text_in = text_in[:510]\n _tokens = tokenizer.tokenize(text_in)\n _x1, _x2 = tokenizer.encode(first=text_in)\n _x1, _x2 = np.array([_x1]), np.array([_x2])\n _ps1, _ps2 = model.predict([_x1, _x2])\n _ps1, _ps2 = softmax(_ps1[0]), softmax(_ps2[0])\n for i, _t in enumerate(_tokens):\n if len(_t) == 1 and re.findall(u'[^\\u4e00-\\u9fa5a-zA-Z0-9\\*]', _t) and _t not in additional_chars:\n _ps1[i] -= 10\n start = _ps1.argmax()\n for end in range(start, len(_tokens)):\n _t = _tokens[end]\n if len(_t) == 1 and re.findall(u'[^\\u4e00-\\u9fa5a-zA-Z0-9\\*]', _t) and _t not in additional_chars:\n break\n end = _ps2[start:end + 1].argmax() + start\n a = text_in[start - 1: end]\n return a\n\n\nclass Evaluate(Callback):\n def __init__(self):\n self.ACC = []\n self.best = 0.\n self.passed = 0\n def on_batch_begin(self, batch, logs=None):\n \"\"\"第一个epoch用来warmup,第二个epoch把学习率降到最低\n \"\"\"\n if self.passed < self.params['steps']:\n lr = (self.passed + 1.) 
/ self.params['steps'] * learning_rate\n K.set_value(self.model.optimizer.lr, lr)\n self.passed += 1\n elif self.params['steps'] <= self.passed < self.params['steps'] * 2:\n lr = (2 - (self.passed + 1.) / self.params['steps']) * (learning_rate - min_learning_rate)\n lr += min_learning_rate\n K.set_value(self.model.optimizer.lr, lr)\n self.passed += 1\n def on_epoch_end(self, epoch, logs=None):\n acc = self.evaluate()\n self.ACC.append(acc)\n if acc > self.best:\n self.best = acc\n train_model.save_weights('best_model.weights')\n print 'acc: %.4f, best acc: %.4f\\n' % (acc, self.best)\n def evaluate(self):\n A = 1e-10\n F = open('dev_pred.json', 'w')\n for d in tqdm(iter(dev_data)):\n R = extract_entity(d[0], d[1])\n if R == d[2]:\n A += 1\n s = ', '.join(d + (R,))\n F.write(s.encode('utf-8') + '\\n')\n F.close()\n return A / len(dev_data)\n\n\ndef test(test_data):\n F = open('result.txt', 'w')\n for d in tqdm(iter(test_data)):\n s = u'\"%s\",\"%s\"\\n' % (d[0], extract_entity(d[1], d[2]))\n s = s.encode('utf-8')\n F.write(s)\n F.close()\n\n\nevaluator = Evaluate()\ntrain_D = data_generator(train_data)\n\n\nif __name__ == '__main__':\n train_model.fit_generator(train_D.__iter__(),\n steps_per_epoch=len(train_D),\n epochs=10,\n callbacks=[evaluator]\n )\nelse:\n train_model.load_weights('best_model.weights')" }, { "alpha_fraction": 0.648888885974884, "alphanum_fraction": 0.648888885974884, "avg_line_length": 44.20000076293945, "blob_id": "e37b0914e7bcef926f8b7aaab71c11f086e32069", "content_id": "80bc187ae6ef3cc491280cc0e88c31ef9e8047ce", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 225, "license_type": "permissive", "max_line_length": 73, "num_lines": 5, "path": "/code/bleutest.py", "repo_name": "wangbq18/kesic_army_rc", "src_encoding": "UTF-8", "text": "from nltk.translate.bleu_score import sentence_bleu\nreference = [['this', 'is', 'the', 'test'], ['this', 'is' , 'my','test']]\ncandidate = ['this', 
'is', 'what', 'test']\nscore = sentence_bleu(reference, candidate)\nprint(score)" } ]
13
dongxinb/TigerInformation
https://github.com/dongxinb/TigerInformation
4c0d7346858dd6c050642f53280e825bd10fddb6
028509fb2ff4e06f166487f974919dacb30fd45c
54d01bc253b76267552a71e3181dc6a38682d327
refs/heads/master
2019-01-02T05:14:41.774462
2015-01-05T05:54:35
2015-01-05T05:54:35
26,997,011
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.594688355922699, "alphanum_fraction": 0.6215718388557434, "avg_line_length": 26.281064987182617, "blob_id": "02a000392f8348a753b7dfc25523c98cbcbcbbe2", "content_id": "6dbe89e4d0e6aef5e74c1f7c014e32607f9611e9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9623, "license_type": "no_license", "max_line_length": 205, "num_lines": 338, "path": "/news1.py", "repo_name": "dongxinb/TigerInformation", "src_encoding": "UTF-8", "text": "# coding=utf-8\nimport urllib2\nimport urllib\nimport os\nimport string\nimport sys\nimport json\nimport httplib\nimport threading\nimport time\nimport datetime\nimport re\nimport chardet\nimport jieba\nimport jieba.analyse\nimport math\nimport md5\nimport sqlite3\n# import tldextract\n# import tld\nfrom urlparse import urlparse\nfrom urlparse import urljoin\nfrom bs4 import BeautifulSoup\nfrom tld import get_tld\n\nreload(sys)\nsys.setdefaultencoding('utf8')\n\nf = file(\"filter.json\")\nmyFilter = json.load(f)\nf.close\n\ncx = sqlite3.connect(\"test2.db\",check_same_thread = False);\n# cu = cx.cursor()\n\n# cu.execute(\"create table if not exists keyword (name varchar(30))\")\n# cu.execute(\"create table if not exists article (keyword varchar(30), title text, source varchar(100), date date, content text, plainText text)\")\n# cu.execute(\"insert into keyword values('卢雍政')\")\n# cx.commit()\n\n\n\ndef baidu(keyword, index, keywordId):\n\ttry:\n\t\turl = 'http://news.baidu.com/ns?word=%s&pn=%d&cl=3&ct=1&tn=news&rn=40&ie=utf-8&bt=0&et=0' % (keyword, index)\n\t\treq = urllib2.Request(url)\n\t\tresponse = urllib2.urlopen(req)\n\t\tthe_page = response.read()\n\t\tsoup = BeautifulSoup(the_page)\n\t\tresult = soup.find_all('li', {'class':'result'})\n\t\tfor item in result:\n\t\t\t# print item\n\t\t\ttitle = item.h3.a.get_text()\n\t\t\turl = item.h3.a['href'].encode(\"utf-8\")\n\t\t\ts = item.div.p.get_text().encode(\"utf-8\")\n\t\t\ttemp = \"  \"\n\t\t\tindex = 
s.find(temp)\n\t\t\tsource = s[0:index]\n\t\t\tif index == -1:\n\t\t\t\tindex = -4;\n\t\t\ttime = s[index+4:len(s)]\n\t\t\tdateTime = datetime.datetime.strptime(time, \"%Y-%m-%d %H:%M\")\n\n\t\t\tnewsDetail(url, keyword, title, source, dateTime, keywordId)\n\n\t\t\t\n\t\t\t# print title\n\t\t\t# print url\n\t\t\t# print source\n\t\t\t# print dateTime\n\n\t\t\t# print \"---------\"\n\texcept Exception, e:\n\t\tprint e\n\tfinally:\n\t\tpass\n\ndef newsDetail(url, keyword, title, source, dateTime, keywordId):\n\ttry:\n\t\tprint \"newsDetail\"\n\t\tdomain = getDomain(url)\n\t\tdetailFilter = myFilter.get(domain)\n\t\t# print myFilter[domain], '111'\n\t\tif detailFilter == None:\n\t\t\tprint domain\n\t\t\tprint \"no filter - \", url\n\t\telse:\n\t\t\treq = urllib2.Request(url)\n\t\t\treq.add_header(\"User-Agent\", \"User-Agent\tMozilla/5.0 (iPhone; CPU iPhone OS 7_0 like Mac OS X) AppleWebKit/537.51.1 (KHTML, like Gecko) Version/7.0 Mobile/11A465 Safari/9537.53\")\n\t\t\tresponse = urllib2.urlopen(req, timeout = 5)\n\t\t\tthe_page = response.read()\n\t\t\tcharset = chardet.detect(the_page)\n\t\t\tthe_page = the_page.decode(charset['encoding'])\n\t\t\tprint charset['encoding'], url\n\t\t\tsoup = BeautifulSoup(the_page)\n\t\t\t# for tag in soup.find_all(attrs={\"class\":re.compile(\"\\.*gg*\")}):\n\t\t\t# \t# print tag\n\t\t\t# \ttag.clear()\n\t\t\t# print \"----------\"\n\t\t\t# print soup\n\t\t\tfor filterDetail in detailFilter:\n\t\t\t\tresult = soup.find(filterDetail['tag'], {filterDetail['attr']: filterDetail['attr_r']})\n\t\t\t\tif result != None:\n\t\t\t\t\tbreak\n\t\t\t\n\t\t\t# if result == None:\n\t\t\t# \tresult = soup.find('font',{\"id\":\"zoom\"})\n\t\t\t# \tif result == None:\n\t\t\t# \t\tresult = soup.find('font',{\"id\":\"Zoom\"})\n\t\t\t# \t\tif result == None:\n\t\t\t# \t\t\tprint \"Error\"\n\t\t\t# \t\t\treturn None\n\t\t\tif result == None:\n\t\t\t\tprint \"Error\"\n\t\t\t\treturn None\n\t\t\t# print \"1\"\n\n\t\t\t# aList = 
getArticles(keyword)\n\t\t\t# for article in aList:\n\t\t\t# \taPlain = article[5]\n\t\t\t# \taDist = getDistResult(aPlain, result.get_text())\n\t\t\t# \tif aDist < 0.5:\n\t\t\t# \t\treturn None\n\n\t\t\t# print \"2\"\n\t\t\timgdir = 'mysite/static/newsImage'\n\t\t\ttry:\n\t\t\t\tos.makedirs(imgdir)\n\t\t\texcept Exception, e:\n\t\t\t\tpass\n\t\t\tfor image in result.find_all('img'):\n\t\t\t\timgsrc = image['src']\n\t\t\t\tif not imgsrc.startswith('http://') and not imgsrc.startswith('https://'):\n\t\t\t\t\timgsrc = urljoin(url, imgsrc)\n\t\t\t\tbasename = md5.new(imgsrc).hexdigest()\n\t\t\t\textension = os.path.splitext(urlparse(imgsrc).path)[1]\n\t\t\t\tfilename = basename + extension\n\t\t\t\timage['src'] = \"../static/newsImage/\"+filename\n\t\t\t\tprint 'downloading', imgsrc, 'as', filename\n\t\t\t\turllib.urlretrieve(imgsrc, '%s/%s' % (imgdir, filename))\n\n\t\t\tcu = cx.cursor()\n\t\t\tprint \"insert begin\"\n\t\t\t# print type(keyword), type(title), type(unicode(result)), type(result.get_text())\n\t\t\t# print \"insert into article (keyword, title, content, plainText) values(%s, %s, %s, %s)\" % (keyword, title, unicode(result), result.get_text())\n\t\t\tcu.execute(\"insert into article (keyword, title, source, date, content, plainText) values('%d', '%s', '%s', '%s', '%s', '%s')\" % (keywordId, title, source, dateTime, unicode(result), result.get_text()))\n\t\t\tcx.commit()\n\t\t\tprint \"insert success\"\n\t\t\treturn result.get_text()\n\texcept Exception, e:\n\t\tprint e\n\t\tpass\n\t# finally:\n\t\t# print \"done - \", url\n\ndef getSegmentation(content):\n\tresult = jieba.analyse.extract_tags(content,20,True)\n\t# print result\n\tfor string in result:\n\t\tprint \"tag: %s\\t\\t weight: %f\" % (string[0],string[1])\n\n\n# def cos_dist(a, b):\n# \tif len(a) != len(b):\n# \t\treturn None\n# \tpart_up = 0.0\n# \ta_sq = 0.0\n# \tb_sq = 0.0\n# \tfor a1, b1 in zip(a,b):\n# \t\tpart_up += a1*b1\n# \t\ta_sq += a1**2\n# \t\tb_sq += b1**2\n# \tpart_down = 
math.sqrt(a_sq*b_sq)\n# \tif part_down == 0.0:\n# \t\treturn None\n# \telse:\n# \t\treturn part_up / part_down\n\ndef getDomain(url):\n\tprint \"--\"*40\n\ttry:\n\t\treturn get_tld(url)\n\texcept Exception as e:\n\t\treturn \"\"\n\t\t\n\n\t# res = r'http:\\/\\/.*?\\/'\n\t# m = re.findall(res, url)\n\t# if len(m) > 0:\n\t# \treturn m[0]\n\t# topHostPostfix = (\n\t# return \"163.com\"\n\t\n\t# ext = tldextract.extract(url)\n\t# print ext.domain+\".\"+ext.suffix\n\t# return ext.domain+\".\"+ext.suffix\n\n # '.com','.la','.io','.co','.info','.net','.org','.me','.mobi',\n # '.us','.biz','.xxx','.ca','.co.jp','.com.cn','.net.cn',\n # '.org.cn','.mx','.tv','.ws','.ag','.com.ag','.net.ag',\n # '.org.ag','.am','.asia','.at','.be','.com.br','.net.br',\n # '.bz','.com.bz','.net.bz','.cc','.com.co','.net.co','.gov.cn'\n # '.nom.co','.de','.es','.com.es','.nom.es','.org.es',\n # '.eu','.fm','.fr','.gs','.in','.co.in','.firm.in','.gen.in',\n # '.ind.in','.net.in','.org.in','.it','.jobs','.jp','.ms',\n # '.com.mx','.nl','.nu','.co.nz','.net.nz','.org.nz',\n # '.se','.tc','.tk','.tw','.com.tw','.idv.tw','.org.tw',\n # '.hk','.co.uk','.me.uk','.org.uk','.vg', \".com.hk\")\n\t# regx = r'[^\\.]+('+'|'.join([h.replace('.',r'\\.') for h in topHostPostfix])+')$'\n\t# pattern = re.compile(regx,re.IGNORECASE)\n\t# print \"--\"*40\n\t# parts = urlparse(url)\n\t# host = parts.netloc\n\t# m = pattern.search(host)\n\t# res = m.group() if m else host\n\t# return \"unkonw\" if not res else res\n\n\n\nd = {}\nlog = lambda x: float('-inf') if not x else math.log(x)\nprob = lambda x: d[x] if x in d else 0 if len(x)>1 else 1\n\ndef init(filename='SogouLabDic.dic'):\n\td['_t_'] = 0.0\n\twith open(filename, 'r') as handle:\n\t\tfor line in handle:\n\t\t\tword, freq = line.split('\\t')[0:2]\n\t\t\ttry:\n\t\t\t\tword = word.decode('gbk').encode('utf-8')\n\t\t\texcept:\n\t\t\t\tword = word\n\t\t\t# print word\n\t\t\t# word = word.decode('gbk').encode('utf-8')\n\t\t\td['_t_'] += 
int(freq)+1\n\t\t\ttry:\n\t\t\t\td[word] = int(freq)+1\n\t\t\texcept:\n\t\t\t\td[word] = int(freq)+1\n\t\t\t\tpass\n \ndef solve(s):\n\tl = len(s)\n\tp = [0 for i in range(l+1)]\n\tt = [0 for i in range(l)]\n\tfor i in xrange(l-1, -1, -1):\n\t\tp[i], t[i] = max((log(prob(s[i:i+k])/d['_t_'])+p[i+k], k)\n\t\t\tfor k in xrange(1, l-i+1))\n\twhile p[l]<l:\n\t\tyield s[p[l]:p[l]+t[p[l]]]\n\t\tp[l] += t[p[l]]\n\ndef cos_dist(a, b):\n\tif len(a) != len(b):\n\t\treturn None\n\tpart_up = 0.0\n\ta_sq = 0.0\n\tb_sq = 0.0\n\tfor a1, b1 in zip(a,b):\n\t\tpart_up += a1*b1\n\t\ta_sq += a1**2\n\t\tb_sq += b1**2\n\tpart_down = math.sqrt(a_sq*b_sq)\n\tif part_down == 0.0:\n\t\treturn None\n\telse:\n\t\treturn part_up / part_down\n\ndef getDistResult(a1, a2):\n\n\ts1 = list(solve(a1))\n\ts2 = list(solve(a2))\n\n\t# print s1\n\n\tkey = list(set(s1 + s2))\n\tkeyLen=len(key)\n\tkeyValue = 0\n\n\tsk1=[keyValue]*keyLen\n\tsk2=[keyValue]*keyLen\n\tfor index,keyElement in enumerate(key):\n\t\tif keyElement in s1:\n\t\t\tsk1[index]=sk1[index]+1\n\t\tif keyElement in s2:\n\t\t\tsk2[index]=sk2[index]+1 \n\treturn cos_dist(sk1, sk2)\n\ndef getAllKeywords():\n\tcu = cx.cursor()\n\tcu.execute(\"select * from keyword\")\n\treturn cu.fetchall()\n\ndef getArticles(keywordId):\n\tcu = cx.cursor()\n\n\t# cu.execute(\"select title, date, content from article join keyword on keyword.rowid = article.keywordid where name = %s\" % keyword)\n\tcu.execute(\"select * from article where keyword = %d\" % int(keywordId))\n\tcx.commit()\n\t#cu.execute(\"select title, date, content from article, keyword where keyword.rowid = article.keywordid and name = %s\" % keyword)\n\treturn cu.fetchall()\n\ndef getArticle(newsId):\n\tcu = cx.cursor()\n\tcu.execute(\"select * from article where newsId = %d\" % int(newsId))\n\tcx.commit()\n\treturn cu.fetchall()\n\n\ninit()\nkeywordList = getAllKeywords()\n# keyword1 = keywordList[0]\n# # print keyword1[0]\n# # print getArticles(keyword1[0])\n# # print keywordList\nfor key in 
keywordList:\n\tprint key[1]\n\tif int(key[0])>=634:\n\t\tbaidu(key[1], 60, key[0])\n\n\n\n\n# s1 = '中国官员独董离职潮仍在继续。中国石油[0.52%资金研报]天然气股份有限公司将在5月22日召开年度股东大会,早前公布的会议议程显示,三位担任公司独立董事的前任副部级、部级官员已不在新一届董事会候选人名单中。'\n# s2 = '测试测试测试haha'\n# print getDistResult(s1, s2)\n\n# getSegmentation('中国官员独董离职潮仍在继续。中国石油[0.52%资金研报]天然气股份有限公司将在5月22日召开年度股东大会,早前公布的会议议程显示,三位担任公司独立董事的前任副部级、部级官员已不在新一届董事会候选人名单中。')\n# baidu('卢雍政', 100)\n# newsDetail('http://news.163.com/14/1124/10/ABQEGPBS0001124J.html')\n\n\n\n# url1 = 'http://news.163.com/14/1124/06/ABPVMOLE00014AED.html'\n# url2 = 'http://news.hexun.com/2014-11-24/170701748.html?from=rss'\n# d1 = newsDetail(url1)\n# d2 = newsDetail(url2)\n# print getDistResult(d1, d2)\n\n\n\n\n" }, { "alpha_fraction": 0.487125962972641, "alphanum_fraction": 0.49686846137046814, "avg_line_length": 30.2391300201416, "blob_id": "fb62103200226266b3cf9167168a220a0ad9c63c", "content_id": "7b2f0b82e522799c1c095263f88a3061892190f4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1437, "license_type": "no_license", "max_line_length": 114, "num_lines": 46, "path": "/mysite/migrations/0001_initial.py", "repo_name": "dongxinb/TigerInformation", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='Lists',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('pid', models.IntegerField(default=0)),\n ('title', models.CharField(max_length=100)),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='Pages',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('lid', models.IntegerField(default=0)),\n ('path', models.CharField(max_length=100)),\n ],\n 
options={\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='Peoples',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('name', models.CharField(max_length=50)),\n ('keywords', models.CharField(max_length=100)),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n ]\n" }, { "alpha_fraction": 0.6014388203620911, "alphanum_fraction": 0.6043165326118469, "avg_line_length": 29.217391967773438, "blob_id": "767dcd639371e258c66fe366b9343bb08f95ef66", "content_id": "0ce64166f681487bda7a6c4f4fd2314064199263", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 695, "license_type": "no_license", "max_line_length": 61, "num_lines": 23, "path": "/mysite/urls.py", "repo_name": "dongxinb/TigerInformation", "src_encoding": "UTF-8", "text": "from django.conf.urls import patterns, include, url\nfrom mysite.views import index, names, list, detail, zhua\nfrom mysite.views import hello, current_datetime, hours_ahead\n#from django.contrib import admin\n\nurlpatterns = patterns('',\n url(r'^$', index),\n url(r'^names/$', names),\n\turl(r'^list/', list),\n\turl(r'^detail/$', detail),\n url(r'^zhua/$', zhua),\n\n # Examples:\n # url(r'^time/plus/\\d+/$', hours_ahead),\n #url(r'^time/plus/(\\d{1,2})/$', hours_ahead),\n\n # url(r'^$', 'mysite.views.home', name='home'),\n # url(r'^blog/', include('blog.urls')),\n\n #url(r'^admin/', include(admin.site.urls)),\n #url(r'^hello/$', hello),\n #url(r'^time/$', current_datetime ),\n)\n" }, { "alpha_fraction": 0.6949602365493774, "alphanum_fraction": 0.7294429540634155, "avg_line_length": 28, "blob_id": "7196566529bd5176e657d59164a93555dc93b8e9", "content_id": "e1fafbb649c26aee2e5a65b7b4c3f2b965c01512", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 377, "license_type": "no_license", "max_line_length": 47, "num_lines": 13, "path": "/mysite/models.py", 
"repo_name": "dongxinb/TigerInformation", "src_encoding": "UTF-8", "text": "from django.db import models\n\nclass Peoples(models.Model):\n name = models.CharField(max_length=50)\n keywords = models.CharField(max_length=100)\n\nclass Lists(models.Model):\n pid = models.IntegerField(default=0)\n title = models.CharField(max_length=100)\n\nclass Pages(models.Model):\n lid = models.IntegerField(default=0)\n path = models.CharField(max_length=100)\n" }, { "alpha_fraction": 0.5908156037330627, "alphanum_fraction": 0.599497377872467, "avg_line_length": 32.15909194946289, "blob_id": "4b6a869ea76e4c039684867ffdce4275baf5ad6e", "content_id": "136351fd55f2dcb6d46d7384adfde46fe79367f2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4383, "license_type": "no_license", "max_line_length": 117, "num_lines": 132, "path": "/mysite/views.py", "repo_name": "dongxinb/TigerInformation", "src_encoding": "UTF-8", "text": "# coding=utf8\n#from django.template.loader import get_template\n#from django.template import Context\nfrom django.http import HttpResponse\nfrom django.http import StreamingHttpResponse\nfrom django.shortcuts import render_to_response\nfrom django.shortcuts import render\nimport time\n#from mysite.models import Peoples\nimport news \nimport datetime\nimport urllib2\nimport urllib\nfrom urlparse import urlparse\nfrom urlparse import urljoin\nfrom bs4 import BeautifulSoup\nimport sys\n\nreload(sys)\nsys.setdefaultencoding('utf8')\n\ndef index(request):\n '''insert only one line'''\n #people = Peoples.objects.create(name = '1', keywords = '23')\n return render_to_response('index.html')\ndef names(request):\n #peoples = keyword.objects.all()\n peoples = news.getAllKeywords()\n #print peoples\n #peoples = keyword.objects.all()\n d={'name': peoples}\n context = {'peoples': peoples}\n # print context\n # print 1\n return render(request, 'names.html', context)\n #return render_to_response('names.html', 
locals())\n\n\ndef list(request):\n iid = request.GET.get('id')\n print iid\n if iid != None:\n newTemp = news.getArticles(iid)\n # print newTemp\n # new = article.objects.get(id = iid)\n # names = keyword.objects.get(id = iid)\n # news.baidu(n[1], 1, )\n context = {'id': iid, 'new': newTemp}\n else:\n context = {'id': '', 'new': []}\n return render(request, 'list.html', context)\ndef detail(request):\n iid = request.GET.get('id')\n detailAry = news.getArticle(iid)\n context = {'detail': detailAry[0]}\n # cont = article.objects.get(id = iid)\n # context = {'id': iid, 'cont': cont}\n return render(request, 'detail.html', context)\n\n\ndef zhua_stream_response_generator(names):\n index = 0\n count = 0\n\n for keyword in names:\n keywordId = None\n cu = news.cx.cursor()\n cu.execute(\"select * from keyword where name = '%s'\" % keyword)\n keywordRow = cu.fetchone()\n if keywordRow == None:\n cu.execute(\"insert into keyword values(null, '%s')\" % keyword)\n cu.execute(\"select * from keyword where name = '%s'\" % keyword)\n keywordId = cu.fetchone()[0]\n print cu.fetchone()\n print \"xxx\"\n else:\n keywordId = keywordRow[0]\n\n url = 'http://news.baidu.com/ns?word=%s&pn=%d&cl=3&ct=1&tn=news&rn=100&ie=utf-8&bt=0&et=0' % (keyword, index)\n req = urllib2.Request(url)\n response = urllib2.urlopen(req)\n the_page = response.read()\n soup = BeautifulSoup(the_page)\n result = soup.find_all('li', {'class':'result'})\n print result.count\n for item in result:\n try:\n title = item.h3.a.get_text()\n url = item.h3.a['href'].encode(\"utf-8\")\n s = item.div.p.get_text().encode(\"utf-8\")\n temp = \"  \"\n index = s.find(temp)\n source = s[0:index]\n if index == -1:\n index = -4;\n time = s[index+4:len(s)]\n dateTime = datetime.datetime.strptime(time, \"%Y-%m-%d %H:%M\")\n print url, keyword, title, source, dateTime, keywordId\n news.newsDetail(url, keyword, title, source, dateTime, keywordId)\n count = count + 1\n yield \"[第%d条][%s]%s\\n\" % (count, keyword, title)\n except 
Exception, e:\n pass\n\ndef zhua(request):\n namestring = request.GET.get('names')\n if namestring == None or len(namestring) == 0:\n return render_to_response('zhua.html')\n else:\n names = namestring.split(',')\n return StreamingHttpResponse(zhua_stream_response_generator(names))\n\n\ndef hello(request):\n return HttpResponse(\"Hello world\")\n\ndef current_datetime(request):\n current_date = datetime.datetime.now()\n return render_to_response('current_datetime.html', locals())\n\n #t = get_template('current_datetime.html')\n #html = t.render(Context({'current_date': now}))\n #return HttpResponse(html)\n\ndef hours_ahead(request, offset):\n try:\n offset = int(offset)\n except ValueError:\n raise Http404()\n dt = datetime.datetime.now() + datetime.timedelta(hours=offset)\n html = \"<html><body>In %s hour(s), it will be %s.</body></html>\" % (offset, dt) \n return HttpResponse(html)\n" }, { "alpha_fraction": 0.7362637519836426, "alphanum_fraction": 0.7582417726516724, "avg_line_length": 44.5, "blob_id": "e3a18874651df09679ef805c72257b996dec9a13", "content_id": "343e86ce15e0f3cd150f5a663caea8fd8e17ed1a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 91, "license_type": "no_license", "max_line_length": 72, "num_lines": 2, "path": "/main.sh", "repo_name": "dongxinb/TigerInformation", "src_encoding": "UTF-8", "text": "#!/usr/bin/env sh\nPYTHONPATH=\"$PYTHONPATH:$PWD/lib/python2.7/site-packages\" python news.py\n" }, { "alpha_fraction": 0.6652488708496094, "alphanum_fraction": 0.7023529410362244, "avg_line_length": 52.38164138793945, "blob_id": "490bee0ac20dcc510110d563ed97a9904c3fa6bc", "content_id": "1997b57020b5c6da93974843693ac0b598afa10a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 12215, "license_type": "no_license", "max_line_length": 56, "num_lines": 207, "path": "/list.sh", "repo_name": "dongxinb/TigerInformation", "src_encoding": "UTF-8", 
"text": "#!/bin/sh \n\nsqlite3 $1 \"insert into keyword values(null, '于广洲');\"\nsqlite3 $1 \"insert into keyword values(null, '习近平');\"\nsqlite3 $1 \"insert into keyword values(null, '马凯');\"\nsqlite3 $1 \"insert into keyword values(null, '马飚');\"\nsqlite3 $1 \"insert into keyword values(null, '马兴瑞');\"\nsqlite3 $1 \"insert into keyword values(null, '马晓天');\"\nsqlite3 $1 \"insert into keyword values(null, '王君');\"\nsqlite3 $1 \"insert into keyword values(null, '王侠');\"\nsqlite3 $1 \"insert into keyword values(null, '王珉');\"\nsqlite3 $1 \"insert into keyword values(null, '王勇');\"\nsqlite3 $1 \"insert into keyword values(null, '王晨');\"\nsqlite3 $1 \"insert into keyword values(null, '王毅');\"\nsqlite3 $1 \"insert into keyword values(null, '王三运');\"\nsqlite3 $1 \"insert into keyword values(null, '王万宾');\"\nsqlite3 $1 \"insert into keyword values(null, '王玉普');\"\nsqlite3 $1 \"insert into keyword values(null, '王正伟');\"\nsqlite3 $1 \"insert into keyword values(null, '王东明');\"\nsqlite3 $1 \"insert into keyword values(null, '王光亚');\"\nsqlite3 $1 \"insert into keyword values(null, '王伟光');\"\nsqlite3 $1 \"insert into keyword values(null, '王安顺');\"\nsqlite3 $1 \"insert into keyword values(null, '王志刚');\"\nsqlite3 $1 \"insert into keyword values(null, '王岐山');\"\nsqlite3 $1 \"insert into keyword values(null, '王沪宁');\"\nsqlite3 $1 \"insert into keyword values(null, '王国生');\"\nsqlite3 $1 \"insert into keyword values(null, '王学军');\"\nsqlite3 $1 \"insert into keyword values(null, '王建平');\"\nsqlite3 $1 \"insert into keyword values(null, '王胜俊');\"\nsqlite3 $1 \"insert into keyword values(null, '王洪尧');\"\nsqlite3 $1 \"insert into keyword values(null, '王宪魁');\"\nsqlite3 $1 \"insert into keyword values(null, '王冠中');\"\nsqlite3 $1 \"insert into keyword values(null, '王家瑞');\"\nsqlite3 $1 \"insert into keyword values(null, '王教成');\"\nsqlite3 $1 \"insert into keyword values(null, '王新宪');\"\nsqlite3 $1 \"insert into keyword values(null, '王儒林');\"\nsqlite3 $1 \"insert into keyword values(null, 
'支树平');\"\nsqlite3 $1 \"insert into keyword values(null, '尤权');\"\nsqlite3 $1 \"insert into keyword values(null, '车俊');\"\nsqlite3 $1 \"insert into keyword values(null, '尹蔚民');\"\nsqlite3 $1 \"insert into keyword values(null, '巴音朝鲁');\"\nsqlite3 $1 \"insert into keyword values(null, '巴特尔');\"\nsqlite3 $1 \"insert into keyword values(null, '卢展工');\"\nsqlite3 $1 \"insert into keyword values(null, '叶小文');\"\nsqlite3 $1 \"insert into keyword values(null, '田中');\"\nsqlite3 $1 \"insert into keyword values(null, '田修思');\"\nsqlite3 $1 \"insert into keyword values(null, '白玛赤林');\"\nsqlite3 $1 \"insert into keyword values(null, '白春礼');\"\nsqlite3 $1 \"insert into keyword values(null, '令计划');\"\nsqlite3 $1 \"insert into keyword values(null, '吉炳轩');\"\nsqlite3 $1 \"insert into keyword values(null, '朱小丹');\"\nsqlite3 $1 \"insert into keyword values(null, '朱福熙');\"\nsqlite3 $1 \"insert into keyword values(null, '全哲洙');\"\nsqlite3 $1 \"insert into keyword values(null, '刘鹏');\"\nsqlite3 $1 \"insert into keyword values(null, '刘源');\"\nsqlite3 $1 \"insert into keyword values(null, '刘鹤');\"\nsqlite3 $1 \"insert into keyword values(null, '刘云山');\"\nsqlite3 $1 \"insert into keyword values(null, '刘亚洲');\"\nsqlite3 $1 \"insert into keyword values(null, '刘成军');\"\nsqlite3 $1 \"insert into keyword values(null, '刘伟平');\"\nsqlite3 $1 \"insert into keyword values(null, '刘延东');\"\nsqlite3 $1 \"insert into keyword values(null, '刘奇葆');\"\nsqlite3 $1 \"insert into keyword values(null, '刘晓江');\"\nsqlite3 $1 \"insert into keyword values(null, '刘家义');\"\nsqlite3 $1 \"insert into keyword values(null, '刘粤军');\"\nsqlite3 $1 \"insert into keyword values(null, '刘福连');\"\nsqlite3 $1 \"insert into keyword values(null, '许达哲');\"\nsqlite3 $1 \"insert into keyword values(null, '许其亮');\"\nsqlite3 $1 \"insert into keyword values(null, '许耀元');\"\nsqlite3 $1 \"insert into keyword values(null, '孙怀山');\"\nsqlite3 $1 \"insert into keyword values(null, '孙建国');\"\nsqlite3 $1 \"insert into keyword values(null, 
'孙春兰');\"\nsqlite3 $1 \"insert into keyword values(null, '孙政才');\"\nsqlite3 $1 \"insert into keyword values(null, '孙思敬');\"\nsqlite3 $1 \"insert into keyword values(null, '苏树林');\"\nsqlite3 $1 \"insert into keyword values(null, '杜青林');\"\nsqlite3 $1 \"insert into keyword values(null, '杜金才');\"\nsqlite3 $1 \"insert into keyword values(null, '杜恒岩');\"\nsqlite3 $1 \"insert into keyword values(null, '李伟');\"\nsqlite3 $1 \"insert into keyword values(null, '李斌');\"\nsqlite3 $1 \"insert into keyword values(null, '李从军');\"\nsqlite3 $1 \"insert into keyword values(null, '李东生');\"\nsqlite3 $1 \"insert into keyword values(null, '李立国');\"\nsqlite3 $1 \"insert into keyword values(null, '李纪恒');\"\nsqlite3 $1 \"insert into keyword values(null, '李克强');\"\nsqlite3 $1 \"insert into keyword values(null, '李学勇');\"\nsqlite3 $1 \"insert into keyword values(null, '李建华');\"\nsqlite3 $1 \"insert into keyword values(null, '李建国');\"\nsqlite3 $1 \"insert into keyword values(null, '李鸿忠');\"\nsqlite3 $1 \"insert into keyword values(null, '李源潮');\"\nsqlite3 $1 \"insert into keyword values(null, '杨晶');\"\nsqlite3 $1 \"insert into keyword values(null, '杨传堂');\"\nsqlite3 $1 \"insert into keyword values(null, '杨金山');\"\nsqlite3 $1 \"insert into keyword values(null, '杨栋梁');\"\nsqlite3 $1 \"insert into keyword values(null, '杨洁篪');\"\nsqlite3 $1 \"insert into keyword values(null, '杨焕宁');\"\nsqlite3 $1 \"insert into keyword values(null, '肖钢');\"\nsqlite3 $1 \"insert into keyword values(null, '肖捷');\"\nsqlite3 $1 \"insert into keyword values(null, '吴昌德');\"\nsqlite3 $1 \"insert into keyword values(null, '吴胜利');\"\nsqlite3 $1 \"insert into keyword values(null, '吴爱英');\"\nsqlite3 $1 \"insert into keyword values(null, '吴新雄');\"\nsqlite3 $1 \"insert into keyword values(null, '何毅亭');\"\nsqlite3 $1 \"insert into keyword values(null, '冷溶');\"\nsqlite3 $1 \"insert into keyword values(null, '汪洋');\"\nsqlite3 $1 \"insert into keyword values(null, '汪永清');\"\nsqlite3 $1 \"insert into keyword values(null, 
'沈跃跃');\"\nsqlite3 $1 \"insert into keyword values(null, '沈德咏');\"\nsqlite3 $1 \"insert into keyword values(null, '宋大涵');\"\nsqlite3 $1 \"insert into keyword values(null, '宋秀岩');\"\nsqlite3 $1 \"insert into keyword values(null, '张阳');\"\nsqlite3 $1 \"insert into keyword values(null, '张茅');\"\nsqlite3 $1 \"insert into keyword values(null, '张毅');\"\nsqlite3 $1 \"insert into keyword values(null, '张又侠');\"\nsqlite3 $1 \"insert into keyword values(null, '张仕波');\"\nsqlite3 $1 \"insert into keyword values(null, '张庆伟');\"\nsqlite3 $1 \"insert into keyword values(null, '张庆黎');\"\nsqlite3 $1 \"insert into keyword values(null, '张志军');\"\nsqlite3 $1 \"insert into keyword values(null, '张国清');\"\nsqlite3 $1 \"insert into keyword values(null, '张宝顺');\"\nsqlite3 $1 \"insert into keyword values(null, '张春贤');\"\nsqlite3 $1 \"insert into keyword values(null, '张高丽');\"\nsqlite3 $1 \"insert into keyword values(null, '张海阳');\"\nsqlite3 $1 \"insert into keyword values(null, '张裔炯');\"\nsqlite3 $1 \"insert into keyword values(null, '张德江');\"\nsqlite3 $1 \"insert into keyword values(null, '陆昊');\"\nsqlite3 $1 \"insert into keyword values(null, '陈希');\"\nsqlite3 $1 \"insert into keyword values(null, '陈雷');\"\nsqlite3 $1 \"insert into keyword values(null, '陈全国');\"\nsqlite3 $1 \"insert into keyword values(null, '陈求发');\"\nsqlite3 $1 \"insert into keyword values(null, '陈宝生');\"\nsqlite3 $1 \"insert into keyword values(null, '陈政高');\"\nsqlite3 $1 \"insert into keyword values(null, '陈敏尔');\"\nsqlite3 $1 \"insert into keyword values(null, '努尔·白克力');\"\nsqlite3 $1 \"insert into keyword values(null, '苗圩');\"\nsqlite3 $1 \"insert into keyword values(null, '范长龙');\"\nsqlite3 $1 \"insert into keyword values(null, '林军');\"\nsqlite3 $1 \"insert into keyword values(null, '林左鸣');\"\nsqlite3 $1 \"insert into keyword values(null, '尚福林');\"\nsqlite3 $1 \"insert into keyword values(null, '罗志军');\"\nsqlite3 $1 \"insert into keyword values(null, '罗保铭');\"\nsqlite3 $1 \"insert into keyword values(null, 
'周济');\"\nsqlite3 $1 \"insert into keyword values(null, '周强');\"\nsqlite3 $1 \"insert into keyword values(null, '周本顺');\"\nsqlite3 $1 \"insert into keyword values(null, '周生贤');\"\nsqlite3 $1 \"insert into keyword values(null, '郑卫平');\"\nsqlite3 $1 \"insert into keyword values(null, '房峰辉');\"\nsqlite3 $1 \"insert into keyword values(null, '孟学农');\"\nsqlite3 $1 \"insert into keyword values(null, '孟建柱');\"\nsqlite3 $1 \"insert into keyword values(null, '项俊波');\"\nsqlite3 $1 \"insert into keyword values(null, '赵实');\"\nsqlite3 $1 \"insert into keyword values(null, '赵正永');\"\nsqlite3 $1 \"insert into keyword values(null, '赵乐际');\"\nsqlite3 $1 \"insert into keyword values(null, '赵克石');\"\nsqlite3 $1 \"insert into keyword values(null, '赵克志');\"\nsqlite3 $1 \"insert into keyword values(null, '赵宗岐');\"\nsqlite3 $1 \"insert into keyword values(null, '赵洪祝');\"\nsqlite3 $1 \"insert into keyword values(null, '胡泽君');\"\nsqlite3 $1 \"insert into keyword values(null, '胡春华');\"\nsqlite3 $1 \"insert into keyword values(null, '俞正声');\"\nsqlite3 $1 \"insert into keyword values(null, '姜大明');\"\nsqlite3 $1 \"insert into keyword values(null, '姜异康');\"\nsqlite3 $1 \"insert into keyword values(null, '骆惠宁');\"\nsqlite3 $1 \"insert into keyword values(null, '秦光荣');\"\nsqlite3 $1 \"insert into keyword values(null, '袁纯清');\"\nsqlite3 $1 \"insert into keyword values(null, '袁贵仁');\"\nsqlite3 $1 \"insert into keyword values(null, '耿惠昌');\"\nsqlite3 $1 \"insert into keyword values(null, '聂卫国');\"\nsqlite3 $1 \"insert into keyword values(null, '栗战书');\"\nsqlite3 $1 \"insert into keyword values(null, '贾廷安');\"\nsqlite3 $1 \"insert into keyword values(null, '夏宝龙');\"\nsqlite3 $1 \"insert into keyword values(null, '铁凝');\"\nsqlite3 $1 \"insert into keyword values(null, '徐守盛');\"\nsqlite3 $1 \"insert into keyword values(null, '徐绍史');\"\nsqlite3 $1 \"insert into keyword values(null, '徐粉林');\"\nsqlite3 $1 \"insert into keyword values(null, '高虎城');\"\nsqlite3 $1 \"insert into keyword values(null, 
'郭声琨');\"\nsqlite3 $1 \"insert into keyword values(null, '郭金龙');\"\nsqlite3 $1 \"insert into keyword values(null, '郭庚茂');\"\nsqlite3 $1 \"insert into keyword values(null, '郭树清');\"\nsqlite3 $1 \"insert into keyword values(null, '黄兴国');\"\nsqlite3 $1 \"insert into keyword values(null, '黄奇帆');\"\nsqlite3 $1 \"insert into keyword values(null, '黄树贤');\"\nsqlite3 $1 \"insert into keyword values(null, '曹建明');\"\nsqlite3 $1 \"insert into keyword values(null, '戚建国');\"\nsqlite3 $1 \"insert into keyword values(null, '常万全');\"\nsqlite3 $1 \"insert into keyword values(null, '鹿心社');\"\nsqlite3 $1 \"insert into keyword values(null, '彭勇');\"\nsqlite3 $1 \"insert into keyword values(null, '彭清华');\"\nsqlite3 $1 \"insert into keyword values(null, '蒋定之');\"\nsqlite3 $1 \"insert into keyword values(null, '蒋建国');\"\nsqlite3 $1 \"insert into keyword values(null, '蒋洁敏');\"\nsqlite3 $1 \"insert into keyword values(null, '韩正');\"\nsqlite3 $1 \"insert into keyword values(null, '韩长赋');\"\nsqlite3 $1 \"insert into keyword values(null, '焦焕成');\"\nsqlite3 $1 \"insert into keyword values(null, '谢伏瞻');\"\nsqlite3 $1 \"insert into keyword values(null, '强卫');\"\nsqlite3 $1 \"insert into keyword values(null, '楼继伟');\"\nsqlite3 $1 \"insert into keyword values(null, '解振华');\"\nsqlite3 $1 \"insert into keyword values(null, '褚益民');\"\nsqlite3 $1 \"insert into keyword values(null, '蔡武');\"\nsqlite3 $1 \"insert into keyword values(null, '蔡名照');\"\nsqlite3 $1 \"insert into keyword values(null, '蔡英挺');\"\nsqlite3 $1 \"insert into keyword values(null, '蔡赴朝');\"\nsqlite3 $1 \"insert into keyword values(null, '雒树刚');\"\nsqlite3 $1 \"insert into keyword values(null, '魏亮');\"\nsqlite3 $1 \"insert into keyword values(null, '魏凤和');\"\n" } ]
7
brett-smythe/eleanor_client
https://github.com/brett-smythe/eleanor_client
88def379f85ca4a1135bba9eb2d80cb332362e66
c613735db0318f2400eeae8650e286661be0c903
517ae110a94dcd53be67bebadc97ad154426decc
refs/heads/master
2020-05-21T13:35:28.379480
2016-11-19T21:12:18
2016-11-19T21:12:18
64,245,815
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7520278096199036, "alphanum_fraction": 0.7543452978134155, "avg_line_length": 25.96875, "blob_id": "726ec36937b6302a5681703d98cc98825e522f63", "content_id": "658300ebc24f6235b1d74e56d0d4801582492571", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 863, "license_type": "no_license", "max_line_length": 133, "num_lines": 32, "path": "/README.md", "repo_name": "brett-smythe/eleanor_client", "src_encoding": "UTF-8", "text": "# Eleanor Client\n\nClient for interacting with [Eleanor](https://github.com/brett-smythe/eleanor) service.\n\nTo see how this fits in with the other repos please see [Aquatic Services Wiki](https://github.com/brett-smythe/ansible_configs/wiki)\n\n## Install\nRecommended to install and run in a virtualenv\nThe values in eleanor_client/settings/settings.py need to be set for your instance of eleanor\n\n```\npython setup.py install\n```\n\n### External Dependencies\n* Setup and running [Eleanor](https://github.com/brett-smythe/eleanor) service\n\n## Usage\n\n```\nfrom eleanor_client.endpoints import twitter\n\nget_tracked_twitter_users()\n>['twitter_uname_1', 'twitter_uname_2']\n\n# This adds a username to eleanor's list of tracked twitter users\ntrack_new_twitter_user('new_username_to_track')\n>\n\nget_username_last_tweet_id('last_tweet_for_uname')\n>'<last_tweet_id_for_user>'\n```\n" }, { "alpha_fraction": 0.7014651894569397, "alphanum_fraction": 0.7124541997909546, "avg_line_length": 31, "blob_id": "89b8fce7595657de8a84aea6665dd787e6277461", "content_id": "b412bbbf621a4f8f99f0107b32c1ca196d4e9450", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 546, "license_type": "no_license", "max_line_length": 73, "num_lines": 17, "path": "/eleanor_client/settings/settings.py", "repo_name": "brett-smythe/eleanor_client", "src_encoding": "UTF-8", "text": "\"\"\"Settings for eleanor_client\"\"\"\nimport os\nimport 
ConfigParser\n\neleanor_url = ''\neleanor_port = 5000\n\npath_to_here = os.path.abspath(__file__)\nlocal_settings_path = \"{0}/{1}\".format(path_to_here, 'local_settings.py')\nif os.path.isfile(local_settings_path):\n import local_settings\n eleanor_url = local_settings.eleanor_url\n eleanor_port = local_settings.eleanor_port\nelse:\n config = ConfigParser.ConfigParser()\n config.read('/etc/opt/aquatic_services/service_locations.cfg')\n eleanor_url = config.get('Eleanor', 'ip_address')\n\n\n" }, { "alpha_fraction": 0.587501585483551, "alphanum_fraction": 0.5954846143722534, "avg_line_length": 36.28837203979492, "blob_id": "0df636766f5d7f1aa3060908ab3a7e4675221124", "content_id": "c22213edf46b71bdb1d51729e838108cacca2d6f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8017, "license_type": "no_license", "max_line_length": 78, "num_lines": 215, "path": "/tests/endpoints/test_twitter_endpoint.py", "repo_name": "brett-smythe/eleanor_client", "src_encoding": "UTF-8", "text": "\"\"\"Tests for eleanor client twitter endpoint\"\"\"\n# pylint: disable=import-error\nimport unittest\nimport json\n\nfrom datetime import datetime\n\nimport mock\n\nfrom eleanor_client.endpoints import twitter\n\n\nclass TwitterEndpointCase(unittest.TestCase):\n \"\"\"Eleanor client twitter endpoint test class\"\"\"\n # pylint: disable=too-many-public-methods\n json_headers = {'content-type': 'application/json'}\n tl_users_url = '{0}{1}'.format(\n twitter.eleanor_url, twitter.tl_users_endpoint\n )\n\n @mock.patch('eleanor_client.endpoints.twitter.requests')\n def test_get_tracked_twitter_users(self, mock_requests):\n \"\"\"Test gettting tracked twitter users from eleanor\"\"\"\n # pylint: disable=no-self-use\n twitter.get_tracked_twitter_users()\n\n mock_requests.get.assert_called_with(self.tl_users_url)\n\n @mock.patch('eleanor_client.endpoints.twitter.requests')\n def test_track_new_twitter_user(self, mock_requests):\n \"\"\"Test 
adding a new tracked twitter user to eleanor\"\"\"\n # pylint: disable=no-self-use\n test_uname = 'TestUsername'\n twitter.track_new_twitter_user(test_uname)\n\n payload = json.dumps({'twitter_usernames': [test_uname]})\n mock_requests.post.assert_called_with(\n self.tl_users_url, headers=self.json_headers, data=payload\n )\n\n @mock.patch('eleanor_client.endpoints.twitter.requests')\n def test_add_tweet_data(self, mock_requests):\n \"\"\"Test adding tweet data to eleanor\"\"\"\n # pylint: disable=no-self-use\n tweet_data = {'some': 'fake tweet data'}\n twitter.add_tweet_data(tweet_data)\n\n req_url = '{0}{1}'.format(twitter.eleanor_url, 'add-tweet-data')\n payload = json.dumps(tweet_data)\n mock_requests.post.assert_called_with(\n req_url, headers=self.json_headers, data=payload\n )\n\n @mock.patch('eleanor_client.endpoints.twitter.requests')\n def test_get_tweet_data_no_data(self, mock_requests):\n \"\"\"Test getting tweet data from eleanor with no existing tweet data\"\"\"\n class MockResponse(object):\n \"\"\"Mock return object for requests get\"\"\"\n\n def __init__(self, status_code):\n self.status_code = status_code\n\n tweet_id = '10'\n mock_requests.get.return_value = MockResponse(204)\n self.assertEqual(twitter.get_tweet_data(tweet_id), None)\n\n query_url = 'tweet/{0}'.format(tweet_id)\n req_url = '{0}{1}'.format(twitter.eleanor_url, query_url)\n mock_requests.get.assert_called_with(req_url)\n\n @mock.patch('eleanor_client.endpoints.twitter.requests')\n def test_get_tweet_data_has_tweet_data(self, mock_requests):\n \"\"\"Test getting tweet data from eleanor when tweet data exists\"\"\"\n class MockResponse(object):\n \"\"\"Mock return object for requests get\"\"\"\n\n def __init__(self, status_code):\n self.status_code = status_code\n\n def json(self):\n \"\"\"Return false json data\"\"\"\n # pylint: disable=no-self-use\n return 'json data'\n\n tweet_id = '10'\n mock_requests.get.return_value = MockResponse(200)\n 
self.assertEqual(twitter.get_tweet_data(tweet_id), 'json data')\n\n query_url = 'tweet/{0}'.format(tweet_id)\n req_url = '{0}{1}'.format(twitter.eleanor_url, query_url)\n mock_requests.get.assert_called_with(req_url)\n\n @mock.patch('eleanor_client.endpoints.twitter.requests')\n def test_get_username_last_tweet_id_no_last_tweet(self, mock_requests):\n \"\"\"Test getting last tweet id for a twitter user when no last tweet id\n exists\"\"\"\n class MockResponse(object):\n \"\"\"Mock return object for requests get\"\"\"\n\n def __init__(self, status_code):\n self.status_code = status_code\n\n def json(self):\n \"\"\"Return false json data\"\"\"\n # pylint: disable=no-self-use\n return {'last_tweet_id': 42}\n\n test_uname = 'MalcomReynolds'\n mock_requests.get.return_value = MockResponse(204)\n self.assertEqual(\n twitter.get_username_last_tweet_id(test_uname), None\n )\n\n query_url = 'last-tweet-id/{0}'.format(test_uname)\n req_url = '{0}{1}'.format(twitter.eleanor_url, query_url)\n mock_requests.get.assert_called_with(req_url)\n\n @mock.patch('eleanor_client.endpoints.twitter.requests')\n def test_get_username_last_tweet_id_with_last_tweet(self, mock_requests):\n \"\"\"Test getting last tweet id for a twitter user when no last tweet id\n exists\"\"\"\n class MockResponse(object):\n \"\"\"Mock return object for requests get\"\"\"\n\n def __init__(self, status_code):\n self.status_code = status_code\n\n def json(self):\n \"\"\"Return false json data\"\"\"\n # pylint: disable=no-self-use\n return {'last_tweet_id': 42}\n\n test_uname = 'MalcomReynolds'\n mock_requests.get.return_value = MockResponse(200)\n self.assertEqual(\n twitter.get_username_last_tweet_id(test_uname), 42\n )\n\n query_url = 'last-tweet-id/{0}'.format(test_uname)\n req_url = '{0}{1}'.format(twitter.eleanor_url, query_url)\n mock_requests.get.assert_called_with(req_url)\n\n @mock.patch('eleanor_client.endpoints.twitter.requests')\n def test_get_tweet_search_on_data_no_data(self, mock_requests):\n 
\"\"\"Test getting tweet data from eleanor with no data found\"\"\"\n class MockResponse(object):\n \"\"\"Mock return object for requests get\"\"\"\n\n def __init__(self, status_code):\n self.status_code = status_code\n\n def json(self):\n \"\"\"Return false json data\"\"\"\n # pylint: disable=no-self-use\n return 'json data'\n\n test_uname = 'MalcomReynolds'\n search_date = datetime(year=2016, month=01, day=01).strftime(\n '%Y-%m-%d'\n )\n search_term = 'Miranda'\n search_data = {\n 'twitter_username': test_uname,\n 'search_date': search_date,\n 'search_term': search_term\n }\n mock_requests.post.return_value = MockResponse(204)\n self.assertEqual(\n twitter.tweet_search_on_date(\n test_uname, search_date, search_term\n ),\n None\n )\n req_url = '{0}{1}'.format(twitter.eleanor_url, 'stats/tweets-on-date')\n payload = json.dumps(search_data)\n mock_requests.post.assert_called_with(\n req_url, headers=self.json_headers, data=payload\n )\n\n @mock.patch('eleanor_client.endpoints.twitter.requests')\n def test_get_tweet_search_on_data_has_data(self, mock_requests):\n \"\"\"Test getting tweet data from eleanor with no data found\"\"\"\n class MockResponse(object):\n \"\"\"Mock return object for requests get\"\"\"\n\n def __init__(self, status_code):\n self.status_code = status_code\n\n def json(self):\n \"\"\"Return false json data\"\"\"\n # pylint: disable=no-self-use\n return 'json data'\n\n test_uname = 'MalcomReynolds'\n search_date = datetime(year=2016, month=01, day=01).strftime(\n '%Y-%m-%d'\n )\n search_term = 'Miranda'\n search_data = {\n 'twitter_username': test_uname,\n 'search_date': search_date,\n 'search_term': search_term\n }\n mock_requests.post.return_value = MockResponse(200)\n self.assertEqual(\n twitter.tweet_search_on_date(\n test_uname, search_date, search_term\n ),\n 'json data'\n )\n req_url = '{0}{1}'.format(twitter.eleanor_url, 'stats/tweets-on-date')\n payload = json.dumps(search_data)\n mock_requests.post.assert_called_with(\n req_url, 
headers=self.json_headers, data=payload\n )\n" }, { "alpha_fraction": 0.6337448358535767, "alphanum_fraction": 0.6399176716804504, "avg_line_length": 21.090909957885742, "blob_id": "e0b72d700461287ce7299b3b00c7f56daae770d8", "content_id": "aca5abb50c9b47eeeaef2d1816888e30cd0b76a8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 486, "license_type": "no_license", "max_line_length": 63, "num_lines": 22, "path": "/setup.py", "repo_name": "brett-smythe/eleanor_client", "src_encoding": "UTF-8", "text": "\"\"\"Setuptools for eleanor_client\"\"\"\nfrom setuptools import setup, find_packages\n\nreqs = []\n\nwith open('requirements.txt') as inf:\n for line in inf:\n line = line.strip()\n reqs.append(line)\n\n\nsetup(\n name='eleanor-client',\n version='0.3.9',\n description='Package for interacting with eleanor service',\n author='Brett Smythe',\n author_email='smythebrett@gmail.com',\n maintainer='Brett Smythe',\n maintainer_email='smythebrett@gmail.com',\n packages=find_packages(),\n install_reqs=reqs\n)\n" }, { "alpha_fraction": 0.6494538187980652, "alphanum_fraction": 0.6577292084693909, "avg_line_length": 31.483871459960938, "blob_id": "07ad40aac4154f47261fb6c9f030eaf652d558c2", "content_id": "f6a982cd870b2a97a497d8cb20e866d5e9f60a0a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3021, "license_type": "no_license", "max_line_length": 79, "num_lines": 93, "path": "/eleanor_client/endpoints/twitter.py", "repo_name": "brett-smythe/eleanor_client", "src_encoding": "UTF-8", "text": "\"\"\"Utility for interacting with eleanor's twitter related endpoints\"\"\"\nimport json\nfrom datetime import datetime\n\nimport requests\n\nfrom eleanor_client.settings import settings\n\n\neleanor_url = 'http://{0}:{1}/'.format(\n settings.eleanor_url, settings.eleanor_port\n)\n\ntl_users_endpoint = 'twitter-tl-users'\n\n\ndef get_tracked_twitter_users():\n \"\"\"Get all 
currently tracked twitter users\"\"\"\n req_url = '{0}{1}'.format(eleanor_url, tl_users_endpoint)\n response = requests.get(req_url)\n try:\n if 'twitter_usernames' in response.json():\n tracked_users = response.json()['twitter_usernames']\n else:\n tracked_users = []\n except ValueError:\n tracked_users = []\n return tracked_users\n\n\ndef track_new_twitter_user(username):\n \"\"\"Track a new twitter user in eleanor\"\"\"\n req_url = '{0}{1}'.format(eleanor_url, tl_users_endpoint)\n headers = {'content-type': 'application/json'}\n payload = json.dumps({'twitter_usernames': [username]})\n requests.post(req_url, headers=headers, data=payload)\n\n\ndef add_tweet_data(tweet):\n \"\"\"Makes a request to eleanor adding tweet (which should be a dictionary\n formatted correctly for the request)\"\"\"\n req_url = '{0}{1}'.format(eleanor_url, 'add-tweet-data')\n headers = {'content-type': 'application/json'}\n payload = json.dumps(tweet)\n requests.post(req_url, headers=headers, data=payload)\n\n\ndef get_tweet_data(tweet_id):\n \"\"\"When given a tweet_id pull the associated tweet data from eleanor, if no\n tweet is found with the given tweet_id returns None\n \"\"\"\n query_url = 'tweet/{0}'.format(tweet_id)\n req_url = '{0}{1}'.format(eleanor_url, query_url)\n response = requests.get(req_url)\n if response.status_code == 204:\n return None\n else:\n return response.json()\n\n\ndef get_username_last_tweet_id(username):\n \"\"\"When given a username check for the latest tweet id associated with it\n if there is no tweet id associated with that username returns None\n \"\"\"\n query_url = 'last-tweet-id/{0}'.format(username)\n req_url = '{0}{1}'.format(eleanor_url, query_url)\n response = requests.get(req_url)\n if response.status_code == 204:\n return None\n else:\n return response.json()['last_tweet_id']\n\n\ndef tweet_search_on_date(username, date, search_term):\n \"\"\"When given the above search parameters pull tweet search data\n this returns the count of tweets by 
username on date that includes\n search_term\n \"\"\"\n req_url = '{0}{1}'.format(eleanor_url, 'stats/tweets-on-date')\n headers = {'content-type': 'application/json'}\n if isinstance(date, datetime):\n date = date.strftime(\"%Y-%m-%d\")\n search_data = {\n 'twitter_username': username,\n 'search_date': date,\n 'search_term': search_term\n }\n payload = json.dumps(search_data)\n response = requests.post(req_url, headers=headers, data=payload)\n if response.status_code == 204:\n return None\n else:\n return response.json()\n" } ]
5
Gfadebayo/YouTube
https://github.com/Gfadebayo/YouTube
e943589c36113e3162a0d60b28e86e49ff5aa8dd
fc5cc03f6c41bda263a129d44c7fe13cac0989d9
d61efd771ebd8db4c8feab9f5df60a43a4d397d3
refs/heads/main
2023-02-12T10:04:33.644787
2021-01-15T06:17:25
2021-01-15T06:17:25
328,272,827
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7857142686843872, "alphanum_fraction": 0.7857142686843872, "avg_line_length": 38.20000076293945, "blob_id": "8a9c0953de769ac166edf15244992163a93a41e1", "content_id": "115a6f26039bff4ca31b0a7ce97a42bcfdbaa9b3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 196, "license_type": "no_license", "max_line_length": 82, "num_lines": 5, "path": "/README.md", "repo_name": "Gfadebayo/YouTube", "src_encoding": "UTF-8", "text": "**A Simple YouTube Video Downloader Website**\n\nThis project was created using the [pytube](https://github.com/pytube/pytube) api.\n\nPlanned updates are testing deployment of the website to heroku.\n" }, { "alpha_fraction": 0.7030481696128845, "alphanum_fraction": 0.7030481696128845, "avg_line_length": 24.424999237060547, "blob_id": "9361c1e2849e61e2086bdd0e6fcc9ae34ed18bf2", "content_id": "ec38e1af1d11016d0870b6618d98619e481e57df", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1017, "license_type": "no_license", "max_line_length": 76, "num_lines": 40, "path": "/downloader/views.py", "repo_name": "Gfadebayo/YouTube", "src_encoding": "UTF-8", "text": "from django.shortcuts import render, redirect\nfrom django.http import HttpResponse\n\nfrom .wrapper import YouTubeWrapper, StreamTemplate\n\nfrom pytube import exceptions\n\n# Create your views here.\n\ndef home(request, error_message=None):\n context = {}\n\n if(error_message):\n context['message'] = error_message\n\n return render(request, 'downloader/home.html', context)\n\ndef process(request):\n link = request.POST['link']\n error_message = ''\n\n try:\n youtube = YouTubeWrapper(link)\n\n return render(request, 'downloader/done.html', {'video' : youtube})\n\n except exceptions.RegexMatchError:\n error_message='Please Enter a valid link'\n except exceptions.ExtractError:\n error_message = \"The Video you are trying to download doesn't exist\"\n except 
exceptions.VideoUnavailable:\n error_message = 'The Video is not available'\n\n return HttpResponse(error_message)\n\ndef done(request):\n return HttpResponse('Done')\n\ndef on_complete(stream, file_path):\n pass\n" }, { "alpha_fraction": 0.5227084755897522, "alphanum_fraction": 0.5466556549072266, "avg_line_length": 21.425926208496094, "blob_id": "18130d25c580e8705ce463d7c9cc75ea1724f23c", "content_id": "189106ab5bd5d018fcb7acaa12de1a7584410a5a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1211, "license_type": "no_license", "max_line_length": 73, "num_lines": 54, "path": "/downloader/wrapper.py", "repo_name": "Gfadebayo/YouTube", "src_encoding": "UTF-8", "text": "import pytube\nfrom pytube.streams import Stream\n\nclass YouTubeWrapper(pytube.YouTube):\n\n\n def duration(self):\n duration = []\n length = int(self.length)\n print(f\"Length is {length}\")\n\n if(length >= 3600):\n hours = length // 3600\n length = length - (hours * 3600)\n\n duration.append(f\"{hours} hours \")\n\n\n if(length >= 60):\n minutes = length // 60\n length = length - (minutes * 60)\n\n duration.append(f\"{minutes} minutes \")\n\n\n duration.append(f\"{length} seconds\")\n print(f\"Duration is {duration}\")\n return ''.join(duration)\n\n def streams(self):\n stream_objects = list(super().streams)\n return map(lambda stream: StreamTemplate(stream), stream_objects)\n\n\n\n\nclass StreamTemplate():\n\n def __init__(self, stream: Stream):\n\n self.size = self.resolve_size(stream.filesize)\n\n self.stream = stream\n\n def resolve_size(self, size):\n sizes = ('KB', 'MB', 'GB')\n current = 0\n\n while True:\n size = size / 1000\n if size < 1000:\n return \"{:.2f} {}\".format(size, sizes[current])\n\n current = current+1\n" }, { "alpha_fraction": 0.71875, "alphanum_fraction": 0.71875, "avg_line_length": 41.33333206176758, "blob_id": "0a77e2e41d802d2206fb5b5ac97084d0e72a7bb8", "content_id": "1cd0be9f059dc4d0f15db057e10994b227b270cb", 
"detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 128, "license_type": "no_license", "max_line_length": 64, "num_lines": 3, "path": "/downloader/static/downloader/script/home.js", "repo_name": "Gfadebayo/YouTube", "src_encoding": "UTF-8", "text": "\ndocument.getElementById('text-link').onclick = function() {\n document.getElementById('text-link').setAttribute('value', '')\n}\n" }, { "alpha_fraction": 0.6944444179534912, "alphanum_fraction": 0.6944444179534912, "avg_line_length": 21.375, "blob_id": "2dc7877ea4e66e598c9f060cc91915a507b4ec20", "content_id": "77e16ebdf1a2eb904009333273b9f6dee7609db1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 180, "license_type": "no_license", "max_line_length": 51, "num_lines": 8, "path": "/downloader/urls.py", "repo_name": "Gfadebayo/YouTube", "src_encoding": "UTF-8", "text": "\nfrom django.urls import path\nfrom . import views\n\napp_name = 'downloader'\nurlpatterns = [\npath('', views.home, name='home'),\npath('processing/', views.process, name='process'),\n]\n" }, { "alpha_fraction": 0.5, "alphanum_fraction": 0.699999988079071, "avg_line_length": 16.5, "blob_id": "ebb476d320d0d28a2f19a94935d7628621ff0565", "content_id": "7b31237d07c0e0cdc06e8b7ff85d21f080e3ba2b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 210, "license_type": "no_license", "max_line_length": 24, "num_lines": 12, "path": "/requirements.txt", "repo_name": "Gfadebayo/YouTube", "src_encoding": "UTF-8", "text": "asgiref==3.3.1\ncachetools==4.0.0\ndj-database-url==0.5.0\nDjango==3.1.3\ngunicorn==20.0.4\npsycopg2==2.8.6\npytube==10.0.0\nrequests==2.22.0\nrequests-oauthlib==1.3.0\nsqlparse==0.4.1\nurllib3==1.25.8\nwhitenoise==5.2.0\n" } ]
6
twesttt/DZ4-api
https://github.com/twesttt/DZ4-api
24c0db1cfbbe49a7758a8dbf26f1fb994fe779cf
a88ebfb8d869b474376acdafb592501cfaac2f79
d837508ea7a74cb165da6745007c0c40c3041200
refs/heads/master
2023-05-26T10:31:51.116273
2020-02-11T01:19:22
2020-02-11T01:19:22
237,731,764
0
0
null
2020-02-02T06:56:52
2020-02-11T01:19:31
2023-05-22T22:39:53
Python
[ { "alpha_fraction": 0.5261744856834412, "alphanum_fraction": 0.5261744856834412, "avg_line_length": 28.799999237060547, "blob_id": "a7be5dfdfa313c726bd3d47ad6f900424b68e17e", "content_id": "8dac480ec7d893af25e32ea598d8f715f45acb39", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2457, "license_type": "no_license", "max_line_length": 91, "num_lines": 75, "path": "/tests/test_dogs_api.py", "repo_name": "twesttt/DZ4-api", "src_encoding": "UTF-8", "text": "import pytest\nimport requests\nfrom jsonschema import validate\n\n\nschema_of_sub_breeds = {\n \"type\": \"object\",\n \"properties\": {\n \"message\": {\"type\": \"array\",\n \"items\": {\n \"type\": \"string\",\n \"examples\": [\n \"afghan\",\n \"basset\",\n \"blood\",\n \"english\",\n \"ibizan\",\n \"plott\",\n \"walker\"\n ],\n \"pattern\": \"^(.*)$\"\n }\n },\n \"status\": {\n \"type\": \"string\",\n \"examples\": [\n \"success\"\n ],\n \"pattern\": \"^(.*)$\"\n }\n }\n}\n\n\ndef test_dogs_sub_breeds_schema():\n \"\"\"Проверяем схему json списка под пород\"\"\"\n\n r = requests.get('https://dog.ceo/api/breed/hound/list')\n validate(instance=r.json(), schema=schema_of_sub_breeds)\n\n\n@pytest.mark.parametrize(\"breed\", [\"boxer\", \"cairn\", \"borzoi\"])\ndef test_single_random_img(breed):\n \"\"\"Проверяем получение одной рандомной картинки, используем смешанную параметризацию\"\"\"\n\n url = 'https://dog.ceo/api/breed/{!s}/images/random'.format(breed)\n response = requests.get(url)\n response_dict = response.json()\n assert response_dict[\"status\"] == \"success\"\n\n\ndef test_list_all_breeds():\n \"\"\"Проверяем получение списка всех пород\"\"\"\n\n r = requests.get('https://dog.ceo/api/breeds/list/all')\n print(r.headers.items())\n assert r.headers['Content-Type'] == 'application/json'\n\n\n@pytest.mark.parametrize(\"breed\", [\"boxer\", \"cairn\", \"borzoi\"])\ndef test_single_random_img(breed):\n \"\"\"Проверяем получение всех картинок 
определенной породы\"\"\"\n\n url = 'https://dog.ceo/api/breed/{!s}/images/images'.format(breed)\n response = requests.get(url)\n response_dict = response.json()\n assert response_dict[\"status\"] == \"success\"\n\n\ndef test_random_image():\n \"\"\"Проверяем получение одной рандомной картинки\"\"\"\n\n response = requests.get(\"https://dog.ceo/api/breeds/image/random\")\n response_dict = response.json()\n assert response_dict[\"status\"] == \"success\"\n" }, { "alpha_fraction": 0.6678248643875122, "alphanum_fraction": 0.6768589019775391, "avg_line_length": 30.955554962158203, "blob_id": "d2e10ab0d181b7488f03dcb5fe45aa2170a02f3f", "content_id": "04b843bcf3361ef0797a9bc55c8878a1d63bea79", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1660, "license_type": "no_license", "max_line_length": 104, "num_lines": 45, "path": "/tests/test_openbrewerydb_api.py", "repo_name": "twesttt/DZ4-api", "src_encoding": "UTF-8", "text": "import pytest\nimport requests\n\n\ndef test_list_by_city(by_city_param):\n \"\"\"Проверяем запрос по городу, с параметризацией фикстурой\"\"\"\n\n url = \"https://api.openbrewerydb.org/breweries?by_city={!s}\".format(by_city_param)\n response = requests.get(url)\n assert response.status_code == 200\n\n\n@pytest.mark.parametrize(\"city\", [\"san_diego\", \"new_york\"])\n@pytest.mark.parametrize(\"brewery_type\", [\"large\", \"brewpub\"])\ndef test_by_state_and_type(city, brewery_type):\n \"\"\"Проверяем запрос по двум фильтрам: по городу и по типу пивной\"\"\"\n\n url = \"https://api.openbrewerydb.org/breweries?by_city={!s}&by_type={!s}\".format(city, brewery_type)\n response = requests.get(url).json()\n assert len(response) > 0\n\n\ndef test_search_by_name():\n \"\"\"Проверяем поиск пивной по ключевому слову в наименовании пивной\"\"\"\n\n url = \"https://api.openbrewerydb.org/breweries?by_name=dog\"\n response = requests.get(url).json()\n for i in response:\n assert 
i[\"name\"].lower().find(\"dog\") != -1\n\n\ndef test_per_page_return():\n \"\"\"Проверяем запрос на определенное количество пивных\"\"\"\n\n url = \"https://api.openbrewerydb.org/breweries?per_page=30\"\n response = requests.get(url).json()\n assert len(response) == 30\n\n\ndef test_get_brewery():\n \"\"\"Проверяем запрос пивной по ID\"\"\"\n\n url = \"https://api.openbrewerydb.org/breweries/5494\"\n response = requests.get(url).json()\n assert response[\"name\"] == \"MadTree Brewing\"\n\n" }, { "alpha_fraction": 0.6965649127960205, "alphanum_fraction": 0.6965649127960205, "avg_line_length": 26.578947067260742, "blob_id": "f935eb1e35cbd7f200f7c602f47e02ac9340a090", "content_id": "4f556b730a99a82957d93047745810646f169d39", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 608, "license_type": "no_license", "max_line_length": 75, "num_lines": 19, "path": "/tests/test_response_code.py", "repo_name": "twesttt/DZ4-api", "src_encoding": "UTF-8", "text": "import pytest\nimport requests\n\n\n@pytest.fixture\ndef get_param(request):\n \"\"\"Получаем значения параметров в словарь\"\"\"\n\n config_param = {}\n config_param[\"url\"] = request.config.getoption(\"--url\")\n config_param[\"status_code\"] = request.config.getoption(\"--status_code\")\n return config_param\n\n\ndef test_response(get_param):\n \"\"\"Отправляем get запрос по полученному url и сверяем статусы ответа\"\"\"\n\n response = requests.get(get_param[\"url\"])\n assert response.status_code == int(get_param[\"status_code\"])\n" }, { "alpha_fraction": 0.6796714663505554, "alphanum_fraction": 0.685831606388092, "avg_line_length": 25.94444465637207, "blob_id": "bea876b25d3eeb8ac307f74d2898d263f0d4ee89", "content_id": "04ae93744ded785a4e6d0cf1c62fb57c9eca5ca2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 508, "license_type": "no_license", "max_line_length": 94, "num_lines": 18, "path": 
"/conftest.py", "repo_name": "twesttt/DZ4-api", "src_encoding": "UTF-8", "text": "import pytest\n\n\n@pytest.fixture(params=[\"san_diego\", \"new_york\"])\ndef by_city_param(request):\n return request.param\n\n\n@pytest.fixture(params=[\"comments\", \"albums\", \"posts\"])\ndef nested_resources(request):\n return request.param\n\n\ndef pytest_addoption(parser):\n \"\"\"Добавляем два параметра\"\"\"\n\n parser.addoption(\"--status_code\", action=\"store\", default=200, help=\"This is status code\")\n parser.addoption(\"--url\", action=\"store\", default=\"http://ya.ru\", help=\"This is url\")\n\n\n" }, { "alpha_fraction": 0.8212290406227112, "alphanum_fraction": 0.826815664768219, "avg_line_length": 43.75, "blob_id": "81dc02e3a9f937df764ed9408e9c521a672fb447", "content_id": "861b6b8090878f3ab7c233d6dc8a7400d2c659aa", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 281, "license_type": "no_license", "max_line_length": 92, "num_lines": 4, "path": "/README.md", "repo_name": "twesttt/DZ4-api", "src_encoding": "UTF-8", "text": "# DZ4-api\nTesting API\nЦель: Тестирование API сервиса с помощью Python используя библиотеки pytest, requests, json.\nТестирование каждого api оформлено в отдельном тестовом модуле.\n" }, { "alpha_fraction": 0.6261378526687622, "alphanum_fraction": 0.6384915709495544, "avg_line_length": 25.06779670715332, "blob_id": "20a49338f16b21850bb9f566f59a8fca8e50df81", "content_id": "b27da882a75f82a5846bd6adeea5346cec8cbce7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1717, "license_type": "no_license", "max_line_length": 85, "num_lines": 59, "path": "/tests/test_jsonplaceholder.py", "repo_name": "twesttt/DZ4-api", "src_encoding": "UTF-8", "text": "import pytest\nimport requests\n\n\ndef test_list_all_resources():\n \"\"\"Проверяем запрос на список всех ресурсов\"\"\"\n\n url = \"http://jsonplaceholder.typicode.com/posts\"\n response = 
requests.get(url)\n assert len(response.json()) == 100\n\n\ndef test_update_resource():\n \"\"\"Проверяем возможность обновить информацию\"\"\"\n\n new_data = {\n \"id\": \"1\",\n \"title\": \"hello\",\n \"body\": \"bar\",\n \"userId\": \"1\"\n }\n\n url = \"http://jsonplaceholder.typicode.com/posts/1\"\n response = requests.put(url, json=new_data)\n response_dict = response.json()\n assert response_dict[\"title\"] == \"hello\"\n\n\ndef test_create_resource():\n \"\"\"Проверяем возможность создать новый пост\"\"\"\n\n new_data = {\n \"title\": \"New post\",\n \"body\": \"bar\",\n \"userId\": \"1\"\n }\n\n url = \"http://jsonplaceholder.typicode.com/posts/\"\n response = requests.post(url, json=new_data)\n print(response.text)\n response_dict = response.json()\n assert response_dict[\"id\"] == 101\n\n\ndef test_nested_resources(nested_resources):\n \"\"\"Проверяем получение вложенного ресурса\"\"\"\n\n url = \"http://jsonplaceholder.typicode.com/posts/1/{!s}\".format(nested_resources)\n response = requests.get(url)\n assert response.status_code == 200\n\n\n@pytest.mark.parametrize(\"post\", [\"1\", \"2\"])\ndef test_delete_resources(post):\n \"\"\"Проверяем возможность удаления ресурса\"\"\"\n\n url = \"http://jsonplaceholder.typicode.com/posts/{!s}\".format(post)\n response = requests.delete(url)\n assert response.status_code == 200\n" } ]
6
AlexZevallos/TSI-SAX
https://github.com/AlexZevallos/TSI-SAX
842384e9d3811ad246c87b6c55f994f3e1e04659
929cd411908ac08f386ca67eb42304490087a2bc
53f71cdedaadb3c9c994e0cd73a69484ccbf0057
refs/heads/master
2022-10-24T17:18:48.090219
2020-06-16T13:00:15
2020-06-16T13:00:15
272,128,164
0
0
null
2020-06-14T03:47:49
2020-06-14T16:50:28
2020-06-14T16:51:40
Python
[ { "alpha_fraction": 0.6470588445663452, "alphanum_fraction": 0.6705882549285889, "avg_line_length": 7.400000095367432, "blob_id": "1e0c09e891fb88d6d934878d7dfe4f21ec97438f", "content_id": "5efae8f534062df1946ff67a00218fd7cd313e4a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 85, "license_type": "no_license", "max_line_length": 41, "num_lines": 10, "path": "/README.md", "repo_name": "AlexZevallos/TSI-SAX", "src_encoding": "UTF-8", "text": "# TSI-SAX\nTrabajo Colaborativo en GitHub Semana 11 \n\nCalderon, Ebner\n\nParedes, Anabella\n\nPizarro, Sofia\n\nZevallos, Alexander\n\n" }, { "alpha_fraction": 0.6510066986083984, "alphanum_fraction": 0.6510066986083984, "avg_line_length": 22.83333396911621, "blob_id": "07ef11b6dddaa16ffbcc43fd13830de15e2a7deb", "content_id": "fab51966194c4eeca15c283898c2d6ae354bb66d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 149, "license_type": "no_license", "max_line_length": 45, "num_lines": 6, "path": "/Factorial.py", "repo_name": "AlexZevallos/TSI-SAX", "src_encoding": "UTF-8", "text": "import os, sys \r\nos.system(\"cls\")\r\nfrom math import factorial\r\n\r\nfa=int(input(\"Ingrese un numero:\"))\r\nprint(\"Factorial(\" ,fa , \")=\" ,factorial(fa))\r\n" } ]
2
Alwaysproblem/simplecode
https://github.com/Alwaysproblem/simplecode
aea5c5a910c357263be1a071bc555e2dab20e332
b1ee0ad390c9e1193109b97b8c66351aaa6f6b8b
6d72a386a7bbf7488d569e11216d3c6c6c073674
refs/heads/master
2023-07-22T16:00:14.859769
2023-07-22T14:59:23
2023-07-22T14:59:23
165,994,898
3
0
null
null
null
null
null
[ { "alpha_fraction": 0.48672565817832947, "alphanum_fraction": 0.5233880877494812, "avg_line_length": 15.829787254333496, "blob_id": "c5021922e6c2106a5328df14172d18780b180af1", "content_id": "6c9e7d16087f98fe661909bf3b1025d4b252a5d4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 841, "license_type": "no_license", "max_line_length": 39, "num_lines": 47, "path": "/labuladong/cpp/superPow/main.cpp", "repo_name": "Alwaysproblem/simplecode", "src_encoding": "UTF-8", "text": "#include <fmt/format.h>\n#include <fmt/ranges.h>\n\n#include <iostream>\n#include <unordered_map>\n#include <vector>\n\nusing namespace std;\n\nclass Solution {\n public:\n const int base = 1337;\n int mypow(int a, int k) {\n // 对因子求模\n a %= base;\n int res = 1;\n for (int _ = 0; _ < k; _++) {\n // 这里有乘法,是潜在的溢出点\n res *= a;\n // 对乘法结果求模\n res %= base;\n }\n return res;\n }\n\n int superPow(int a, vector<int>& b) {\n if (b.empty()) return 1;\n if (a == 0) return 0;\n\n int last = b.back();\n b.pop_back();\n\n int m1 = mypow(superPow(a, b), 10);\n int m2 = mypow(a, last);\n\n return (m1 * m2) % base;\n }\n};\n\nint main() {\n int a = 2147483647;\n vector<int> b = {2, 0, 0};\n Solution sol;\n int v = sol.superPow(a, b);\n fmt::print(\"{}\\n\", v);\n return 0;\n}\n" }, { "alpha_fraction": 0.7149532437324524, "alphanum_fraction": 0.7149532437324524, "avg_line_length": 16.83333396911621, "blob_id": "3e424137bc3c6062f7c277b3e923dfabfd8de960", "content_id": "0b046cff78c6e69a7bb98fafd6a513f77d7af6b8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 214, "license_type": "no_license", "max_line_length": 78, "num_lines": 12, "path": "/SUBprocess_read_console.py", "repo_name": "Alwaysproblem/simplecode", "src_encoding": "UTF-8", "text": "from subprocess import Popen, PIPE\nimport sys\nimport os\n\na = Popen(['powershell.exe', 'ls'], stdout=PIPE, encoding=sys.stdout.encoding)\n\nb = 
a.stdout.readlines()\na.communicate()\n\nprint(''.join(b))\n\nasdvasdnvas = b\n" }, { "alpha_fraction": 0.500750720500946, "alphanum_fraction": 0.5055813193321228, "avg_line_length": 28.80350112915039, "blob_id": "50fd6f693c44fb58813581f7b33d66c1f322a1cc", "content_id": "46f7d65c2cc4aa4f4e434dc62c8bcc3710f5ff6a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 15319, "license_type": "no_license", "max_line_length": 119, "num_lines": 514, "path": "/Gpackage.py", "repo_name": "Alwaysproblem/simplecode", "src_encoding": "UTF-8", "text": "######################################################\nfrom math import inf \ndef maxSubArraySum(a, size): \n\n max_so_far = -inf - 1\n max_ending_here = 0\n\n for i in range(0, size): \n max_ending_here = max_ending_here + a[i]\n if max_so_far < max_ending_here:\n max_so_far = max_ending_here\n \n if max_ending_here < 0: \n max_ending_here = 0 \n return max_so_far\n#####################################################\nclass Stack(object):\n def __init__(self, data = None):\n if data == None:\n self.data = []\n else:\n self.data = data\n \n def push(self, data):\n self.data.append(data)\n\n def pop(self):\n return self.data.pop()\n\n @property\n def top(self):\n return self.data[-1]\n\n def isEmpty(self):\n return len(self.data) == 0\n\n def __repr__(self):\n return \"Stack(\" + ', '.join(map(str, self.data)) + \")\"\n\n def __str__(self):\n return \"Stack(\" + ', '.join(map(str, self.data)) + \")\"\n \n def __iter__(self):\n return self\n\n def __next__(self):\n if self.isEmpty():\n raise StopIteration\n else:\n return self.pop()\n \n def __contains__(self, item):\n return item in self.data\n\nclass Queue(object):\n def __init__(self, data = None):\n if data == None:\n self.data = []\n else:\n self.data = data\n\n def Enqueue(self, item):\n self.data.append(item)\n\n def Dequeue(self):\n return self.data.pop(0)\n \n def qsize(self):\n return len(self.data)\n\n def 
isEmpty(self):\n return len(self.data) == 0\n\n def __repr__(self):\n return \"Queue(\" + ', '.join(map(str, self.data)) + \")\"\n\n def __str__(self):\n return \"Queue(\" + ', '.join(map(str, self.data)) + \")\"\n\n def __iter__(self):\n return self\n\n def __next__(self):\n if self.isEmpty():\n raise StopIteration\n else:\n return self.Dequeue()\n\n def __contains__(self, item):\n return item in self.data\n\nimport bisect\nclass PriorityQueue(object):\n def __init__(self, key = lambda x: x):\n self.key = key\n self.data = []\n \n def Enqueue(self, item):\n # heappush(self.data, (self.key(item), item))\n bisect.insort(self.data, (self.key(item), item))\n\n def Dequeue(self):\n # return heappop(self.data)[1]\n return self.data.pop(0)[1]\n\n def isEmpty(self):\n return len(self.data) == 0\n\n def qsize(self):\n return len(self.data)\n\n def __repr__(self):\n return \"PriorityQueue(\" + ', '.join(map(str, [i[1] for i in self.data])) + \")\"\n\n def __iter__(self):\n return self\n\n def __next__(self):\n if self.isEmpty():\n raise StopIteration\n else:\n return self.Dequeue()\n\n def __contains__(self, item):\n return any([item == pair[1] for pair in self.data])\n\n def __getitem__(self, key):\n for _, item in self.data:\n if item == key:\n return item\n \n def __delitem__(self, key):\n for i, (_, item) in enumerate(self.data):\n if item == key:\n self.data.pop(i)\nclass Vertex():\n def __init__(self, state):\n self.state = state\n\n\nclass Node(Vertex):\n # just for the search\n def __init__(self, idx, cost = 0, prev = '', state = None):\n super().__init__(state)\n self.index = idx\n self.cost = cost\n self.prev =prev\n\n def __repr__(self):\n return \"Node(\" + str(self.index) + \")\"\n\n def __lt__(self, other):\n return self.index < other.index\n\n def __eq__(self, other):\n return self.index == other.index\n\n def __hash__(self):\n return hash(self.index)\n\n def __index__(self):\n return self.index\n\n#%%\nclass Graph():\n def __init__(self, g, nv, mode = None, 
Direct = True, optdic = {}, valuedic = {}):\n self.Direct = Direct\n self.mode = mode\n self.numV = nv\n self.mapVertices = [Vertex(state=valuedic[i] if valuedic != {} else None) for i in range(self.numV)]\n self.g= self.encode(g)\n self.opdic = optdic\n\n\n def encode(self, g):\n if self.mode == None:\n return g\n\n elif self.mode == 'Alist':\n graph = {ve : [] for ve in range(self.numV)}\n for v1, v2, weight in g:\n if self.Direct == True:\n graph[v1].append((v2, weight))\n else:\n graph[v1].append((v2, weight))\n graph[v2].append((v1, weight))\n \n return graph\n \n elif self.mode == 'Amatrix':\n graph = [[0] * self.numV for _ in range(self.numV)]\n for v1, v2, weight in g:\n if self.Direct == True:\n graph[v1][v2] = weight\n else:\n graph[v1][v2] = weight\n graph[v2][v1] = weight\n\n return graph\n\n def isCycle(self):\n pass\n\n def isadjacent(self, v1, v2):\n if self.mode == \"Amatrix\":\n return self.g[v1][v2] != 0\n else:\n return any([i[0] == v2 for i in self.g[v1]])\n\n def __repr__(self):\n s = ''\n if self.mode == \"Amatrix\":\n for i in self.g:\n s += str(i) + '\\n' \n return s\n elif self.mode == \"Alist\":\n for i in self.g:\n s += str(i) + \": \" + str(self.g[i]) + '\\n'\n return s\n else:\n raise NotImplementedError()\n\n\n#%%\nclass Search(object):\n def __init__(self, g: Graph, src:Node, dest:Node):\n self.graph = g\n self.src = Node(src, state=self.graph.mapVertices[src].state)\n self.dest = Node(dest, state=self.graph.mapVertices[dest].state)\n self.visited = set()\n self.solfound = False\n self.path = Stack()\n\n def extend(self, node: Node):\n next_nodes = []\n if self.graph.mode == 'Alist':\n for v2, weight in self.graph.g[node.index]:\n next_nodes.append(Node(v2, cost=node.cost + weight, prev=node, state=self.graph.mapVertices[v2].state))\n elif self.graph.mode == 'Amatrix':\n nex = [v for v in range(self.graph.numV) if self.graph.isadjacent(node.index, v)]\n for v2 in nex:\n weight = self.graph.g[node][v2]\n next_nodes.append(Node(v2, 
cost=node.cost + weight, prev=node, state=self.graph.mapVertices[v2].state))\n else:\n raise NotImplementedError()\n return next_nodes\n \n def search(self):\n pass\n\n def findPath(self):\n p = self.dest.prev\n self.path.push(self.dest)\n while p.prev != \"\":\n self.path.push(p)\n p = p.prev\n else:\n self.path.push(self.src)\n\n def displayPath(self):\n print(self, \"->\".join(map(str, [ve for ve in self.path])), f\"Path Cost:{self.dest.cost}\", sep=\" \")\n\n\nclass BreadthFirstSearch(Search):\n def __init__(self, g, src, dest):\n super().__init__(g, src, dest)\n self.queue = Queue()\n\n def search(self):\n # self.pathIndexTable[self.src.index] = self.src\n self.queue.Enqueue(self.src)\n \n while not self.queue.isEmpty() and self.solfound is False:\n cur = self.queue.Dequeue()\n self.visited.add(cur.index)\n\n if self.dest.index == cur.index:\n self.dest = cur\n # self.pathIndexTable[cur.index] = cur\n self.solfound = True\n else:\n for n_ in self.extend(cur):\n if n_.index not in self.visited and n_ not in self.queue:\n self.queue.Enqueue(n_)\n\n def __repr__(self):\n return \"BreadthFirstSearch: \"\n\n\nclass BestFirstSearch(BreadthFirstSearch):\n def __init__(self, g, src, dest):\n super().__init__(g, src, dest)\n self.queue = PriorityQueue(key = self.ScoreFun)\n\n def search(self):\n # self.pathIndexTable[self.src.index] = self.src\n self.queue.Enqueue(self.src)\n \n while not self.queue.isEmpty() and self.solfound is False:\n cur = self.queue.Dequeue()\n self.visited.add(cur.index)\n\n if self.dest.index == cur.index:\n self.dest = cur\n self.solfound = True\n else:\n for n_ in self.extend(cur):\n if n_.index not in self.visited and n_ not in self.queue:\n self.queue.Enqueue(n_)\n elif n_ in self.queue:\n incumbent = self.queue[n_]\n if self.ScoreFun(n_) < self.ScoreFun(incumbent):\n del self.queue[incumbent]\n self.queue.Enqueue(n_)\n\n def ScoreFun(self, src):\n raise NotImplementedError()\n\n\nclass UniformCost(BestFirstSearch):\n def __init__(self, g, 
src, dest):\n super().__init__(g, src, dest)\n # self.queue = PriorityQueue(key = self.key)\n\n def ScoreFun(self, src: Node):\n return src.cost\n\n def __repr__(self):\n return \"UniformCost: \"\n\nclass BestGreedySearch(BestFirstSearch):\n def __init__(self, g, src, dest, criterion):\n super().__init__(g, src, dest)\n self.criterion = criterion\n\n def ScoreFun(self, src: Node):\n return self.criterion(src, self.dest)\n\n def __repr__(self):\n return \"BestGreedySearch: \"\n\nclass Astar(BestFirstSearch):\n def __init__(self, g, src, dest, criterion):\n super().__init__(g, src, dest)\n self.criterion = criterion\n \n def ScoreFun(self, src: Node):\n return src.cost + self.criterion(src, self.dest)\n\n\n def __repr__(self):\n return \"Astar: \"\n\n\nclass DepthFirstSearch(Search):\n def __init__(self, g, src, dest, key = lambda x: x, order = 'min'):\n '''\n sorted key for extension. reverse is need \n '''\n super().__init__(g, src, dest)\n self.stack = Stack()\n self.key = key\n self.reverse = True if order == 'min' else False\n\n def extend(self, node: Node):\n next_nodes = []\n if self.graph.mode == 'Alist':\n for v2, weight in self.graph.g[node.index]:\n next_nodes.append(Node(v2, cost=node.cost + weight, prev=node, state=self.graph.mapVertices[v2].state))\n elif self.graph.mode == 'Amatrix':\n nex = [v for v in range(self.graph.numV) if self.graph.isadjacent(node.index, v)]\n for v2 in nex:\n weight = self.graph.g[node][v2]\n next_nodes.append(Node(v2, cost=node.cost + weight, prev=node, state=self.graph.mapVertices[v2].state))\n else:\n raise NotImplementedError()\n return sorted(next_nodes, key = self.key, reverse = self.reverse)\n\n def search(self):\n # self.pathIndexTable[self.src.index] = self.src\n self.stack.push(self.src)\n \n while not self.stack.isEmpty() and self.solfound is False:\n cur = self.stack.pop()\n self.visited.add(cur.index)\n\n if self.dest.index == cur.index:\n self.dest = cur\n # self.pathIndexTable[cur.index] = cur\n self.solfound = 
True\n else:\n for n_ in self.extend(cur):\n if n_.index not in self.visited and n_ not in self.stack:\n self.stack.push(n_)\n\n def __repr__(self):\n return \"DepthFirstSearch: \"\n\n\nclass Dijkstra(Search):\n def __init__(self, g, src, dest):\n # TODO setting up a table with the node.index\n super().__init__(g, src, dest)\n self.pathTable = [Node(i, cost = inf, state=self.graph.mapVertices[i].state) for i in range(self.graph.numV)]\n self.queue = Queue()\n # self.unvisited = set()\n\n def relax(self, parent: int, child: int, weight):\n if self.pathTable[parent].cost + weight < self.pathTable[child].cost:\n self.pathTable[child].cost = self.pathTable[parent].cost + weight\n self.pathTable[child].prev = self.pathTable[parent]\n\n def extend(self, nodeIdx: int):\n next_nodes = []\n if self.graph.mode == 'Alist':\n return self.graph.g[nodeIdx]\n\n elif self.graph.mode == 'Amatrix':\n return [(v, self.graph.g[nodeIdx][v]) for v in range(self.graph.numV) if self.graph.isadjacent(nodeIdx, v)]\n else:\n raise NotImplementedError()\n return next_nodes\n\n\n def search(self):\n self.pathTable[self.src].prev = ''\n self.pathTable[self.src].cost = 0\n self.visited.add(self.src.index)\n self.queue.Enqueue(self.src.index)\n \n while len(self.visited) < self.graph.numV and not self.queue.isEmpty():\n cur_indx = self.queue.Dequeue()\n self.visited.add(cur_indx)\n for n_, weight in self.extend(cur_indx):\n if n_ not in self.visited and n_ not in self.queue:\n self.queue.Enqueue(n_)\n self.relax(cur_indx, n_, weight)\n \n\n self.dest = self.pathTable[self.dest]\n\n def __repr__(self):\n return \"Dijkstra: \"\n#######################################################\n# O(n^2)\n# Complete the maximumSum function below.\ndef maximumSum(a, m):\n maxSum = 0\n n = len(a)\n for i in range(1, n + 1):\n tmpSum = sum(a[:i])\n if tmpSum % m >= maxSum:\n maxSum = tmpSum % m\n for j in range(0, n - i):\n tmpSum = tmpSum - a[j] + a[j + i]\n if tmpSum % m >= maxSum:\n maxSum = tmpSum % m\n 
return maxSum\n#########################################################\n# Complete the largestRectangle function below.\ndef largestRectangle(h):\n s = Stack()\n max_area = 0\n ind = 0\n\n while ind < len(h):\n if s.isEmpty() or h[s.top] <= h[ind]:\n s.push(ind)\n ind += 1\n else:\n top = s.pop()\n left = 0 if s.isEmpty() else s.top + 1\n max_area = max(max_area, (ind - left) * h[top])\n\n while not s.isEmpty():\n top = s.pop()\n left = 0 if s.isEmpty() else s.top + 1\n max_area = max(max_area, (ind - left) * h[top])\n\n return max_area\n#########################################################\n# Findallsubstring O(n^3)\ndef findallsubstring(s):\n sub = []\n for ind in range(len(s)):\n for indc in range(ind, len(s)):\n slice_str = s[ind: indc + 1]\n sub.append(slice_str)\n return sub\n############################################################\n# O(n)\n# window slider generator\ndef WindowSlider(seq, n=2):\n it = iter(seq)\n win = list((next(it, None) for _ in range(n)))\n yield win.copy()\n for e in it:\n win.pop(0)\n win.append(e)\n yield win.copy()\n##################################################################\n# O(n)\n# window slider generator sum\ndef WindowSlider(seq, n=2):\n it = iter(seq)\n win = list((next(it, None) for _ in range(n)))\n mm = sum(win)\n yield mm\n for e in it:\n tmp = win.pop(0)\n win.append(e)\n mm = mm - tmp + e\n yield mm\n##################################################################\n" }, { "alpha_fraction": 0.608428418636322, "alphanum_fraction": 0.6189640164375305, "avg_line_length": 23.7608699798584, "blob_id": "9194fde2142f76891d5b25e4a835826b86b6d19f", "content_id": "cc0e0b7878996d445589b1cf086cb0e89cc9ff4a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1139, "license_type": "no_license", "max_line_length": 88, "num_lines": 46, "path": "/labuladong/cpp/reverseBetween/main.cpp", "repo_name": "Alwaysproblem/simplecode", "src_encoding": "UTF-8", "text": "#include 
<fmt/format.h>\n#include <fmt/ranges.h>\n\n#include <iostream>\n#include <unordered_map>\n#include <vector>\n\n#include \"common_types/LinkedList/LinkedList.h\"\n\nusing namespace std;\nusing ListNode = LinkedListNode<int>;\n\nclass Solution {\n public:\n ListNode* reverseBetween(ListNode* head, int left, int right) {\n ListNode* dummyNode = new ListNode(-1);\n dummyNode->next = head;\n ListNode* pre = dummyNode;\n for (int i = 0; i < left - 1; i++) {\n pre = pre->next;\n }\n ListNode *cur = pre->next;\n ListNode *next = nullptr;\n for (int i = 0; i < right - left; i++){\n next = cur->next;\n cur->next = next->next;\n next->next = pre->next; // ! here, from second iteration the `cur` != `pre->next`.\n pre->next = next;\n }\n ListNode *ret = dummyNode->next;\n delete dummyNode;\n return ret;\n }\n};\n\nint main() {\n vector<int> v = {1, 2, 3, 4, 5};\n ListNode* head = BuildLinkedlist<int>(v);\n showLinkedList<int>(head);\n int left = 2, right = 4;\n Solution sol;\n ListNode* r = sol.reverseBetween(head, left, right);\n showLinkedList<int>(r);\n DestroyLinkedlist<int>(head);\n return 0;\n}\n" }, { "alpha_fraction": 0.5373458862304688, "alphanum_fraction": 0.5532994866371155, "avg_line_length": 20.21538543701172, "blob_id": "83f243f6d0e51147796b95ffa7f3a1233abe9235", "content_id": "5bf4903556ed69c87db755a66f331cf29c6c9859", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1391, "license_type": "no_license", "max_line_length": 57, "num_lines": 65, "path": "/leetcode/cpp/maxSubArray/main.cpp", "repo_name": "Alwaysproblem/simplecode", "src_encoding": "UTF-8", "text": "/*\n * @lc app=leetcode.cn id=53 lang=cpp\n *\n * [53] 最大子数组和\n */\n#include <fmt/format.h>\n#include <fmt/ranges.h>\n\n#include <algorithm>\n#include <iostream>\n#include <unordered_map>\n#include <vector>\n\nusing namespace std;\n\n// @lc code=start\nclass Solution {\n public:\n int maxSubArray(vector<int>& nums) {\n vector<int> dp(nums.size());\n 
dp[0] = nums[0];\n for (int i = 1; i < nums.size(); i++) {\n dp[i] = max(dp[i - 1] + nums[i], nums[i]);\n }\n return *max_element(dp.begin(), dp.end());\n }\n};\n\nclass Solution_Recur {\n public:\n struct Status {\n int lSum, rSum, mSum, iSum;\n };\n\n Status pushUp(Status l, Status r) {\n int iSum = l.iSum + r.iSum;\n int lSum = max(l.lSum, l.iSum + r.lSum);\n int rSum = max(r.rSum, r.iSum + l.rSum);\n int mSum = max(max(l.mSum, r.mSum), l.rSum + r.lSum);\n return (Status){lSum, rSum, mSum, iSum};\n };\n\n Status get(vector<int>& a, int l, int r) {\n if (l == r) {\n return (Status){a[l], a[l], a[l], a[l]};\n }\n int m = (l + r) >> 1;\n Status lSub = get(a, l, m);\n Status rSub = get(a, m + 1, r);\n return pushUp(lSub, rSub);\n }\n\n int maxSubArray(vector<int>& nums) {\n return get(nums, 0, nums.size() - 1).mSum;\n }\n};\n// @lc code=end\n\nint main() {\n Solution_Recur sol;\n vector<int> v{-2, 1, -3, 4, -1, 2, 1, -5, 4};\n int sum = sol.maxSubArray(v);\n fmt::print(\"{}\\n\", sum);\n return 0;\n}\n" }, { "alpha_fraction": 0.5546942353248596, "alphanum_fraction": 0.5610105991363525, "avg_line_length": 22.219999313354492, "blob_id": "fd5fd1896399c38533716a8c6b34e400e2653b72", "content_id": "ea34c27791078fe466f20d51ab6c1b56177b6248", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 3661, "license_type": "no_license", "max_line_length": 77, "num_lines": 150, "path": "/leetcode/cpp/findOrder/main.cpp", "repo_name": "Alwaysproblem/simplecode", "src_encoding": "UTF-8", "text": "/*\n * @lc app=leetcode.cn id=210 lang=cpp\n *\n * [210] 课程表 II\n */\n\n#include <fmt/format.h>\n#include <fmt/ranges.h>\n\n#include <iostream>\n#include <queue>\n#include <unordered_map>\n#include <unordered_set>\n#include <vector>\n\n#include \"utils/print_2d.hpp\"\n\nusing namespace std;\n\n// @lc code=start\nclass Solution {\n public:\n void buildGraph(vector<vector<int>>& graph, int numCourses,\n vector<vector<int>>& prerequisites) {\n 
graph.resize(numCourses);\n for (auto& edge : prerequisites) {\n int from = edge[1], to = edge[0];\n // 添加一条从 from 指向 to 的有向边\n // 边的方向是「被依赖」关系,即修完课程 from 才能修课程 to\n graph[from].push_back(to);\n }\n }\n void topology_order_dfs(vector<vector<int>>& graph, int s,\n vector<bool>& visited, vector<bool>& onPath,\n bool& has_cycle, vector<int>& res) {\n if (onPath[s]) {\n // 发现环\n has_cycle = true;\n return;\n }\n\n if (visited[s]) {\n // 已访问过,无需再访问\n return;\n }\n\n // 标记当前节点为已访问\n visited[s] = true;\n\n // 标记当前节点在路径上\n onPath[s] = true;\n\n // 递归访问相邻节点\n for (int node : graph[s]) {\n topology_order_dfs(graph, node, visited, onPath, has_cycle, res);\n }\n\n // 当前节点访问结束,移出路径\n onPath[s] = false;\n res.push_back(s);\n }\n\n vector<int> findOrder_DFS(int numCourses,\n vector<vector<int>>& prerequisites) {\n vector<vector<int>> graph{};\n vector<bool> visited(numCourses, false);\n vector<bool> onPath(numCourses, false);\n vector<int> res;\n bool has_cycle = false;\n\n buildGraph(graph, numCourses, prerequisites);\n // print2D(graph);\n for (int i = 0; i < numCourses && !has_cycle; i++) {\n if (!visited[i]) {\n topology_order_dfs(graph, i, visited, onPath, has_cycle, res);\n }\n }\n\n if (has_cycle) {\n return {};\n }\n\n reverse(res.begin(), res.end());\n return res;\n }\n\n vector<int> topology_order_bfs(vector<vector<int>>& graph) {\n vector<int> in_degree(graph.size(), 0);\n for (auto& neighbors : graph) {\n for (int neighbor : neighbors) {\n in_degree[neighbor]++;\n }\n }\n\n queue<int> q;\n unordered_set<int> visited;\n for (int i = 0; i < in_degree.size(); i++) {\n if (in_degree[i] == 0) {\n q.push(i);\n visited.insert(i);\n }\n }\n\n vector<int> res;\n while (!q.empty()) {\n int node = q.front();\n q.pop();\n res.push_back(node);\n for (int neighbor : graph[node]) {\n in_degree[neighbor]--;\n if (in_degree[neighbor] == 0) {\n q.push(neighbor);\n visited.insert(neighbor);\n }\n }\n }\n\n if (res.size() != graph.size()) {\n return {};\n }\n\n return res;\n }\n\n 
vector<int> findOrder_BFS(int numCourses,\n vector<vector<int>>& prerequisites) {\n vector<vector<int>> graph{};\n buildGraph(graph, numCourses, prerequisites);\n return topology_order_bfs(graph);\n }\n\n vector<int> findOrder(int numCourses, vector<vector<int>>& prerequisites) {\n const string mode = \"BFS\";\n if (mode == \"DFS\")\n return findOrder_DFS(numCourses, prerequisites);\n else\n return findOrder_BFS(numCourses, prerequisites);\n }\n};\n// @lc code=end\n\nint main() {\n // unordered_map<int, int> v{{1, 3}, {2, 4}, {3, NULL}};\n Solution sol;\n vector<vector<int>> prerequisites{};\n int numCourses = 2;\n vector<int> v = sol.findOrder(numCourses, prerequisites);\n fmt::print(\"{}\\n\", v);\n return 0;\n}\n" }, { "alpha_fraction": 0.3788461685180664, "alphanum_fraction": 0.4134615361690521, "avg_line_length": 22.636363983154297, "blob_id": "4a43313858caf05d6ccda30d4a9e4e961743b1f0", "content_id": "2e31f54150bac32e0ad5a4de6fd0114cab22e941", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 520, "license_type": "no_license", "max_line_length": 57, "num_lines": 22, "path": "/leetcode/python/50.pow-x-n.py", "repo_name": "Alwaysproblem/simplecode", "src_encoding": "UTF-8", "text": "#\n# @lc app=leetcode.cn id=50 lang=python3\n#\n# [50] Pow(x, n)\n#\n\n# @lc code=start\nclass Solution:\n def myPow(self, x: float, n: int) -> float:\n \n def Pow(X, N):\n if X == 0:\n return 0\n if N == 0:\n return 1.0\n else:\n y = Pow(X, N // 2)\n return y * y if N % 2 == 0 else y * y * X\n return Pow(x, n) if n >= 0 else 1.0 / Pow(x, -n)\n# @lc code=end\nif __name__ == \"__main__\":\n print(Solution().myPow(2, 5))\n" }, { "alpha_fraction": 0.349953830242157, "alphanum_fraction": 0.4002770185470581, "avg_line_length": 29.957143783569336, "blob_id": "73ec4dd49d842c650aa51f0a170caad247a7f72d", "content_id": "772ca82661bce9945680412cfd0f46bc34ab1d53", "detected_licenses": [], "is_generated": false, "is_vendor": false, 
"language": "Python", "length_bytes": 2190, "license_type": "no_license", "max_line_length": 82, "num_lines": 70, "path": "/leetcode/python/4.寻找两个正序数组的中位数.py", "repo_name": "Alwaysproblem/simplecode", "src_encoding": "UTF-8", "text": "#\n# @lc app=leetcode.cn id=4 lang=python3\n#\n# [4] 寻找两个正序数组的中位数\n#\nfrom typing import List\n\n# @lc code=start\nfrom math import inf\nclass Solution:\n def findMedianSortedArrays(self, nums1: List[int], nums2: List[int]) -> float:\n\n def helper(nums1, nums2):\n if len(nums1) > len(nums2):\n return helper(nums2, nums1)\n \n m = len(nums1)\n n = len(nums2)\n mid1, mid2 = 0, 0\n l, r = 0, m\n med1, med2 = 0, 0\n\n while l <= r:\n mid1 = (l + r) // 2\n mid2 = (m + n + 1) // 2 - mid1\n\n a_im1 = -inf if mid1 == 0 else nums1[mid1 - 1]\n a_i = inf if mid1 == m else nums1[mid1]\n b_jm1 = -inf if mid2 == 0 else nums2[mid2 - 1]\n b_j = inf if mid2 == n else nums2[mid2]\n\n if a_im1 < b_j:\n med1, med2 = max([a_im1, b_jm1]), min([b_j, a_i])\n l = mid1 + 1\n else:\n r = med1 - 1\n return (med1 + med2) / 2 if (m + n) % 2 == 0 else med1\n\n return helper(nums1, nums2)\n# @lc code=start\n\nif __name__ == '__main__':\n print(Solution().findMedianSortedArrays([1,2,3,4], [1,]))\n\n # def function(nums1, nums2):\n # if len(nums1) > len(nums2):\n # return function(nums2, nums1)\n \n # m = len(nums1)\n # n = len(nums2)\n # l, r = 0, m\n # med1 , med2 = 0, 0\n\n # while l <= r:\n # i = (l + r) // 2\n # j = (m + n + 1) // 2 - i\n\n # num_im1 = -inf if i == 0 else nums1[i - 1]\n # num_i = inf if i == m else nums1[i]\n # num_jm1 = -inf if j == 0 else nums2[j - 1]\n # num_j = inf if j == n else nums2[j]\n\n # if num_im1 <= num_j:\n # med1, med2 = max(num_im1, num_jm1), min(num_i, num_j)\n # l = i + 1\n # else:\n # r = i - 1\n # return (med1 + med2) / 2 if (m + n) % 2 == 0 else med1\n \n # return function(nums1, nums2)" }, { "alpha_fraction": 0.4906666576862335, "alphanum_fraction": 0.5057777762413025, "avg_line_length": 19.83333396911621, "blob_id": 
"edc6c92389d2edb61270476e661915f0694d04d0", "content_id": "cd0694a8c0257f421d3cf78e6438e2b16df933c3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1125, "license_type": "no_license", "max_line_length": 46, "num_lines": 54, "path": "/labuladong/cpp/checkInclusion/main.cpp", "repo_name": "Alwaysproblem/simplecode", "src_encoding": "UTF-8", "text": "#include <fmt/format.h>\n#include <fmt/ranges.h>\n\n#include <iostream>\n#include <unordered_map>\n#include <vector>\n\nusing namespace std;\n\nclass Solution {\n public:\n bool checkInclusion(string s1, string s2) {\n if (!s2.size()) return false;\n if (!s1.size()) return false;\n if (s1.size() > s2.size()) return false;\n unordered_map<char, int> need{}, window{};\n for (char i: s1) need[i] ++;\n int left = 0, right = 0;\n char c = 0, d = 0;\n bool res = false;\n int valid = 0;\n while (right < s2.size()){\n c = s2[right];\n right ++;\n\n if (need.count(c)){\n window[c] ++;\n if (need[c] == window[c]) valid ++;\n }\n\n fmt::print(\"({}, {})\", left, right);\n\n while (right - left >= s1.size()){\n if (valid == need.size()) return true;\n d = s2[left];\n left ++;\n if (need.count(d)){\n window[d] --;\n if (need[d] == window[d]) valid --;\n }\n }\n }\n\n return false;\n }\n};\n\nint main() {\n Solution sol;\n string s = \"abb\", t = \"eidbabooo\";\n bool v = sol.checkInclusion(s, t);\n fmt::print(\"{}\\n\", v);\n return 0;\n}\n" }, { "alpha_fraction": 0.4624277353286743, "alphanum_fraction": 0.4797687828540802, "avg_line_length": 26.485980987548828, "blob_id": "11f344ecd4206a10974258a513f72544bd44fd69", "content_id": "73b0929c921d55451a492a6827d1f7dfe3162d47", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2941, "license_type": "no_license", "max_line_length": 94, "num_lines": 107, "path": "/AGIHack/GreedSearch.py", "repo_name": "Alwaysproblem/simplecode", "src_encoding": "UTF-8", "text": "#%%[markdown]\n# 
remenber all the node that we already extend, just like this table:\n#\n# | node | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 |\n# | :-----------: | :------: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: |\n# | cost | $\\infty$ | 5 | 4 | 6 | 11 | 9 | 18 | 9 | 10 |\n# | previous node | --- | 1 | 1 | 3 | 3 | 2 | 2 | 4 | 8 |\n#\n# then find path with the recurrsive method that used in the Dijkstra.\n# the path for example is 9 -> 8 -> 4 -> 3 -> 1\n#%%\n#!/usr/bin/python\nfrom queue import PriorityQueue\n\ndef criterion(src, dest):\n # TODO manhattan distance\n xs, ys = src\n xd, yd = dest\n if src == dest:\n return 0\n else:\n return abs(xs - xd) + abs(ys - yd)\n\ndef GreedySearch(srcpos, destpos, op, ScoreFun = criterion):\n srctup = (\"\", 0, \"\", srcpos, \"\") # cost, id, previous id, pos, opration_name\n visited_set = set()\n order_extend = [srctup]\n pq = PriorityQueue()\n pq.put_nowait(srctup)\n\n SolFound = False\n\n while pq.empty() is False and SolFound is False:\n n_ = pq.get_nowait()\n _, id_, _, pos, _ = n_\n # order_extend.append(n_)\n visited_set.add(pos)\n if pos == destpos:\n SolFound = True\n else:\n #extend\n for _, opname in enumerate(op.keys(), 1):\n\n x, y = op[opname]\n npos = (pos[0] + x, pos[1] + y)\n\n if npos not in visited_set:\n nid = len(order_extend)\n ncost = ScoreFun(npos, destpos)\n nop = opname\n nprev_id = id_\n pq.put_nowait((ncost, nid, nprev_id, npos, nop))\n order_extend.append((ncost, nid, nprev_id, npos, nop))\n\n # backtrak for find path\n path_list = []\n point = n_\n prev = point[2]\n while prev != \"\":\n path_list.append(point)\n point = order_extend[prev]\n prev = point[2]\n\n # path_list.append(srctup)\n path_list.sort(key = lambda x: x[1])\n\n path = [i[-1] for i in path_list]\n\n return path\n\n#%%\ndef find(grid, src = 'm', dest = 'p'):\n srcpos = None\n destpos = None\n for row, colItem in enumerate(grid):\n if src in set(colItem):\n srcpos = (row, colItem.index(src))\n elif dest in set(colItem):\n destpos 
= (row, colItem.index(dest))\n\n return srcpos, destpos\n\n#%%\ndef displayPathtoPrincess(n, grid):\n #print all the moves here\n opdic = {\n \"up\": (-1, 0),\n \"down\": (+1, 0),\n \"left\": (0, -1),\n \"right\": (0, +1)\n }\n\n \n startPos, targetPos = find(grid)\n path = GreedySearch(startPos, targetPos, opdic)\n\n print(*path, sep='\\n')\n\n # return path\n\n\nm = int(input())\ngrid = [] \nfor i in range(0, m): \n grid.append(list(input().strip()))\n\ndisplayPathtoPrincess(m, grid)\n" }, { "alpha_fraction": 0.5481012463569641, "alphanum_fraction": 0.5683544278144836, "avg_line_length": 19.789474487304688, "blob_id": "f51c4e4f780122c8874a25f640aaee5883475123", "content_id": "f458ed444e56a1a2d4b5e057624a77b09da20376", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 804, "license_type": "no_license", "max_line_length": 69, "num_lines": 38, "path": "/leetcode/cpp/nextGreaterElement/main.cpp", "repo_name": "Alwaysproblem/simplecode", "src_encoding": "UTF-8", "text": "/*\n * @lc app=leetcode.cn id=503 lang=cpp\n *\n * [503] 下一个更大元素 II\n */\n#include <fmt/format.h>\n#include <fmt/ranges.h>\n\n#include <iostream>\n#include <stack>\n#include <unordered_map>\n#include <vector>\n\nusing namespace std;\n\n// @lc code=start\nclass Solution {\n public:\n vector<int> nextGreaterElements(vector<int>& nums) {\n vector<int> ans(nums.size());\n stack<int> s{};\n for (int i = nums.size() * 2 - 1; i >= 0; i--) {\n while (!s.empty() && s.top() <= nums[i % nums.size()]) s.pop();\n ans[i % nums.size()] = s.empty() ? 
-1 : s.top();\n s.push(nums[i % nums.size()]);\n }\n return ans;\n }\n};\n// @lc code=end\n\nint main() {\n vector<int> nums = {1, 2, 3, 4, 3};\n Solution sol;\n vector<int> v = sol.nextGreaterElements(nums);\n fmt::print(\"{}\\n\", v);\n return 0;\n}\n" }, { "alpha_fraction": 0.6093525290489197, "alphanum_fraction": 0.6093525290489197, "avg_line_length": 26.760000228881836, "blob_id": "91fb94a0971c69e8a45b28ad7bfa1fbb06d0c70b", "content_id": "20a3f67087e21170e2a32540fcc5a67928b896a1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1390, "license_type": "no_license", "max_line_length": 226, "num_lines": 50, "path": "/PythonHack/Lists.py", "repo_name": "Alwaysproblem/simplecode", "src_encoding": "UTF-8", "text": "#%%[markdown]\n\n# Consider a list (list = []). You can perform the following commands:\n# - insert i e: Insert integer at position.\n# - print: Print the list.\n# - remove e: Delete the first occurrence of integer .\n# - append e: Insert integer at the end of the list.\n# - sort: Sort the list.\n# - pop: Pop the last element from the list.\n# - reverse: Reverse the list. <br>\n#\n#\n# Initialize your list and read in the value of followed by lines of commands where each command will be of the types listed above. 
Iterate through each command in order and perform the corresponding operation on your list.\n#\n# ## Input Format\n#\n# The first line contains an integer, $n$, denoting the number of commands.\n#\n# Each line $i$ of the $n$ subsequent lines contains one of the commands scribed above.\n#\n# ## Constraints\n#\n# - The elements added to the list must be integers.\n#\n#\n# ## Output Format\n# \n# For each command of type print, print the list on a new line.\n\n#%%\nif __name__ == '__main__':\n N = int(input())\n L = list()\n\n opdic = {\n \"insert\": L.insert,\n \"remove\": L.remove,\n \"append\": L.append,\n \"sort\": L.sort,\n \"pop\": L.pop,\n \"reverse\": L.reverse\n }\n\n for _ in range(N):\n op, *num = input().split()\n num = [int(i) for i in num]\n if op == \"print\":\n print(L)\n else:\n opdic[op](*num)\n\n\n" }, { "alpha_fraction": 0.45484134554862976, "alphanum_fraction": 0.4735557436943054, "avg_line_length": 18.507936477661133, "blob_id": "ead0c0cc3d34b062ef3b4a13cb5c671f03047d2f", "content_id": "63fbcca42b79d0cafcb563018eded7946e126d0e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1229, "license_type": "no_license", "max_line_length": 49, "num_lines": 63, "path": "/COPInterview/sum_subarray.py", "repo_name": "Alwaysproblem/simplecode", "src_encoding": "UTF-8", "text": "#!/bin/python3\n\nimport math\nimport os\nimport random\nimport re\nimport sys\n\n# def subarraySum(a):\n# import bisect\n# mm,pr=0,0\n# a1=[]\n# for i in a:\n# pr=(pr+i)%m\n# mm=max(mm,pr)\n# ind=bisect.bisect_left(a1,pr+1)\n# if(ind<len(a1)):\n# mm=max(mm,pr-a1[ind]+m)\n# bisect.insort(a1,pr)\n# return mm\n\ndef subarraySum(a):\n import bisect\n mm,pr=0,0\n a1=[]\n for i in a:\n pr = pr+i\n mm = mm + pr\n ind=bisect.bisect_left(a1,pr+1)\n if ind < len(a1):\n mm = mm + pr-a1[ind] + pr + 1\n bisect.insort(a1, pr)\n return mm\n\n# # O(n^2)\n# Sum = 0\n# n = len(a)\n# for i in range(1, n + 1):\n# tmpSum = sum(a[:i])\n# Sum += 
tmpSum\n# for j in range(0, n - i):\n# tmpSum = tmpSum - a[j] + a[j + i]\n# Sum += tmpSum\n# return Sum\n\nif __name__ == '__main__':\n # fptr = open(os.environ['OUTPUT_PATH'], 'w')\n\n arr_count = int(input().strip())\n\n arr = []\n\n for _ in range(arr_count):\n arr_item = int(input().strip())\n arr.append(arr_item)\n\n result = subarraySum(arr)\n\n print(result)\n\n # fptr.write(str(result) + '\\n')\n\n # fptr.close()\n" }, { "alpha_fraction": 0.5225955843925476, "alphanum_fraction": 0.5388180613517761, "avg_line_length": 22.97222137451172, "blob_id": "68fce8748358f9eef26a70ab3dedcb9e7eead88a", "content_id": "58b8bcc7d26aefc8865f1d9ab6c3c36354715e29", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1726, "license_type": "no_license", "max_line_length": 103, "num_lines": 72, "path": "/python_Interview/getAllCombination.py", "repo_name": "Alwaysproblem/simplecode", "src_encoding": "UTF-8", "text": "import math\nimport os\nimport random\nimport re\nimport sys\n\n# give a number n, find all the combination of positive integer, the sum of which is n\n# O(2^n)\ndef increaseOrder(newN, expression, res):\n if \"+\" in expression:\n k = [d >= str(newN) for d in expression.split(\"+\")]\n if all(k) == True:\n res.append(f\"{newN}+{expression}\")\n else:\n if newN <= int(expression):\n res.append(f\"{newN}+{expression}\")\n\ndef getAllCombinationwithMemory(n, mem):\n if n < 0:\n return\n elif n == 1:\n mem[0] = [\"1\"]\n return [\"1\"]\n elif mem[n - 1] != None:\n return mem[n - 1]\n\n res = [f\"{n}\"]\n\n for i in range(1, n):\n Exp = getAllCombinationwithMemory(n - i, mem)\n\n for e in Exp:\n # if you want to ocnsider that \"2+1+1\" is different from \"1+1+2\" then uncomment these code.\n increaseOrder(i, e, res)\n # res.append(f\"{i}+{e}\")\n mem[n - 1] = res\n return res\n\ndef getAllCombination(n):\n if n < 0:\n return\n elif n == 1:\n return [\"1\"]\n\n res = [f\"{n}\"]\n\n for i in range(1, n):\n Exp = 
getAllCombination(n - i)\n\n for e in Exp:\n # if you want to ocnsider that \"2+1+1\" is different from \"1+1+2\" then uncomment these code.\n increaseOrder(i, e, res)\n # res.append(f\"{i}+{e}\")\n\n return res\n\n\n\n\nif __name__ == \"__main__\":\n n = 25\n import time\n start = time.clock()\n res = getAllCombination(n)\n end = time.clock()\n print(f\"without Memory: {-(start - end)}\")\n\n start = time.clock()\n resm = getAllCombinationwithMemory(n, [None] * n)\n end = time.clock()\n print(f\"with Memory: {-(start - end)}\")\n assert(res == resm)\n" }, { "alpha_fraction": 0.3407258093357086, "alphanum_fraction": 0.36693549156188965, "avg_line_length": 23.825000762939453, "blob_id": "c8e573f387f4c162ce413fba640e0ed17b81117c", "content_id": "8e7560afd5ea28f81ff70fb9f7f4ec055cedc9f0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 992, "license_type": "no_license", "max_line_length": 53, "num_lines": 40, "path": "/leetcode/python/whiteMatrix.py", "repo_name": "Alwaysproblem/simplecode", "src_encoding": "UTF-8", "text": "from typing import List\n\npath = [] # note format [([stock of matrix], size)]\n\ndef next_node(cur, head, rm, cm):\n r, c = cur\n return {\n \"r\": (r, min(c + 1, cm)),\n \"d\": (min(r + 1, rm), c),\n \"l\": (r, max(c - 1, 0)),\n \"u\": (max(r - 1, 0), c)\n }[head]\n\ndef is_dead_node(cur):\n \n\ndef findSquare(matrix: List[List[int]]) -> List[int]:\n for r in range(len(matrix)):\n for c in range(len(matrix[r])):\n if matrix[r][c] != 1:\n cur = (r, c + 1)\n stack = [(r, c)]\n head = \"r\"\n size = 1\n while cur != (r, c):\n if matrix[cur[0]][cur[1]] == 0:\n stack.append(cur)\n size += 1\n elif matrix[cur[0]][cur[1]] != 0:\n stack.pop()\n size -= 1\n\n\n\n\nif __name__ == '__main__':\n x = [[1,0,1],\n [0,0,1],\n [0,0,1]]\n print(findSquare(x))" }, { "alpha_fraction": 0.42729151248931885, "alphanum_fraction": 0.44658854603767395, "avg_line_length": 24.438596725463867, "blob_id": 
"86f92e40e1e2da9614e47db45d154f6e1047b177", "content_id": "8c40067271ee0ea2551411b553f2742ea175f0fd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1459, "license_type": "no_license", "max_line_length": 64, "num_lines": 57, "path": "/leetcode/python/148.排序链表.py", "repo_name": "Alwaysproblem/simplecode", "src_encoding": "UTF-8", "text": "#\n# @lc app=leetcode.cn id=148 lang=python3\n#\n# [148] 排序链表\n#\nfrom typing import *\nclass ListNode:\n def __init__(self, val=0, next=None):\n self.val = val\n self.next = next\n# @lc code=start\n# Definition for singly-linked list.\nclass Solution:\n def sortList(self, head: ListNode) -> ListNode:\n\n def sortFun(head, tail):\n if not head:\n return head\n \n if head.next == tail:\n head.next = None\n return head\n # note cut the linked list\n slow = fast = head\n\n while fast != tail:\n fast = fast.next\n slow = slow.next\n if fast != tail:\n fast = fast.next\n mid = slow\n\n return merge(sortFun(head, mid), sortFun(mid, tail))\n\n def merge(head1, head2):\n dummy = ListNode(float(\"inf\"))\n tmp, tmp1, tmp2 = dummy, head1, head2\n\n while tmp1 and tmp2:\n if tmp1.val <= tmp2.val:\n tmp.next = tmp1\n tmp1 = tmp1.next\n else:\n tmp.next = tmp2\n tmp2 = tmp2.next\n\n tmp = tmp.next\n \n if tmp1:\n tmp.next = tmp1\n elif tmp2:\n tmp.next = tmp2\n \n return dummy.next\n return sortFun(head, None)\n\n# @lc code=end\n\n" }, { "alpha_fraction": 0.4808383285999298, "alphanum_fraction": 0.4922155737876892, "avg_line_length": 21.280000686645508, "blob_id": "a33cbb6ea281c79bbe50fb71e3e0afe0310c5592", "content_id": "0911163311a145d3aa28fe3c8fb6a0cc111441ec", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1678, "license_type": "no_license", "max_line_length": 80, "num_lines": 75, "path": "/leetcode/python/92.反转链表-ii.py", "repo_name": "Alwaysproblem/simplecode", "src_encoding": "UTF-8", "text": "#\n# @lc app=leetcode.cn 
id=92 lang=python3\n#\n# [92] 反转链表 II\n#\n# from typing import List\nclass ListNode:\n def __init__(self, val=0, next=None):\n self.val = val\n self.next = next\n# @lc code=start\n# Definition for singly-linked list.\nclass Solution:\n def reverse(self, head:ListNode, tail:ListNode):\n if head is tail:\n return head\n\n cur = head\n pre = tail\n\n while cur.next is not tail:\n n = cur.next\n cur.next = pre\n pre = cur\n cur = n\n\n cur.next = pre\n\n return cur\n\n def reverseBetween(self, head: ListNode, left: int, right: int) -> ListNode:\n dummy = ListNode(-1)\n dummy.next = head\n l, r = dummy, dummy\n cur = dummy\n ind = 0\n while cur:\n if ind < left - 1:\n l = l.next\n if ind < right:\n r = r.next\n\n ind += 1\n cur = cur.next\n \n tmp = l.next\n nl = self.reverse(tmp, r.next)\n l.next = nl\n\n return dummy.next\n # return head\n# @lc code=end\n\ndef printList(head: ListNode):\n cur = head\n while cur:\n if cur.next is not None:\n print(cur.val, \"->\", end=\" \")\n else:\n print(cur.val, end=\"\\n\")\n cur = cur.next\n\nif __name__ == '__main__':\n h = ListNode(1)\n # h = ListNode(3)\n # h.next = ListNode(5)\n cur = h\n for i in range(2, 6):\n cur.next = ListNode(i)\n cur = cur.next\n printList(h)\n # hh = Solution().reverse(h, )\n # hh = Solution().reverseBetween(h, 1, 2)\n hh = Solution().reverseBetween(h, 2, 4)\n printList(hh)" }, { "alpha_fraction": 0.5527472496032715, "alphanum_fraction": 0.5736263990402222, "avg_line_length": 21.774999618530273, "blob_id": "e2a3194a6dc33cb7dd3250fb8a6ecbc8d101e371", "content_id": "251559384a2c3723d7f2b49f94542a34451f924a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 910, "license_type": "no_license", "max_line_length": 163, "num_lines": 40, "path": "/interview/SherlockandValid.py", "repo_name": "Alwaysproblem/simplecode", "src_encoding": "UTF-8", "text": "#!/bin/python3\n\nimport math\nimport os\nimport random\nimport re\nimport sys\n\n# 
https://www.hackerrank.com/challenges/sherlock-and-valid-string/problem?h_l=interview&playlist_slugs%5B%5D=interview-preparation-kit&playlist_slugs%5B%5D=strings\n\nfrom collections import Counter\n# Complete the isValid function below.\ndef isValid(s):\n hash_c = [0] * 26\n for i in s:\n hash_c[ord(i) - ord(\"a\")] += 1\n ReC = Counter(hash_c)\n del(ReC[0])\n if len(ReC) > 2:\n return \"NO\"\n elif len(ReC) == 2:\n c_keys = list(ReC.keys())\n if 1 in c_keys and ReC[1] == 1:\n return \"YES\"\n elif 1 in ReC.values() and abs(c_keys[0] - c_keys[1]) == 1:\n return \"YES\"\n else:\n return \"NO\"\n else:\n return \"YES\"\n\n\nif __name__ == '__main__':\n # fptr = open(os.environ['OUTPUT_PATH'], 'w')\n\n s = input()\n\n result = isValid(s)\n\n print(result)" }, { "alpha_fraction": 0.5795112252235413, "alphanum_fraction": 0.5858721137046814, "avg_line_length": 23.891666412353516, "blob_id": "56e1c1dc48fd6ef83757279a58d47ec7659e1d2b", "content_id": "23801c639b0e19676872f8baac4c07faa49f0669", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 2987, "license_type": "no_license", "max_line_length": 72, "num_lines": 120, "path": "/labuladong/cpp/ser_and_deser_297/main.cpp", "repo_name": "Alwaysproblem/simplecode", "src_encoding": "UTF-8", "text": "#include <fmt/format.h>\n#include <fmt/ranges.h>\n\n#include <iostream>\n#include <queue>\n#include <string>\n#include <unordered_set>\n#include <vector>\n\n#include \"common_types/TreeNode/BinaryTreeNode.h\"\n\nusing namespace std;\n// using TreeNode = BinaryTree::BinTree<int>::BinaryTreeNode ;\nusing TreeNode = BinaryTree::BinaryTreeNode<int>;\nusing BinaryTree::BuildBinaryTree;\nusing BinaryTree::showBinaryTree;\n\n/**\n * Definition for a binary tree node.\n * struct TreeNode {\n * int val;\n * TreeNode *left;\n * TreeNode *right;\n * TreeNode(int x) : val(x), left(NULL), right(NULL) {}\n * };\n */\n\nvector<string> split(string str, string delimiter = \" \") {\n vector<string> 
tokens;\n size_t start = 0, end = 0;\n while ((end = str.find(delimiter, start)) != string::npos) {\n tokens.push_back(str.substr(start, end - start));\n start = end + delimiter.length();\n }\n return tokens;\n}\n\nclass Codec {\n public:\n // Encodes a tree to a single string.\n string serialize(TreeNode *root) {\n queue<TreeNode *> q;\n\n q.push(root);\n TreeNode *cur = nullptr;\n string serialize_str = \"\";\n\n while (!q.empty()) {\n int sz = q.size();\n for (int i = 0; i < sz; i++) {\n cur = q.front();\n q.pop();\n if (cur == nullptr) {\n serialize_str += \"null, \";\n continue;\n }\n serialize_str += to_string(cur->val);\n serialize_str += \", \";\n q.push(cur->left);\n q.push(cur->right);\n }\n }\n return serialize_str;\n }\n\n // Decodes your encoded data to tree.\n TreeNode *deserialize(string data) {\n if (data == \"\") return nullptr;\n vector<string> tokens = split(data, string(\", \"));\n queue<TreeNode *> q;\n int idx = 0;\n if (tokens[idx] == \"null\") return nullptr;\n TreeNode *root = new TreeNode(stoi(tokens[idx]));\n q.push(root);\n TreeNode *cur = nullptr;\n\n while (!q.empty() && idx < tokens.size()) {\n cur = q.front();\n q.pop();\n\n if (cur == nullptr) {\n continue;\n }\n\n idx++;\n if (tokens[idx] == \"null\")\n cur->left = nullptr;\n else\n cur->left = new TreeNode(stoi(tokens[idx]));\n q.push(cur->left);\n idx++;\n if (tokens[idx] == \"null\")\n cur->right = nullptr;\n else\n cur->right = new TreeNode(stoi(tokens[idx]));\n q.push(cur->right);\n }\n return root;\n }\n};\n\n// Your Codec object will be instantiated and called as such:\n// Codec ser, deser;\n// TreeNode* ans = deser.deserialize(ser.serialize(root));\n\nint main() {\n TreeNode *root = new TreeNode(3);\n root->left = new TreeNode(9);\n root->right = new TreeNode(20);\n root->right->left = new TreeNode(15);\n root->right->right = new TreeNode(7);\n showBinaryTree<int>(root);\n Codec c;\n string s = c.serialize(root);\n fmt::print(\"The Serialized BinaryTree: {}\\n\", s);\n 
string data = \"3, 9, 20, null, null, 15, 7, null, null, null, null, \";\n TreeNode *root_d = c.deserialize(data);\n showBinaryTree<int>(root_d);\n return 0;\n}\n" }, { "alpha_fraction": 0.517405092716217, "alphanum_fraction": 0.5340189933776855, "avg_line_length": 20.066667556762695, "blob_id": "59b2bd4ccf96e1908d80be13461eb8d7044b6054", "content_id": "3b9f6fecf24703d888f60283c19b4137dac3fd30", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1274, "license_type": "no_license", "max_line_length": 69, "num_lines": 60, "path": "/leetcode/cpp/isBipartite/main.cpp", "repo_name": "Alwaysproblem/simplecode", "src_encoding": "UTF-8", "text": "/*\n * @lc app=leetcode.cn id=785 lang=cpp\n *\n * [785] 判断二分图\n */\n\n#include <fmt/format.h>\n#include <fmt/ranges.h>\n\n#include <iostream>\n#include <unordered_map>\n#include <vector>\n\nusing namespace std;\n\n// @lc code=start\nclass Solution {\n public:\n bool dfs(vector<vector<int>>& graph, vector<int>& color, int node,\n vector<bool>& visited) {\n for (int neighbor : graph[node]) {\n if (!visited[neighbor]) {\n color[neighbor] = -color[node];\n visited[neighbor] = true;\n if (!dfs(graph, color, neighbor, visited)) {\n return false;\n }\n } else if (color[neighbor] == color[node]) {\n return false;\n }\n }\n return true;\n }\n\n bool isBipartite(vector<vector<int>>& graph) {\n int n = graph.size();\n vector<int> color(n, 0);\n vector<bool> visited(n, false);\n\n for (int i = 0; i < n; ++i) {\n if (color[i] == 0 && !visited[i]) {\n color[i] = 1;\n visited[i] = true;\n if (!dfs(graph, color, i, visited)) {\n return false;\n }\n }\n }\n return true;\n }\n};\n// @lc code=end\n\nint main() {\n vector<vector<int>> graph = {{1, 2, 3}, {0, 2}, {0, 1, 3}, {0, 2}};\n Solution sol;\n auto v = sol.isBipartite(graph);\n fmt::print(\"{}\\n\", v);\n return 0;\n}\n" }, { "alpha_fraction": 0.39921337366104126, "alphanum_fraction": 0.43067845702171326, "avg_line_length": 
20.659574508666992, "blob_id": "dfb2afa546c8309bd6a604d432eb3498ffdd213f", "content_id": "848cff6036fdaf895cb9386d3700394db391065b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1017, "license_type": "no_license", "max_line_length": 53, "num_lines": 47, "path": "/leetcode/python/JumpGame.py", "repo_name": "Alwaysproblem/simplecode", "src_encoding": "UTF-8", "text": "arr = [100,-23,-23,404,100,23,23,23,3,404]\n\n# from queue import Queue\n\ndef next_node(arr, i, visited):\n r = []\n if i + 1 < len(arr):\n r.append(i + 1)\n \n if i - 1 >= 0:\n r.append(i - 1) \n \n for j, v in enumerate(arr):\n if j != i and v == arr[i]:\n r.append(j)\n \n return list(set(r).difference(visited))\n\ndef bfs(arr, init = 0, tar = len(arr) - 1):\n que = [init]\n p = {}\n solfound = False\n visited = set()\n\n while len(que) != 0 and solfound is False:\n cur = que.pop(0)\n visited.add(cur)\n\n if cur == tar:\n solfound = True\n else:\n n_cur = next_node(arr, cur, visited)\n for n in n_cur:\n if n not in que and n not in visited:\n que.append(n)\n if n not in p:\n p[n] = cur\n t = tar\n print(t, \" -> \")\n while t != init:\n try:\n t = p[t]\n except:\n pass\n print(t, \" -> \")\n\nbfs(arr)" }, { "alpha_fraction": 0.41689151525497437, "alphanum_fraction": 0.42120885848999023, "avg_line_length": 25.86231803894043, "blob_id": "b0491154c0dd8f7eae76ed6143f41d2277e96946", "content_id": "19e6d5918f844373ace581c31b20b3272c70c946", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3706, "license_type": "no_license", "max_line_length": 95, "num_lines": 138, "path": "/PythonHack/MG/MinionGameRecur.py", "repo_name": "Alwaysproblem/simplecode", "src_encoding": "UTF-8", "text": "# #%%\n# import re\n\n# def findmatch(string, substr):\n# R = []\n# for ind in range(len(string)):\n# res = re.match(substr, string[ind:])\n# if res != None:\n# R.append(res)\n# return len(R)\n\n#%%\n\n# # S = 
\"BANANA\"\n\n# # # V = \"AEIOU\"\n\n# # reg = {}\n\n# # def findallRecur(string, reg, V = \"AEIOU\"):\n# # if string == '':\n# # return\n# # elif len(string) == 1:\n# # if string not in reg.keys():\n# # reg[string] = None\n# # return\n# # else:\n# # for i in range(len(string)):\n# # h = string[:i]\n# # t = string[i:]\n# # n_ = [k for k in (h, t) if k != '' and k[0] not in V]\n\n# # if len(n_) == 0:\n# # pass\n# # elif n_[0] == string and n_[0] in reg.keys():\n# # pass\n# # else:\n# # for item in n_:\n# # if item not in reg.keys():\n# # reg[item] = None\n# # findallRecur(item, reg)\n\n# # findallRecur(S, reg)\n# # print(reg)\n# # reg_r = reg\n# #%%\n\n# S = \"BANANA\"\n# reg = {}\n\n# K = \"ANANA\"\n\n# def findallIter(string, reg, V = \"AEIOU\"):\n# for i in range(len(string)):\n# for k in range(i, len(string)):\n# if i == k :\n# if string[i] not in reg.keys() and string[i] not in V:\n# reg[string[i]] = findmatch(string, string[i])\n# else:\n# for j in range(i, k):\n# h = string[i:j]\n# t = string[j:k+1]\n# n_ = [k for k in (h, t) if k != '' and k[0] not in V]\n\n# if len(n_) == 0:\n# pass\n# elif n_[0] == string and n_[0] in reg.keys():\n# pass\n# else:\n# for item in n_:\n# if item not in reg.keys():\n# reg[item] = findmatch(string, item)\n\n# findallIter(K, reg, V = [chr(c) for c in range(ord(\"A\"), ord(\"Z\")) if chr(c) not in \"AEIOU\"])\n\n\n# print(reg)\nimport re\nfrom functools import partial\n\ndef findmatch(string, substr):\n R = []\n for ind in range(len(string)):\n res = re.match(substr, string[ind:])\n if res != None:\n R.append(res)\n return len(R)\n\ndef findallRecur(string, reg, X, V = \"AEIOU\"):\n if string == '':\n return\n elif len(string) == 1:\n if string not in reg.keys():\n reg[string] = findmatch(X, string)\n return\n else:\n for i in range(len(string)):\n h = string[:i]\n t = string[i:]\n n_ = [k for k in (h, t) if k != '' and k[0] not in V]\n\n if len(n_) == 0:\n pass\n elif n_[0] == string and n_[0] in reg.keys():\n pass\n else:\n for 
item in n_:\n if item not in reg.keys() and item[0] not in V:\n reg[item] = findmatch(X, item)\n findallRecur(item, reg, X, V=V)\n\ndef minion_game(string):\n # your code goes here\n reg_s = {}\n reg_k = {}\n findallIter = partial(findallRecur, X = string)\n\n findallIter(string, reg_s)\n findallIter(string, reg_k, V=\\\n [chr(c) for c in range(ord(\"A\"), ord(\"Z\")) if chr(c) not in \"AEIOU\"])\n\n print(f\"reg_s is {reg_s}\")\n print(f\"reg_k is {reg_k}\")\n \n sum_s = sum(reg_s.values())\n sum_k = sum(reg_k.values())\n\n if sum_k < sum_s:\n print(f\"Stuart {sum_s}\")\n elif sum_k > sum_s:\n print(f\"Kevin {sum_k}\")\n else:\n print(\"Draw\")\n\n\nif __name__ == '__main__':\n s = input()\n minion_game(s)" }, { "alpha_fraction": 0.6838046312332153, "alphanum_fraction": 0.6992287635803223, "avg_line_length": 19.473684310913086, "blob_id": "593fc4cf54a3729466154a8abc63902d963a0c88", "content_id": "72bf47b67393f781c2d4de1894f8815c09a70ddc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 389, "license_type": "no_license", "max_line_length": 47, "num_lines": 19, "path": "/labuladong/cpp/LL/main.cpp", "repo_name": "Alwaysproblem/simplecode", "src_encoding": "UTF-8", "text": "#include <fmt/format.h>\n#include <fmt/ranges.h>\n\n#include <iostream>\n#include <unordered_map>\n#include <vector>\n\n#include \"common_types/LinkedList/LinkedList.h\"\n\nusing namespace std;\nusing ListNode = LinkedListNode<int>;\n\nint main() {\n vector<int> v = {1, 2, 3, 4, 5};\n ListNode *head = BuildLinkedlist<int>(v);\n showLinkedList<int>(head);\n DestroyLinkedlist<int>(head);\n return 0;\n}\n" }, { "alpha_fraction": 0.48986124992370605, "alphanum_fraction": 0.5208110809326172, "avg_line_length": 20.05617904663086, "blob_id": "88ac5ec3ebf45a8f60edc2e565224d0fc666bdf6", "content_id": "894a38f480a5dd8ab974661f9fad2b7660df83b4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1884, 
"license_type": "no_license", "max_line_length": 73, "num_lines": 89, "path": "/leetcode/cpp/openLock/main.cpp", "repo_name": "Alwaysproblem/simplecode", "src_encoding": "UTF-8", "text": "/*\n * @lc app=leetcode.cn id=752 lang=cpp\n *\n * [752] 打开转盘锁\n */\n#include <fmt/format.h>\n#include <fmt/ranges.h>\n\n#include <iostream>\n#include <queue>\n#include <unordered_map>\n#include <unordered_set>\n#include <vector>\n\nusing namespace std;\n\n// @lc code=start\nclass Solution {\n public:\n string plusOne(string current, int j) {\n if (current[j] == '9')\n current[j] = '0';\n else\n current[j] += 1;\n return current;\n }\n\n string minusOne(string current, int j) {\n if (current[j] == '0')\n current[j] = '9';\n else\n current[j] -= 1;\n return current;\n }\n\n int BFS(string start, string target, unordered_set<string> visited) {\n queue<string> q;\n visited.insert(start);\n q.push(start);\n int step = 0;\n\n string cur = \"\";\n string p = \"\";\n string m = \"\";\n while (!q.empty()) {\n int sz = q.size();\n for (int i = 0; i < sz; ++i) {\n cur = q.front();\n q.pop();\n\n if (cur == target) {\n return step;\n }\n for (int i = 0; i <= 3; i++) {\n p = plusOne(cur, i);\n if (visited.count(p) == 0) {\n q.push(p);\n visited.insert(p);\n }\n m = minusOne(cur, i);\n if (visited.count(m) == 0) {\n q.push(m);\n visited.insert(m);\n }\n }\n }\n step++;\n }\n return -1;\n }\n\n int openLock(vector<string> &deadends, string target) {\n unordered_set<string> deadends_set(deadends.begin(), deadends.end());\n if (\"0000\" == target) return 0;\n\n if (deadends_set.count(\"0000\")) return -1;\n\n return BFS(\"0000\", target, deadends_set);\n }\n};\n// @lc code=end\n\nint main() {\n Solution s;\n vector<string> deadends{\"0201\", \"0101\", \"0102\", \"1212\", \"2002\"};\n int t = s.openLock(deadends, \"0202\");\n fmt::print(\"You need to try at least {} times to open the lock.\\n\", t);\n return 0;\n}\n" }, { "alpha_fraction": 0.41551247239112854, "alphanum_fraction": 
0.42659279704093933, "avg_line_length": 13.755102157592773, "blob_id": "c228cf1d5ec24644dd17c3d01ea3c96a49894ce2", "content_id": "ce766190a8b1164a93ed2d3bb565369ee2101dae", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 722, "license_type": "no_license", "max_line_length": 39, "num_lines": 49, "path": "/NewCoder/wangyi5.py", "repo_name": "Alwaysproblem/simplecode", "src_encoding": "UTF-8", "text": "import math\nimport os\nimport random\nimport re\nimport sys\n\n#%%\ndef splitShell(a, m):\n if m == 0:\n raise ValueError()\n Niushell = 0\n tmp = a\n while tmp > m:\n tmp = tmp - m - (tmp - m) // 10\n Niushell += m\n else:\n Niushell += tmp\n\n return Niushell\n#%%\ndef minM(n):\n l = 1\n r = n\n while l < r:\n m = (l + r) // 2\n score = splitShell(n, m)\n if score >= n // 2:\n r = m\n else:\n l = m + 1\n else:\n if l == r:\n return l\n else:\n return m\n\n\ndef main():\n # input\n n = int(input())\n\n # solution\n result = minM(n)\n\n #print\n print(result)\n\nif __name__ == \"__main__\":\n main()" }, { "alpha_fraction": 0.5160680413246155, "alphanum_fraction": 0.532136082649231, "avg_line_length": 24.80487823486328, "blob_id": "80714bc53830ed8692e394345627900049c7b193", "content_id": "8affc7ad38ead70674839a4fdf71b0d17f5b4bda", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1080, "license_type": "no_license", "max_line_length": 63, "num_lines": 41, "path": "/leetcode/python/129.求根到叶子节点数字之和.py", "repo_name": "Alwaysproblem/simplecode", "src_encoding": "UTF-8", "text": "#\n# @lc app=leetcode.cn id=129 lang=python3\n#\n# [129] 求根到叶子节点数字之和\n#\nclass TreeNode:\n def __init__(self, x):\n self.val = x\n self.left = None\n self.right = None\n\n# @lc code=start\n# Definition for a binary tree node.\n\nclass Solution:\n def sumNumbers(self, root: TreeNode) -> int:\n if root is None:\n return 0\n S = []\n def helper(root):\n if root.left is None and root.right is 
None:\n S.append(root.val)\n return\n if root.left is not None:\n root.left.val = root.val * 10 + root.left.val\n helper(root.left)\n if root.right is not None:\n root.right.val = root.val * 10 + root.right.val\n helper(root.right)\n \n helper(root)\n return sum(S)\n# @lc code=end\n\nif __name__ == \"__main__\":\n root = TreeNode(0)\n root.left=TreeNode(1)\n # root.left.left=TreeNode(5)\n # root.left.right=TreeNode(1)\n # root.right=TreeNode(0)\n print(Solution().sumNumbers(root))\n" }, { "alpha_fraction": 0.5238735675811768, "alphanum_fraction": 0.5359784960746765, "avg_line_length": 16.91566276550293, "blob_id": "60036efd102022e42d5d7f9ee41b91df7bf5883d", "content_id": "d8dd52be0ca1a566a0638133080604944b83aca6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1505, "license_type": "no_license", "max_line_length": 61, "num_lines": 83, "path": "/leetcode/cpp/equationsPossible/main.cpp", "repo_name": "Alwaysproblem/simplecode", "src_encoding": "UTF-8", "text": "/*\n * @lc app=leetcode.cn id=990 lang=cpp\n *\n * [990] 等式方程的可满足性\n */\n\n#include <fmt/format.h>\n#include <fmt/ranges.h>\n\n#include <iostream>\n#include <unordered_map>\n#include <vector>\n\nusing namespace std;\n\n// @lc code=start\nclass UnionFind {\n public:\n UnionFind() = delete;\n UnionFind(int n) : count(n) {\n parent.reserve(n);\n for (int i = 0; i < n; i++) {\n parent.emplace_back(i);\n }\n }\n\n void Union(int p, int q) {\n int rootP = find(p);\n int rootQ = find(q);\n\n if (rootP == rootQ) return;\n parent[rootQ] = rootP;\n count--;\n }\n\n bool connected(int p, int q) { return find(p) == find(q); }\n\n int find(int x) {\n if (parent[x] != x) {\n parent[x] = find(parent[x]);\n }\n return parent[x];\n }\n\n int get_count() const { return count; }\n\n private:\n int count;\n vector<int> parent;\n};\n\nclass Solution {\n public:\n bool equationsPossible(vector<string>& equations) {\n UnionFind uf(26);\n char lhs = 0, rhs = 0;\n for (string &e 
: equations){\n lhs = e.at(0);\n rhs = e.at(3);\n if (e.at(1) == '=')\n uf.Union(lhs - 'a', rhs - 'a');\n }\n\n for (string &e : equations){\n lhs = e.at(0);\n rhs = e.at(3);\n if (e.at(1) == '!')\n if (uf.connected(lhs - 'a', rhs - 'a'))\n return false;\n }\n\n return true;\n }\n};\n// @lc code=end\n\nint main() {\n vector<string> equations = {\"a==b\", \"b!=a\"};\n Solution sol;\n bool v = sol.equationsPossible(equations);\n fmt::print(\"{}\\n\", v);\n return 0;\n}\n" }, { "alpha_fraction": 0.49352750182151794, "alphanum_fraction": 0.49676376581192017, "avg_line_length": 21.10714340209961, "blob_id": "820d965187fcdd3b0750f4519bc9899a8cd3beef", "content_id": "912b81f705241bfa05aa749c0629632aeed03334", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 798, "license_type": "no_license", "max_line_length": 37, "num_lines": 28, "path": "/leetcode/cpp/note_template/bfs.cpp", "repo_name": "Alwaysproblem/simplecode", "src_encoding": "UTF-8", "text": "// 计算从起点 start 到终点 target 的最近距离\nint BFS(Node start, Node target) {\n Queue<Node> q; // 核心数据结构\n Set<Node> visited; // 避免走回头路\n\n q.offer(start); // 将起点加入队列\n visited.add(start);\n int step = 0; // 记录扩散的步数\n\n while (q not empty) {\n int sz = q.size();\n /* 将当前队列中的所有节点向四周扩散 */\n for (int i = 0; i < sz; i++) {\n Node cur = q.poll();\n /* 划重点:这里判断是否到达终点 */\n if (cur is target) return step;\n /* 将 cur 的相邻节点加入队列 */\n for (Node x : cur.adj()) {\n if (x not in visited) {\n q.offer(x);\n visited.add(x);\n }\n }\n }\n /* 划重点:更新步数在这里 */\n step++;\n }\n}" }, { "alpha_fraction": 0.8085106611251831, "alphanum_fraction": 0.8085106611251831, "avg_line_length": 22.5, "blob_id": "43f86b1042a8a108d43676a39f026f0105979031", "content_id": "03f832500d37832c97674d9fc28bfefd084acac4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 47, "license_type": "no_license", "max_line_length": 33, "num_lines": 2, "path": "/README.md", 
"repo_name": "Alwaysproblem/simplecode", "src_encoding": "UTF-8", "text": "# simplecode\nthe simple answer for coding test\n" }, { "alpha_fraction": 0.47123289108276367, "alphanum_fraction": 0.5068492889404297, "avg_line_length": 25.10714340209961, "blob_id": "601d6fcc4becb53dd3481ea32cb0c7d7ef0f2b6d", "content_id": "a4814916040a9e675df1b2da811b0b9b47c57011", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 744, "license_type": "no_license", "max_line_length": 98, "num_lines": 28, "path": "/leetcode/python/152.乘积最大子数组.py", "repo_name": "Alwaysproblem/simplecode", "src_encoding": "UTF-8", "text": "#\n# @lc app=leetcode.cn id=152 lang=python3\n#\n# [152] 乘积最大子数组\n#\nfrom typing import *\n# @lc code=start\nclass Solution:\n def maxProduct(self, nums: List[int]) -> int:\n if len(nums) == 1:\n return nums[0]\n\n max_dp = [0] * (len(nums) + 1)\n max_dp[0] = nums[0]\n min_dp = [0] * (len(nums) + 1)\n min_dp[0] = nums[0]\n\n for ind in range(1, len(nums)):\n max_dp[ind] = max(max_dp[ind - 1] * nums[ind], min_dp[ind - 1] * nums[ind], nums[ind])\n min_dp[ind] = min(max_dp[ind - 1] * nums[ind], min_dp[ind - 1] * nums[ind], nums[ind])\n\n ans = max(max_dp)\n return ans\n\n# @lc code=end\n\nif __name__ == \"__main__\":\n print(Solution().maxProduct([2,3,-2,4]))" }, { "alpha_fraction": 0.52073734998703, "alphanum_fraction": 0.5345622301101685, "avg_line_length": 13.466666221618652, "blob_id": "8a7d928be3f825b6bc4675166eb2e44af7217e41", "content_id": "bcaf072543af2acfbc951d202770a2707153f5d3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 217, "license_type": "no_license", "max_line_length": 38, "num_lines": 15, "path": "/COPInterview/liulishuo.py", "repo_name": "Alwaysproblem/simplecode", "src_encoding": "UTF-8", "text": "# find the directed cycle in the graph\na = eval(input())\nprint(a)\n\nsize = 0\n\ndef is_adj(g, a, b):\n return g[a][b]\n\ndef extend\n\ndef 
find(a):\n for i in range(len(a)):\n if a[i][i] == 1:\n return 1\n" }, { "alpha_fraction": 0.39140811562538147, "alphanum_fraction": 0.40572792291641235, "avg_line_length": 19.899999618530273, "blob_id": "7f3f37d8d121a5d67c51845897a4a06845c100c4", "content_id": "b4220734672b17da74349e62d519f13ed8047933", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 419, "license_type": "no_license", "max_line_length": 34, "num_lines": 20, "path": "/leetcode/python/generateParenthesis.py", "repo_name": "Alwaysproblem/simplecode", "src_encoding": "UTF-8", "text": "\nres = []\ndef generateParenthesis(n: int):\n \n def backtrack(l, r, S):\n if len(S) == 2 * n:\n res.append(\"\".join(S))\n return\n if l < n:\n S.append(\"(\")\n backtrack(l + 1, r, S)\n S.pop()\n if r < l:\n S.append(\")\")\n backtrack(l, r + 1, S)\n S.pop()\n\n backtrack(0,0,[])\n\ngenerateParenthesis(2, res)\nprint(res)\n" }, { "alpha_fraction": 0.4843243360519409, "alphanum_fraction": 0.5081080794334412, "avg_line_length": 20.022727966308594, "blob_id": "e7727dac173d09f2fc12a06a9557bffe36957ad2", "content_id": "b98669f7e8a06229864913a2d6ea1e8776d3e9cd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 933, "license_type": "no_license", "max_line_length": 65, "num_lines": 44, "path": "/leetcode/cpp/superEggDrop/main.cpp", "repo_name": "Alwaysproblem/simplecode", "src_encoding": "UTF-8", "text": "/*\n * @lc app=leetcode.cn id=887 lang=cpp\n *\n * [887] 鸡蛋掉落\n */\n#include <fmt/format.h>\n#include <fmt/ranges.h>\n\n#include <iostream>\n#include <unordered_map>\n#include <vector>\n\nusing namespace std;\n\n// @lc code=start\nclass Solution {\n public:\n int superEggDrop(int K, int N) {\n // define a problem that you have k eggs,\n // and drop m times, at most the level you can achieve.\n vector<vector<double>> dp(K + 1, vector<double>(N + 1));\n for (int m = 1; m <= N && dp[K][m] < N; m++) {\n for (int k = 1; 
k <= K; k++) {\n dp[k][m] = dp[k][m - 1] + dp[k - 1][m - 1] + 1;\n }\n }\n // fmt::print(\"{}\\n\", fmt::join(dp.begin(), dp.end(), \"\\n\"));\n for (int i = 0; i <= N; i++) {\n if (dp[K][i] >= N) {\n return i;\n }\n }\n return 0;\n }\n};\n// @lc code=end\n\nint main() {\n int k = 10, n = 100;\n Solution sol;\n int v = sol.superEggDrop(k, n);\n fmt::print(\"{}\\n\", v);\n return 0;\n}\n" }, { "alpha_fraction": 0.4584040641784668, "alphanum_fraction": 0.4668930470943451, "avg_line_length": 19.78823471069336, "blob_id": "6d7de0dd1749249a0e6c81848fdc113be7b7d9dd", "content_id": "498eb07fd12cd30d17d81801ad780fac695efc42", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1767, "license_type": "no_license", "max_line_length": 76, "num_lines": 85, "path": "/interview/makingCandles.py", "repo_name": "Alwaysproblem/simplecode", "src_encoding": "UTF-8", "text": "#!/bin/python3\n\nimport math\nimport os\nimport random\nimport re\nimport sys\n\ndef maxNcandies(m, w, p, total):\n c = math.floor(total/ p)\n axes = (w + c - m) / 2\n\n if axes <= 0:\n a = 0\n elif axes >= c :\n a = c\n else:\n a = round(axes)\n \n b = c - a\n\n rest = total - p*c\n return a, b, rest\n\n\n# Complete the minimumPasses function below.\ndef minimumPasses(m, w, p, n):\n rest = 0\n day = 0\n Total = 0\n ans = math.inf\n while Total < n:\n Total = m * w + rest\n day += 1\n ans = min(ans, day + math.ceil((n - Total) / (m * w)))\n a, b, rest = maxNcandies(m, w, p, Total)\n m += a\n w += b\n\n return min(ans, day)\n\n\n# def minimumPasses(machine, worker, p, n): \n# if n <= p: \n# return math.ceil(n / (machine * worker))\n# curr = candy = 0 \n# ans = float('inf')\n# while candy < n:\n# if candy < p:\n# i = math.ceil((p - candy) / (machine * worker))\n# curr += i\n# candy += machine * worker * i\n# continue\n# buy,candy = divmod(candy , p) \n# total = machine + worker + buy \n# half = total // 2\n# if machine > worker : \n# machine = max(machine, half) 
\n# worker = total - machine\n# else:\n# worker = max(worker, half) \n# machine = total - worker\n# curr += 1 \n# candy += machine * worker \n# ans = min(ans, curr + math.ceil((n - candy) / (machine * worker)))\n \n# return min(ans, curr)\n\n\n\nif __name__ == '__main__':\n\n mwpn = input().split()\n\n m = int(mwpn[0])\n\n w = int(mwpn[1])\n\n p = int(mwpn[2])\n\n n = int(mwpn[3])\n\n result = minimumPasses(m, w, p, n)\n\n print(result)\n" }, { "alpha_fraction": 0.4323607385158539, "alphanum_fraction": 0.44429707527160645, "avg_line_length": 22.53125, "blob_id": "b4955a5852d3eddc16db597403146a50ad424501", "content_id": "375d9c5ecf78a0d83e1df53609db7831cc9609b9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 762, "license_type": "no_license", "max_line_length": 84, "num_lines": 32, "path": "/leetcode/python/39.组合总和.py", "repo_name": "Alwaysproblem/simplecode", "src_encoding": "UTF-8", "text": "#\n# @lc app=leetcode.cn id=39 lang=python3\n#\n# [39] 组合总和\n#\nfrom typing import *\n# @lc code=start\nclass Solution:\n def combinationSum(self, candidates: List[int], target: int) -> List[List[int]]:\n if len(candidates) == 0:\n return []\n \n n = len(candidates) - 1\n com = []\n coms = []\n\n def backtrace(com):\n\n if sum(com) == target:\n coms.append(com.copy())\n return\n if sum(com) < target:\n for i in candidates:\n if len(com) == 0 or i >= com[-1]:\n com.append(i)\n backtrace(com)\n com.pop()\n\n backtrace(com)\n\n return coms\n# @lc code=end\n\n" }, { "alpha_fraction": 0.3458608388900757, "alphanum_fraction": 0.37023869156837463, "avg_line_length": 29.765625, "blob_id": "e7c34ca8f5ab9a42c8d7b0dee5cf457f8adfd328", "content_id": "1f416ebbbf44d9318b858cc185c35b22981aa189", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1969, "license_type": "no_license", "max_line_length": 76, "num_lines": 64, "path": "/labuladong/cpp/maximalSquare/main.cpp", "repo_name": 
"Alwaysproblem/simplecode", "src_encoding": "UTF-8", "text": "#include <fmt/format.h>\n#include <fmt/ranges.h>\n\n#include <iostream>\n#include <unordered_map>\n#include <vector>\n\nusing namespace std;\n\nclass Solution {\n public:\n int maximalSquare(vector<vector<char>>& matrix) {\n int n = matrix.size();\n int m = matrix.at(0).size();\n vector<vector<int>> dp(n, vector<int>(m));\n int max_value = 0;\n\n for (int i = 0; i < m; i++) {\n dp.at(0).at(i) = matrix.at(0).at(i) - '0';\n max_value = max_value < dp.at(0).at(i) ? dp.at(0).at(i) : max_value;\n }\n for (int i = 0; i < n; i++) {\n dp.at(i).at(0) = matrix.at(i).at(0) - '0';\n max_value = max_value < dp.at(i).at(0) ? dp.at(i).at(0) : max_value;\n }\n\n for (int col = 1; col < n; col++) {\n for (int row = 1; row < m; row++) {\n if (matrix.at(col).at(row) - '0') {\n dp.at(col).at(row) =\n min(min(dp.at(col - 1).at(row - 1), dp.at(col).at(row - 1)),\n dp.at(col - 1).at(row)) +\n 1;\n } else {\n dp.at(col).at(row) = 0;\n }\n max_value =\n max_value < dp.at(col).at(row) ? 
dp.at(col).at(row) : max_value;\n }\n }\n\n fmt::print(\"{}\\n\", fmt::join(dp.begin(), dp.end(), \"\\n\"));\n return max_value * max_value;\n }\n};\n\nint main() {\n std::vector<vector<char>> matrix = {{\n '0',\n '1',\n },\n {\n '1',\n '0',\n }};\n // std::vector<vector<char>> matrix = {{'1', '0', '1', '0', '0'},\n // {'1', '0', '1', '1', '1'},\n // {'1', '1', '1', '1', '1'},\n // {'1', '0', '0', '1', '0'}};\n Solution s;\n int v = s.maximalSquare(matrix);\n fmt::print(\"{}\\n\", v);\n return 0;\n}\n" }, { "alpha_fraction": 0.4612244963645935, "alphanum_fraction": 0.4612244963645935, "avg_line_length": 15.399999618530273, "blob_id": "c5ccd6f67df33ff1a960daa8474d90975d892566", "content_id": "0c178bbb375dd9ef2b863fad37fbf2c42f70cb03", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 245, "license_type": "no_license", "max_line_length": 31, "num_lines": 15, "path": "/PythonHack/recurring.py", "repo_name": "Alwaysproblem/simplecode", "src_encoding": "UTF-8", "text": "def recurring(string):\n b = set()\n for i in string:\n if i not in b:\n b.add(i)\n else:\n return i\n\ndef main():\n a = input(\"please string:\")\n print(recurring(a))\n\n\nif __name__ == \"__main__\":\n main()" }, { "alpha_fraction": 0.8415507674217224, "alphanum_fraction": 0.8487147092819214, "avg_line_length": 28.296297073364258, "blob_id": "e13140956e54d626ab263efc73ea722d86d305fa", "content_id": "aeaf4c1bd35426fc22617a0e307dab30b05c5fd4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "CMake", "length_bytes": 2373, "license_type": "no_license", "max_line_length": 52, "num_lines": 81, "path": "/leetcode/cpp/CMakeLists.txt", "repo_name": "Alwaysproblem/simplecode", "src_encoding": "UTF-8", "text": "cmake_minimum_required(VERSION 3.0.0)\nproject(simplecode VERSION 0.1.0)\n\nfind_package(fmt CONFIG REQUIRED)\n\n# set(CMAKE_CXX_STANDARD c++17)\nset(CMAKE_CXX_FLAGS \"${CMAKE_CXX_FLAGS} 
-std=c++17\")\n\ninclude(CTest)\nenable_testing()\n\nset(CPACK_PROJECT_NAME ${PROJECT_NAME})\nset(CPACK_PROJECT_VERSION ${PROJECT_VERSION})\ninclude(CPack)\n\ninclude_directories(${CMAKE_CURRENT_SOURCE_DIR})\n\nadd_subdirectory(2_sum)\nadd_subdirectory(permutation)\nadd_subdirectory(permute_unique)\nadd_subdirectory(subsets)\nadd_subdirectory(coinChange)\nadd_subdirectory(binsearch)\nadd_subdirectory(change)\nadd_subdirectory(openLock)\nadd_subdirectory(minDepth)\nadd_subdirectory(ser_and_deser_297)\nadd_subdirectory(detectCycle)\nadd_subdirectory(LL_getKthFromEnd)\nadd_subdirectory(searchRange)\nadd_subdirectory(minWindow)\nadd_subdirectory(checkInclusion)\nadd_subdirectory(findAnagrams)\nadd_subdirectory(lengthOfLongestSubstring)\nadd_subdirectory(lengthOfLIS)\nadd_subdirectory(maxEnvelopes)\nadd_subdirectory(maxSubArray)\nadd_subdirectory(longestCommonSubsequence)\nadd_subdirectory(minDistance)\nadd_subdirectory(longestPalindromeSubseq)\nadd_subdirectory(minInsertions)\nadd_subdirectory(isMatch)\nadd_subdirectory(superEggDrop)\nadd_subdirectory(maxCoins)\nadd_subdirectory(canPartition)\nadd_subdirectory(rob)\nadd_subdirectory(rob213)\nadd_subdirectory(robbt)\nadd_subdirectory(findTargetSumWays)\nadd_subdirectory(isValidBST)\nadd_subdirectory(searchBST)\nadd_subdirectory(insertIntoBST)\nadd_subdirectory(deleteNode)\nadd_subdirectory(countNodes)\nadd_subdirectory(lowestCommonAncestor)\nadd_subdirectory(nextGreaterElement)\nadd_subdirectory(dailyTemperatures)\nadd_subdirectory(maxSlidingWindow)\nadd_subdirectory(isPalindrome)\nadd_subdirectory(isPalindrome_LL)\nadd_subdirectory(reverseBetween)\nadd_subdirectory(reverseList)\nadd_subdirectory(reverseKGroup)\nadd_subdirectory(carPooling)\nadd_subdirectory(findRepeatedDnaSequences)\nadd_subdirectory(getRandom_LL)\nadd_subdirectory(trailingZeroes)\nadd_subdirectory(preimageSizeFZF)\nadd_subdirectory(countPrimes)\nadd_subdirectory(superPow)\nadd_subdirectory(findErrorNums)\nadd_subdirectory(allPathsSourceTarget)\nadd_sub
directory(networkDelayTime)\nadd_subdirectory(strStr)\nadd_subdirectory(maximalSquare)\nadd_subdirectory(solve)\nadd_subdirectory(equationsPossible)\nadd_subdirectory(numIslands)\nadd_subdirectory(canFinish)\nadd_subdirectory(findOrder)\nadd_subdirectory(isBipartite)\n" }, { "alpha_fraction": 0.5324729681015015, "alphanum_fraction": 0.5474604368209839, "avg_line_length": 23.272727966308594, "blob_id": "56236a62df6591249f7942b9255dab5d97f37d9d", "content_id": "5eea17543e2b50e27850ebb39da9d7b00ff22b3b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 3010, "license_type": "no_license", "max_line_length": 52, "num_lines": 99, "path": "/leetcode/cpp/note_template/bisearch.cpp", "repo_name": "Alwaysproblem/simplecode", "src_encoding": "UTF-8", "text": "int binarySearch(int[] nums, int target) {\n int left = 0, right = ...;\n\n while (...) {\n int mid = left + (right - left) / 2;\n if (nums[mid] == target) {\n ...\n } else if (nums[mid] < target) {\n left = ...\n } else if (nums[mid] > target) {\n right = ...\n }\n }\n return ...;\n}\n\n// 查找\n// 因为我们初始化 right = nums.length - 1\n// 所以决定了我们的「搜索区间」是 [left, right]\n// 所以决定了 while (left <= right)\n// 同时也决定了 left = mid+1 和 right = mid-1\n// 因为我们只需找到一个 target 的索引即可\n// 所以当 nums[mid] == target 时可以立即返回\n\n// 左边界\n// 因为我们初始化 right = nums.length\n// 所以决定了我们的「搜索区间」是 [left, right)\n// 所以决定了 while (left < right)\n// 同时也决定了 left = mid + 1 和 right = mid\n// 因为我们需找到 target 的最左侧索引\n// 所以当 nums[mid] == target 时不要立即返回\n// 而要收紧右侧边界以锁定左侧边界\n\n// 右边界\n// 因为我们初始化 right = nums.length\n// 所以决定了我们的「搜索区间」是 [left, right)\n// 所以决定了 while (left < right)\n// 同时也决定了 left = mid + 1 和 right = mid\n// 因为我们需找到 target 的最右侧索引\n// 所以当 nums[mid] == target 时不要立即返回\n// 而要收紧左侧边界以锁定右侧边界\n// 又因为收紧左侧边界时必须 left = mid + 1\n// 所以最后无论返回 left 还是 right, 必须减一\n\nint binary_search(int[] nums, int target) {\n int left = 0, right = nums.length - 1;\n while (left <= right) {\n int mid = left + (right - left) / 2;\n if 
(nums[mid] < target) {\n left = mid + 1;\n } else if (nums[mid] > target) {\n right = mid - 1;\n } else if (nums[mid] == target) {\n // 直接返回\n return mid;\n }\n }\n // 直接返回\n return -1;\n}\n\nint left_bound(int[] nums, int target) {\n int left = 0, right = nums.length - 1;\n while (left <= right) {\n int mid = left + (right - left) / 2;\n if (nums[mid] < target) {\n left = mid + 1;\n } else if (nums[mid] > target) {\n right = mid - 1;\n } else if (nums[mid] == target) {\n // 别返回,锁定左侧边界\n right = mid - 1;\n }\n }\n // 判断 target 是否存在于 nums 中\n // 此时 target 比所有数都大,返回 -1\n if (left == nums.length) return -1;\n // 判断一下 nums[left] 是不是 target\n return nums[left] == target ? left : -1;\n}\n\nint right_bound(int[] nums, int target) {\n int left = 0, right = nums.length - 1;\n while (left <= right) {\n int mid = left + (right - left) / 2;\n if (nums[mid] < target) {\n left = mid + 1;\n } else if (nums[mid] > target) {\n right = mid - 1;\n } else if (nums[mid] == target) {\n // 别返回,锁定右侧边界\n left = mid + 1;\n }\n }\n // 此时 left - 1 索引越界\n if (left - 1 < 0) return -1;\n // 判断一下 nums[left] 是不是 target\n return nums[left - 1] == target ? 
(left - 1) : -1;\n}" }, { "alpha_fraction": 0.4919484555721283, "alphanum_fraction": 0.5032206177711487, "avg_line_length": 19.032258987426758, "blob_id": "51ebfda095ad5ba84a5d512021d7873980ad3195", "content_id": "2d4e897e980d684bebeb17f7705d799507ef7068", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2484, "license_type": "no_license", "max_line_length": 70, "num_lines": 124, "path": "/interview/SwapNode.py", "repo_name": "Alwaysproblem/simplecode", "src_encoding": "UTF-8", "text": "#!/bin/python3\n\nimport os\nimport sys\n\n\n#%%\nclass Queue(object):\n def __init__(self, data = None):\n if data == None:\n self.data = []\n else:\n self.data = data\n\n def Enqueue(self, item):\n self.data.append(item)\n\n def Dequeue(self):\n return self.data.pop(0)\n \n def qsize(self):\n return len(self.data)\n\n def isEmpty(self):\n return len(self.data) == 0\n\n def __repr__(self):\n return \"Queue(\" + ', '.join(map(str, self.data)) + \")\"\n\n def __str__(self):\n return \"Queue(\" + ', '.join(map(str, self.data)) + \")\"\n\n def __iter__(self):\n return self\n\n def __next__(self):\n if self.isEmpty():\n raise StopIteration\n else:\n return self.Dequeue()\n\n def __contains__(self, item):\n return item in self.data\n\n\nclass BTree():\n def __init__(self, val, left = None, right = None):\n self.val = val\n self.left = left\n self.right = right\n\n def swapNodes(self):\n self.left, self.right = self.right, self.left\n\ndef show(subtree):\n if subtree.val == -1:\n return\n else:\n show(subtree.left)\n print(subtree.val, end = ' ')\n show(subtree.right)\n\ndef InputTree(indexes):\n # Nodes = enumerate(sum(indexes, [1]), 1)\n t = BTree(1)\n q = Queue()\n q.Enqueue(t)\n IDX = iter(indexes)\n\n while not q.isEmpty():\n cur = q.Dequeue()\n if cur.val == -1:\n continue\n l, r = next(IDX)\n\n cur.left = BTree(l)\n cur.right = BTree(r)\n\n q.Enqueue(cur.left)\n q.Enqueue(cur.right)\n\n return t\n\ndef Swap(tree: BTree , 
level: int, k: int):\n if tree.val == -1:\n return\n else:\n Swap(tree.left, level + 1, k)\n Swap(tree.right, level + 1, k)\n if level % k == 0:\n tree.swapNodes()\n\ndef swapNodes(indexes, queries):\n T = InputTree(indexes)\n # show(T)\n # print()\n for q in queries:\n Swap(T, 1, q)\n show(T)\n print()\n\n\n# s = [[2, 3], [4, 5], [6, 7], [-1, -1], [-1, -1], [-1, -1], [-1, -1]]\n# q = [2]\n\n# swapNodes(s, q)\n\nif __name__ == '__main__':\n n = int(input())\n\n indexes = []\n\n for _ in range(n):\n indexes.append(list(map(int, input().rstrip().split())))\n\n queries_count = int(input())\n\n queries = []\n\n for _ in range(queries_count):\n queries_item = int(input())\n queries.append(queries_item)\n\n swapNodes(indexes, queries)\n" }, { "alpha_fraction": 0.3972652554512024, "alphanum_fraction": 0.42898815870285034, "avg_line_length": 23.596412658691406, "blob_id": "fb43e0e1ac55a269d1b7fa963f132d41c246af00", "content_id": "dfb5694ca9cd3eedd53b28fa04b5a2f6bb47278b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 5493, "license_type": "no_license", "max_line_length": 66, "num_lines": 223, "path": "/leetcode/cpp/numIslands/main.cpp", "repo_name": "Alwaysproblem/simplecode", "src_encoding": "UTF-8", "text": "/*\n * @lc app=leetcode.cn id=200 lang=cpp\n *\n * [200] 岛屿数量\n */\n\n#include <fmt/format.h>\n#include <fmt/ranges.h>\n\n#include <iostream>\n#include <unordered_map>\n#include <vector>\n\n#include \"utils/print_2d.hpp\"\n\nusing namespace std;\n\n// @lc code=start\nclass UnionFind {\n public:\n UnionFind() = delete;\n UnionFind(int n) : count(n) {\n parent.reserve(n);\n for (int i = 0; i < n; i++) {\n parent.emplace_back(i);\n }\n }\n\n void Union(int p, int q) {\n int rootP = find(p);\n int rootQ = find(q);\n\n if (rootP == rootQ) return;\n parent[rootQ] = rootP;\n count--;\n }\n\n bool connected(int p, int q) { return find(p) == find(q); }\n\n int find(int x) {\n if (parent[x] != x) {\n parent[x] = 
find(parent[x]);\n }\n return parent[x];\n }\n\n int get_count() const { return count; }\n\n vector<int> get_parent() const { return parent; }\n\n private:\n int count;\n vector<int> parent;\n};\n\nclass Solution {\n public:\n // -------------DFS----------------\n void dfs(vector<vector<char>>& grid, int i, int j,\n vector<vector<bool>>& visited) {\n int n = grid.size();\n int m = grid[0].size();\n\n if (i < 0 || j < 0 || j >= n || i >= m) return;\n if (visited[j][i]) return;\n if (grid[j][i] == '0') return;\n\n visited[j][i] = true;\n\n vector<pair<int, int>> actions = {\n {-1, 0},\n {0, -1},\n {1, 0},\n {0, 1},\n };\n\n int x = 0, y = 0;\n for (auto& p : actions) {\n x = p.first, y = p.second;\n dfs(grid, i + x, j + y, visited);\n }\n }\n\n int numIslands(vector<vector<char>>& grid) {\n int n = grid.size();\n int m = grid[0].size();\n vector<vector<bool>> visited(n, vector<bool>(m));\n int cnt = 0;\n for (int row = 0; row < n; row++) {\n for (int col = 0; col < m; col++) {\n if (!visited[row][col] && grid[row][col] == '1') {\n dfs(grid, col, row, visited);\n cnt++;\n }\n }\n }\n\n // print2D(visited);\n return cnt;\n }\n\n // -------------Union Find-----------------\n int get_index(int row, int col, int m) { return col * m + row; }\n\n char get_value(vector<vector<char>>& grid, int i, int j) {\n int n = grid.size();\n int m = grid[0].size();\n if (i < 0 || i >= n) return '0';\n if (j < 0 || j >= m) return '0';\n return grid[i][j];\n }\n\n int numIslands_with_uf(vector<vector<char>>& grid) {\n int n = grid.size();\n int m = grid[0].size();\n UnionFind uf{n * m};\n vector<pair<int, int>> actions = {\n {-1, 0},\n {0, -1},\n {1, 0},\n {0, 1},\n };\n\n // collect the count number about single 1.\n // UnionFind can only find the multiple 1.\n // like this:\n // 0\n // 0 1 0\n // 0\n int single_1_count = 0;\n for (int col = 0; col < n; col++) {\n for (int row = 0; row < m; row++) {\n if (get_value(grid, col, row) == '1' &&\n get_value(grid, col - 1, row) == '0' &&\n 
get_value(grid, col, row - 1) == '0' &&\n get_value(grid, col + 1, row) == '0' &&\n get_value(grid, col, row + 1) == '0') {\n single_1_count++;\n }\n }\n }\n\n int x = 0, y = 0;\n for (int col = 0; col < n; col++) {\n for (int row = 0; row < m; row++) {\n for (auto& p : actions) {\n x = p.first, y = p.second;\n if (((row + x) >= 0 && (col + y) >= 0) &&\n ((row + x) < m && (col + y) < n))\n if (grid[col][row] == '1') {\n if (grid[col + y][row + x] == '1') {\n uf.Union(get_index(row, col, m),\n get_index(row + x, col + y, m));\n }\n }\n }\n }\n }\n\n // update the parent of UnionFind.\n // this is important for get precise 1 block location.\n vector<vector<int>> board(n, vector<int>(m, -1));\n for (int col = 0; col < n; col++) {\n for (int row = 0; row < m; row++) {\n int idx = get_index(row, col, m);\n board[col][row] = uf.find(idx);\n }\n }\n\n vector<int> par = uf.get_parent();\n int multi_1_count = 0;\n unordered_map<int, int> counter{};\n for (const int pn : par) {\n counter[pn]++;\n }\n\n for (auto& c : counter) {\n if (c.second > 1) {\n multi_1_count++;\n }\n }\n\n // fmt::print(\"parents: {}\\n\", par);\n // fmt::print(\"single 1 count: {}\\n\", single_1_count);\n // fmt::print(\"counter: {}\\n\", counter);\n // fmt::print(\"multi_1_count: {}\\n\", multi_1_count);\n // print2D(board);\n // -----------\n // 1 1 1\n // 0 1 0\n // 1 1 1\n\n // parents: [6, 6, 6, 3, 6, 5, 6, 6, 6]\n // single 1 count: 0\n // counter: {5: 1, 3: 1, 6: 7}\n // multi_1_count: 1\n // board:\n // 6 6 6\n // 3 6 5\n // 6 6 6\n\n return multi_1_count + single_1_count;\n }\n};\n// @lc code=end\n\nint main() {\n // vector<vector<char>> grid = {\n // {'1', '1', '1'}, {'0', '1', '0'}, {'1', '1', '1'}};\n // vector<vector<char>> grid = {{'1', '1', '0', '0', '0'},\n // {'1', '1', '0', '0', '0'},\n // {'0', '0', '1', '0', '0'},\n // {'0', '0', '0', '1', '1'}};\n vector<vector<char>> grid = {{'1', '1', '1', '1', '0'},\n {'1', '1', '0', '1', '0'},\n {'1', '1', '0', '0', '0'},\n {'0', '0', '0', 
'0', '0'}};\n print2D(grid);\n Solution sol;\n auto v = sol.numIslands(grid);\n fmt::print(\"{}\\n\", v);\n return 0;\n}\n" }, { "alpha_fraction": 0.3884848356246948, "alphanum_fraction": 0.4224242568016052, "avg_line_length": 23.604476928710938, "blob_id": "b7155038da057a3ec14f432ae36da8b44d62731b", "content_id": "6e4ba303a653800eec56176821e1e511f8ebd4f5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3300, "license_type": "no_license", "max_line_length": 108, "num_lines": 134, "path": "/COPInterview/CycleGraph.py", "repo_name": "Alwaysproblem/simplecode", "src_encoding": "UTF-8", "text": "# assume the graph is like 2 patterns.\n\n# vertex vertex weight\n\n# directly matrix\n# from collections import deque\n\nfrom functools import partial\nfrom math import inf\n\nclass Vertex():\n def __init__(self, state):\n self.state = state\n\nclass Node(Vertex):\n # just for the search\n def __init__(self, idx, cost = 0, prev = '', state = None):\n super().__init__(state)\n self.index = idx\n self.cost = cost\n self.prev =prev\n\n def __repr__(self):\n return \"Node(\" + str(self.index) + \")\"\n\n def __lt__(self, other):\n return self.index < other.index\n\n def __eq__(self, other):\n return self.index == other.index\n\n def __hash__(self):\n return hash(self.index)\n\n def __index__(self):\n return self.index\n\n#%%\nclass Graph():\n def __init__(self, g, nv, mode = None, Direct = True, optdic = {}, valuedic = {}):\n self.Direct = Direct\n self.mode = mode\n self.numV = nv\n self.mapVertices = [Vertex(state=valuedic[i] if valuedic != {} else None) for i in range(self.numV)]\n self.g= self.encode(g)\n self.opdic = optdic\n\n def encode(self, g):\n if self.mode == None:\n return g\n\n elif self.mode == 'Alist':\n graph = {ve : [] for ve in range(self.numV)}\n for v1, v2, weight in g:\n if self.Direct == True:\n graph[v1].append((v2, weight))\n else:\n graph[v1].append((v2, weight))\n graph[v2].append((v1, weight))\n 
\n return graph\n \n elif self.mode == 'Amatrix':\n graph = [[0] * self.numV for _ in range(self.numV)]\n for v1, v2, weight in g:\n if self.Direct == True:\n graph[v1][v2] = weight\n else:\n graph[v1][v2] = weight\n graph[v2][v1] = weight\n\n return graph\n\n def isadjacent(self, v1, v2):\n if self.mode == \"Amatrix\":\n return self.g[v1][v2] != 0\n else:\n return any([i[0] == v2 for i in self.g[v1]])\n\n def __repr__(self):\n s = ''\n if self.mode == \"Amatrix\":\n for i in self.g:\n s += str(i) + '\\n' \n return s\n elif self.mode == \"Alist\":\n for i in self.g:\n s += str(i) + \": \" + str(self.g[i]) + '\\n'\n return s\n else:\n raise NotImplementedError()\n\n\nclass MAP(Graph):\n def __init__(self, g, nv, pos, mode = None, Direct = True):\n super().__init__(g, nv, mode, Direct, valuedic=pos)\n\nif __name__ == \"__main__\":\n nV = 8\n\n V = {\n 0: (0, 0),\n 1: (1, 1),\n 2: (2, 2),\n 3: (1, 0),\n 4: (5, 5),\n 5: (3, 2),\n 6: (2, 1),\n 7: (3, 3)\n }\n\n B = [[0, 1, 1],\n [0, 2, 1], \n [0, 3, 1], \n [1, 2, 1], \n [1, 4, 1], \n [2, 4, 1], \n [2, 5, 1], \n [2, 3, 1],\n [3, 6, 1],\n [6, 7, 1]]\n\n W = [[0, 1, 2],\n [0, 2, 5], \n [0, 3, 1], \n [1, 2, 2], \n [1, 4, 6], \n [2, 4, 7], \n [2, 5, 1], \n [2, 3, 4],\n [3, 6, 3],\n [6, 7, 4]]\n\n m = MAP(W, nV, V, mode=\"Alist\")\n\n\n\n" }, { "alpha_fraction": 0.5428571701049805, "alphanum_fraction": 0.5548872351646423, "avg_line_length": 23.592592239379883, "blob_id": "c9989c8ae1c2b2bb7488c705a6eb5a87250144db", "content_id": "45cfc19da49cb8e66f715e5f38bc49b8475230bb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 677, "license_type": "no_license", "max_line_length": 67, "num_lines": 27, "path": "/leetcode/python/347.前-k-个高频元素.py", "repo_name": "Alwaysproblem/simplecode", "src_encoding": "UTF-8", "text": "#\n# @lc app=leetcode.cn id=347 lang=python3\n#\n# [347] 前 K 个高频元素\n#\nfrom typing import *\n# @lc code=start\nfrom collections import Counter\nfrom heapq import 
heappushpop, heappush, heappop\nclass Solution:\n def topKFrequent(self, nums: List[int], k: int) -> List[int]:\n cnt = dict(Counter(nums))\n index = [value for _, value in cnt.items() ]\n\n pq = []\n\n for ind in index:\n if len(pq) < k:\n heappush(pq, ind)\n elif len(pq) >= k and pq[0] < ind:\n heappushpop(pq, ind)\n \n cnt_set = set(pq)\n\n return [ key for key, val in cnt.items() if val in cnt_set]\n\n# @lc code=end\n\n" }, { "alpha_fraction": 0.5020408034324646, "alphanum_fraction": 0.5142857432365417, "avg_line_length": 19.704225540161133, "blob_id": "15d71d97201cb77f61f7827b821b30b13b703f67", "content_id": "5f54db13e609c08a14b30edcb68cf036acff1c40", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1470, "license_type": "no_license", "max_line_length": 68, "num_lines": 71, "path": "/interview/MaximumSubarraySumMWithMode.py", "repo_name": "Alwaysproblem/simplecode", "src_encoding": "UTF-8", "text": "#!/bin/python3\n\n\nimport math\nimport os\nimport random\nimport re\nimport sys\n\n# O(n)\n# def WindowSlider(seq, n=2):\n# it = iter(seq)\n# win = list((next(it, None) for _ in range(n)))\n# yield win.copy()\n# for e in it:\n# win.pop(0)\n# win.append(e)\n# yield win.copy()\n\n# slideing windows can be treated as CycleMove and take top n number\n\n# def CycleMove(lst, k):\n# return lst[k:] + lst[:k]\n\n# https://www.hackerrank.com/challenges/maximum-subarray-sum/problem\n\nimport bisect\ndef maximumSumWithMode(a, m):\n mm,pr=0,0\n a1=[]\n for i in a:\n pr=(pr+i)%m\n mm=max(mm,pr)\n ind=bisect.bisect_left(a1,pr+1)\n if(ind<len(a1)):\n mm=max(mm,pr-a1[ind]+m)\n bisect.insort(a1,pr)\n return mm\n\n\n# # O(n^2)\n# # Complete the maximumSum function below.\n# def maximumSum(a, m):\n# maxSum = 0\n# n = len(a)\n# for i in range(1, n + 1):\n# tmpSum = sum(a[:i])\n# if tmpSum % m >= maxSum:\n# maxSum = tmpSum % m\n# for j in range(0, n - i):\n# tmpSum = tmpSum - a[j] + a[j + i]\n# if tmpSum % m >= maxSum:\n# maxSum 
= tmpSum % m\n# return maxSum\n\n\nif __name__ == '__main__':\n q = int(input())\n\n for q_itr in range(q):\n nm = input().split()\n\n n = int(nm[0])\n\n m = int(nm[1])\n\n a = list(map(int, input().rstrip().split()))\n\n result = maximumSumWithMode(a, m)\n\n print(result)\n" }, { "alpha_fraction": 0.44730332493782043, "alphanum_fraction": 0.45522019267082214, "avg_line_length": 19.835052490234375, "blob_id": "4469c8229fe81a652446544ec6b724ebbf76e696", "content_id": "9bf8954b1ca5523a1774640ac43058c0db2abd85", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2021, "license_type": "no_license", "max_line_length": 48, "num_lines": 97, "path": "/python_Interview/LNode.py", "repo_name": "Alwaysproblem/simplecode", "src_encoding": "UTF-8", "text": "class LLele(object):\n def __init__(self, x):\n self.data = x\n self.next = None\n \n def __repr__(self):\n return str(self.data)\n\n\nclass LL(object):\n def __init__(self, head: LLele):\n self.head = head\n self.len = len(self)\n\n def insert(self, Node: LLele, index: int):\n pass\n\n def delete(self, item: LLele):\n tmp = self.head\n post = tmp.next\n\n if len(self) == 1:\n self.head = None\n return 1\n\n while post != None:\n if post.data == item:\n tmp.next = post.next\n return 1\n tmp = post\n post = tmp.next\n else:\n return 0\n\n return 0\n\n\n def reverse(self):\n prev = None\n head = self.head\n post = None\n\n while head != None:\n post = head.next\n if post == None:\n self.head = head\n \n head.next = prev\n prev = head\n head = post\n\n def __contains__(self, item):\n tmpP = self.head\n\n while tmpP != None:\n if tmpP.data == item:\n return True\n tmpP = tmpP.next\n\n return False\n\n def __repr__(self):\n if self.head == None:\n return \"None\"\n charList = []\n tmpHead = self.head\n while self.head.next != None:\n charList.append(str(self.head.data))\n self.head = self.head.next\n else:\n charList.append(str(self.head.data))\n self.head = tmpHead\n return 
'->'.join(charList)\n\n def __len__(self):\n length = 0\n tmpP = self.head\n\n while tmpP != None:\n length += 1\n tmpP = tmpP.next\n\n return length\n\n\nif __name__ == \"__main__\":\n from itertools import repeat\n a = [LLele(i) for i in repeat(1, times = 5)]\n\n for l1, l2 in zip(a, a[1:]):\n l1.next = l2\n l = LL(a[0])\n\n l.delete(1)\n print(l)\n # l.reverse()\n # print(l)\n" }, { "alpha_fraction": 0.3643926680088043, "alphanum_fraction": 0.41763725876808167, "avg_line_length": 16.647058486938477, "blob_id": "4bb574133293e37c0113fb399e16382c6f99c891", "content_id": "116deee27dfba3cfbee305c4e08c428dcee3d13d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 601, "license_type": "no_license", "max_line_length": 68, "num_lines": 34, "path": "/interview/TreeList.py", "repo_name": "Alwaysproblem/simplecode", "src_encoding": "UTF-8", "text": "#!/bin/python3\n\nimport os\nimport sys\n\n\n#\n# Complete the swapNodes function below.\n#\n\ndef TreeList(indexes):\n Nodes = sum(indexes, [1])\n NNode = len(Nodes)\n # NNode must be 2**k - 1\n tree = [\"-\"] * NNode\n m = (NNode + 1) // 2\n\n k = 1\n while 2 ** k - 1 <= NNode and m != 0:\n\n for i in range(1, 2 ** k):\n if tree[m * i - 1] == \"-\":\n item = Nodes.pop(0)\n tree[m * i - 1] = (k, item)\n k += 1\n m = (NNode + 1)// (2 ** k)\n\n return tree\n\n\n\ns = [[2, 3], [4, 5], [6, 7], [-1, -1], [-1, -1], [-1, -1], [-1, -1]]\n\nTreeList(s)\n\n" }, { "alpha_fraction": 0.3865336775779724, "alphanum_fraction": 0.4002493619918823, "avg_line_length": 16.844444274902344, "blob_id": "812cca5334b844d15a93e250416ad7f691808396", "content_id": "248a2e9f465406306a183ad0d470485a552afcf8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 802, "license_type": "no_license", "max_line_length": 57, "num_lines": 45, "path": "/COPInterview/SearchRoot.py", "repo_name": "Alwaysproblem/simplecode", "src_encoding": "UTF-8", "text": 
"import math\nimport os\nimport random\nimport re\nimport sys\n\n# given an expression, search the positive integer roots.\n\n#%%\ndef Solution(eq: str):\n dd = eq.split(\"=\")\n a = '=='.join(dd)\n b = '<'.join(dd)\n c = '>'.join(dd)\n\n res = []\n\n Base = 1\n R = 10 ** 7 + 1\n L = Base\n\n X = Base\n\n for _ in range(2):\n while L < R:\n if eval(a) == True:\n if X not in res:\n res.append(X)\n elif X in res:\n break\n elif eval(b) == True:\n L = X\n X = (L + R) // 2\n\n elif eval(c) == True:\n R = X\n X = (L + R) // 2\n\n return res[0] if len(res) == 1 else -1\n\n\nif __name__ == \"__main__\":\n eq = input()\n\n print(Solution(eq))" }, { "alpha_fraction": 0.49201521277427673, "alphanum_fraction": 0.493536114692688, "avg_line_length": 19.24615478515625, "blob_id": "5a55dba1916bd1e5d5b1c1df5ce1aecf217f7b39", "content_id": "b2ace6f2b86316a98950dc03764666bd5b1fc6b8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1315, "license_type": "no_license", "max_line_length": 62, "num_lines": 65, "path": "/practice/BusYuanFuDao.py", "repo_name": "Alwaysproblem/simplecode", "src_encoding": "UTF-8", "text": "import math\nimport os\nimport random\nimport re\nimport sys\n\nclass Queue(object):\n def __init__(self, data = None):\n if data == None:\n self.data = []\n else:\n self.data = data\n\n def Enqueue(self, item):\n self.data.append(item)\n\n def Dequeue(self):\n return self.data.pop(0)\n \n def qsize(self):\n return len(self.data)\n\n def isEmpty(self):\n return len(self.data) == 0\n\n def __repr__(self):\n return \"Queue(\" + ', '.join(map(str, self.data)) + \")\"\n\n def __str__(self):\n return \"Queue(\" + ', '.join(map(str, self.data)) + \")\"\n\n def __iter__(self):\n return self\n\n def __next__(self):\n if self.isEmpty():\n raise StopIteration\n else:\n return self.Dequeue()\n\n def __contains__(self, item):\n return item in self.data\n\n\ndef BusOrder(n, m, N):\n F = []\n q = Queue()\n for i in N:\n 
q.Enqueue(i)\n if q.qsize() == m:\n F.append([j for j in q])\n else:\n if not q.isEmpty():\n F.append([j for j in q])\n \n F.reverse()\n return sum(F, [])\n\nif __name__ == \"__main__\":\n n, m = list(map(int, input().split()))\n N = list(map(int, input().split()))\n\n result = BusOrder(n, m, N)\n\n print(' '.join(map(str, result)))" }, { "alpha_fraction": 0.6769722700119019, "alphanum_fraction": 0.6823027729988098, "avg_line_length": 22.450000762939453, "blob_id": "835e3104ac1b0c25614e0f4eaf4c97f2753b9afc", "content_id": "0ed1a9037e561ed2bfa2593d7a8d661d94503fdf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 938, "license_type": "no_license", "max_line_length": 52, "num_lines": 40, "path": "/labuladong/cpp/insertIntoBST/main.cpp", "repo_name": "Alwaysproblem/simplecode", "src_encoding": "UTF-8", "text": "#include <fmt/format.h>\n#include <fmt/ranges.h>\n\n#include <iostream>\n#include <queue>\n#include <string>\n#include <unordered_set>\n#include <vector>\n\n#include \"common_types/TreeNode/BinaryTreeNode.h\"\n\nusing namespace std;\nusing TreeNode = BinaryTree::BinaryTreeNode<int>;\nusing BinaryTree::BuildBinaryTree;\nusing BinaryTree::showBinaryTree;\n\nclass Solution {\n public:\n TreeNode* insertIntoBST(TreeNode* root, int val) {\n if (!root) return new TreeNode(val);\n if (root->val == val) return root;\n if (val < root->val)\n root->left = insertIntoBST(root->left, val);\n else\n root->right = insertIntoBST(root->right, val);\n return root;\n }\n};\n\nint main() {\n const int null = BinaryTree::null<int>();\n vector<int> a{5, 1, 7};\n TreeNode* root = BuildBinaryTree<int>(a);\n showBinaryTree<int>(root);\n int val = 6;\n Solution sol;\n TreeNode *r = sol.insertIntoBST(root, val);\n showBinaryTree<int>(root);\n return 0;\n}\n" }, { "alpha_fraction": 0.593406617641449, "alphanum_fraction": 0.6428571343421936, "avg_line_length": 17.100000381469727, "blob_id": "e923c89638d2a15500b5cb1e127eee6e2563c69b", 
"content_id": "38a5c4276dbbf5f1a1808b0854925e9c5326bccd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 198, "license_type": "no_license", "max_line_length": 61, "num_lines": 10, "path": "/leetcode/python/1539.第-k-个缺失的正整数.py", "repo_name": "Alwaysproblem/simplecode", "src_encoding": "UTF-8", "text": "#\n# @lc app=leetcode.cn id=1539 lang=python3\n#\n# [1539] 第 k 个缺失的正整数\n#\n\n# @lc code=start\nclass Solution:\n def findKthPositive(self, arr: List[int], k: int) -> int:\n# @lc code=end\n\n" }, { "alpha_fraction": 0.5636363625526428, "alphanum_fraction": 0.5740259885787964, "avg_line_length": 23.838708877563477, "blob_id": "b369de7d73a5fb8353476a29df71583270c235db", "content_id": "fad962ae69d9025670d84404fd5ed5979f9734f4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1544, "license_type": "no_license", "max_line_length": 69, "num_lines": 62, "path": "/leetcode/cpp/subsets/main.cpp", "repo_name": "Alwaysproblem/simplecode", "src_encoding": "UTF-8", "text": "/*\n * @lc app=leetcode.cn id=78 lang=cpp\n *\n * [78] 子集\n */\n#include <fmt/ranges.h>\n\n#include <algorithm>\n#include <iostream>\n#include <unordered_map>\n#include <vector>\n\nusing namespace std;\n\n// @lc code=start\nclass Solution {\n public:\n vector<vector<int>> res{};\n unordered_map<int, int> visited{};\n vector<vector<int>> subsets(vector<int>& nums) {\n vector<int> s{};\n for (int i : nums) visited[i] = 0;\n int empty_set = *min_element(nums.begin(), nums.end()) - 1;\n backtrack(nums, s, visited, empty_set);\n return res;\n }\n\n void backtrack(vector<int>& nums, vector<int>& solution,\n unordered_map<int, int> visited, int empty_set) {\n if (!solution.empty() && solution.back() == empty_set) {\n solution.pop_back();\n if (solution.size() <= visited.size()) res.push_back(solution);\n solution.push_back(empty_set);\n return;\n }\n\n for (int ix = -1; ix < int(nums.size()); ++ix) {\n if 
(ix < 0) {\n solution.push_back(empty_set);\n backtrack(nums, solution, visited, empty_set);\n solution.pop_back();\n } else {\n if (visited[nums[ix]] != 0) continue;\n solution.push_back(nums[ix]);\n visited[nums[ix]] = 1;\n backtrack(nums, solution, visited, empty_set);\n visited[nums[ix]] = 0;\n solution.pop_back();\n visited[nums[ix]] = 1;\n }\n }\n }\n};\n// @lc code=end\n\nint main() {\n Solution s;\n vector<int> v{1, 2, 3};\n vector<vector<int>> result = s.subsets(v);\n fmt::print(\"res: {}\\n\", result);\n return 0;\n}\n" }, { "alpha_fraction": 0.5016382932662964, "alphanum_fraction": 0.5564050674438477, "avg_line_length": 24.742971420288086, "blob_id": "70e2c8d0e7582b130c3cfbcc30f415cedf07c579", "content_id": "4d4f5b217b2fcaa07dd82013f4dec491639dee31", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6411, "license_type": "no_license", "max_line_length": 496, "num_lines": 249, "path": "/COPInterview/takehome.py", "repo_name": "Alwaysproblem/simplecode", "src_encoding": "UTF-8", "text": "#%%[markdown]\n# # Takehome\n# \n# Hi prospective Junior Data Engineer! Here is your assignment. \n# \n# You are allowed to use the Python standard library and basic utilities from Numpy. Points are given for **succinct** but __clear__ code, and when there are ambiguities, comments should be provided. Using functional programming style is allowed. For Python, using the pep8 standard is encouraged. The challenges below will give you a few correct inputs and outputs, however we will be testing your functions against unseen inputs. 
So make sure you understand exactly the purpose of the function.\n# \n# All code is to be submitted that works against Python 3 and a current version of Numpy.\n# \n# Submit the code as separate `takehome.py` file.\n# \n# ## Functional Arrays\n# \n# Create a function that takes a lambda, a dimensions shape and the Numpy dtype, and produces an array.\n# \n# ```py\n# import numpy as np\n# \n# def create_array_from_function(f, d, dtype=None):\n# pass\n# \n# print(create_array_from_function(lambda i,j: (i - j)**2, [4, 4]))\n# # [[0. 1. 4. 9.]\n# # [1. 0. 1. 4.]\n# # [4. 1. 0. 1.]\n# # [9. 4. 1. 0.]]\n# ```\n#\n#%%\nimport numpy as np\n\ndef create_array_from_function(f, d, dtype=None):\n \"\"\"\n >>> print(create_array_from_function(lambda i,j: (i - j)**2, [4, 4]))\n [[0. 1. 4. 9.]\n [1. 0. 1. 4.]\n [4. 1. 0. 1.]\n [9. 4. 1. 0.]]\n \"\"\"\n return np.fromfunction(f, d, dtype=dtype)\n\n#%%[markdown]\n# ## Removing Boundaries\n# \n# Create a function that takes an array and a binary mask and produces a cropped array based on the binary mask.\n# \n# ```py\n# import numpy as np\n# \n# def boundary_cropping(a, m):\n# pass\n# \n# a1 = np.array([[0,0,0,0,0], [0,0,0,0,0], [0,1,0,1,1], [0,0,0,0,0]])\n# a2 = np.array([[ [0,0,0], [0,1,0], [0,1,0] ], [ [0,0,0], [0,1,0], [0,0,0] ], [ [0,0,0], [0,1,0], [0,0,0] ]])\n# \n# print(boundary_cropping(a1, a1 != 0))\n# # [[1 0 1 1]]\n# print(boundary_cropping(a2, a2 != 0))\n# # [[[1] [1]] [[1] [0]] [[1] [0]]]\n# ```\n# \n\n#%%\nimport numpy as np\n\ndef boundary_cropping(a, m):\n \"\"\"\n >>> a1 = np.array([[0,0,0,0,0], [0,0,0,0,0], [0,1,0,1,1], [0,0,0,0,0]])\n >>> a2 = np.array([[ [0,0,0], [0,1,0], [0,1,0] ], [ [0,0,0], [0,1,0], [0,0,0] ], [ [0,0,0], [0,1,0], [0,0,0] ]])\n >>> print(boundary_cropping(a1, a1 != 0))\n [[1 0 1 1]]\n >>> print(boundary_cropping(a2, a2 != 0))\n [[[1] [1]] [[1] [0]] [[1] [0]]]\n \"\"\"\n # res = None\n ind = np.argwhere(m)\n min_ind = ind.min(axis = 0)\n max_ind = ind.max(axis = 0) + 1\n\n sliced = [slice(i, j) 
for i, j in zip(min_ind, max_ind)]\n res = a[tuple(sliced)]\n #processing output format\n if \"\\n\" in str(res):\n return \" \".join(str(res).split())\n else:\n return str(res)\n\n#%%[markdown]\n# ## Block Reshaping\n# \n# Create a function that takes an 2D matrix, a number of rows and an number of columns which reshapes the 2D matrix into blocks of the rows and columns.\n# \n# ```py\n# import numpy as np\n# \n# def shape_as_blocks(a, r, c):\n# pass\n# \n# arr = np.array([[1,2,3,4], [5,6,7,8], [9,0,1,2]])\n# print(shape_as_blocks(arr, 2, 2))\n# # array([[[[1, 2],\n# # [7, 8]],\n# # \n# # [[3, 4],\n# # [9, 0]],\n# # \n# # [[5, 6],\n# # [1, 2]]]])\n# ```\n# \n#%%\nimport numpy as np\n\ndef shape_as_blocks(a, r, c):\n \"\"\"\n >>> arr = np.array([[1,2,3,4], [5,6,7,8], [9,0,1,2]])\n >>> shape_as_blocks(arr, 2, 2)\n array([[[[1, 2],\n [7, 8]],\n <BLANKLINE>\n [[3, 4],\n [9, 0]],\n <BLANKLINE>\n [[5, 6],\n [1, 2]]]])\n \"\"\"\n # chech if the number is divisible by r * c\n # assert(round(len(a.flat) // (r * c)) * r * c == len(a.flat))\n # return a.reshape((1, len(a.flat) // (r * c), r, c))\n\n # adjust the order to fit the answer.\n a = a.reshape(-1, c)\n a = np.hstack(np.vsplit(a, r))\n\n # chech if the number is divisible by r * c\n assert(round(len(a.flat) // (r * c)) * r * c == len(a.flat))\n return a.reshape((1, len(a.flat) // (r * c), r, c))\n\n# arr = np.array([[1,2,3,4], [5,6,7,8], [9,0,1,2]])\n# shape_as_blocks(arr, 1, 1)\n\n#%%[markdown]\n# ## Population Variance from Subpopulation Variance\n# \n# Given a list of numpy arrays, where each array is a subpopulation and the entire list is the population, calculate the variance of the entire population from the variance of the subpopulations.\n# \n# ```py\n# import numpy as np\n# \n# def pop_var_from_subpop_var(groups):\n# pass\n# \n# groups = [np.array([1,2,3,4]), np.array([5,6])]\n# print(pop_var_from_subpop_var(groups))\n# # 2.9166666666666665\n# ```\n# \n\nimport numpy as np\n\ndef 
pop_var_from_subpop_var(groups):\n \"\"\"\n >>> groups = [np.array([1,2,3,4]), np.array([5,6])]\n >>> print(pop_var_from_subpop_var(groups))\n 2.9166666666666665\n \"\"\"\n return np.hstack(groups).var()\n\n#%%[markdown]\n# ## Shuffle a Large List\n# \n# Given a very large list of numbers, randomly shuffle the list while using constant memory.\n# \n# ```py\n# import random\n# \n# l = [1,2,3,4,5]\n# \n# def shuffle_list_inplace_constant_memory(l):\n# pass\n# ```\n#\n\n#%%\nimport random\n\nl = [1,2,3,4,5]\n\n#complement with the Fisher–Yates shuffle.\ndef shuffle_list_inplace_constant_memory(l):\n for i in range(1, len(l)):\n ind = random.randint(0, i)\n l[ind], l[i] = l[i], l[ind]\n\n\n#%%[markdown]\n# ## Acquiring Coordinates\n# \n# Given an array and a step shape, return a list of coordinates based on each step.\n# \n# ```py\n# import itertools\n# import numpy as np\n# \n# def coordinates_from_steps(a, s, dtype=int):\n# pass\n# \n# print(coordinates_from_steps(np.array([[1,2],[3,4]]), (1,1)))\n# # [[0 0]\n# # [0 1]\n# # [1 0]\n# # [1 1]]\n# \n# print(coordinates_from_steps(np.array([[1,2],[3,4]]), (1,2)))\n# # [[0 0]\n# # [1 0]]\n# ```\n# \n\n#%%\nimport itertools\nimport numpy as np\n\ndef coordinates_from_steps(a, s, dtype=int):\n \"\"\"\n this function can be seen as Converlutional layer.\n\n >>> print(coordinates_from_steps(np.array([[1,2],[3,4]]), (1,1)))\n [[0 0]\n [0 1]\n [1 0]\n [1 1]]\n >>> print(coordinates_from_steps(np.array([[1,2],[3,4]]), (1,2)))\n [[0 0]\n [1 0]]\n \"\"\"\n # compute the range of window slide\n Shape = np.array(s) - 1\n step_cord = np.array(a.shape) - Shape\n \n # product all the \n all_cord = [*itertools.product(*[range(i) for i in step_cord])]\n\n return np.array(all_cord)\n\n#%%\nif __name__ == \"__main__\":\n import doctest\n doctest.testmod()" }, { "alpha_fraction": 0.4457593560218811, "alphanum_fraction": 0.4674556255340576, "avg_line_length": 21, "blob_id": "959f8aa9664e5ccd5558a84e34d89cf31784b26a", "content_id": 
"8c519c19297b90d80b5526ecd9ec74b6c0566694", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 515, "license_type": "no_license", "max_line_length": 61, "num_lines": 23, "path": "/leetcode/python/139.单词拆分.py", "repo_name": "Alwaysproblem/simplecode", "src_encoding": "UTF-8", "text": "#\n# @lc app=leetcode.cn id=139 lang=python3\n#\n# [139] 单词拆分\n#\nfrom typing import *\n# @lc code=start\nclass Solution:\n def wordBreak(self, s: str, wordDict: List[str]) -> bool:\n dp = [False] * (len(s) + 1)\n dp[0] = True\n\n for i in range(1, len(s) + 1):\n for word in wordDict:\n if len(word) <= i and dp[i - len(word)]:\n if s[i - len(word):i] == word:\n dp[i] = True\n \n return dp[len(s)]\n\n\n\n# @lc code=end\n\n" }, { "alpha_fraction": 0.4879383146762848, "alphanum_fraction": 0.4945293962955475, "avg_line_length": 25.619298934936523, "blob_id": "f72bf28fd19902cba541ea43ec03d834b6b4e09d", "content_id": "2f5d018d042b417c5d85a1bbc45415aba4840803", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 15172, "license_type": "no_license", "max_line_length": 97, "num_lines": 570, "path": "/CHack/doc.c", "repo_name": "Alwaysproblem/simplecode", "src_encoding": "UTF-8", "text": "#include <stdio.h>\n#include <stdlib.h>\n#include <string.h>\n#include <assert.h>\n\n#define MAX_CHARACTERS 1005\n#define MAX_PARAGRAPHS 5\n\nstruct word {\n char* data;\n};\n\nstruct sentence {\n struct word* data;\n int word_count;//denotes number of words in a sentence\n};\n\nstruct paragraph {\n struct sentence* data ;\n int sentence_count;//denotes number of sentences in a paragraph\n};\n\nstruct document {\n struct paragraph* data;\n int paragraph_count;//denotes number of paragraphs in a document\n};\n\ntypedef char * string;\ntypedef struct word word;\ntypedef struct sentence sentence;\ntypedef struct paragraph paragraph;\ntypedef struct document document;\n\nstruct document get_document(char* text) 
{\n // string para[MAX_PARAGRAPHS][256][256];\n word * w;\n w = (word *)malloc(sizeof(word));\n sentence *s;\n s = (sentence *)malloc(sizeof(sentence));\n paragraph *p;\n p = (paragraph *)malloc(sizeof(paragraph));\n document d;\n char *para = text;\n char *sente = text;\n char *Word = text;\n string seppara = \"\\n\";\n string sepsent = \".\";\n string sepword = \" \";\n char * one_para = \"\";\n char * one_sente = \"\";\n char * one_word = \"\";\n int i, j, k;\n\n for(i = 0; (para != NULL)&&(strcmp(para, \"\") != 0)&&(one_para != NULL); i++){\n one_para = strtok_r(para, seppara, &para);\n if (one_para == NULL){\n i --;\n continue;\n }\n sente = one_para;\n one_sente = sente;\n for (j = 0; (sente != NULL)&& (strcmp(sente, \"\") != 0) &&(one_sente != NULL); j++){\n one_sente = strtok_r(sente, sepsent, &sente);\n if(one_sente == NULL){\n j --;\n continue;\n }\n Word = one_sente;\n one_word = Word;\n for(k = 0; (Word != NULL)&& (strcmp(Word, \"\") != 0) && (one_word != NULL); k++){\n one_word = strtok_r(Word, sepword, &Word);\n word* tmpw = (word *)realloc(w, (k + 1) * sizeof(word));\n assert(tmpw != NULL);\n w = tmpw;\n w[k].data = one_word; // need to get rid of '\\n'\n }\n sentence * tmps = (sentence *)realloc(s, (j + 1) * sizeof(sentence));\n assert(tmps != NULL);\n s = tmps;\n s[j].data = w;\n s[j].word_count = k;\n w = (word *)malloc(sizeof(word));\n }\n paragraph * tmpp = (paragraph *)realloc(p, (i + 1) * sizeof(paragraph));\n assert(tmpp != NULL);\n p = tmpp;\n p[i].data = s;\n p[i].sentence_count = j;\n s = (sentence *)malloc(sizeof(sentence));\n }\n d.data = p;\n d.paragraph_count = i;\n\n free(w);\n free(s);\n\n return d;\n}\n\nstruct word kth_word_in_mth_sentence_of_nth_paragraph(struct document Doc, int k, int m, int n) {\n word w;\n w.data = NULL;\n k -= 1;\n m -= 1;\n n -= 1;\n if(n < Doc.paragraph_count){\n if (m < Doc.data[n].sentence_count){\n if (k < Doc.data[n].data[m].word_count){\n w = Doc.data[n].data[m].data[k];\n }\n }\n }\n return 
w;\n}\n\nstruct sentence kth_sentence_in_mth_paragraph(struct document Doc, int k, int m) { \n sentence s;\n s.data = NULL;\n s.word_count = 0;\n k -= 1;\n m -= 1;\n if(m < Doc.paragraph_count){\n if(k < Doc.data[m].sentence_count){\n s = Doc.data[m].data[k];\n }\n }\n return s;\n}\n\nstruct paragraph kth_paragraph(struct document Doc, int k) {\n paragraph p;\n p.data = NULL;\n p.sentence_count = 0;\n k -= 1;\n if (k < Doc.paragraph_count){\n p = Doc.data[k];\n }\n return p;\n}\n\n\nvoid print_word(struct word w) {\n printf(\"%s\", w.data);\n}\n\nvoid print_sentence(struct sentence sen) {\n for(int i = 0; i < sen.word_count; i++) {\n print_word(sen.data[i]);\n if (i != sen.word_count - 1) {\n printf(\" \");\n }\n }\n}\n\nvoid print_paragraph(struct paragraph para) {\n for(int i = 0; i < para.sentence_count; i++){\n print_sentence(para.data[i]);\n printf(\".\");\n }\n}\n\nvoid print_document(struct document doc) {\n for(int i = 0; i < doc.paragraph_count; i++) {\n print_paragraph(doc.data[i]);\n if (i != doc.paragraph_count - 1)\n printf(\"\\n\");\n }\n}\n\nchar* get_input_text() {\t\n int paragraph_count;\n scanf(\"%d\", &paragraph_count);\n\n char p[MAX_PARAGRAPHS][MAX_CHARACTERS], doc[MAX_CHARACTERS];\n memset(doc, 0, sizeof(doc));\n getchar();\n for (int i = 0; i < paragraph_count; i++) {\n scanf(\"%[^\\n]%*c\", p[i]);\n strcat(doc, p[i]);\n if (i != paragraph_count - 1)\n strcat(doc, \"\\n\");\n }\n\n char* returnDoc = (char*)malloc((strlen (doc)+1) * (sizeof(char)));\n strcpy(returnDoc, doc);\n return returnDoc;\n}\n\nint main() \n{\n char* text = get_input_text();\n struct document Doc = get_document(text);\n\n // printf(\"\\n\");\n // print_document(Doc);\n // printf(\"\\n\");\n // struct word w = kth_word_in_mth_sentence_of_nth_paragraph(Doc, 3, 1, 2);\n // print_word(w);\n // printf(\"\\n\");\n // struct sentence sen= kth_sentence_in_mth_paragraph(Doc, 2, 2);\n // print_sentence(sen);\n // printf(\"\\n\");\n // struct paragraph para = kth_paragraph(Doc, 
2);\n // print_paragraph(para);\n // printf(\"\\n\");\n\n int q;\n scanf(\"%d\", &q);\n\n while (q--) {\n int type;\n scanf(\"%d\", &type);\n\n if (type == 3){\n int k, m, n;\n scanf(\"%d %d %d\", &k, &m, &n);\n struct word w = kth_word_in_mth_sentence_of_nth_paragraph(Doc, k, m, n);\n print_word(w);\n }\n\n else if (type == 2) {\n int k, m;\n scanf(\"%d %d\", &k, &m);\n struct sentence sen= kth_sentence_in_mth_paragraph(Doc, k, m);\n print_sentence(sen);\n }\n\n else{\n int k;\n scanf(\"%d\", &k);\n struct paragraph para = kth_paragraph(Doc, k);\n print_paragraph(para);\n }\n printf(\"\\n\");\n }\n}\n\n// #include <stdio.h>\n// #include <stdlib.h>\n// #include <string.h>\n// #include <assert.h>\n// #define MAX_CHARACTERS 1005\n// #define MAX_PARAGRAPHS 5\n\n// struct word {\n// char* data;\n// };\n\n// struct sentence {\n// struct word* data;\n// int word_count;//denotes number of words in a sentence\n// };\n\n// struct paragraph {\n// struct sentence* data ;\n// int sentence_count;//denotes number of sentences in a paragraph\n// };\n\n// struct document {\n// struct paragraph* data;\n// int paragraph_count;//denotes number of paragraphs in a document\n// };\n\n// typedef char * string;\n// typedef struct word word;\n// typedef struct sentence sentence;\n// typedef struct paragraph paragraph;\n// typedef struct document document;\n\n// struct document get_document(char* text) {\n// word * w;\n// w = (word *)malloc(sizeof(word));\n// sentence *s;\n// s = (sentence *)malloc(sizeof(sentence));\n// paragraph *p;\n// p = (paragraph *)malloc(sizeof(paragraph));\n// document d;\n// char *para = text;\n// char *sente = text;\n// char *Word = text;\n// string seppara = \"\\n\";\n// string sepsent = \".\";\n// string sepword = \" \";\n// char * one_para;\n// char * one_sente;\n// char * one_word;\n// int i, j, k;\n\n// for(i = 0; one_para != NULL; i++){\n// one_para = strtok_r(para, seppara, &para);\n// if (one_para == NULL){\n// i --;\n// continue;\n// }\n// sente = 
one_para;\n// one_sente = sente;\n// for (j = 0; (sente != NULL)&&(one_sente != NULL); j++){\n// one_sente = strtok_r(sente, sepsent, &sente);\n// if(one_sente == NULL){\n// j --;\n// continue;\n// }\n// Word = one_sente;\n// one_word = Word;\n// for(k = 0; (Word != NULL) && (one_word != NULL); k++){\n// one_word = strtok_r(Word, sepword, &Word);\n// word* tmpw = (word *)realloc(w, (k + 1) * sizeof(word));\n// assert(tmpw != NULL);\n// w = tmpw;\n// w[k].data = one_word; // need to get rid of '\\n'\n// }\n// sentence * tmps = (sentence *)realloc(s, (j + 1) * sizeof(sentence));\n// assert(tmps != NULL);\n// s = tmps;\n// s[j].data = w;\n// s[j].word_count = k;\n// w = (word *)malloc(sizeof(word));\n// }\n// paragraph * tmpp = (paragraph *)realloc(p, (i + 1) * sizeof(paragraph));\n// assert(tmpp != NULL);\n// p = tmpp;\n// p[i].data = s;\n// p[i].sentence_count = j;\n// s = (sentence *)malloc(sizeof(sentence));\n// }\n// d.data = p;\n// d.paragraph_count = i;\n\n// free(w);\n// free(s);\n\n// printf(\"%s\", d.data[0].data[0].data[0].data);\n\n// return d;\n// }\n\n// struct word kth_word_in_mth_sentence_of_nth_paragraph(struct document Doc,\n// int k, int m, int n) {\n// word w;\n// w.data = NULL;\n// k -= 1;\n// m -= 1;\n// n -= 1;\n// if (n < Doc.paragraph_count) {\n// if (m < Doc.data[n].sentence_count) {\n// if (k < Doc.data[n].data[m].word_count) {\n// w = Doc.data[n].data[m].data[k];\n// }\n// }\n// }\n// return w;\n// }\n\n// struct sentence kth_sentence_in_mth_paragraph(struct document Doc, int k,\n// int m) {\n// sentence s;\n// s.data = NULL;\n// s.word_count = 0;\n// k -= 1;\n// m -= 1;\n// if (m < Doc.paragraph_count) {\n// if (k < Doc.data[m].sentence_count) {\n// s = Doc.data[m].data[k];\n// }\n// }\n// return s;\n// }\n\n// struct paragraph kth_paragraph(struct document Doc, int k) {\n// paragraph p;\n// p.data = NULL;\n// p.sentence_count = 0;\n// k -= 1;\n// if (k < Doc.paragraph_count) {\n// p = Doc.data[k];\n// }\n// return p;\n// // }\n\n// 
typedef struct word word;\n// typedef struct sentence sentence;\n// typedef struct paragraph paragraph;\n// typedef struct document document;\n\n// void add_char(word *_word, char character) {\n// static int size;\n\n// if (_word->data == NULL) {\n// size = 0;\n// _word->data = (char *)malloc(0);\n// }\n\n// _word->data = (char *)realloc(_word->data, (size + 1) * sizeof(char));\n// _word->data[size] = character;\n// _word->data[size + 1] = 0;\n\n// size++;\n// }\n\n// void add_word(sentence *_sentence, word *_word) {\n// if (_sentence->data == NULL) {\n// _sentence->data = (word *)malloc(0);\n// _sentence->word_count = 0;\n// }\n\n// _sentence->word_count++;\n// _sentence->data =\n// (word *)realloc(_sentence->data, _sentence->word_count * sizeof(word));\n// _sentence->data[_sentence->word_count - 1] = *_word;\n// _word->data = NULL;\n// }\n\n// void add_sentence(paragraph *_paragraph, sentence *_sentence) {\n// if (_paragraph->data == NULL) {\n// _paragraph->data = (sentence *)malloc(0);\n// _paragraph->sentence_count = 0;\n// }\n\n// _paragraph->sentence_count++;\n// _paragraph->data = (sentence *)realloc(\n// _paragraph->data, _paragraph->sentence_count * sizeof(sentence));\n// _paragraph->data[_paragraph->sentence_count - 1] = *_sentence;\n// _sentence->data = NULL;\n// }\n\n// void add_paragraph(document *_document, paragraph *_paragraph) {\n// if (_document->data == NULL) {\n// _document->data = (paragraph *)malloc(0);\n// _document->paragraph_count = 0;\n// }\n\n// _document->paragraph_count++;\n// _document->data = (paragraph *)realloc(\n// _document->data, _document->paragraph_count * sizeof(paragraph));\n// _document->data[_document->paragraph_count - 1] = *_paragraph;\n// _paragraph->data = NULL;\n// }\n\n// struct document get_document(char *text) {\n// document _document;\n// paragraph _paragraph;\n// sentence _sentence;\n// word _word;\n\n// _document.data = NULL;\n// _paragraph.data = NULL;\n// _sentence.data = NULL;\n// _word.data = NULL;\n\n// for 
(unsigned int i = 0; i <= strlen(text); i++) {\n// switch (text[i]) {\n// case ' ':\n// add_word(&_sentence, &_word);\n// break;\n\n// case '.':\n// add_word(&_sentence, &_word);\n// add_sentence(&_paragraph, &_sentence);\n// break;\n\n// case '\\n':\n// case '\\0':\n// add_paragraph(&_document, &_paragraph);\n// break;\n\n// default:\n// add_char(&_word, text[i]);\n// break;\n// }\n// }\n\n// return _document;\n// }\n\n// struct word kth_word_in_mth_sentence_of_nth_paragraph(struct document Doc,\n// int k, int m, int n) {\n// return Doc.data[n - 1].data[m - 1].data[k - 1];\n// }\n\n// struct sentence kth_sentence_in_mth_paragraph(struct document Doc, int k,\n// int m) {\n// return Doc.data[m - 1].data[k - 1];\n// }\n\n// struct paragraph kth_paragraph(struct document Doc, int k) {\n// return Doc.data[k - 1];\n// }\n\n// void print_word(struct word w) {\n// printf(\"%s\", w.data);\n// }\n\n// void print_sentence(struct sentence sen) {\n// for(int i = 0; i < sen.word_count; i++) {\n// print_word(sen.data[i]);\n// if (i != sen.word_count - 1) {\n// printf(\" \");\n// }\n// }\n// }\n\n// void print_paragraph(struct paragraph para) {\n// for(int i = 0; i < para.sentence_count; i++){\n// print_sentence(para.data[i]);\n// printf(\".\");\n// }\n// }\n\n// void print_document(struct document doc) {\n// for(int i = 0; i < doc.paragraph_count; i++) {\n// print_paragraph(doc.data[i]);\n// if (i != doc.paragraph_count - 1)\n// printf(\"\\n\");\n// }\n// }\n\n// char* get_input_text() {\t\n// int paragraph_count;\n// scanf(\"%d\", &paragraph_count);\n\n// char p[MAX_PARAGRAPHS][MAX_CHARACTERS], doc[MAX_CHARACTERS];\n// memset(doc, 0, sizeof(doc));\n// getchar();\n// for (int i = 0; i < paragraph_count; i++) {\n// scanf(\"%[^\\n]%*c\", p[i]);\n// strcat(doc, p[i]);\n// if (i != paragraph_count - 1)\n// strcat(doc, \"\\n\");\n// }\n\n// char* returnDoc = (char*)malloc((strlen (doc)+1) * (sizeof(char)));\n// strcpy(returnDoc, doc);\n// return returnDoc;\n// }\n\n// int main() \n// 
{\n// char* text = get_input_text();\n// struct document Doc = get_document(text);\n\n// int q;\n// scanf(\"%d\", &q);\n\n// while (q--) {\n// int type;\n// scanf(\"%d\", &type);\n\n// if (type == 3){\n// int k, m, n;\n// scanf(\"%d %d %d\", &k, &m, &n);\n// struct word w = kth_word_in_mth_sentence_of_nth_paragraph(Doc, k, m, n);\n// print_word(w);\n// }\n\n// else if (type == 2) {\n// int k, m;\n// scanf(\"%d %d\", &k, &m);\n// struct sentence sen= kth_sentence_in_mth_paragraph(Doc, k, m);\n// print_sentence(sen);\n// }\n\n// else{\n// int k;\n// scanf(\"%d\", &k);\n// struct paragraph para = kth_paragraph(Doc, k);\n// print_paragraph(para);\n// }\n// printf(\"\\n\");\n// } \n// }" }, { "alpha_fraction": 0.5691472291946411, "alphanum_fraction": 0.5768125057220459, "avg_line_length": 21.688405990600586, "blob_id": "222d86be49dd5157d6ab440e697c1fbe83b19a68", "content_id": "69904a2773e34c97c52820b8065d233d711217d5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 3309, "license_type": "no_license", "max_line_length": 77, "num_lines": 138, "path": "/leetcode/cpp/canFinish/main.cpp", "repo_name": "Alwaysproblem/simplecode", "src_encoding": "UTF-8", "text": "/*\n * @lc app=leetcode.cn id=207 lang=cpp\n *\n * [207] 课程表\n */\n\n#include <fmt/format.h>\n#include <fmt/ranges.h>\n\n#include <iostream>\n#include <queue>\n#include <unordered_map>\n#include <unordered_set>\n#include <vector>\n\n#include \"utils/print_2d.hpp\"\n\nusing namespace std;\n\n// @lc code=start\n\nclass Solution {\n public:\n void buildGraph(vector<vector<int>>& graph, int numCourses,\n vector<vector<int>>& prerequisites) {\n graph.resize(numCourses);\n for (auto& edge : prerequisites) {\n int from = edge[1], to = edge[0];\n // 添加一条从 from 指向 to 的有向边\n // 边的方向是「被依赖」关系,即修完课程 from 才能修课程 to\n graph[from].push_back(to);\n }\n }\n\n void hasCycle_dfs(vector<vector<int>>& graph, int s, vector<bool>& visited,\n vector<bool>& onPath, bool& has_cycle) {\n if 
(onPath[s]) {\n // 发现环\n has_cycle = true;\n return;\n }\n\n if (visited[s]) {\n // 已访问过,无需再访问\n return;\n }\n\n // 标记当前节点为已访问\n visited[s] = true;\n\n // 标记当前节点在路径上\n onPath[s] = true;\n\n // 递归访问相邻节点\n for (int node : graph[s]) {\n hasCycle_dfs(graph, node, visited, onPath, has_cycle);\n }\n\n // 当前节点访问结束,移出路径\n onPath[s] = false;\n }\n\n bool canFinish_DFS(int numCourses, vector<vector<int>>& prerequisites) {\n vector<vector<int>> graph{};\n vector<bool> visited(numCourses, false);\n vector<bool> onPath(numCourses, false);\n bool has_cycle = false;\n\n buildGraph(graph, numCourses, prerequisites);\n\n for (int i = 0; i < numCourses; i++) {\n if (!visited[i]) hasCycle_dfs(graph, i, visited, onPath, has_cycle);\n }\n\n return !has_cycle;\n }\n\n bool hasCycle_bfs(vector<vector<int>>& graph) {\n vector<int> inDegree(graph.size(), 0);\n for (auto& neighbors : graph) {\n for (int neighbor : neighbors) {\n inDegree[neighbor]++;\n }\n }\n\n queue<int> q;\n unordered_set<int> visited;\n for (int i = 0; i < inDegree.size(); i++) {\n if (inDegree[i] == 0) {\n q.push(i);\n visited.insert(i);\n }\n }\n\n int count = 0;\n while (!q.empty()) {\n int node = q.front();\n q.pop();\n count++;\n for (int neighbor : graph[node]) {\n if (!visited.count(neighbor)) {\n inDegree[neighbor]--;\n if (inDegree[neighbor] == 0) {\n q.push(neighbor);\n visited.insert(neighbor);\n }\n }\n }\n }\n return count == graph.size();\n }\n\n bool canFinish_BFS(int numCourses, vector<vector<int>>& prerequisites) {\n vector<vector<int>> graph{};\n buildGraph(graph, numCourses, prerequisites);\n return hasCycle_bfs(graph);\n }\n\n bool canFinish(int numCourses, vector<vector<int>>& prerequisites) {\n const string mode = \"BFS\";\n if (mode == \"DFS\")\n return canFinish_DFS(numCourses, prerequisites);\n else\n return canFinish_BFS(numCourses, prerequisites);\n }\n};\n\n// @lc code=end\n\nint main() {\n // unordered_map<int, int> v{{1, 3}, {2, 4}, {3, NULL}};\n Solution sol;\n vector<vector<int>> 
prerequisites{{1, 0}};\n int numCourses = 2;\n bool v = sol.canFinish(numCourses, prerequisites);\n fmt::print(\"{}\\n\", v);\n return 0;\n}\n" }, { "alpha_fraction": 0.49702027440071106, "alphanum_fraction": 0.5268176198005676, "avg_line_length": 23.676469802856445, "blob_id": "04897e2482227eafa3df76114a30ebfae85fb780", "content_id": "aa09c063eaf3d2ded66643f6985bf15df651b8df", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 839, "license_type": "no_license", "max_line_length": 79, "num_lines": 34, "path": "/labuladong/cpp/longestCommonSubsequence/main.cpp", "repo_name": "Alwaysproblem/simplecode", "src_encoding": "UTF-8", "text": "#include <fmt/format.h>\n#include <fmt/ranges.h>\n\n#include <iostream>\n#include <unordered_map>\n#include <vector>\n\nusing namespace std;\n\nclass Solution {\n public:\n int longestCommonSubsequence(string text1, string text2) {\n vector<vector<int>> dp(text1.size() + 1, vector<int>(text2.size() + 1, 0));\n for (int i = 1; i <= text1.size(); i++) {\n for (int j = 1; j <= text2.size(); j++) {\n if (text1[i - 1] == text2[j - 1]) {\n dp[i][j] = dp[i - 1][j - 1] + 1;\n } else {\n dp[i][j] = max(dp[i - 1][j], dp[i][j - 1]);\n }\n }\n }\n fmt::print(\"{}\\n\", fmt::join(dp.begin(), dp.end(), \"\\n\"));\n return dp.back().back();\n }\n};\n\nint main() {\n Solution sol;\n string text1 = \"abc\", text2 = \"def\";\n int v = sol.longestCommonSubsequence(text1, text2);\n fmt::print(\"{}\\n\", v);\n return 0;\n}\n" }, { "alpha_fraction": 0.5973920226097107, "alphanum_fraction": 0.6071719527244568, "avg_line_length": 20.910715103149414, "blob_id": "f7580316588e5fdbe322c9032304d56391bb72e3", "content_id": "d1cd728ae71d43d493f9025a946ff6e9ee1a4ea0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1241, "license_type": "no_license", "max_line_length": 67, "num_lines": 56, "path": "/leetcode/cpp/reverseKGroup/main.cpp", "repo_name": 
"Alwaysproblem/simplecode", "src_encoding": "UTF-8", "text": "/*\n * @lc app=leetcode.cn id=25 lang=cpp\n *\n * [25] K 个一组翻转链表\n */\n#include <fmt/format.h>\n#include <fmt/ranges.h>\n\n#include <iostream>\n#include <unordered_map>\n#include <vector>\n\n#include \"common_types/LinkedList/LinkedList.h\"\n\nusing namespace std;\nusing ListNode = LinkedListNode<int>;\n\n// @lc code=start\nclass Solution {\n public:\n ListNode* reverse(ListNode* head, ListNode* target) {\n ListNode *pre = nullptr, *cur = head, *nxt = head;\n while (cur != target) {\n nxt = cur->next;\n cur->next = pre;\n pre = cur;\n cur = nxt;\n }\n return pre;\n }\n ListNode* reverseKGroup(ListNode* head, int k) {\n if (!head) return head;\n ListNode* cur = nullptr;\n int i = 0;\n for (cur = head; cur != nullptr && i < k; cur = cur->next, i++)\n ;\n if (i < k) return head;\n ListNode* tail = reverseKGroup(cur, k);\n ListNode* new_head = reverse(head, cur);\n head->next = tail;\n return new_head;\n }\n};\n// @lc code=end\n\nint main() {\n vector<int> v = {1, 2, 3, 4, 5};\n int k = 2;\n ListNode* head = BuildLinkedlist<int>(v);\n showLinkedList<int>(head);\n Solution sol;\n ListNode* r = sol.reverseKGroup(head, k);\n showLinkedList<int>(r);\n DestroyLinkedlist<int>(head);\n return 0;\n}\n" }, { "alpha_fraction": 0.49349239468574524, "alphanum_fraction": 0.5065075755119324, "avg_line_length": 20.44186019897461, "blob_id": "1b8d0e0ce0b54530e22c7e78e658a3a1e643aa16", "content_id": "c91d118ec5ee831f84f36ab5faf93b5463bcf685", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1844, "license_type": "no_license", "max_line_length": 128, "num_lines": 86, "path": "/interview/castleOntheGrid.py", "repo_name": "Alwaysproblem/simplecode", "src_encoding": "UTF-8", "text": "#!/bin/python3\n\nimport math\nimport os\nimport random\nimport re\nimport sys\n\nfrom collections import deque\n\nn = int(input().strip())\n\ngval_line = [0 for _ in range(n)]\ngval = 
[gval_line[:] for _ in range(n)] #trick [:] usage clones gval_line, otherwise one change changes all (like pointer usage)\ngrid = []\nqueue = deque([])\n\nfor _ in range(n):\n grid.append(input().strip())\n\ncoords = [int(i) for i in input().strip().split(' ')]\ninitial = (coords[0], coords[1])\nfinal = (coords[2], coords[3])\n\nqueue.append(initial)\n\nwhile len(queue) != 0:\n #print(str(queue))\n #print(str(gval))\n #print(str(grid))\n curr = queue.popleft()\n y, x = curr\n if curr == final:\n print(str(gval[y][x]))\n break\n cval = gval[y][x] + 1\n \n for i in range(y+1, n):\n if grid[i][x] == 'X':\n break\n elif gval[i][x] == 0:\n gval[i][x] = cval\n queue.append((i, x))\n for i in range(y-1, -1, -1):\n if grid[i][x] == 'X':\n break\n elif gval[i][x] == 0:\n gval[i][x] = cval\n queue.append((i, x))\n \n for i in range(x+1, n):\n if grid[y][i] == 'X':\n break\n elif gval[y][i] == 0:\n gval[y][i] = cval\n queue.append((y, i))\n for i in range(x-1, -1, -1):\n if grid[y][i] == 'X':\n break\n elif gval[y][i] == 0:\n gval[y][i] = cval\n queue.append((y, i))\n\nif __name__ == '__main__':\n\n n = int(input())\n\n grid = []\n\n for _ in range(n):\n grid_item = input()\n grid.append(grid_item)\n\n startXStartY = input().split()\n\n startX = int(startXStartY[0])\n\n startY = int(startXStartY[1])\n\n goalX = int(startXStartY[2])\n\n goalY = int(startXStartY[3])\n\n result = minimumMoves(grid, startX, startY, goalX, goalY)\n\n print(result)\n" }, { "alpha_fraction": 0.4794520437717438, "alphanum_fraction": 0.5009784698486328, "avg_line_length": 22.227272033691406, "blob_id": "7637693e888c53f0c35b6ac4de1b26114a18c533", "content_id": "ac8ea35d0c8322bfd01fbb98528865162ce7fa0e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1022, "license_type": "no_license", "max_line_length": 79, "num_lines": 44, "path": "/labuladong/cpp/canPartition/main.cpp", "repo_name": "Alwaysproblem/simplecode", "src_encoding": "UTF-8", "text": 
"#include <fmt/format.h>\n#include <fmt/ranges.h>\n\n#include <iostream>\n#include <unordered_map>\n#include <vector>\n\nusing namespace std;\n\nclass Solution {\n public:\n bool canPartition(vector<int>& nums) {\n int total_sum = 0;\n for (int x : nums) {\n total_sum += x;\n }\n if (total_sum % 2) return false;\n int half_total_sum = total_sum / 2;\n vector<vector<bool>> dp(nums.size() + 1, vector<bool>(half_total_sum + 1));\n dp[0][0] = true;\n for (int i = 1; i <= nums.size(); i++) {\n dp[i][0] = true;\n }\n\n for (int i = 1; i <= nums.size(); i++) {\n for (int j = 1; j <= half_total_sum; j++) {\n if (j - nums[i - 1] >= 0)\n dp[i][j] = dp[i - 1][j - nums[i - 1]] || dp[i - 1][j];\n else\n dp[i][j] = dp[i - 1][j];\n }\n }\n fmt::print(\"{}\\n\", fmt::join(dp.begin(), dp.end(), \"\\n\"));\n return dp.back().back();\n }\n};\n\nint main() {\n vector<int> nums{1, 2, 3, 4};\n Solution sol;\n bool v = sol.canPartition(nums);\n fmt::print(\"{}\\n\", v);\n return 0;\n}\n" }, { "alpha_fraction": 0.47592848539352417, "alphanum_fraction": 0.4828060567378998, "avg_line_length": 20.41176414489746, "blob_id": "350d9045a1377801a5613f99ed587dd86964cb61", "content_id": "5d94364584b635c295e9336f496fa61ae1464746", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 727, "license_type": "no_license", "max_line_length": 94, "num_lines": 34, "path": "/reverseLink.py", "repo_name": "Alwaysproblem/simplecode", "src_encoding": "UTF-8", "text": "class Node(object):\n def __init__(self, val = None, next_node = None):\n self.val = val\n self.next = next_node\n\n def print(self):\n p = self\n while p != None:\n if p.next is None:\n print(p.val)\n else:\n print(p.val, end=\"->\")\n p = p.next\n\n\ndef reverse(head: Node) -> Node:\n cur = head\n pre = None\n\n while cur.next is not None:\n n = cur.next\n cur.next = pre\n pre = cur\n cur = n\n cur.next = pre\n\n return cur\n\n\nif __name__ == '__main__':\n node = Node(0, next_node=Node(1, 
next_node=Node(2, next_node=Node(3, next_node=Node(4)))))\n node.print()\n reverse(node).print()\n # node.print()" }, { "alpha_fraction": 0.45471179485321045, "alphanum_fraction": 0.4643183946609497, "avg_line_length": 21.316326141357422, "blob_id": "1e697518c44ca54bd976ca7911a18d17a582c73b", "content_id": "5cd629377c726b7244f3cec2c763666452128b4e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2266, "license_type": "no_license", "max_line_length": 94, "num_lines": 98, "path": "/COPInterview/assembleTeam.py", "repo_name": "Alwaysproblem/simplecode", "src_encoding": "UTF-8", "text": "#%%\n# 每三种职业 组成小组 求 最大组成小组的个数\n# 例\n# 1(询问个数)\n# 3 2 2 3\n# |职业数|选每种职业的人数|\n# 输出:\n# 2\n\n#%%\n\n\nfrom functools import reduce\nimport bisect\n\nclass PriorityQueue(object):\n def __init__(self, key = lambda x: x):\n self.key = key\n self.data = []\n \n def Enqueue(self, item):\n # heappush(self.data, (self.key(item), item))\n bisect.insort(self.data, (self.key(item), item))\n\n def Dequeue(self):\n # return heappop(self.data)[1]\n return self.data.pop(0)[1]\n\n def isEmpty(self):\n return len(self.data) == 0\n\n def qsize(self):\n return len(self.data)\n\n def __repr__(self):\n return \"PriorityQueue(\" + ', '.join(map(str, [i[1] for i in self.data])) + \")\"\n\n # def __str__(self):\n # return \"PriorityQueue(\" + ', '.join(map(str, sorted(self.data, key=self.key))) + \")\"\n\n def __iter__(self):\n return self\n\n def __next__(self):\n if self.isEmpty():\n raise StopIteration\n else:\n return self.Dequeue()\n\n def __contains__(self, item):\n return any([item == pair[1] for pair in self.data])\n\n def __getitem__(self, key):\n for _, item in self.data:\n if item == key:\n return item\n \n def __delitem__(self, key):\n for i, (_, item) in enumerate(self.data):\n if item == key:\n self.data.pop(i)\n\ndef function(t):\n z = t.pop(0)\n pq = PriorityQueue(key=lambda x: -x)\n if reduce(lambda x, y: x*y, t) == 1 and len(t) == z:\n 
return len(t) // 3\n else:\n a = [i for i in t if i != 0]\n if len(a) < 3:\n return \"None\"\n \n for i in a:\n pq.Enqueue(i)\n \n cnt = 0\n while pq.qsize() >= 3:\n s = []\n for _ in range(3):\n s.append(pq.Dequeue())\n \n mm = min(s)\n cnt += mm\n k = [j - mm for j in s]\n for v in k:\n if v != 0:\n pq.Enqueue(v)\n\n return cnt\n\n\n\nif __name__ == \"__main__\":\n c = int(input())\n\n for _ in range(c):\n t = list(map(int, input().split()))\n print(function(t))" }, { "alpha_fraction": 0.5084566473960876, "alphanum_fraction": 0.5179703831672668, "avg_line_length": 17.568628311157227, "blob_id": "897578fbba960ff139dbf1bc5d6c25d340e2f3f7", "content_id": "f997991b77976b418f00fb9b44b855ede943c50d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 946, "license_type": "no_license", "max_line_length": 50, "num_lines": 51, "path": "/NewCoder/wangyi12.py", "repo_name": "Alwaysproblem/simplecode", "src_encoding": "UTF-8", "text": "import math\nimport os\nimport random\nimport re\nimport sys\n\n#%%\n# O(n)\n# window slider generator\ndef WindowSlider(seq, n=2):\n it = iter(seq)\n win = list((next(it, None) for _ in range(n)))\n mm = sum(win)\n # yield mm\n for e in it:\n tmp = win.pop(0)\n win.append(e)\n if mm - tmp + e > mm:\n mm = mm - tmp + e\n # yield mm\n return mm\n\n# def largestkSum(I, k):\n# return max([w for w in WindowSlider(I, k)])\n\nWindowSlider([1, 2, 3, 4])\n\n#%%\ndef MaxInterest(I, awake, k):\n sc = 0\n for ind, a in enumerate(awake):\n if a == 1:\n sc += I[ind]\n I[ind] = 0\n\n return sc + WindowSlider(I, k)\n\ndef main():\n # input\n _, k = list(map(int, input().split()))\n I = list(map(int, input().split()))\n awake = list(map(int, input().split()))\n\n # solution\n result = MaxInterest(I, awake, k)\n\n #print\n print(result)\n\nif __name__ == \"__main__\":\n main()" }, { "alpha_fraction": 0.502970278263092, "alphanum_fraction": 0.5287128686904907, "avg_line_length": 22, "blob_id": 
"9c875b23f7e88f871cd913431078abac8d44321b", "content_id": "c9025747ac89910d8022eeae31f67ed34780080f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 525, "license_type": "no_license", "max_line_length": 61, "num_lines": 22, "path": "/leetcode/python/215.数组中的第k个最大元素.py", "repo_name": "Alwaysproblem/simplecode", "src_encoding": "UTF-8", "text": "#\n# @lc app=leetcode.cn id=215 lang=python3\n#\n# [215] 数组中的第K个最大元素\n#\nfrom typing import *\n\n# @lc code=start\nfrom heapq import *\nclass Solution:\n def findKthLargest(self, nums: List[int], k: int) -> int:\n pq = []\n for i in nums:\n if len(pq) >= k and pq[0] < i:\n heappushpop(pq, i)\n elif len(pq) < k :\n heappush(pq, i)\n return heappop(pq)\n# @lc code=end\n\nif __name__ == \"__main__\":\n print(Solution().findKthLargest([1,2,4,3], 3))" }, { "alpha_fraction": 0.4938271641731262, "alphanum_fraction": 0.4938271641731262, "avg_line_length": 14.037036895751953, "blob_id": "1bf291a671b356c2a4790d4f271596f30f3bb46c", "content_id": "2a5561b769d96dec905e2ce3d445b4afc547d034", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 405, "license_type": "no_license", "max_line_length": 57, "num_lines": 27, "path": "/NewCoder/wangyi15.py", "repo_name": "Alwaysproblem/simplecode", "src_encoding": "UTF-8", "text": "import math\nimport os\nimport random\nimport re\nimport sys\n\ndef Solution(k):\n a, b, c = k\n\n X = max(max(a * b, a + b) * c, max(a * b, a + b) + c)\n Y = max(max(b * c, b + c) * a, max(b * c, b + c) + a)\n\n return max(X, Y)\n\n\ndef main():\n # input\n k = list(map(int, input().split()))\n\n # solution\n result = Solution(k)\n\n #print\n print(result)\n\nif __name__ == \"__main__\":\n main()" }, { "alpha_fraction": 0.38679245114326477, "alphanum_fraction": 0.4033018946647644, "avg_line_length": 19.214284896850586, "blob_id": "bd1af8c4aab5f46ac218d0a55d73cd93189f5034", "content_id": 
"53b61f92f3f7665c986a8df764c2c6701be4c4c7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 856, "license_type": "no_license", "max_line_length": 55, "num_lines": 42, "path": "/leetcode/python/22.括号生成.py", "repo_name": "Alwaysproblem/simplecode", "src_encoding": "UTF-8", "text": "#\n# @lc app=leetcode.cn id=22 lang=python3\n#\n# [22] 括号生成\n#\nfrom typing import List\n# @lc code=start\nclass Solution:\n def generateParenthesis(self, n: int) -> List[str]:\n if n == 0:\n return [\"\"]\n \n l = 0\n r = 0\n\n com = list()\n coms = list()\n\n def backtrace(l, r, com):\n if len(com) == 2 * n:\n coms.append(\"\".join(com))\n return\n \n if l < n :\n com.append(\"(\")\n backtrace(l + 1, r, com)\n com.pop()\n \n if l > r:\n com.append(\")\")\n backtrace(l, r + 1, com)\n com.pop()\n\n backtrace(0, 0, com)\n\n return coms\n\n# @lc code=end\n\nif __name__ == \"__main__\":\n s = Solution()\n print(s.generateParenthesis(3))" }, { "alpha_fraction": 0.6319493055343628, "alphanum_fraction": 0.6333568096160889, "avg_line_length": 22.882352828979492, "blob_id": "74987948da98db4ec12e86ec96aa844f8d2a773e", "content_id": "15834c8d1bdb2a6101db4f97807c78e36881402a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 2842, "license_type": "no_license", "max_line_length": 74, "num_lines": 119, "path": "/leetcode/cpp/common_types/LinkedList/LinkedList.h", "repo_name": "Alwaysproblem/simplecode", "src_encoding": "UTF-8", "text": "#ifndef LINKEDLIST_H_\n#define LINKEDLIST_H_\n\ntemplate <typename T>\nclass LinkedListNode {\n public:\n LinkedListNode() : val(0), next(nullptr){};\n explicit LinkedListNode(T item) : val(item), next(nullptr){};\n ~LinkedListNode() = default;\n T val;\n LinkedListNode<T> *next;\n};\n\ntemplate <typename T>\nvoid showLinkedList(LinkedListNode<T> *node) {\n for (; node != nullptr; node = node->next) {\n if (node->next == nullptr) {\n std::cout << node->val;\n 
} else {\n std::cout << node->val << \" -> \";\n }\n }\n std::cout << std::endl;\n}\n\ntemplate <typename T>\nLinkedListNode<T> *BuildLinkedlist(std::vector<T> &array) {\n LinkedListNode<T> *head = new LinkedListNode<T>();\n LinkedListNode<T> *cur = head;\n for (T ival : array) {\n cur->next = new LinkedListNode<T>(ival);\n cur = cur->next;\n }\n\n cur = head;\n head = head->next;\n delete cur;\n cur = nullptr;\n return head;\n}\n\ntemplate <typename T>\nLinkedListNode<T> *BuildLinkedlist(std::vector<T> &&array) {\n std::vector<T> tmp = array;\n return BuildLinkedlist<T>(tmp);\n}\n\ntemplate <typename T>\nvoid DestroyLinkedlist(LinkedListNode<T> *head) {\n if (head == nullptr) return;\n DestroyLinkedlist<T>(head->next);\n delete head;\n head = nullptr;\n}\n\ntemplate <typename T>\nLinkedListNode<T> *BuildCycleLinkedlist(std::vector<T> &array, int pos) {\n LinkedListNode<T> *head = new LinkedListNode<T>();\n LinkedListNode<T> *cur = head;\n int idx = 0;\n LinkedListNode<T> *cycle_pos = nullptr;\n for (T ival : array) {\n cur->next = new LinkedListNode<T>(ival);\n if (pos >= 0 && idx == pos + 1) {\n cycle_pos = cur;\n }\n cur = cur->next;\n idx++;\n }\n if (cycle_pos) {\n cur->next = cycle_pos;\n }\n cur = head;\n head = head->next;\n delete cur;\n cur = nullptr;\n return head;\n}\n\ntemplate <typename T>\nLinkedListNode<T> *BuildCycleLinkedlist(std::vector<T> &&array, int pos) {\n std::vector<T> tmp = array;\n return BuildCycleLinkedlist(tmp, pos);\n}\n\ntemplate <typename T>\nLinkedListNode<T> *DetectCycle(LinkedListNode<T> *head) {\n if (head == nullptr) return nullptr;\n LinkedListNode<T> *slow = head, *fast = head;\n while (slow->next != nullptr && fast->next != nullptr &&\n fast->next->next != nullptr) {\n slow = slow->next;\n fast = fast->next->next;\n if (slow == fast) {\n break;\n }\n }\n\n if (slow->next == nullptr) return nullptr;\n if (fast->next == nullptr) return nullptr;\n if (fast->next->next == nullptr) return nullptr;\n\n LinkedListNode<T> *res = 
head;\n while (res != slow) {\n res = res->next;\n slow = slow->next;\n }\n return res;\n}\n\ntemplate <typename T>\nvoid DestroyCycleLinkedlist(LinkedListNode<T> *head) {\n if (head == nullptr) return;\n LinkedListNode<T> *cycle_start = DetectCycle<T>(head);\n if (cycle_start != nullptr) cycle_start->next = nullptr;\n return DestroyLinkedlist<T>(head);\n}\n\n#endif\n" }, { "alpha_fraction": 0.46347030997276306, "alphanum_fraction": 0.4789954423904419, "avg_line_length": 21.336734771728516, "blob_id": "9423aed76f35593b515fbe7eb6d910ab1ad6b1b6", "content_id": "4cc64a440fe00ef1cc307ea7a3bd4ef5307b805a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2190, "license_type": "no_license", "max_line_length": 160, "num_lines": 98, "path": "/interview/CountIversion.py", "repo_name": "Alwaysproblem/simplecode", "src_encoding": "UTF-8", "text": "#!/bin/python3\n\nimport math\nimport os\nimport random\nimport re\nimport sys\n\n# https://www.hackerrank.com/challenges/ctci-merge-sort/problem\n\ndef Merge(arr, l, m, r):\n cnt = 0\n s = []\n i, j = l, m + 1\n while i != m + 1 and j != r + 1:\n if arr[i] > arr[j]:\n s.append(arr[j])\n # suppose_ind = len(s) - 1\n cnt += m + 1 - l - (i - l) # the length of len - the current index because left > right, we need len(left) - i steps to put the right into the left.\n j += 1\n elif arr[i] <= arr[j]:\n s.append(arr[i])\n i += 1\n else:\n if i == m + 1:\n s += arr[j:r + 1]\n elif j == r + 1:\n s += arr[i: m + 1]\n arr[l: r+1] = s\n\n return cnt\n\n\n# MergeSort(arr, l, len(arr) - 1, cnt)\ndef MergeSort(arr, l, r):\n if l > r:\n return 0\n elif r == l:\n return 0\n elif r - l == 1:\n if arr[r] < arr[l]:\n arr[r], arr[l] = arr[l], arr[r]\n return 1\n return 0\n else:\n m = (l + r) // 2\n cntl = MergeSort(arr, l, m)\n cntr = MergeSort(arr, m + 1, r)\n cntM = Merge(arr, l, m, r)\n return cntl + cntr + cntM\n\n#wrong\n# from collections import deque\n# # Complete the countInversions 
function below.\n# def countInversions(arr):\n# L = arr.copy()\n# L.sort()\n# if L == arr:\n# return 0\n# tmp = []\n# tab = {k:deque() for k in set(L)}\n# for ind, val in enumerate(L, 1):\n# tab[val].append(ind)\n \n# for i in arr:\n# tmp.append(tab[i].popleft())\n \n# res = sum([abs(ind - val) for ind, val in enumerate(tmp, 1)]) // 2\n\n# return res\n\n# s = [2, 3, 1]\n# cnt = MergeSort(s, 0, len(s) - 1)\n# print(s)\n# print(cnt)\n\ndef countInversions(arr):\n L = arr.copy()\n L.sort()\n if L == arr:\n return 0\n else:\n cnt = MergeSort(arr, 0, len(arr) - 1)\n return cnt\n\nif __name__ == '__main__':\n # fptr = open(os.environ['OUTPUT_PATH'], 'w')\n\n t = int(input())\n\n for t_itr in range(t):\n n = int(input())\n\n arr = list(map(int, input().rstrip().split()))\n\n result = countInversions(arr)\n\n print(result)\n\n" }, { "alpha_fraction": 0.39478763937950134, "alphanum_fraction": 0.41119691729545593, "avg_line_length": 17.836362838745117, "blob_id": "7e10a1fc0b0df6b5b8d13a986307adf2956f1c18", "content_id": "5c2270bef71021406bcc20354d85e181f7929cbe", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1046, "license_type": "no_license", "max_line_length": 51, "num_lines": 55, "path": "/leetcode/cpp/isPalindrome/main.cpp", "repo_name": "Alwaysproblem/simplecode", "src_encoding": "UTF-8", "text": "/*\n * @lc app=leetcode.cn id=125 lang=cpp\n *\n * [125] 验证回文串\n */\n#include <fmt/format.h>\n#include <fmt/ranges.h>\n\n#include <iostream>\n#include <unordered_map>\n#include <vector>\n\nusing namespace std;\n\n// @lc code=start\nclass Solution {\n public:\n bool isPalindrome(string& s) {\n char r[s.size() + 1];\n int j;\n int i;\n for (i = 0, j = 0; j < s.size(); j++) {\n if (s[j] >= 'a' && s[j] <= 'z') {\n r[i] = s[j];\n i++;\n } else if (s[j] >= 'A' && s[j] <= 'Z') {\n r[i] = s[j] | ' '; // note 'A' | ' ' = 'a'\n // r[i] = s[j] + ('a' - 'A');\n i++;\n } else if (s[j] >= '0' && s[j] <= '9') {\n r[i] = s[j];\n i++;\n 
}\n }\n r[i] = 0;\n string rs = &r[0];\n // fmt::print(\"{}\\n\", rs);\n int f = 0, b = rs.size() - 1;\n while (f < b) {\n if (rs[f] != r[b]) return false;\n f++;\n b--;\n }\n return true;\n }\n};\n// @lc code=end\n\nint main() {\n string s = \"0P\";\n Solution sol;\n bool v = sol.isPalindrome(s);\n fmt::print(\"{}\\n\", v);\n return 0;\n}\n" }, { "alpha_fraction": 0.4694189727306366, "alphanum_fraction": 0.47400611639022827, "avg_line_length": 17.18055534362793, "blob_id": "7acf66dfd91ddb2b22b736090e0c2310baa8df95", "content_id": "b7f96b955b5648c015e6291bd5772d8db9916b1c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1308, "license_type": "no_license", "max_line_length": 60, "num_lines": 72, "path": "/NewCoder/wangyi16.py", "repo_name": "Alwaysproblem/simplecode", "src_encoding": "UTF-8", "text": "import math\nimport os\nimport random\nimport re\nimport sys\n\n#%%\nimport bisect\n\nclass cube(object):\n def __init__(self, ind, c):\n self.idx = ind\n self.h = c\n \n def __eq__(self, item):\n return (self.h == item.h) and (self.idx == item.idx)\n\n def __lt__(self, item):\n if self.h < item.h:\n return True\n elif self.h == item.h:\n if self.idx < item.idx:\n return True\n return False\n\n def __gt__(self, item):\n if self.h > item.h:\n return True\n elif self.h == item.h:\n if self.idx > item.idx:\n return True\n return False\n\n def __repr__(self):\n return f\"({self.idx}, {self.h})\"\n\na = [(2, 8), (1, 5), (3, 5)]\nc = [cube(i, j) for i, j in a]\nd = []\nfor cc in c:\n bisect.insort(d, cc)\n\nprint(d)\n\n\n#%%\n\n\ndef Solution(s, k):\n pq = PriorityQueue(key= lambda x: if )\n return \"None\"\n\ndef main():\n # input\n n, k = list(map(int, input().split()))\n Cards = list(map(int, input().split()))\n\n # solution\n result = Solution(Cards, k)\n\n #print\n print(result)\n\nif __name__ == \"__main__\":\n # if not debug:\n # try:\n # while True:\n # main()\n # except EOFError:\n # exit()\n # else:\n 
main()" }, { "alpha_fraction": 0.533923327922821, "alphanum_fraction": 0.5693215131759644, "avg_line_length": 17.83333396911621, "blob_id": "288a3660bf52a5982655332699a0ab0850b4585b", "content_id": "22471f79ae6185ba8e544c3e0566504c1e511ca8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 339, "license_type": "no_license", "max_line_length": 58, "num_lines": 18, "path": "/leetcode/cpp/add_new.sh", "repo_name": "Alwaysproblem/simplecode", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\n_template=${2:-\"template\"}\n\nif [[ -d $1 ]]; then\n echo \"The $1 already exsist.\"\n exit 1\nfi\n\ncp -R temp/$_template $1\n\nif [[ $OSTYPE == 'darwin'* ]] ;then\n sed -i '' \"1 s/^.*$/set(TARGET $1)/\" \"$1/CMakeLists.txt\"\nelse\n sed -i \"1 s/^.*$/set(TARGET $1)/\" \"$1/CMakeLists.txt\"\nfi\n\necho \"add_subdirectory($1)\" >> CMakeLists.txt\n" }, { "alpha_fraction": 0.4797441363334656, "alphanum_fraction": 0.4968017041683197, "avg_line_length": 25.02777862548828, "blob_id": "7624c6a6e06aeee99ec8c2b8d7b2232a3de487b7", "content_id": "cc9123c574fbcc662f3e515cace0a39838b5731a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 952, "license_type": "no_license", "max_line_length": 69, "num_lines": 36, "path": "/leetcode/python/199.二叉树的右视图.py", "repo_name": "Alwaysproblem/simplecode", "src_encoding": "UTF-8", "text": "#\n# @lc app=leetcode.cn id=199 lang=python3\n#\n# [199] 二叉树的右视图\n#\nfrom typing import List\nclass TreeNode:\n def __init__(self, val=0, left=None, right=None):\n self.val = val\n self.left = left\n self.right = right\n\n# @lc code=start\n# Definition for a binary tree node.\nclass Solution:\n def rightSideView(self, root: TreeNode) -> List[int]:\n if root is None:\n return []\n queue = [(root, 0)]\n res = []\n\n def extend(cur, height):\n return (cur.left, height + 1,), (cur.right, height + 1, )\n \n while len(queue) != 0:\n cur, h = queue.pop(0)\n if 
len(queue) == 0 or queue[0][1] != h:\n res.append(cur.val)\n (l, hl), (r, hr) = extend(cur, h)\n if l is not None:\n queue.append((l, hl))\n if r is not None:\n queue.append((r, hr))\n\n return res\n# @lc code=end\n\n" }, { "alpha_fraction": 0.638248860836029, "alphanum_fraction": 0.6474654674530029, "avg_line_length": 15.692307472229004, "blob_id": "6566d7a115bc520ca34196f15d9c7a68593607fe", "content_id": "a36bdbd5101611e7074256483944f7c8e51c85ac", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 434, "license_type": "no_license", "max_line_length": 61, "num_lines": 26, "path": "/interview/minAbs.py", "repo_name": "Alwaysproblem/simplecode", "src_encoding": "UTF-8", "text": "#!/bin/python3\n\nimport math\nimport os\nimport random\nimport re\nimport sys\n\n\n\n# Complete the minimumAbsoluteDifference function below.\ndef minimumAbsoluteDifference(arr):\n arr.sort()\n diff = map(lambda x: abs(x[1] - x[0]), zip(arr, arr[1:]))\n return min(diff)\n\n\nif __name__ == '__main__':\n\n n = int(input())\n\n arr = list(map(int, input().rstrip().split()))\n\n result = minimumAbsoluteDifference(arr)\n\n print(result)\n" }, { "alpha_fraction": 0.5266343951225281, "alphanum_fraction": 0.5466101765632629, "avg_line_length": 23.294116973876953, "blob_id": "0643c91d9e0df299c04d656651a26be6b786c99c", "content_id": "b43420aa49f537621392214034ea52485711306b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1694, "license_type": "no_license", "max_line_length": 58, "num_lines": 68, "path": "/leetcode/cpp/searchRange/main.cpp", "repo_name": "Alwaysproblem/simplecode", "src_encoding": "UTF-8", "text": "/*\n * @lc app=leetcode.cn id=34 lang=cpp\n *\n * [34] 在排序数组中查找元素的第一个和最后一个位置\n */\n#include <fmt/format.h>\n#include <fmt/ranges.h>\n\n#include <iostream>\n#include <unordered_map>\n#include <vector>\n\nusing namespace std;\n\n// @lc code=start\nclass Solution {\n public:\n int 
searchLowerbound(vector<int>& nums, int target) {\n if (!nums.size()) return -1;\n int left = 0, right = nums.size() - 1;\n int mid = 0;\n while (left <= right) {\n mid = left + (right - left) / 2;\n if (nums[mid] == target) {\n right = mid - 1;\n } else if (nums[mid] > target) {\n right = mid - 1;\n } else if (nums[mid] < target) {\n left = mid + 1;\n }\n }\n if (left >= nums.size()) return -1;\n return target == nums[left] ? left : -1;\n }\n int searchUpperbound(vector<int>& nums, int target) {\n int left = 0, right = nums.size() - 1;\n int mid = 0;\n while (left <= right) {\n mid = left + (right - left) / 2;\n if (nums[mid] == target) {\n left = mid + 1;\n } else if (nums[mid] > target) {\n right = mid - 1;\n } else if (nums[mid] < target) {\n left = mid + 1;\n }\n }\n if (right < 0) return -1;\n return target == nums[right] ? right : -1;\n }\n vector<int> searchRange(vector<int>& nums, int target) {\n vector<int> res;\n int lower = searchLowerbound(nums, target);\n int upper = searchUpperbound(nums, target);\n res.push_back(lower);\n res.push_back(upper);\n return res;\n }\n};\n// @lc code=end\n\nint main() {\n Solution s;\n vector<int> v{5, 7, 7, 8, 8, 10};\n vector<int> r = s.searchRange(v, 8);\n fmt::print(\"The index should be {}\\n\", r);\n return 0;\n}\n" }, { "alpha_fraction": 0.7075471878051758, "alphanum_fraction": 0.7226415276527405, "avg_line_length": 22.04347801208496, "blob_id": "7484a3e6764cf5481db81a18e74154c1f5b9282b", "content_id": "1c5f8a54debb970c89f52daacc14ed32536e41d6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 530, "license_type": "no_license", "max_line_length": 49, "num_lines": 23, "path": "/labuladong/cpp/BinaryTree/main.cpp", "repo_name": "Alwaysproblem/simplecode", "src_encoding": "UTF-8", "text": "#include <fmt/format.h>\n#include <fmt/ranges.h>\n\n#include <iostream>\n#include <queue>\n#include <string>\n#include <unordered_set>\n#include <vector>\n\n#include 
\"common_types/TreeNode/BinaryTreeNode.h\"\n\nusing namespace std;\nusing TreeNode = BinaryTree::BinaryTreeNode<int>;\nusing BinaryTree::BuildBinaryTree;\nusing BinaryTree::showBinaryTree;\n\nint main() {\n const int null = BinaryTree::null<int>();\n vector<int> a{3, 9, 20, null, null, 15, 7};\n TreeNode *root = BuildBinaryTree<int>(a);\n showBinaryTree<int>(root);\n return 0;\n}\n" }, { "alpha_fraction": 0.5470852255821228, "alphanum_fraction": 0.5874439477920532, "avg_line_length": 17.41666603088379, "blob_id": "6d619c666f36721c8af8da0098e191a0329b22ab", "content_id": "c9b9bb8a095bf5cf613ff235113c22a2e24e28e4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 223, "license_type": "no_license", "max_line_length": 51, "num_lines": 12, "path": "/labuladong/cpp/add_new.sh", "repo_name": "Alwaysproblem/simplecode", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\n_template=${2:-\"temp\"}\n\nif [[ -d $1 ]]; then\n echo \"The $1 already exsist.\"\n exit 1\nfi\n\ncp -R $_template $1\nsed -i \"1 s/^.*$/set(TARGET $1)/\" $1/CMakeLists.txt\necho \"add_subdirectory($1)\" >> CMakeLists.txt\n\n\n" }, { "alpha_fraction": 0.4176100492477417, "alphanum_fraction": 0.43144655227661133, "avg_line_length": 21.08333396911621, "blob_id": "98b25b613aa8a8d8b443fae1fefffeba055421e0", "content_id": "824ff09ec1f811a7ff2e36004fde69df7ecab04f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 799, "license_type": "no_license", "max_line_length": 65, "num_lines": 36, "path": "/leetcode/python/90.子集-ii.py", "repo_name": "Alwaysproblem/simplecode", "src_encoding": "UTF-8", "text": "#\n# @lc app=leetcode.cn id=90 lang=python3\n#\n# [90] 子集 II\n#\nfrom typing import List\n# @lc code=start\nclass Solution:\n def subsetsWithDup(self, nums: List[int]) -> List[List[int]]:\n\n def Subset(nums: list):\n if len(nums) == 0:\n return [set()]\n if len(nums) == 1:\n return [set(), 
set([nums[0]])]\n \n k = nums.pop()\n res, new_res = Subset(nums)\n\n res_ = []\n for i in res:\n co = i.copy()\n co.add(k)\n res_.append(co)\n \n new_res = len(res_)\n\n res_ += res\n\n return res_\n return Subset(nums)\n# @lc code=end\n\nif __name__ == \"__main__\":\n s = Solution()\n print(s.subsetsWithDup([1,2,2]))\n" }, { "alpha_fraction": 0.782608687877655, "alphanum_fraction": 0.782608687877655, "avg_line_length": 37.33333206176758, "blob_id": "18d8648444c591c6b45e6bb54ab46c81f943f543", "content_id": "93d0f19ee171e3d4907c88308b8c8281f16bfe08", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "CMake", "length_bytes": 115, "license_type": "no_license", "max_line_length": 49, "num_lines": 3, "path": "/leetcode/cpp/equationsPossible/CMakeLists.txt", "repo_name": "Alwaysproblem/simplecode", "src_encoding": "UTF-8", "text": "set(TARGET equationsPossible)\nadd_executable(${TARGET} main.cpp)\ntarget_link_libraries(${TARGET} PRIVATE fmt::fmt)\n" }, { "alpha_fraction": 0.5687500238418579, "alphanum_fraction": 0.5718749761581421, "avg_line_length": 18.42424201965332, "blob_id": "f9084b5ef5e2383bf245b75c143fa0e11537a9d3", "content_id": "147cde38e85ac37f351456193a8ddaeee3f78ce8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 640, "license_type": "no_license", "max_line_length": 56, "num_lines": 33, "path": "/labuladong/python/common_types.py", "repo_name": "Alwaysproblem/simplecode", "src_encoding": "UTF-8", "text": "from typing import List, Tuple, Set\n\nclass BiLinkedListNode:\n def __init__(self, val=0, next = None, prev = None):\n self.val = val\n self.next = next\n self.prev = prev\n\nclass LinkedListNode:\n def __init__(self, val=0, next=None):\n self.val = val\n self.next = next\n\nclass BinaryTreeNode:\n def __init__(self, x):\n self.val = x\n self.left = None\n self.right = None\n\nclass TreeNode:\n def __init__(self, x):\n self.val = x\n self.children: List[TreeNode] = 
[]\n\n__all__ = [\n List,\n Tuple,\n Set,\n BiLinkedListNode,\n LinkedListNode,\n BinaryTreeNode,\n TreeNode,\n]" }, { "alpha_fraction": 0.46645864844322205, "alphanum_fraction": 0.47581902146339417, "avg_line_length": 17.882352828979492, "blob_id": "bbc26eaf13a2402119e5089ea0704c4442a0ffee", "content_id": "cb5db8de416836126f10180ff8b9c59387145e45", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 641, "license_type": "no_license", "max_line_length": 49, "num_lines": 34, "path": "/leetcode/python/permute.py", "repo_name": "Alwaysproblem/simplecode", "src_encoding": "UTF-8", "text": "def permute(arr):\n if len(arr) <= 1:\n return [arr]\n\n n = len(arr) - 1\n com = list()\n coms = list()\n\n def backtrace(com, cans):\n if len(cans) == 0:\n # if ind > n - 1:\n coms.append(com.copy())\n return\n \n cands = cans.copy()\n for c in cans:\n com.append(c)\n t = cands.pop(0)\n # cands.pop(ind)\n backtrace(com, cands)\n com.pop()\n cands.append(t)\n\n backtrace(com, arr)\n\n return coms\n\na = [1]\n\nprint(permute(a))\n\nfrom itertools import permutations\n\nprint(len([*permutations(a)]) == len(permute(a)))" }, { "alpha_fraction": 0.4417831003665924, "alphanum_fraction": 0.45775115489959717, "avg_line_length": 20.47142791748047, "blob_id": "4aa56e42d03a5e10f664f996b6a30209d97ffdfe", "content_id": "1d9b3a2f78aa122db2f6736fccb30236c73a9588", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1503, "license_type": "no_license", "max_line_length": 60, "num_lines": 70, "path": "/interview/expenditureDays.py", "repo_name": "Alwaysproblem/simplecode", "src_encoding": "UTF-8", "text": "#!/bin/python3\n#%%\nimport math\nimport os\nimport random\nimport re\nimport sys\n\n# Complete the activityNotifications function below.\n# from statistics import median\nfrom collections import Counter\n#%%\ndef median(l, d):\n m = 0\n c = Counter(l)\n a = list(c.keys())\n a.sort()\n if d%2 
== 0:\n find = d // 2\n for i, v in enumerate(a):\n if find - c[v] > 0:\n find = find - c[v]\n else:\n if c[v] == 1:\n m = v + a[i + 1]\n else:\n m = v * 2\n return m\n else:\n find = d // 2 + 1\n for i, v in enumerate(a):\n if find - c[v] > 0:\n find = find - c[v]\n else:\n m = v * 2\n return m\n\ns = [3, 4, 2 ,3 ,6]\nm = median(s, len(s))\nprint(m)\n\n# #%%\n# def activityNotifications(expenditure, d):\n# note = 0\n# for day in range(0, len(expenditure) - d):\n# ex_day = expenditure[day: d + day]\n# expend = expenditure[d + day]\n# med = median(ex_day, d)\n# if expend >= 2 * med:\n# note += 1\n# return note\n\n\n# if __name__ == '__main__':\n# # fptr = open(os.environ['OUTPUT_PATH'], 'w')\n\n# nd = input().split()\n\n# n = int(nd[0])\n\n# d = int(nd[1])\n\n# expenditure = list(map(int, input().rstrip().split()))\n\n# result = activityNotifications(expenditure, d)\n\n# print(result)\n# # fptr.write(str(result) + '\\n')\n\n# # fptr.close()\n" }, { "alpha_fraction": 0.4826412498950958, "alphanum_fraction": 0.501701831817627, "avg_line_length": 24.771930694580078, "blob_id": "92722c451a07ba1395b093cf8e7fbb924d217de6", "content_id": "e2c1d640a2a4bd29d6f1ab5f1d3bd168a8488295", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1637, "license_type": "no_license", "max_line_length": 97, "num_lines": 57, "path": "/COPInterview/MaxSumScore.py", "repo_name": "Alwaysproblem/simplecode", "src_encoding": "UTF-8", "text": "#!/bin/python3\n\nimport math\nimport os\nimport random\nimport re\nimport sys\n\n#%%[marckdown]\n# description:\n# 小Q在假期的时候去探险,这个密室可以看成是 n*m 的小格子, 小Q最开始可以选择任意一个格子进入\n# 此后的每一步 只能在 ,(x + 1, y - 1), (x + 1, y + 1), (x + 1, y) 中选择. 
每个格子都有数字\n# 当 Q 经过 为0 的格子后 分数取反, 求 能得最大分数 是多少\n\n#%%\n# dynamic programming.\nfrom IPython.display import Image\nImage('img/maxScoreSum.png')\n\n\n#%%\n\ndef maxScore(g, n, m = 3):\n # res = [[0] * m for _ in range(n)]\n res_max = [[0] * m for _ in range(n)]\n res_min = [[0] * m for _ in range(n)]\n\n for i in range(m):\n # res[0][i] = g[0][i]\n res_max[0][i] = g[0][i]\n res_min[0][i] = g[0][i]\n \n for step in range(n - 1):\n for i in range(m):\n if g[step + 1][i] == 0:\n res_max[step + 1][i] = - min([res_min[step][j] for j in range(m)]) \n # if we only have 3 choices, then we need to modify for j in range(m) to\n # for j in child(res_min[step][j])\n res_min[step + 1][i] = - max([res_max[step][j] for j in range(m)])\n else:\n res_max[step + 1][i] = max([res_max[step][j] + g[step + 1][i] for j in range(m)])\n res_min[step + 1][i] = min([res_min[step][j] + g[step + 1][i] for j in range(m)])\n\n return max(res_max[-1])\n\nif __name__ == '__main__':\n n = int(input())\n\n Gmap = []\n for _ in range(n):\n q = list(map(int, input().split()))\n Gmap.append(q)\n\n print(maxScore(Gmap, n))\n\n\n#%%\n" }, { "alpha_fraction": 0.616150438785553, "alphanum_fraction": 0.6300885081291199, "avg_line_length": 39.34821319580078, "blob_id": "9bfc66d783bba612ccd4cdef7daec22ceaffeb68", "content_id": "0768fc4b3325197dcfaa157430e05552d8540916", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4520, "license_type": "no_license", "max_line_length": 505, "num_lines": 112, "path": "/interview/Shelockanagram.py", "repo_name": "Alwaysproblem/simplecode", "src_encoding": "UTF-8", "text": "#!/bin/python3\n#%%\nimport math\nimport os\nimport random\nimport re\nimport sys\n\n# from itertools import permutations, combinations, accumulate\n# from collections import Counter\n# from functools import reduce\n\n#%%\n# from collections import Counter\n# from itertools import combinations\n\n# def sherlockAndAnagrams(s):\n# count = []\n# for i in 
range(1,len(s)+1):\n# a = [\"\".join(sorted(s[j:j+i])) for j in range(len(s)-i+1)]\n# b = Counter(a)\n# for j in b:\n# tmp = list(combinations(['a']*b[j], 2))\n# count.append(sum([len(tmp)]))\n# return sum(count)\n\n\n\n#%%[markdown]\n# Two string are anagrams if and only if for every letter occurring in any of them the number of its occurrences is equal in both the strings.\n# \n# This definition is crucial and will lead to the solution. Since the only allowed letters are lowercase English letters, from $a$ to $z$, the alphabet size is constant and its size is $26$. This allows us to assign a constant size signature to each of the substring of $s$.\n# \n# A signature of some string $w$ will be a tuple of $26$ elements where the -th element denotes the number of occurrences of the $i$-th letter of the alphabet in $w$.\n# \n# So, for example, if $w$ then its signature is $[0,0,0,0,0,0,0,0,0,0,0,0,2,0,1,0,0,0,0,0,0,0,0,0,0,0]$, so the only non-zero elements are the ones corresponding to letter $m$ with value of 2 and letter $o$ with value of 1.\n# \n# Notice, that any string that is an anagram of $\"mom\"$ will have the same signature as $\"mom\"$, and every string that is not an anagram of $\"mom\"$ will definitely have a different signature.\n# \n# This concept of signatures allows the following approach.\n# \n# Let's iterate over all substrings of $s$ and for each fixed substring let's compute its signature and add that signature to signatures hashmap, where $signatures[sig]$ denotes the number of substrings of $s$ with a signature $sig$.\n# \n# Finally, the only remaining thing to do is to get the number of pairs of substrings of $s$ that are anagrams. It's easy to do having our hashmap. 
Notice that if there are $n$ substrings of $s$ with signature $sig$, then they can form $n\\times(n-1) \\div 2$ pairs of substrings with signature $sig$, so we can just iterate over all values in the hashmap and for each value $n$ add $n\\times(n-1) \\div 2$ to the final result.\n# \n# The below, commented code, in Python, illustrates this exact approach.\n# \n# The time complexity is $O(\\lvert s^{3} \\rvert )$ since we iterate over all $O(\\lvert s^{2} \\rvert )$ substrings of s and for each substring we compute its signature in $O(\\lvert s \\rvert )$ time. It's worth to mention that each operation on hashmap has constant running time since our signatures have a constant size, i.e. $26$ which is the size of our alphabet. Otherwise, if the alphabet size is not constant, this approach will have $O(\\lvert s^{3} \\rvert ) \\times ALPHABET_SIZE$ time complexity.#%%\n# mysolution:\n\n#%%\n# from collections import Counter\n# # Complete the sherlockAndAnagrams function below.\n# def sherlockAndAnagrams(s):\n# def findallsubstring(s):\n# sub = []\n# for ind in range(len(s)):\n# for indc in range(ind, len(s)):\n# if s != s[ind:indc+1]:\n# sub.append(s[ind:indc+1])\n# # print(set(sub))\n# return sub\n# sub = findallsubstring(s)\n# # print(sub)\n\n# a = Counter()\n# order = {chr(i + ord('a')): i for i in range(26)}\n# for i in sub:\n# aaa = [0] * 26\n# aa = Counter(i)\n# for c in aa.keys():\n# aaa[order[c]] += aa[c]\n# d = str(aaa)\n# if d in a:\n# a[d] += 1\n# else:\n# a[d] = 1\n\n# dd = sum(map(lambda x: x*(x - 1) / 2, [i for i in a.values() if i > 1]))\n# return int(dd)\n\nfrom collections import Counter\n# Complete the sherlockAndAnagrams function below.\ndef sherlockAndAnagrams(s):\n Count = 0\n def findallsubstring(s):\n sub = []\n for ind in range(len(s)):\n for indc in range(ind, len(s)):\n if s != s[ind: indc + 1]:\n sub.append(''.join(sorted(s[ind: indc + 1])))\n # print(set(sub))\n return sub\n sub = findallsubstring(s)\n b = Counter(sub)\n for i in b:\n Count 
+= b[i] * (b[i] - 1) / 2\n\n return int(Count)\n\n\n\nif __name__ == '__main__':\n\n q = int(input())\n\n for q_itr in range(q):\n s = input()\n\n result = sherlockAndAnagrams(s)\n\n print(result)\n\n" }, { "alpha_fraction": 0.44990476965904236, "alphanum_fraction": 0.4643809497356415, "avg_line_length": 22.026315689086914, "blob_id": "ca4d3de343a41ff5a09d9759007c96f225c49fcd", "content_id": "1304a4c26cfb91fc3384a825c2f9bdb10184d487", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 2637, "license_type": "no_license", "max_line_length": 77, "num_lines": 114, "path": "/leetcode/cpp/solve/main.cpp", "repo_name": "Alwaysproblem/simplecode", "src_encoding": "UTF-8", "text": "/*\n * @lc app=leetcode.cn id=130 lang=cpp\n *\n * [130] 被围绕的区域\n */\n\n#include <fmt/format.h>\n#include <fmt/ranges.h>\n\n#include <iostream>\n#include <unordered_map>\n#include <vector>\n\n#include \"utils/print_2d.hpp\"\n\nusing namespace std;\n\n// @lc code=start\nclass UnionFind {\n public:\n UnionFind() = delete;\n UnionFind(int n) : count(n) {\n parent.reserve(n);\n for (int i = 0; i < n; i++) {\n parent.emplace_back(i);\n }\n }\n\n void Union(int p, int q) {\n int rootP = find(p);\n int rootQ = find(q);\n\n if (rootP == rootQ) return;\n parent[rootQ] = rootP;\n count--;\n }\n\n bool connected(int p, int q) { return find(p) == find(q); }\n\n int find(int x) {\n if (parent[x] != x) {\n parent[x] = find(parent[x]);\n }\n return parent[x];\n }\n\n int get_count() const { return count; }\n\n private:\n int count;\n vector<int> parent;\n};\n\nclass Solution {\n public:\n int get_index(int row, int col, int m) { return col * m + row; }\n\n void solve(vector<vector<char>>& board) {\n int n = board.size();\n int m = board[0].size();\n UnionFind uf{n * m + 1};\n int dummy = m * n;\n for (int col = 0; col < n; col++) {\n if (board[col][0] == 'O') uf.Union(dummy, col * m + 0);\n if (board[col][m - 1] == 'O') uf.Union(dummy, col * m + m - 1);\n }\n 
for (int row = 0; row < m; row++) {\n if (board[0][row] == 'O') uf.Union(dummy, row);\n if (board[n - 1][row] == 'O') uf.Union(dummy, m * (n - 1) + row);\n }\n\n vector<pair<int, int>> actions = {\n {-1, 0},\n {0, -1},\n {1, 0},\n {0, 1},\n };\n int x = 0, y = 0;\n for (int col = 1; col < n - 1; col++) {\n for (int row = 1; row < m - 1; row++) {\n for (auto& p : actions) {\n x = p.first, y = p.second;\n if (board[col][row] == 'O' && board[col + y][row + x] == 'O') {\n uf.Union(get_index(row, col, m), get_index(row + x, col + y, m));\n }\n }\n }\n }\n\n for (int col = 0; col < n; col++) {\n for (int row = 0; row < m; row++) {\n if (board[col][row] == 'O' &&\n !uf.connected(dummy, get_index(row, col, m)))\n board[col][row] = 'X';\n }\n }\n }\n};\n// @lc code=end\n\nint main() {\n vector<vector<char>> board = {{'X'}};\n // vector<vector<char>> board = {{'X', 'X', 'X', 'X'},\n // {'X', 'O', 'O', 'X'},\n // {'X', 'X', 'O', 'X'},\n // {'X', 'O', 'X', 'X'}};\n Solution s;\n fmt::print(\"The inputs:\\n\");\n print2D(board);\n s.solve(board);\n fmt::print(\"The outputs after solve:\\n\");\n print2D(board);\n return 0;\n}\n" }, { "alpha_fraction": 0.4549950659275055, "alphanum_fraction": 0.47675567865371704, "avg_line_length": 23.071428298950195, "blob_id": "394b9b9e6babd890087debccc64768d3f238914d", "content_id": "62836d121e88c4e10b01d107c9e7fb573aab2575", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1011, "license_type": "no_license", "max_line_length": 70, "num_lines": 42, "path": "/labuladong/cpp/change/main.cpp", "repo_name": "Alwaysproblem/simplecode", "src_encoding": "UTF-8", "text": "#include <fmt/format.h>\n#include <fmt/ranges.h>\n\n#include <iostream>\n#include <unordered_map>\n#include <vector>\n\nusing namespace std;\n\nclass Solution {\n public:\n int change(int amount, vector<int>& coins) {\n vector<vector<int>> dp(amount + 1, vector<int>(coins.size() + 1));\n for (int j = 1; j <= coins.size(); ++j) {\n 
dp.at(0).at(j) = 1;\n }\n for (int a = 0; a <= amount; ++a) {\n dp.at(a).at(0) = 0;\n }\n for (int a = 1; a <= amount; ++a) {\n for (int j = 1; j <= coins.size(); ++j) {\n if (a > coins.at(j - 1))\n dp.at(a).at(j) =\n dp.at(a).at(j - 1) + dp.at(a - coins.at(j - 1)).at(j);\n else if (a == coins.at(j - 1))\n dp.at(a).at(j) = dp.at(a).at(j - 1) + 1;\n else\n dp.at(a).at(j) = dp.at(a).at(j - 1);\n }\n }\n fmt::print(\"{}\", dp);\n return dp.at(amount).at(coins.size());\n }\n};\n\nint main() {\n Solution s;\n vector<int> v{1, 2, 5};\n int res = s.change(5, v);\n fmt::print(\"\\n{}\\n\", res);\n return 0;\n}\n" }, { "alpha_fraction": 0.5632184147834778, "alphanum_fraction": 0.568965494632721, "avg_line_length": 15.838709831237793, "blob_id": "62163be2a93c0aee2e0c23ddd32e699b8261a11b", "content_id": "351851a2a3705d0be074fa952fbd6c0dd35a968d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 522, "license_type": "no_license", "max_line_length": 53, "num_lines": 31, "path": "/interview/pairs.py", "repo_name": "Alwaysproblem/simplecode", "src_encoding": "UTF-8", "text": "#!/bin/python3\n\nimport math\nimport os\nimport random\nimport re\nimport sys\n\n# https://www.hackerrank.com/challenges/pairs/problem\n# Complete the pairs function below.\ndef pairs(k, arr):\n hashT = set(arr)\n resl = []\n for a_i in arr:\n if a_i - k in hashT:\n resl.append((a_i, a_i - k))\n\n return len(resl)\n\nif __name__ == '__main__':\n nk = input().split()\n\n n = int(nk[0])\n\n k = int(nk[1])\n\n arr = list(map(int, input().rstrip().split()))\n\n result = pairs(k, arr)\n\n print(result)\n" }, { "alpha_fraction": 0.469852089881897, "alphanum_fraction": 0.5153583884239197, "avg_line_length": 23.44444465637207, "blob_id": "c98b76a4c3a68c84484c6c1bd201dcc03b608dcd", "content_id": "c77579bc67926a81d3e359205601bd53eac11b66", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 879, 
"license_type": "no_license", "max_line_length": 92, "num_lines": 36, "path": "/Graphcore/getMgroup-subarray.py", "repo_name": "Alwaysproblem/simplecode", "src_encoding": "UTF-8", "text": "from copy import deepcopy\n\ndef DFS_search(arr, num_groups):\n\n res_set = []\n\n def brutal_search_helper(arr, num_groups, res):\n if not arr or not num_groups:\n return\n\n if num_groups == 1:\n res.append(arr.copy())\n res_set.append(deepcopy(res))\n res.pop()\n return\n\n\n for i in range(1, len(arr)):\n first_group = arr[:i].copy()\n res.append(first_group)\n brutal_search_helper(arr[i:], num_groups - 1, res)\n res.pop()\n\n brutal_search_helper(arr, num_groups, [])\n return res_set\n\n# TODO: need to implement non-recursive version for python \n\n\nif __name__ == '__main__':\n\n a = [1000, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1200, 1, 1, 1, 1, 1, 1, 34, 56, 74, 39, 26, 49]\n r = DFS_search(a, 4)\n\n with open(\"out\", 'w') as f:\n print(*r, file=f, sep='\\n')" }, { "alpha_fraction": 0.42988505959510803, "alphanum_fraction": 0.46206897497177124, "avg_line_length": 20.799999237060547, "blob_id": "4e307a4265fd4b64685404d7340d128c7e73efed", "content_id": "3c2c7ff38876dfd91067c75143d7a14df8518320", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 435, "license_type": "no_license", "max_line_length": 58, "num_lines": 20, "path": "/CountSort.py", "repo_name": "Alwaysproblem/simplecode", "src_encoding": "UTF-8", "text": "def CountSort(nums):\n\n # if min(nums) > 0:\n # cnt_list = [0] * max(nums)\n # else:\n cnt_list = [0] * (abs(max(nums)) + abs(min(nums)) + 1)\n delta = min(nums)\n res = []\n\n for i in nums:\n cnt_list[i - delta] += 1\n \n for idx, cnt in enumerate(cnt_list):\n if cnt != 0:\n res += [idx + delta] * cnt\n \n return res\n\nif __name__ == '__main__':\n print(CountSort([1,1,1,1,1,1,11,]))" }, { "alpha_fraction": 0.49307480454444885, "alphanum_fraction": 0.5207756161689758, "avg_line_length": 20.65999984741211, 
"blob_id": "b41672d9d183580fb9dfece9f1bc42f7c8fde980", "content_id": "ddf40c99b0c12db6b4817eb940f63cb617a0edf7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1093, "license_type": "no_license", "max_line_length": 78, "num_lines": 50, "path": "/leetcode/cpp/findErrorNums/main.cpp", "repo_name": "Alwaysproblem/simplecode", "src_encoding": "UTF-8", "text": "/*\n * @lc app=leetcode.cn id=645 lang=cpp\n *\n * [645] 错误的集合\n */\n#include <fmt/format.h>\n#include <fmt/ranges.h>\n\n#include <iostream>\n#include <unordered_map>\n#include <vector>\n\nusing namespace std;\n\n// @lc code=start\nclass Solution {\n public:\n vector<int> findErrorNums(vector<int>& nums) {\n int n = nums.size();\n int dup = -1, idx = 0;\n for (int i = 0; i < n; i++) {\n idx = (int)abs(nums.at(i)) - 1;\n if (nums.at(idx) < 0) {\n dup = idx;\n } else {\n nums.at(idx) *= -1;\n }\n }\n int missing = -1;\n for (int i = 0; i < n; i++) {\n if (nums.at(i) > 0) {\n missing = i + 1;\n }\n }\n // fmt::print(\"{}, dup: {}, missing: {}\\n\", nums, dup, missing);\n return vector<int>{dup + 1, missing};\n // return vector<int>();\n }\n};\n// @lc code=end\n\nint main() {\n vector<int> nums = {1, 2, 4, 4, 6, 6};\n // vector<int> nums = {1, 2, 4, 4, 7, 6}; this will cause malloc error it is\n // new things for debug! 
you can try it.\n Solution sol;\n vector<int> v = sol.findErrorNums(nums);\n fmt::print(\"{}\\n\", v);\n return 0;\n}\n" }, { "alpha_fraction": 0.5292682647705078, "alphanum_fraction": 0.5341463685035706, "avg_line_length": 19.549999237060547, "blob_id": "d7e7267e5c38817217e6484e59d6078056439cf0", "content_id": "b652f7c71104e150212681c93962e23416d5f695", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 410, "license_type": "no_license", "max_line_length": 44, "num_lines": 20, "path": "/leetcode/python/detectCycle.py", "repo_name": "Alwaysproblem/simplecode", "src_encoding": "UTF-8", "text": "# Definition for singly-linked list.\nclass ListNode:\n def __init__(self, x):\n self.val = x\n self.next = None\n\n\ndef detectCycle(head: ListNode) -> ListNode:\n visited = {}\n p = head\n index = 0\n while p.next is not None:\n if p not in visited:\n visited[p] = index\n else:\n return visited[p]\n p = p.next\n index += 1\n\nprint(detectCycle())" }, { "alpha_fraction": 0.4641255736351013, "alphanum_fraction": 0.4641255736351013, "avg_line_length": 13.833333015441895, "blob_id": "1a44e65a7e53241ba968259fa939b4d5146233bf", "content_id": "06de8d15558451aa6e31dafa3d7f2d8d58b8b4a3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 468, "license_type": "no_license", "max_line_length": 48, "num_lines": 30, "path": "/COPInterview/getRidofElements.py", "repo_name": "Alwaysproblem/simplecode", "src_encoding": "UTF-8", "text": "\n#%%\n# 去掉重复次数 超过 m 的 元素\n\nfrom collections import Counter\n\n#%%\ndef solution(n, m, N):\n cnt = Counter(N)\n s = set()\n\n for i in cnt:\n if cnt[i] > m:\n s.add(i)\n\n res = []\n for i in N:\n if i not in s:\n res.append(i)\n\n return res\n\n\n\n#%%\nif __name__ == \"__main__\":\n n, m = list(map(int, input().split()))\n\n N = list(map(int, input().split()))\n\n print(' '.join(map(str, solution(n, m, N))))\n" }, { "alpha_fraction": 
0.7419354915618896, "alphanum_fraction": 0.774193525314331, "avg_line_length": 30, "blob_id": "886cda4809a99ebe4d7b4a778a9f520658ee80fc", "content_id": "db0ffffa8895f6beacb8c43e29e9780ad3e544f7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "CMake", "length_bytes": 31, "license_type": "no_license", "max_line_length": 30, "num_lines": 1, "path": "/labuladong/cpp/2_sum/CMakeLists.txt", "repo_name": "Alwaysproblem/simplecode", "src_encoding": "UTF-8", "text": "add_executable(2_sum main.cpp)\n" }, { "alpha_fraction": 0.5925925970077515, "alphanum_fraction": 0.6172839403152466, "avg_line_length": 16.35714340209961, "blob_id": "6e34b3256fb1f11b644e25612f94812c1ca56873", "content_id": "61a12a129dee9f3dd01a66f77e6a825284366940", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 243, "license_type": "no_license", "max_line_length": 55, "num_lines": 14, "path": "/labuladong/cpp/temp/main.cpp", "repo_name": "Alwaysproblem/simplecode", "src_encoding": "UTF-8", "text": "#include <fmt/format.h>\n#include <fmt/ranges.h>\n\n#include <iostream>\n#include <unordered_map>\n#include <vector>\n\nusing namespace std;\n\nint main() {\n unordered_map<int, int> v{{1, 3}, {2, 4}, {3, NULL}};\n fmt::print(\"{}\\n\", v);\n return 0;\n}\n" }, { "alpha_fraction": 0.45888158679008484, "alphanum_fraction": 0.4802631437778473, "avg_line_length": 17.42424201965332, "blob_id": "ef11e54d59efb55e913bcc0c95a0d6f929e51387", "content_id": "8594547d1c53fd8622c015e100422885a632303d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 608, "license_type": "no_license", "max_line_length": 52, "num_lines": 33, "path": "/interview/MaxSubsetArray.py", "repo_name": "Alwaysproblem/simplecode", "src_encoding": "UTF-8", "text": "#!/bin/python3\n\nimport math\nimport os\nimport random\nimport re\nimport sys\n\n# Complete the maxSubsetSum function below.\ndef 
maxSubsetSum(arr):\n res = [0] * (len(arr) + 1)\n\n for i, v in enumerate(arr, 1):\n if i == 1:\n res[i] = v\n elif v > 0:\n res[i] = max(res[i - 2] + v, res[i - 1])\n else:\n res[i] = max(res[i - 2], res[i - 1])\n if res[i] < 0:\n res[i] = 0\n \n return res[-1]\n \nif __name__ == '__main__':\n\n n = int(input())\n\n arr = list(map(int, input().rstrip().split()))\n\n res = maxSubsetSum(arr)\n\n print(res)\n" }, { "alpha_fraction": 0.6236842274665833, "alphanum_fraction": 0.6263157725334167, "avg_line_length": 19, "blob_id": "ca6fc4504be5f710a8181189fe1a35da7fb0a985", "content_id": "33b89fe467f6dc608b19ea1214c4029ee7cf0b9a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 380, "license_type": "no_license", "max_line_length": 66, "num_lines": 19, "path": "/labuladong/cpp/utils/debug_recursive.hpp", "repo_name": "Alwaysproblem/simplecode", "src_encoding": "UTF-8", "text": "#ifndef DEBUG_RECURSIVE_HPP\n#define DEBUG_RECURSIVE_HPP\n\n#include <fmt/format.h>\n#include <fmt/ranges.h>\n\n#include <iostream>\n#include <string>\n\ntemplate <typename... T>\nvoid debug_recursive(std::string format, int level, T&&... 
args) {\n std::string indent = \" \";\n for (int i = 0; i <= level; i++) {\n fmt::print(\"{}\", indent);\n }\n fmt::print(format, args...);\n}\n\n#endif\n" }, { "alpha_fraction": 0.5802919864654541, "alphanum_fraction": 0.5912408828735352, "avg_line_length": 18.60714340209961, "blob_id": "19335880b11e6939d14f028193d7d4e064a7a522", "content_id": "bdc11039bb8340c9226213467a1bd79903999da3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 548, "license_type": "no_license", "max_line_length": 57, "num_lines": 28, "path": "/interview/GreedyFlorist.py", "repo_name": "Alwaysproblem/simplecode", "src_encoding": "UTF-8", "text": "#!/bin/python3\n\nimport math\nimport os\nimport random\nimport re\nimport sys\n\n# Complete the getMinimumCost function below.\ndef getMinimumCost(k, c):\n weight = [i // k + 1 for i in range(len(c))]\n c.sort(reverse=True)\n # res = sum([i*j for i,j in zip(weight, c)])\n res = sum(map(lambda z: z[0] * z[1], zip(weight, c)))\n return res\n\nif __name__ == '__main__':\n nk = input().split()\n\n n = int(nk[0])\n\n k = int(nk[1])\n\n c = list(map(int, input().rstrip().split()))\n\n minimumCost = getMinimumCost(k, c)\n\n print(minimumCost)" }, { "alpha_fraction": 0.5423242449760437, "alphanum_fraction": 0.5624103546142578, "avg_line_length": 17.83783721923828, "blob_id": "a322460a470d4fcc924e820091240eb88e9523a9", "content_id": "df89ec9af877e1f116d1d2fc91a81458d0e2f798", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 697, "license_type": "no_license", "max_line_length": 158, "num_lines": 37, "path": "/interview/make_anagram.py", "repo_name": "Alwaysproblem/simplecode", "src_encoding": "UTF-8", "text": "#!/bin/python3\n\nimport math\nimport os\nimport random\nimport re\nimport sys\n\n# 
https://www.hackerrank.com/challenges/ctci-making-anagrams/problem?h_l=interview&playlist_slugs%5B%5D=interview-preparation-kit&playlist_slugs%5B%5D=strings\n\n# Complete the makeAnagram function below.\ndef makeAnagram(a, b):\n c, d = [0] * 26, [0] * 26\n\n for i in a:\n c[ord(i) - ord('a')] += 1\n\n for j in b:\n d[ord(j) - ord('a')] += 1\n \n cnt = 0\n for i,j in zip(c, d):\n if i != j:\n cnt += abs(i - j)\n\n return cnt\n\nif __name__ == '__main__':\n # fptr = open(os.environ['OUTPUT_PATH'], 'w')\n\n a = input()\n\n b = input()\n\n res = makeAnagram(a, b)\n\n print(res)\n" }, { "alpha_fraction": 0.7111670970916748, "alphanum_fraction": 0.7178841233253479, "avg_line_length": 20.672727584838867, "blob_id": "57720ec1c4fd15be423410faa8e8a9721c7c07dd", "content_id": "6169cf1c3a3714ab372a834b92d79d4ec37a24b9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1191, "license_type": "no_license", "max_line_length": 78, "num_lines": 55, "path": "/PythonHack/MG/README.md", "repo_name": "Alwaysproblem/simplecode", "src_encoding": "UTF-8", "text": "Kevin and Stuart want to play the _'The Minion Game'_.\n\n**Game Rules**\n\nBoth players are given the same string, $S$.\n\nBoth players have to make substrings using the letters of the string $S$.\n\nStuart has to make words starting with consonants.\n\nKevin has to make words starting with vowels.\n\nThe game ends when both players have made all possible substrings. \n\n**Scoring**\nA player gets +1 point for each occurrence of the substring in the string $S$.\n\n**For Example:**\n\nString $S$ = BANANA\n\nKevin's vowel beginning word = ANA\n\nHere, ANA occurs twice in BANANA. Hence, Kevin will get 2 Points. 
\n\nFor better understanding, see the image below: \n\n![img](banana.png)\n\nYour task is to determine the winner of the game and their score.\n\n**Input Format**\n\nA single line of input containing the string $S$.\n\n**Note:** The string $S$ will contain only uppercase letters: $[A-Z]$.\n\n**Constraints**\n\n$0 < len(S) \\leq 10^{6}$\n\n**Output Format**\n\nPrint one line: the name of the winner and their score separated by a space.\n\nIf the game is a draw, print Draw.\n\nSample Input\n\n BANANA\nSample Output\n\n Stuart 12\n**Note :**\n**Vowels are only defined as . In this problem, is not considered a vowel.**" }, { "alpha_fraction": 0.5257510542869568, "alphanum_fraction": 0.5547210574150085, "avg_line_length": 19.711111068725586, "blob_id": "584034c0b952c92a852d621b195fbf0586627777", "content_id": "bfccc0637a11bb79dc7ed0f8744c9b1f4dc3f1e3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 940, "license_type": "no_license", "max_line_length": 62, "num_lines": 45, "path": "/leetcode/cpp/dailyTemperatures/main.cpp", "repo_name": "Alwaysproblem/simplecode", "src_encoding": "UTF-8", "text": "/*\n * @lc app=leetcode.cn id=739 lang=cpp\n *\n * [739] 每日温度\n */\n#include <fmt/format.h>\n#include <fmt/ranges.h>\n\n#include <iostream>\n#include <stack>\n#include <unordered_map>\n#include <vector>\n\nusing namespace std;\n\n// @lc code=start\nclass Solution {\n public:\n vector<int> dailyTemperatures(vector<int>& temperatures) {\n int n = temperatures.size();\n stack<int> s{};\n stack<int> idx{};\n vector<int> ans(n);\n for (int i = n - 1; i >= 0; i--) {\n while (!s.empty() && s.top() <= temperatures[i]) {\n s.pop();\n idx.pop();\n }\n ans[i] = idx.empty() ? 0 : idx.top() - i;\n // ans[i] = s.empty() ? 
0 : s.top();\n s.push(temperatures[i]);\n idx.push(i);\n }\n return ans;\n }\n};\n// @lc code=end\n\nint main() {\n vector<int> temperatures = {73, 74, 75, 71, 69, 72, 76, 73};\n Solution sol;\n vector<int> v = sol.dailyTemperatures(temperatures);\n fmt::print(\"{}\\n\", v);\n return 0;\n}\n" }, { "alpha_fraction": 0.5276243090629578, "alphanum_fraction": 0.5561694502830505, "avg_line_length": 21.625, "blob_id": "50d0dcd7a09d41922dcd17c990bdfae6409f087f", "content_id": "69231bf9ab54d447e326ce48220c0428ea074130", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1086, "license_type": "no_license", "max_line_length": 145, "num_lines": 48, "path": "/interview/2DDS.py", "repo_name": "Alwaysproblem/simplecode", "src_encoding": "UTF-8", "text": "#!/bin/python3\n\nimport math\nimport os\nimport random\nimport re\nimport sys\n\n# https://www.hackerrank.com/challenges/2d-array/problem?h_l=interview&playlist_slugs%5B%5D=interview-preparation-kit&playlist_slugs%5B%5D=arrays\n\n# Complete the hourglassSum function below.\ndef hourglassSum(arr):\n # conv_window = [[1] * 3, [0, 1, 0], [1] * 3]\n # print(conv_window)\n Row = len(arr)\n Col = len(arr[0])\n list_sum = []\n\n def conv(aa):\n return sum(aa[0] + [aa[1][1]] + aa[2])\n \n def chop(a, r, c):\n lista =[]\n for i in a[r: r + 3]:\n lista.append(i[c: c+3])\n # print(lista)\n return lista\n\n # r = conv(arr[0:3][0:3])\n # print(chop(arr, 1, 0))\n\n for row in range(0, Row - 2):\n for col in range(0, Col - 2):\n list_sum.append(conv(chop(arr, row, col)))\n\n return max(list_sum)\n\nif __name__ == '__main__':\n # fptr = open(os.environ['OUTPUT_PATH'], 'w')\n\n arr = []\n\n for _ in range(6):\n arr.append(list(map(int, input().rstrip().split())))\n\n result = hourglassSum(arr)\n\n print(result)\n" }, { "alpha_fraction": 0.3994038701057434, "alphanum_fraction": 0.402384489774704, "avg_line_length": 21.399999618530273, "blob_id": "ce618b3e55f28746989ae006135035aa64444579", 
"content_id": "efc4b0c5d8457734cc5157e5c773a2698b504e75", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 877, "license_type": "no_license", "max_line_length": 50, "num_lines": 30, "path": "/labuladong/cpp/note_template/slide_window.cpp", "repo_name": "Alwaysproblem/simplecode", "src_encoding": "UTF-8", "text": "/* 滑动窗口算法框架 */\nvoid slidingWindow(string s) {\n unordered_map<char, int> window;\n \n int left = 0, right = 0;\n while (right < s.size()) {\n // c 是将移入窗口的字符\n char c = s[right];\n // 增大窗口\n right++;\n // 进行窗口内数据的一系列更新\n ...\n\n /*** debug 输出的位置 ***/\n // 注意在最终的解法代码中不要 print\n // 因为 IO 操作很耗时,可能导致超时\n printf(\"window: [%d, %d)\\n\", left, right);\n /********************/\n \n // 判断左侧窗口是否要收缩\n while (window needs shrink) {\n // d 是将移出窗口的字符\n char d = s[left];\n // 缩小窗口\n left++;\n // 进行窗口内数据的一系列更新\n ...\n }\n }\n}" }, { "alpha_fraction": 0.5411298274993896, "alphanum_fraction": 0.5490584969520569, "avg_line_length": 18.403846740722656, "blob_id": "d9859f094d87635e0dbb2932f47870819bf56820", "content_id": "f0ff08f1e2e2e8b949372047d396f76ca0b1e5b1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1015, "license_type": "no_license", "max_line_length": 80, "num_lines": 52, "path": "/leetcode/cpp/permutation/main.cpp", "repo_name": "Alwaysproblem/simplecode", "src_encoding": "UTF-8", "text": "/*\n * @lc app=leetcode.cn id=46 lang=cpp\n *\n * [46] 全排列\n */\n#include <algorithm>\n#include <iostream>\n#include <unordered_map>\n#include <vector>\n\nusing namespace std;\n\n// @lc code=start\nclass Solution {\n public:\n vector<vector<int>> res = {};\n vector<vector<int>> permute(vector<int>& nums) {\n vector<int> s{};\n backtrack(nums, s);\n return res;\n }\n\n void backtrack(vector<int>& nums, vector<int> solution) {\n if (solution.size() == nums.size()) {\n res.push_back(solution);\n return;\n }\n for (int c : nums) {\n if (find(solution.begin(), 
solution.end(), c) != solution.end()) continue;\n solution.push_back(c);\n backtrack(nums, solution);\n solution.pop_back();\n }\n }\n};\n// @lc code=end\n\nint main() {\n Solution s;\n vector<int> v{1, 2, 3};\n vector<vector<int>> result = s.permute(v);\n cout << \"[\";\n for (auto& i : result) {\n cout << \"[\";\n for (auto& j : i) {\n cout << j << \" \";\n }\n cout << \"] \";\n }\n cout << \"] \" << endl;\n return 0;\n}\n" }, { "alpha_fraction": 0.3681652545928955, "alphanum_fraction": 0.4252733886241913, "avg_line_length": 23.235294342041016, "blob_id": "16c5a13b744a918f2b1270078fde224474843004", "content_id": "fa284b260f509974197b72f90d495e1df6e38eee", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 837, "license_type": "no_license", "max_line_length": 70, "num_lines": 34, "path": "/leetcode/python/1143.最长公共子序列.py", "repo_name": "Alwaysproblem/simplecode", "src_encoding": "UTF-8", "text": "#\n# @lc app=leetcode.cn id=1143 lang=python3\n#\n# [1143] 最长公共子序列\n#\n\n# @lc code=start\nclass Solution:\n def longestCommonSubsequence(self, text1: str, text2: str) -> int:\n n1 = len(text1)\n n2 = len(text2)\n\n dp = [ [0] * (n2 + 1) for _ in range(n1 + 1)]\n\n # for i in range(1, n1 + 1):\n # dp[i][0] = 0\n \n # for j in range(1, n2 + 1):\n # dp[0][j] = 0\n \n for i in range(1, n1 + 1):\n for j in range(1, n2 + 1):\n if text1[i - 1] == text2[j - 1]:\n dp[i][j] = dp[i - 1][j - 1] + 1\n else:\n dp[i][j] = max(dp[i - 1][j], dp[i][j - 1])\n \n return dp[n1][n2]\n\n\n# @lc code=end\n\nif __name__ == \"__main__\":\n print(Solution().longestCommonSubsequence(\"abcde\", \"ace\"))" }, { "alpha_fraction": 0.5149157047271729, "alphanum_fraction": 0.5291828513145447, "avg_line_length": 16.133333206176758, "blob_id": "4a4e1f6c4bc98491c61a5da84cf9f8b84f60a364", "content_id": "ecdd59c03dc7934b38bcbe8d9f44bc67cbbe8eac", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 779, 
"license_type": "no_license", "max_line_length": 53, "num_lines": 45, "path": "/leetcode/cpp/2_sum/main.cpp", "repo_name": "Alwaysproblem/simplecode", "src_encoding": "UTF-8", "text": "/*\n * @lc app=leetcode.cn id=1 lang=cpp\n *\n * [1] 两数之和\n */\n\n#include <iostream>\n#include <unordered_map>\n#include <vector>\n\nusing namespace std;\n\n// @lc code=start\n\nclass Solution {\n public:\n vector<int> twoSum(vector<int> &nums, int target) {\n unordered_map<int, int> m;\n vector<int> v;\n int n = nums.size();\n for (int i = 0; i < n; i++) {\n int diff = target - nums[i];\n if (m.find(diff) != m.end()) {\n auto p = m.find(diff);\n v.push_back(p->second);\n v.push_back(i);\n }\n m.insert(make_pair(nums[i], i));\n }\n\n return v;\n }\n};\n\n// @lc code=end\n\nint main() {\n Solution s;\n vector<int> v{1, 6, 3, 2, 5};\n vector<int> result = s.twoSum(v, 11);\n for (int i : result) {\n cout << i << endl;\n }\n return 0;\n}\n" }, { "alpha_fraction": 0.4791666567325592, "alphanum_fraction": 0.4845430254936218, "avg_line_length": 20.257143020629883, "blob_id": "66de337478d0c569afaafda0a52ea0c814835387", "content_id": "50536c8ecada12f9e6d637b7509b4d0f79d49475", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1718, "license_type": "no_license", "max_line_length": 51, "num_lines": 70, "path": "/leetcode/cpp/note_template/monostack_queue.cpp", "repo_name": "Alwaysproblem/simplecode", "src_encoding": "UTF-8", "text": "/*******************STACK************************/\nint[] nextGreaterElement(int[] nums) {\n int n = nums.length;\n // 存放答案的数组\n int[] res = new int[n];\n Stack<Integer> s = new Stack<>();\n // 倒着往栈里放\n for (int i = n - 1; i >= 0; i--) {\n // 判定个子高矮\n while (!s.isEmpty() && s.peek() <= nums[i]) {\n // 矮个起开,反正也被挡着了。。。\n s.pop();\n }\n // nums[i] 身后的更大元素\n res[i] = s.isEmpty() ? 
-1 : s.peek();\n s.push(nums[i]);\n }\n return res;\n}\n\n/*******************QUEUE************************/\n/* 单调队列的实现 */\nclass MonotonicQueue {\n LinkedList<Integer> maxq = new LinkedList<>();\n public\n void push(int n) {\n // 将小于 n 的元素全部删除\n while (!maxq.isEmpty() && maxq.getLast() < n) {\n maxq.pollLast();\n }\n // 然后将 n 加入尾部\n maxq.addLast(n);\n }\n\n public\n int max() { return maxq.getFirst(); }\n\n public\n void pop(int n) {\n if (n == maxq.getFirst()) {\n maxq.pollFirst();\n }\n }\n}\n\n/* 解题函数的实现 */\nint[] maxSlidingWindow(int[] nums, int k) {\n MonotonicQueue window = new MonotonicQueue();\n List<Integer> res = new ArrayList<>();\n\n for (int i = 0; i < nums.length; i++) {\n if (i < k - 1) {\n //先填满窗口的前 k - 1\n window.push(nums[i]);\n } else {\n // 窗口向前滑动,加入新数字\n window.push(nums[i]);\n // 记录当前窗口的最大值\n res.add(window.max());\n // 移出旧数字\n window.pop(nums[i - k + 1]);\n }\n }\n // 需要转成 int[] 数组再返回\n int[] arr = new int[res.size()];\n for (int i = 0; i < res.size(); i++) {\n arr[i] = res.get(i);\n }\n return arr;\n}\n" }, { "alpha_fraction": 0.5654423236846924, "alphanum_fraction": 0.5752716064453125, "avg_line_length": 25.47945213317871, "blob_id": "55373eae1363363789eb9efb2d446799a8724e6a", "content_id": "3ce8522fc74cc39ccc047b2880fe0d25209d9b9d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 1933, "license_type": "no_license", "max_line_length": 97, "num_lines": 73, "path": "/CHack/qsortSolve.c", "repo_name": "Alwaysproblem/simplecode", "src_encoding": "UTF-8", "text": "#include <stdlib.h>\n\n// /* Comparison function. Receives two generic (void) pointers to the items under comparison. */\n// int compare_ints(const void *p, const void *q) {\n// int x = *(const int *)p;\n// int y = *(const int *)q;\n\n// /* Avoid return x - y, which can cause undefined behaviour\n// because of signed integer overflow. 
*/\n// if (x < y)\n// return -1; // Return -1 if you want ascending, 1 if you want descending order. \n// else if (x > y)\n// return 1; // Return 1 if you want ascending, -1 if you want descending order. \n\n// return 0;\n// }\n\n// /* Sort an array of n integers, pointed to by a. */\n// void sort_ints(int *a, size_t n) {\n// qsort(a, n, sizeof *a, &compare_ints);\n// }\n\n#include <stdio.h>\n#include <stdlib.h>\n#include <math.h>\n\nstruct triangle\n{\n\tint a;\n\tint b;\n\tint c;\n};\n\ntypedef struct triangle triangle;\n\nfloat S(triangle tr){\n double p = (tr.a + tr.b + tr.c) / 2.0;\n return sqrt(p * (p - tr.a) * (p - tr.b) * (p- tr.c));\n}\n\n/* Comparison function. Receives two generic (void) pointers to the items under comparison. */\nint key(const void *p, const void *q) {\n triangle x = *(triangle *)p;\n triangle y = *(triangle *)q;\n\n /* Avoid return x - y, which can cause undefined behaviour\n because of signed integer overflow. */\n if (S(x) < S(y))\n return -1; // Return -1 if you want ascending, 1 if you want descending order. \n else if (S(x) > S(y))\n return 1; // Return 1 if you want ascending, -1 if you want descending order. 
\n\n return 0;\n}\n\nvoid sort_by_area(triangle* tr, int n) {\n qsort(tr, n, sizeof(*tr), &key);\n}\n\nint main()\n{\n\tint n;\n\tscanf(\"%d\", &n);\n\ttriangle *tr = malloc(n * sizeof(triangle));\n\tfor (int i = 0; i < n; i++) {\n\t\tscanf(\"%d%d%d\", &tr[i].a, &tr[i].b, &tr[i].c);\n\t}\n\tsort_by_area(tr, n);\n\tfor (int i = 0; i < n; i++) {\n\t\tprintf(\"%d %d %d\\n\", tr[i].a, tr[i].b, tr[i].c);\n\t}\n\treturn 0;\n}\n" }, { "alpha_fraction": 0.4270557165145874, "alphanum_fraction": 0.4323607385158539, "avg_line_length": 20, "blob_id": "880fb1395b628acf0851f434396ce09ef5ecb2f9", "content_id": "028499136d0b9cc306a15b0b50888a353190d38c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 377, "license_type": "no_license", "max_line_length": 55, "num_lines": 18, "path": "/PythonHack/MG/MinionGame.py", "repo_name": "Alwaysproblem/simplecode", "src_encoding": "UTF-8", "text": "def minion_game(string):\n # your code goes here\n k = 0\n s = 0\n n = len(string)\n for i, c in enumerate(string):\n if c in \"AEIOU\":\n k += n - i \n else:\n s += n - i\n if s == k:\n print(\"Draw\")\n else:\n print(f\"Stuart {s}\" if s > k else f\"Kevin {k}\")\n\nif __name__ == '__main__':\n s = input()\n minion_game(s)" }, { "alpha_fraction": 0.5331771969795227, "alphanum_fraction": 0.5425449013710022, "avg_line_length": 20.350000381469727, "blob_id": "291ca076539b5d9ef3f47a16cf549d6d5bb3e35e", "content_id": "a81351544de3e43e53bf395261aa7f1a6ad51d52", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1287, "license_type": "no_license", "max_line_length": 77, "num_lines": 60, "path": "/leetcode/cpp/permute_unique/main.cpp", "repo_name": "Alwaysproblem/simplecode", "src_encoding": "UTF-8", "text": "/*\n * @lc app=leetcode.cn id=47 lang=cpp\n *\n * [47] 全排列 II\n */\n#include <algorithm>\n#include <iostream>\n#include <unordered_map>\n#include <vector>\n\nusing namespace std;\n\n// @lc 
code=start\nclass Solution {\n public:\n vector<vector<int>> res = {};\n unordered_map<int, int> visited;\n\n vector<vector<int>> permuteUnique(vector<int>& nums) {\n vector<int> s{};\n for (int i : nums) visited[i]++;\n std::sort(nums.begin(), nums.end());\n backtrack(nums, s, visited);\n return res;\n }\n\n void backtrack(vector<int>& nums, vector<int> solution,\n unordered_map<int, int> visited) {\n if (solution.size() == nums.size()) {\n res.push_back(solution);\n return;\n }\n\n for (int j = 0; j < nums.size(); ++j) {\n if (visited[nums[j]] == 0 || j > 0 && nums[j - 1] == nums[j]) continue;\n visited[nums[j]]--;\n solution.push_back(nums[j]);\n backtrack(nums, solution, visited);\n solution.pop_back();\n visited[nums[j]]++;\n }\n }\n};\n// @lc code=end\n\nint main() {\n Solution s;\n vector<int> v{1, 1, 2};\n vector<vector<int>> result = s.permuteUnique(v);\n cout << \"[\";\n for (auto& i : result) {\n cout << \"[\";\n for (auto& j : i) {\n cout << j << \" \";\n }\n cout << \"] \";\n }\n cout << \"] \" << endl;\n return 0;\n}\n" }, { "alpha_fraction": 0.3699340224266052, "alphanum_fraction": 0.5127238631248474, "avg_line_length": 28.48611068725586, "blob_id": "8c970bd0347ccde31c91e38f7a42bf577335fe53", "content_id": "4726aeb07338b03b6ecc1ae6072c418e4c2a57f3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2122, "license_type": "no_license", "max_line_length": 398, "num_lines": 72, "path": "/PythonHack/2_sum.py", "repo_name": "Alwaysproblem/simplecode", "src_encoding": "UTF-8", "text": "# def twoSum(nums, target):\n# \"\"\"\n# :type nums: List[int]\n# :type target: int\n# :rtype: List[int]\n# \"\"\"\n# from collections import Counter\n# ind_dic = {}\n# if len(set(nums)) != len(nums):\n# pans = Counter(nums).most_common(1)[0][0]\n# if pans*2 == target:\n# fi = nums.index(pans)\n# nums[fi] = ''\n# return [fi, nums.index(pans)]\n# else:\n# for ne in range(len(nums)):\n# if nums[ne] == pans:\n# nums[ne] = 
''\n# find_ans = nums\n\n# elif len(set(nums)) == len(nums):\n# find_ans = nums\n# else:\n# pass\n \n# for i, n in enumerate(find_ans):\n# ind_dic[n] = i\n# for c in find_ans:\n# if c != '':\n# if target - c in ind_dic and ind_dic[c] != ind_dic[target - c]:\n# return [ind_dic[c], ind_dic[target - c]]\n \n# return \"None\"\n# 60 ms\n\n\n# find the two number that sum up to target sum\n# hash tabel solve the problem\n\ndef twoSum(nums, target):\n # 96 ms\n \"\"\"\n :type nums: List[int]\n :type target: int\n :rtype: List[int]\n \"\"\"\n ind_dic = {}\n\n for i, n in enumerate(nums):\n if ind_dic.get(n) is None:\n ind_dic[n] = [i]\n else:\n ind_dic[n].append(i)\n \n for c in nums:\n if len(ind_dic[c]) > 1 and c * 2 == target:\n return [ind_dic[c][0], ind_dic[c][1]]\n\n elif target - c in ind_dic and ind_dic[c] != ind_dic[target - c]:\n return [ind_dic[c][0], ind_dic[target - c][0]]\n\n\n\ndef main():\n nums = [230,863,916,585,981,404,316,785,88,12,70,435,384,778,887,755,740,337,86,92,325,422,815,650,920,125,277,336,221,847,168,23,677,61,400,136,874,363,394,199,863,997,794,587,124,321,212,957,764,173,314,422,927,783,930,282,306,506,44,926,691,568,68,730,933,737,531,180,414,751,28,546,60,371,493,370,527,387,43,541,13,457,328,227,652,365,430,803,59,858,538,427,583,368,375,173,809,896,370,789]\n # 542\n tar = int(input(\"target:\"))\n ans = twoSum(nums, tar)\n print(ans)\n\nif __name__ == '__main__':\n main()" }, { "alpha_fraction": 0.5592543482780457, "alphanum_fraction": 0.5785619020462036, "avg_line_length": 22.841270446777344, "blob_id": "fa1edabcb228ba9c0e92a5a45c4104553b8c7b1d", "content_id": "764df1195d7d1f2e85fe46aec953dd4cb7648edb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1516, "license_type": "no_license", "max_line_length": 73, "num_lines": 63, "path": "/leetcode/cpp/allPathsSourceTarget/main.cpp", "repo_name": "Alwaysproblem/simplecode", "src_encoding": "UTF-8", "text": "/*\n * @lc app=leetcode.cn 
id=797 lang=cpp\n *\n * [797] 所有可能的路径\n */\n\n#include <fmt/format.h>\n#include <fmt/ranges.h>\n\n#include <iostream>\n#include <unordered_map>\n#include <vector>\n\nusing namespace std;\n\n// @lc code=start\n\nclass Solution {\n public:\n vector<bool> visited{};\n vector<vector<int>> paths = {};\n vector<int> neighbour(vector<vector<int>>& graph, int node) {\n return graph[node];\n }\n\n void DFS(vector<vector<int>>& graph, int node, vector<int>& path) {\n if (node >= graph.size() || node < 0) return;\n if (visited[node]) return;\n if (0 == neighbour(graph, node).size() || node == graph.size() - 1) {\n if (node != graph.size() - 1) return;\n path.push_back(node);\n paths.push_back(vector<int>(path));\n path.pop_back();\n return;\n }\n path.push_back(node);\n for (int child : neighbour(graph, node)) {\n DFS(graph, child, path);\n }\n path.pop_back();\n // visited[node] = true;\n }\n\n vector<vector<int>> allPathsSourceTarget(vector<vector<int>>& graph) {\n visited.assign(graph.size(), false);\n visited.reserve(graph.size());\n vector<int> path{};\n DFS(graph, 0, path);\n return paths;\n }\n};\n\n// @lc code=end\n\nint main() {\n Solution s;\n // vector<vector<int>> graph = {{2}, {}, {1}};\n vector<vector<int>> graph = {{4, 3, 1}, {3, 2, 4}, {}, {4}, {}};\n // vector<vector<int>> graph = {{4, 3, 1}, {3, 2, 4}, {3}, {4}, {}};\n vector<vector<int>> res = s.allPathsSourceTarget(graph);\n fmt::print(\"{}\\n\", res);\n return 0;\n}\n" }, { "alpha_fraction": 0.46005773544311523, "alphanum_fraction": 0.47930702567100525, "avg_line_length": 20.66666603088379, "blob_id": "7fcc2fd7d5e1747915b104e7c9f89beb858eb3b8", "content_id": "2587370c1ff6ed44fcc6f6de6b2dd1216cafc694", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1039, "license_type": "no_license", "max_line_length": 73, "num_lines": 48, "path": "/leetcode/python/detectCycleGraph.py", "repo_name": "Alwaysproblem/simplecode", "src_encoding": "UTF-8", "text": "from 
collections import defaultdict\n \nclass Graph():\n def __init__(self, V):\n self.V = V\n self.graph = defaultdict(list)\n \n def addEdge(self, u, v):\n self.graph[u].append(v)\n \n\n def isCyclic(self):\n color = { u: \"white\" for u in range(self.V)}\n def backtrace(u, color):\n\n if color[u] == \"gray\":\n return True\n \n color[u] = \"gray\"\n\n for v in self.graph[u]:\n if color[v] == \"gray\":\n return True\n elif color[v] is \"white\" and backtrace(v, color) == True:\n return True\n else:\n pass\n\n color[u] = \"black\"\n \n return False\n return backtrace(0, color)\n\ng = Graph(7)\ng.addEdge(0, 1)\ng.addEdge(1, 2)\ng.addEdge(2, 3)\n# g.addEdge(4, 1)\n# g.addEdge(4, 0)\ng.addEdge(3, 4)\ng.addEdge(4, 5)\n# g.addEdge(2, 6)\ng.addEdge(5, 2)\n\nif(g.isCyclic()):\n print (\"Graph has a cycle\")\nelse:\n print (\"Graph has no cycle\")" }, { "alpha_fraction": 0.5192878246307373, "alphanum_fraction": 0.5252225399017334, "avg_line_length": 18.852941513061523, "blob_id": "2df9789d5f697bfd4ead1b537535059abfbcdcc1", "content_id": "055d23febc6c79dc42c54b239a02de32d3c5c10f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 674, "license_type": "no_license", "max_line_length": 54, "num_lines": 34, "path": "/NewCoder/wangyi2.py", "repo_name": "Alwaysproblem/simplecode", "src_encoding": "UTF-8", "text": "import math\nimport os\nimport random\nimport re\nimport sys\n\nfrom collections import Counter\ndef ChooseCard(s, k):\n cnt = Counter(s)\n priority = [(char, cnt[char]) for char in cnt]\n priority.sort(key= lambda x: x[1], reverse = True)\n\n chosen = k\n score = 0\n while chosen > 0:\n _, t = priority.pop(0)\n if t > chosen:\n score += chosen * chosen\n break\n score += t * t\n chosen -= t\n\n return score\n\n\nif __name__ == \"__main__\":\n try:\n while True:\n n, k = list(map(int, input().split()))\n Cards = input()\n\n print(ChooseCard(Cards, k))\n except EOFError:\n exit()" }, { "alpha_fraction": 
0.5856671929359436, "alphanum_fraction": 0.592257022857666, "avg_line_length": 19.576271057128906, "blob_id": "457a380dadecf566e4128fece90a6b905c8040a9", "content_id": "cb25079802d103cff94f09db3a45c5c6170fcabd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1236, "license_type": "no_license", "max_line_length": 71, "num_lines": 59, "path": "/leetcode/cpp/LL_getKthFromEnd/main.cpp", "repo_name": "Alwaysproblem/simplecode", "src_encoding": "UTF-8", "text": "/*\n * @lc app=leetcode.cn id=19 lang=cpp\n *\n * [19] 删除链表的倒数第 N 个结点\n */\n#include <fmt/format.h>\n#include <fmt/ranges.h>\n\n#include <iostream>\n#include <unordered_map>\n#include <vector>\n\n#include \"common_types/LinkedList/LinkedList.h\"\n\nusing namespace std;\nusing ListNode = LinkedListNode<int>;\n\n// @lc code=start\nclass Solution {\n public:\n ListNode* removeNthFromEnd(ListNode* head, int k) {\n if (!head) return nullptr;\n if (!k) return nullptr;\n ListNode *slow = head, *fast = head;\n int count = 0;\n while (fast != nullptr && count < k) {\n fast = fast->next;\n count++;\n }\n if (!fast) {\n if (count == k) {\n ListNode* del = head;\n head = head->next;\n delete del;\n return head;\n }\n return head;\n };\n\n for (; fast->next != nullptr; slow = slow->next, fast = fast->next)\n ;\n ListNode* del = slow->next;\n slow->next = slow->next->next;\n delete del;\n return head;\n }\n};\n// @lc code=end\n\nint main() {\n Solution s;\n vector<int> v = {1};\n ListNode* head = BuildLinkedlist<int>(v);\n showLinkedList<int>(head);\n ListNode* k = s.removeNthFromEnd(head, 1);\n showLinkedList<int>(k);\n DestroyLinkedlist<int>(k);\n return 0;\n}\n" }, { "alpha_fraction": 0.43493151664733887, "alphanum_fraction": 0.4472602605819702, "avg_line_length": 21.461538314819336, "blob_id": "648e8a9f63801a8a26e84a1b4d4d406d29bbd775", "content_id": "c38985d280e7ac16dc0a8af76900f14f3a650578", "detected_licenses": [], "is_generated": false, "is_vendor": false, 
"language": "Python", "length_bytes": 1460, "license_type": "no_license", "max_line_length": 64, "num_lines": 65, "path": "/interview/FrequcyQuery.py", "repo_name": "Alwaysproblem/simplecode", "src_encoding": "UTF-8", "text": "#!/bin/python3\n\nimport math\nimport os\nimport random\nimport re\nimport sys\n\n# Complete the freqQuery function below.\nfrom collections import Counter\n# def freqQuery(queries):\n# a = Counter()\n# res = []\n# for mode, content in queries:\n# if mode == 1:\n# a[content] += 1\n# elif mode == 2:\n# if a[content] != 0:\n# a[content] -= 1\n# elif mode == 3:\n# for i in a.keys():\n# if a[i] == content:\n# res.append(1)\n# break\n# else:\n# res.append(0)\n# else:\n# pass\n# return res\n\n# from collections import Counter\ndef freqQuery(queries):\n # a = Counter()\n a = {}\n res = []\n for mode, content in queries:\n if mode == 1:\n try:\n a[content] += 1\n except KeyError:\n a[content] = 1\n elif mode == 2:\n if content in a.keys() and a[content] != 0:\n a[content] -= 1\n elif mode == 3:\n if content in set(a.values()):\n res.append(1)\n else:\n res.append(0)\n else:\n pass\n return res\n\nif __name__ == '__main__':\n\n q = int(input().strip())\n\n queries = []\n\n for _ in range(q):\n queries.append(list(map(int, input().rstrip().split())))\n\n ans = freqQuery(queries)\n\n print('\\n'.join(map(str, ans)))\n" }, { "alpha_fraction": 0.5096774101257324, "alphanum_fraction": 0.536774218082428, "avg_line_length": 17.452381134033203, "blob_id": "af4fbbdd63d8dc36a01ab9adc1ec99e37d791e26", "content_id": "e37d37bc1d2eb533a5271e26b2f1c3445fc89d71", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 783, "license_type": "no_license", "max_line_length": 53, "num_lines": 42, "path": "/leetcode/cpp/coinChange/main.cpp", "repo_name": "Alwaysproblem/simplecode", "src_encoding": "UTF-8", "text": "/*\n * @lc app=leetcode.cn id=322 lang=cpp\n *\n * [322] 零钱兑换\n */\n\n#include <fmt/format.h>\n#include 
<fmt/ranges.h>\n\n#include <iostream>\n#include <unordered_map>\n#include <vector>\n\nusing namespace std;\n\n// @lc code=start\nclass Solution {\n public:\n int INF = 1e9;\n int dp_min = INF;\n int coinChange(vector<int>& coins, int amount) {\n vector<int> dp(amount + 1, INF);\n dp.at(0) = 0;\n for (int a = 1; a < amount + 1; ++a) {\n for (int c : coins) {\n if (a >= c) {\n dp.at(a) = min(dp.at(a), dp.at(a - c) + 1);\n }\n }\n }\n return dp.at(amount) == INF ? -1 : dp.at(amount);\n }\n};\n// @lc code=end\n\nint main() {\n Solution s;\n vector<int> v{1, 2, 5};\n int result = s.coinChange(v, 11);\n fmt::print(\"{} coins.\\n\", result);\n return 0;\n}\n" }, { "alpha_fraction": 0.46048471331596375, "alphanum_fraction": 0.47839832305908203, "avg_line_length": 22.75, "blob_id": "0d1465e0d6f9c9369ec7fcc9c67ddb68ae4fac83", "content_id": "bba29eba1bdaeb5ae66982b1dc8f8e8d42a04a54", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 957, "license_type": "no_license", "max_line_length": 85, "num_lines": 40, "path": "/leetcode/python/40.组合总和-ii.py", "repo_name": "Alwaysproblem/simplecode", "src_encoding": "UTF-8", "text": "#\n# @lc app=leetcode.cn id=40 lang=python3\n#\n# [40] 组合总和 II\n\nfrom typing import *\n# @lc code=start\nclass Solution:\n def combinationSum2(self, candidates: List[int], target: int) -> List[List[int]]:\n coms = list()\n com = list()\n\n if not candidates:\n return []\n \n def backtrack(com, cans, target):\n if sum(com) == target:\n coms.append(com.copy())\n return\n \n if target < sum(com):\n return\n\n cs = cans.copy()\n for ind, i in enumerate(cans):\n com.append(i)\n cs.pop(0)\n backtrack(com, cs, target)\n cs.append(i)\n com.pop()\n \n backtrack(com, sorted(candidates), target)\n\n return coms\n\n\n# @lc code=end\n\nif __name__ == '__main__':\n print(Solution().combinationSum2(candidates = [10,1,2,7,6,1,5], target = 8,))" }, { "alpha_fraction": 0.4652862250804901, "alphanum_fraction": 
0.47624847292900085, "avg_line_length": 22.428571701049805, "blob_id": "a878343bbecb3d565545331246f0c44915f19905", "content_id": "eaadee8a4f4c24b5ff7564b292751b0191b6d04b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 837, "license_type": "no_license", "max_line_length": 61, "num_lines": 35, "path": "/leetcode/python/144.二叉树的前序遍历.py", "repo_name": "Alwaysproblem/simplecode", "src_encoding": "UTF-8", "text": "#\n# @lc app=leetcode.cn id=144 lang=python3\n#\n# [144] 二叉树的前序遍历\n#\nfrom typing import *\nclass TreeNode:\n def __init__(self, val=0, left=None, right=None):\n self.val = val\n self.left = left\n self.right = right\n# @lc code=start\n# Definition for a binary tree node.\nclass Solution:\n def preorderTraversal(self, root: TreeNode) -> List[int]:\n \n stack = [(\"w\", root)]\n res = []\n\n while len(stack) != 0:\n clr, cur = stack.pop()\n\n if cur is None:\n continue\n \n if clr == \"w\":\n stack.append((\"w\", cur.right))\n stack.append((\"w\", cur.left))\n stack.append((\"g\", cur))\n \n else:\n res.append(cur.val)\n \n return res\n# @lc code=end\n\n" }, { "alpha_fraction": 0.5272296667098999, "alphanum_fraction": 0.5438042879104614, "avg_line_length": 23.365385055541992, "blob_id": "aeb595c6ade6e2c9ac36eadcd1bc3980cfd93201", "content_id": "254e569360b9a9aa678cfd48f1a4192cb80a9306", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1275, "license_type": "no_license", "max_line_length": 67, "num_lines": 52, "path": "/leetcode/python/113.路径总和-ii.py", "repo_name": "Alwaysproblem/simplecode", "src_encoding": "UTF-8", "text": "#\n# @lc app=leetcode.cn id=113 lang=python3\n#\n# [113] 路径总和 II\n#\nfrom typing import List\nclass TreeNode:\n def __init__(self, x):\n self.val = x\n self.left = None\n self.right = None\n# @lc code=start\n# Definition for a binary tree node.\n\nclass Solution:\n def pathSum(self, root: TreeNode, Sum: int) -> 
List[List[int]]:\n com = []\n coms = []\n\n def backtrace(root, com):\n if root is None:\n return\n\n if not root.left and not root.right:\n c = com.copy()\n c.append(root.val)\n if sum(c) == Sum:\n coms.append(c)\n return\n\n com.append(root.val)\n backtrace(root.left, com)\n backtrace(root.right, com)\n com.pop()\n\n backtrace(root, com)\n return coms\n# @lc code=end\n\n\nif __name__ == \"__main__\":\n root = TreeNode(5)\n root.left=TreeNode(4)\n root.right=TreeNode(8)\n root.right.left=TreeNode(13)\n root.right.right=TreeNode(4)\n root.right.right.left=TreeNode(5)\n root.right.right.right=TreeNode(1)\n root.left.left=TreeNode(11)\n root.left.left.left=TreeNode(7)\n root.left.left.right=TreeNode(2)\n print(Solution().pathSum(root, 22))\n" }, { "alpha_fraction": 0.6386256217956543, "alphanum_fraction": 0.649289071559906, "avg_line_length": 25.375, "blob_id": "a2fc672ce7cd915cf2b404b25a597af1f0c8fa3b", "content_id": "69dd4de448b2be6c95bbe08b53028ea693f126f0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1708, "license_type": "no_license", "max_line_length": 76, "num_lines": 64, "path": "/leetcode/cpp/lowestCommonAncestor/main.cpp", "repo_name": "Alwaysproblem/simplecode", "src_encoding": "UTF-8", "text": "/*\n * @lc app=leetcode.cn id=236 lang=cpp\n *\n * [236] 二叉树的最近公共祖先\n */\n#include <fmt/format.h>\n#include <fmt/ranges.h>\n\n#include <iostream>\n#include <queue>\n#include <string>\n#include <unordered_set>\n#include <vector>\n\n#include \"common_types/TreeNode/BinaryTreeNode.h\"\n\nusing namespace std;\nusing TreeNode = BinaryTree::BinaryTreeNode<int>;\nusing BinaryTree::BuildBinaryTree;\nusing BinaryTree::showBinaryTree;\n\nTreeNode *findNode(TreeNode *root, int val) {\n if (!root) return nullptr;\n if (root->val == val) return root;\n TreeNode *l = nullptr, *r = nullptr;\n if (root->left) l = findNode(root->left, val);\n if (root->right) r = findNode(root->right, val);\n return l != nullptr ? 
l : r;\n}\n\n// @lc code=start\nclass Solution {\n public:\n TreeNode *lowestCommonAncestor(TreeNode *root, TreeNode *p, TreeNode *q) {\n if (!root) return nullptr;\n if (root == p) return p;\n if (root == q) return q;\n\n TreeNode *lca_l = nullptr, *lca_r = nullptr;\n if (root->left) lca_l = lowestCommonAncestor(root->left, p, q);\n if (root->right) lca_r = lowestCommonAncestor(root->right, p, q);\n\n if (lca_l == nullptr) return lca_r;\n if (lca_r == nullptr) return lca_l;\n if (lca_r == nullptr && lca_l == nullptr) return nullptr;\n return root;\n }\n};\n// @lc code=end\n\nint main() {\n const int null = BinaryTree::null<int>();\n vector<int> a{3, 5, 1, 6, 2, 0, 8, null, null, 7, 4};\n int val_p = 0;\n int val_q = 8;\n TreeNode *root = BuildBinaryTree<int>(a);\n TreeNode *p = findNode(root, val_p);\n TreeNode *q = findNode(root, val_q);\n showBinaryTree<int>(root);\n Solution sol;\n TreeNode *lca = sol.lowestCommonAncestor(root, p, q);\n showBinaryTree<int>(lca);\n return 0;\n}\n" }, { "alpha_fraction": 0.599413275718689, "alphanum_fraction": 0.6535202264785767, "avg_line_length": 25.22222137451172, "blob_id": "483342a285e9be81beab01d07e868bbb33fd178d", "content_id": "adf9bb54a81eb647931d832909fb7fe18793a47b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 3068, "license_type": "no_license", "max_line_length": 494, "num_lines": 117, "path": "/COPInterview/takehome.md", "repo_name": "Alwaysproblem/simplecode", "src_encoding": "UTF-8", "text": "# Takehome\n\nHi prospective Junior Data Engineer! Here is your assignment. \n\nYou are allowed to use the Python standard library and basic utilities from Numpy. Points are given for **succinct** but __clear__ code, and when there are ambiguities, comments should be provided. Using functional programming style is allowed. For Python, using the pep8 standard is encouraged. 
The challenges below will give you a few correct inputs and outputs, however we will be testing your functions against unseen inputs. So make sure you understand exactly the purpose of the function.\n\nAll code is to be submitted that works against Python 3 and a current version of Numpy.\n\nSubmit the code as separate `takehome.py` file.\n\n## Functional Arrays\n\nCreate a function that takes a lambda, a dimensions shape and the Numpy dtype, and produces an array.\n\n```py\nimport numpy as np\n\ndef create_array_from_function(f, d, dtype=None):\n pass\n \nprint(create_array_from_function(lambda i,j: (i - j)**2, [4, 4]))\n# [[0. 1. 4. 9.]\n# [1. 0. 1. 4.]\n# [4. 1. 0. 1.]\n# [9. 4. 1. 0.]]\n```\n\n## Removing Boundaries\n\nCreate a function that takes an array and a binary mask and produces a cropped array based on the binary mask.\n\n```py\nimport numpy as np\n\ndef boundary_cropping(a, m):\n pass\n\na1 = np.array([[0,0,0,0,0], [0,0,0,0,0], [0,1,0,1,1], [0,0,0,0,0]])\na2 = np.array([[ [0,0,0], [0,1,0], [0,1,0] ], [ [0,0,0], [0,1,0], [0,0,0] ], [ [0,0,0], [0,1,0], [0,0,0] ]])\n\nprint(boundary_cropping(a1, a1 != 0))\n# [[1 0 1 1]]\nprint(boundary_cropping(a2, a2 != 0))\n# [[[1] [1]] [[1] [0]] [[1] [0]]]\n```\n\n## Block Reshaping\n\nCreate a function that takes an 2D matrix, a number of rows and an number of columns which reshapes the 2D matrix into blocks of the rows and columns.\n\n```py\nimport numpy as np\n\ndef shape_as_blocks(a, r, c):\n pass\n\narr = np.array([[1,2,3,4], [5,6,7,8], [9,0,1,2]])\nprint(shape_as_blocks(arr, 2, 2))\n# array([[[[1, 2],\n# [7, 8]],\n# \n# [[3, 4],\n# [9, 0]],\n# \n# [[5, 6],\n# [1, 2]]]])\n```\n\n## Population Variance from Subpopulation Variance\n\nGiven a list of numpy arrays, where each array is a subpopulation and the entire list is the population, calculate the variance of the entire population from the variance of the subpopulations.\n\n```py\nimport numpy as np\n\ndef pop_var_from_subpop_var(groups):\n pass\n \ngroups = 
[np.array([1,2,3,4]), np.array([5,6])]\nprint(pop_var_from_subpop_var(groups))\n# 2.9166666666666665\n```\n\n## Shuffle a Large List\n\nGiven a very large list of numbers, randomly shuffle the list while using constant memory.\n\n```py\nimport random\n\nl = [1,2,3,4,5]\n\ndef shuffle_list_inplace_constant_memory(l):\n pass\n```\n\n## Acquiring Coordinates\n\nGiven an array and a step shape, return a list of coordinates based on each step.\n\n```py\nimport itertools\nimport numpy as np\n\ndef coordinates_from_steps(a, s, dtype=int):\n pass\n\nprint(coordinates_from_steps(np.array([[1,2],[3,4]]), (1,1)))\n# [[0 0]\n# [0 1]\n# [1 0]\n# [1 1]]\n\nprint(coordinates_from_steps(np.array([[1,2],[3,4]]), (1,2)))\n# [[0 0]\n# [1 0]]\n```\n" }, { "alpha_fraction": 0.6071829199790955, "alphanum_fraction": 0.6172839403152466, "avg_line_length": 19.25, "blob_id": "37254f143169db411faf743d42f33d96d7895466", "content_id": "250fb5108829fa6783b6762e7ceb03d2222d900d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 891, "license_type": "no_license", "max_line_length": 147, "num_lines": 44, "path": "/interview/tripleNum.py", "repo_name": "Alwaysproblem/simplecode", "src_encoding": "UTF-8", "text": "#!/bin/python3\n\nimport math\nimport os\nimport random\nimport re\nimport sys\n\n# https://www.hackerrank.com/challenges/triple-sum/problem?h_l=interview&playlist_slugs%5B%5D=interview-preparation-kit&playlist_slugs%5B%5D=search\n# Complete the triplets function below.\nfrom bisect import bisect_right\ndef triplets(A, B, C):\n\n A = sorted(set(A))\n B = sorted(set(B))\n C = sorted(set(C))\n\n res = 0\n for b in B:\n i = bisect_right(A, b)\n j = bisect_right(C, b)\n res += i * j\n return res\n\n\nif __name__ == '__main__':\n\n lenaLenbLenc = input().split()\n\n lena = int(lenaLenbLenc[0])\n\n lenb = int(lenaLenbLenc[1])\n\n lenc = int(lenaLenbLenc[2])\n\n arra = list(map(int, input().rstrip().split()))\n\n arrb = list(map(int, 
input().rstrip().split()))\n\n arrc = list(map(int, input().rstrip().split()))\n\n ans = triplets(arra, arrb, arrc)\n\n print(ans)\n" }, { "alpha_fraction": 0.5588558912277222, "alphanum_fraction": 0.5632563233375549, "avg_line_length": 21.170732498168945, "blob_id": "1c47e0e66e2de23ddf89720a70578b330e48b747", "content_id": "c974c301f11113fe05ab64c24df8442a76dc9c3f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 909, "license_type": "no_license", "max_line_length": 67, "num_lines": 41, "path": "/leetcode/cpp/utils/print_2d.hpp", "repo_name": "Alwaysproblem/simplecode", "src_encoding": "UTF-8", "text": "#ifndef PRINT_2D_HPP\n#define PRINT_2D_HPP\n\n#include <fmt/format.h>\n#include <fmt/ranges.h>\n\n#include <algorithm>\n#include <iostream>\n#include <string>\n#include <type_traits>\n#include <vector>\n\ntemplate <typename T>\nint max_length_of(const std::vector<std::vector<T>>& v) {\n int max_length = 0;\n for (const auto& vec : v) {\n for (const T x : vec) {\n max_length = max_length > fmt::format(\"{}\", x).size()\n ? 
max_length\n : fmt::format(\"{}\", x).size();\n }\n }\n return max_length;\n}\n\ntemplate <typename T>\nvoid print2D(const std::vector<std::vector<T>>& v) {\n int space_len = max_length_of(v);\n std::string format_str = \"{} \";\n if (space_len) format_str = fmt::format(\"{{:>{}}} \", space_len);\n\n for (const auto& col : v) {\n for (const T& row : col) {\n fmt::print(format_str, row);\n }\n fmt::print(\"\\n\");\n }\n fmt::print(\"\\n\");\n}\n\n#endif\n" }, { "alpha_fraction": 0.5325967073440552, "alphanum_fraction": 0.5690608024597168, "avg_line_length": 24.13888931274414, "blob_id": "d9e65109da826daf8147b5e64409fb6a071c3a94", "content_id": "9411934f3c65110340dd31dbffde685f890dcc30", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 905, "license_type": "no_license", "max_line_length": 64, "num_lines": 36, "path": "/CHack/halfday.c", "repo_name": "Alwaysproblem/simplecode", "src_encoding": "UTF-8", "text": "// calculate the mid-day time given sunrise time and sunset time\n\n#include <stdio.h>\n#include <stdlib.h>\n\nchar * halfday(char* sunrise, char* sunset);\n\nint main()\n{\n char sunrise[] = \"06:00\";\n char sunset[] = \"18:12\";\n printf(\"please input sunrise time:\\n\");\n scanf(\"%s\", sunrise);\n printf(\"please input sunset time:\\n\");\n scanf(\"%s\", sunset);\n char *a = halfday(sunrise, sunset);\n printf(\"%s\", a);\n free(a);\n return 0;\n}\n\nchar * halfday(char* sunrise, char* sunset){\n int hour, min;\n int sunR = 0;\n int sunS = 0;\n int half = 0;\n char *ans = malloc(sizeof(char)*6);\n sscanf(sunrise, \"%d:%d\",&hour, &min);\n sunR = hour * 60 + min;\n sscanf(sunset, \"%d:%d\",&hour, &min);\n sunS = hour * 60 + min;\n half = (sunS+sunR+1)/2;\n // sprintf(ans, \"%2d:%2d\", int(half/60), int(half%60));\n sprintf(ans, \"%02d:%02d\", half/60, half%60);\n return ans;\n}\n" }, { "alpha_fraction": 0.2647058963775635, "alphanum_fraction": 0.31960785388946533, "avg_line_length": 62.875, "blob_id": 
"8c3dbf60a2f224d04442b7f7d211570289d69f6b", "content_id": "acd35a2c844230009677eb0a3b6330ce06b6d4fb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 510, "license_type": "no_license", "max_line_length": 92, "num_lines": 8, "path": "/AGIHack/README.md", "repo_name": "Alwaysproblem/simplecode", "src_encoding": "UTF-8", "text": "remenber all the node that we already extend, just like this table:\n\n| node | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 |\n| :-----------: | :------: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: |\n| cost | $\\infty$ | 5 | 4 | 6 | 11 | 9 | 18 | 9 | 10 |\n| previous node | --- | 1 | 1 | 3 | 3 | 2 | 2 | 4 | 8 |\n\nthen find path with the recurrsive method that used in the Dijkstra." }, { "alpha_fraction": 0.8082539439201355, "alphanum_fraction": 0.8097959160804749, "avg_line_length": 15.214705467224121, "blob_id": "07371ab1ac3b64a1e3da9ed3a9cfb3c083ac8274", "content_id": "a939613937bfcdf0213e6dcb16a3f7fcb84e30ac", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 24299, "license_type": "no_license", "max_line_length": 109, "num_lines": 680, "path": "/leetcode/cpp/cpp.md", "repo_name": "Alwaysproblem/simplecode", "src_encoding": "UTF-8", "text": "C++常见面试问题汇总\n\n指针和引用的区别\n\n```\n指针和引用在本质上都用于访问同一个对象,但是语法和使用方式上有些差异:\n\n定义语法不同:指针需要使用*来定义,而引用使用&。例如:\nint* p; // 定义指针p\nint& r = x; // 定义对x的引用r\n内存占用不同:指针是一个变量,它存储了目标对象的内存地址。引用不是一个变量,它直接与目标对象绑定。\n可重新赋值:指针可以在运行时指向不同的对象,而引用在初始化后不能重新绑定到另一个对象。\n可为空:指针可以赋值为NULL,而引用必须绑定到一个对象,不能为空。\n传递方式不同:指针传递的是地址,引用传递的是对象本身。\n解引用方式不同:*用于解引用指针,而引用可以直接当成对象使用。\n总体来说,引用提供了一种更为安全和方便的访问对象的方式,是C++对指针的增强。但指针提供了更大的灵活性,可以重新指向不同对象。选择使用指针还是引用,需要根据具体场景而定。\n```\n\n堆和栈的区别\n\n```\n存储位置不同:\n堆是动态内存分配,位于内存的自由存储区。\n栈是自动分配的内存,位于内存的栈区。\n\n分配方式不同:\n堆是动态分配,使用 new/malloc 在运行时申请,使用 delete/free 
释放。\n栈是编译器自动分配和释放。进入一个作用域时分配,退出作用域时释放。\n\n空间大小不同:\n堆的大小可以动态调整,没有固定大小限制。\n栈的大小在编译时就确定,并且有平台限制。\n\n使用方式不同:\n堆分配的内存需要程序员手动管理,容易产生内存泄漏。\n栈的内存管理由编译器自动完成,使用简单。\n\n生命周期不同:\n堆中的对象生命周期由程序员控制。\n栈中的对象生命周期由其作用域控制。\n\n总之,对于需要动态大小和生命周期的对象,应该使用堆;对于大小固定且生命周期由作用域控制的对象,应该使用栈。\n```\n\nnew和delete是如何实现的,new 与 malloc的异同处\n\n```\nnew和delete是C++中的运算符,用于对象的动态内存分配和释放,而malloc和free是C语言中的函数,用于一般内存的分配和释放。\nnew在分配内存的同时还会调用对象的构造函数进行初始化,delete在释放内存前会调用析构函数; malloc/free 仅仅负责内存空间的分配和释放。\nnew和delete由编译器在背后实现,可能进行额外的内存管理以配合C++的特性,如构造/析构、异常处理等;而 malloc/free 由程序库实现,仅仅完成内存的分配/释放。\nnew/delete 的内存来自自由存储区(free store), malloc/free 的内存来自堆区(heap),C++中自由存储区是对堆区的封装。\nnew会在内存不足时抛出异常,malloc会返回空指针;delete接受空指针,free不能。\nnew/delete 可以被重载, malloc/free 不可以。\n总之, new/delete 比 malloc/free 更符合C++的面向对象特性,但后者运行时效率可能更高。选择要根据具体情况决定。\n```\n\nStruct和class的区别\n\n```\n默认访问权限不同:\nstruct的成员默认是public的\nclass的成员默认是private的\n\n继承方式不同:\nstruct默认是public继承\nclass默认是private继承\n\n用途不同:\nstruct更适合构建数据结构,类似C中的struct\nclass更适合封装功能\n\n效率不同:\nstruct通常效率略高,因为默认public访问简化了内存布局\nclass访问控制会影响优化,效率略低\n\n总结:\nstruct更适合构建相对简单的数据结构;class更适合封装复杂的功能。但这只是一个约定,C++本身并不强制要求这种用法。\n```\n\n\ndefine 和const的区别(编译阶段、安全性、内存占用等)\n\n```\n定义阶段不同:\ndefine是预处理器指令,在预编译阶段展开\nconst是关键字,在编译阶段处理\n\n作用域不同:\ndefine没有作用域概念,只在定义处替换\nconst具有作用域,只在作用域内生效\n\n类型不同:\ndefine没有类型,直接替换文本\nconst可以指定变量类型,有类型检查\n\n内存占用不同:\ndefine不占用内存,只是文本替换\nconst变量会占用内存\n\n安全性不同:\ndefine比较危险,可能导致意外行为\nconst较为安全,有类型检查\n\n调试不同:\ndefine不会出现在调试信息中\nconst出现在调试信息中,便于调试\n\n总结:\nconst较define更安全,应尽量用const代替define,特别是与类型相关的场景。但define运行效率较高。需根据场景选择。\n```\n\n在C++中const和static的用法(定义,用途)\n\n```\n\nconst:\n\n定义常量,不可修改的变量。例如:\n\nconst int a = 10;\n定义常量指针,指向不可修改的对象。例如:\n\nconst int* p = &a; // p是指针,指向的值不可修改\n定义成员函数,表示不修改成员变量。例如:\n\nclass A {\nint val;\npublic:\nvoid set(int v) { val = v; } \nint get() const { return val; } // 不修改val\n};\nstatic:\n\n定义静态变量,全局只有一份内存,在程序整个运行期间存在。例如:\n\nstatic int count = 0; \n定义静态成员变量/函数,不需要通过对象就可以访问。例如:\n\nclass A {\npublic:\nstatic int v;\nstatic void func() 
{}\n};\n限制函数或变量只在某文件内可见。例如:\n\nstatic int x; // x只在当前文件可见\n总结:const用于定义常量,防止值被修改;static用于定义静态变量/函数,或限制作用域。\n\n```\n\nconst和static在类中使用的注意事项(定义、初始化和使用)\n\n```\n对于const和static在类中的使用,需要注意以下几点:\n\n定义时的区别\nconst成员变量必须在定义时初始化,而非const成员变量可以在构造函数中初始化。\n\nstatic成员变量可以在类内初始化,也可以在类外初始化。\n\n初始化时的区别\nconst成员变量只能在定义时初始化一次,后续不可修改。\n\n非const成员变量可以在构造函数中初始化,也可以后续修改。\n\nstatic成员变量无论在类内还是类外初始化,都仅初始化一次。\n\n使用时的区别\nconst成员变量在整个类中都为只读,不能修改。\n\n非const成员变量可以通过对象修改。\n\nstatic成员变量可以通过类名直接访问,不需要创建对象。\n\n命名约定\nconst成员变量通常使用大写字母命名。\n\nstatic成员变量通常加前缀s_ 或 k_。\n\n总之,const用于定义常量,static用于静态成员。初始化和使用需要注意其特殊性。\n```\n\nC++中的const类成员函数(用法和意义),以及和非const成员函数的区别\n\n```\n在C++中,const成员函数的用法和意义以及它与非const成员函数的区别主要如下:\n\n用法\n在成员函数声明后加const关键字,表示该函数是const函数,不修改类中的任何成员变量,如:\n\nclass A {\n int val;\npublic:\n void set(int v) { val = v; }\n int get() const { return val; }\n};\n意义\n对类的接口进行限制,明确指定哪些函数可以修改类成员,哪些不能。\n可以让const对象调用成员函数,非const对象不一定可以调用const函数。\n可以避免const对象被无意间修改。\n区别\nconst函数只能访问类中的const成员变量,非const函数可以访问所有成员变量。\nconst对象只能调用const函数,非const对象可以调用所有成员函数。\nconst函数不能修改成员变量,非const函数可以。\n注意\nconst函数如果需要访问非const成员,需要将该成员声明为mutable。\n\n总之,const成员函数用于增加类的接口约束,表示不修改成员变量,提高代码可读性和安全性\n```\n\nC++的顶层const和底层const\n\n```\n顶层const:\n用于对一个变量整体进行const修饰,使其变为只读,不能通过这个变量修改其所指向的内容。例如:\n\nconst int* p;\n\n这里p是一个指向int的指针常量,不能通过p来修改它指向的int的值。\n\n底层const:\n用于对一个变量所指向的内容进行const修饰,使其不能通过这个变量来修改其所指向的内容,但变量本身是可修改的。例如:\n\nint const * p;\n\n这里p是一个指向const int的指针,可以修改p本身,但不能通过p来修改它所指向的const int的值。\n\n顶层const和底层const可以组合使用:\nconst int const * p;\n\n这里p既是一个指针常量,也是一个指向const int的指针,既不能修改p本身,也不能通过p修改它所指向的值。\n\n总结:顶层const修饰变量本身,底层const修饰所指向的内容。\n```\n\nfinal和override关键字\n\n```\nfinal:\n\n可以用来修饰类,表示这个类不能被继承,比如:\nclass Base final {\n //...\n};\n\nclass Derived : Base { // 错误,Base不能被继承\n}; \n可以用来修饰虚函数,表示这个虚函数不能被子类重写,比如:\nclass Base {\npublic:\n virtual void foo() final; \n};\n\nclass Derived : Base {\n void foo() override; // 
错误,foo不能重写\n};\noverride:\n\n用于修饰虚函数,表示这个函数重写了基类的虚函数,如果函数签名与基类中虚函数不一致,编译器会报错。\n\n所以override可以帮助检查是否正确重写了基类虚函数。\n\n总结:\n\nfinal防止类的继承和函数的重写;override用于标识重写基类虚函数,检查是否正确重写。\n```\n\n拷贝初始化和直接初始化,初始化和赋值的区别\n\n```\n拷贝初始化:\n使用已存在的对象来初始化一个新对象。形式为T x = y; 这里y是已存在的对象。\n\n直接初始化:\n直接用括号或花括号给对象赋初值。形式为 T x(args); 或 T x{args};\n\n赋值:\n将已存在对象的值赋给另一个已存在的对象。形式为 x = y; 这里x和y都是已存在的对象。\n\n区别:\n\n拷贝初始化和直接初始化都是初始化,发生在对象创建时;赋值是赋值,发生在对象已创建后。\n\n拷贝初始化会调用拷贝构造函数或转换构造函数;直接初始化会调用普通构造函数。\n\n拷贝初始化可以使用=也可以不使用;直接初始化和赋值必须使用括号或花括号。\n\n赋值只能在对象创建后进行,而初始化只能在对象创建时进行。\n\n总结:\n\n初始化发生在对象创建时,只进行一次;赋值可以在对象生命周期的任意时刻进行多次。\n```\n\nextern \"C\" 的用法\n\n```\nextern \"C\"的主要用法是为了兼容C语言,在C++中使用extern \"C\"可以避免C++名字修饰(name mangling)。\n\n具体来说,extern \"C\"有以下两个主要作用:\n\n引用C语言代码:\n在C++中调用C语言代码时,可以使用extern \"C\"来防止C++编译器对C语言函数名进行修饰。\n\n例如:\n\n// test.c\nvoid test(); \n\n// main.cpp\nextern \"C\" {\n void test(); // 引用test.c中的test函数 \n}\n允许其他语言调用C++代码:\n在C++中使用extern \"C\"可以避免名字修饰,使得其他语言(如C语言)可以方便调用C++代码。\n\n例如:\n\n// test.cpp\nextern \"C\" void test() {\n // ...\n}\n这样test函数就可以被C语言直接调用,而不需要处理编译器名字修饰的问题。\n\n总之,extern \"C\"使得C++代码兼容C语言调用,是连接C++与C语言的重要手段。\n```\n\n模板函数和模板类的特例化\n\n```\n模板特例化的语法如下:\n\n对于模板函数:\n\n// 原模板\ntemplate<typename T>\nvoid func(T t) {\n // 通用实现 \n}\n\n// 特例化 \ntemplate<> \nvoid func<int>(int t) {\n // int的特例化实现\n}\n对于模板类:\n\n// 原模板\ntemplate<typename T>\nclass MyClass {\n // 通用实现\n}; \n\n// 特例化\ntemplate<>\nclass MyClass<double> {\n // double的特例化实现 \n};\n特例化允许我们针对某些类型提供自定义的实现,提高代码复用性。\n\n常见的特例化场景包括:\n\n为某些类型提供更优化的实现\n为某些类型添加额外的功能或属性\n为某些类型使用完全不同的实现逻辑\n需要注意,特例化只能针对整个类型进行,不能对类型的一部分进行特例化。\n```\n\nC++的STL源码其中包括内存池机制,各种容器的底层实现机制,算法的实现原理等)\n\n```\n内存池机制\nSTL会预先分配一块内存作为内存池,各种容器从这个池中申请和释放内存,避免频繁调用 new/delete 
带来的性能损失。\n\n各种容器的实现\nvector使用连续内存空间,可快速随机访问,但插入删除效率低。\n\nlist使用双向链表,插入删除快,但随机访问慢。\n\ndeque使用分段连续空间,兼具vector和list的优点。\n\nset/map使用红黑树实现,可以自动维护有序。\n\nunordered_set/unordered_map使用哈希表实现。\n\n算法的实现\nsort使用快速排序、归并排序或插入排序。\n\nfind、count等算法使用线性搜索。\n\nlower_bound、upper_bound等使用二分查找。\n\nSTL源码中应用了大量算法设计与分析的知识,通过空间换时间的思想,实现高效的容器和算法。这为我们提供了很好的学习模板。\n```\n\nSTL源码中的hashtable的实现\n\n```\n\nSTL源码中的哈希表(hashtable)主要有以下几点实现:\n\n使用开放寻址法解决冲突,探查序列包括线性探查、二次探查、双重散列等。\n\n动态扩容,当桶数使用比例超过阈值(通常75%)时,会重新分配更大的桶数组,并重新散列。\n\n桶数一般设置为素数或次素数,可以减少冲突。\n\n负载因子控制桶数和容量的动态平衡。\n\n使用红黑树优化性能,当桶内元素过多时,会转为红黑树结构,提高查找速度。\n\n采用链式散列解决冲突,每个桶是一个链表。\n\n使用空桶和删除标记优化空间利用率。\n\n实现哈希函数和相等比较函数,支持自定义键类型。\n\n使用分配器(allocator)管理内存分配。\n\n优化缓存行利用,增强局部性。\n\n总体来说,STL的hashtable通过空间换时间实现高效插入和查询,是实现关联容器的核心组件。\n```\n\nSTL中unordered_map和map的区别和应用场景\n\nSTL中vector的实现\n\n```\nSTL中的vector主要通过以下几点实现:\n\n使用连续的动态数组作为底层存储,可以快速随机访问。\n\n空间预分配和扩容机制 - 当数组容量不足时,按一定策略(通常是2倍)扩容,以减少重新分配的次数。\n\n存储在堆上,使用智能指针管理数组内存,实现RAII。\n\n封装动态数组,提供迭代器、容量信息、插入删除等接口。\n\n移动语义 - 支持移动构造和移动赋值,避免不必要的拷贝。\n\n空间优化 - 使用空间压缩和小对象优化,减少内存占用。\n\n缓存友好 - 数据结构对缓存访问友好,增强局部性。\n\n算法和容器分离 - 算法由迭代器定义,可应用于不同容器。\n\n异常安全 - 异常不会破坏vector的结构。\n\n综上,vector通过空间换时间提供快速随机访问,是最常用的STL序列容器。\n```\n\nSTL容器的几种迭代器以及对应的容器(输入迭代器,输出迭代器,前向迭代器,双向迭代器,随机访问迭代器)\n\n```\n您好,STL容器主要有以下几种迭代器及对应的容器:\n\n输入迭代器(input iterator):只能单向进行读取操作的迭代器,如istream_iterator。\n\n输出迭代器(output iterator):只能单向进行写入操作的迭代器,如ostream_iterator。\n\n前向迭代器(forward iterator):支持读操作,并能向后单步移动的迭代器,如unordered_set的迭代器。\n\n双向迭代器(bidirectional iterator):支持读写操作,并能双向移动的迭代器,如list、set、map的迭代器。\n\n随机访问迭代器(random access iterator):支持读写操作,并能随机访问任意位置的迭代器,如vector、deque的迭代器。\n\n主要容器对应的迭代器类型:\n\nvector,deque:随机访问迭代器\nlist:双向迭代器\nset,map,multiset,multimap:双向迭代器\nunordered_set,unordered_map:前向迭代器\n\n容器适配器stack,queue,priority_queue没有对应的迭代器。\n\n迭代器的类型决定了容器支持的算法和功能,选择容器时需要根据访问方式选择匹配的迭代器类型。\n\n顺序容器:vector, deque是随机访问迭代器, 
list是双向迭代器\n\n容器适配器:stack,queue,priority_queue没有迭代器\n\n关联容器:set,map,multiset,multimap是双向迭代器\n\nunordered_set,unordered_map,unordered_multiset,unordered_multimap是前向迭代器\n```\n\n\nSTL中的traits技法\ntype_traits\n\niterator_traits\n\nchar traits\n\nallocator_traits\n\npointer_traits\n\narray_traits\n\n```\n\nSTL中使用了traits技术来实现泛型编程,主要的traits类有:\n\ntype_traits: 提供了查询和修改类型属性的模板类,如is_integral、is_pointer等来判断类型的特征。\n\niterator_traits: 提取迭代器的特征,提供统一的接口访问迭代器的属性,如迭代器的类别、值类型等。\n\nchar_traits: 定义字符类型的相关属性,用于给基于字符的容器提供类型信息。\n\nallocator_traits: 定义标准容器的分配器的统一接口。\n\npointer_traits: 定义指针的统一访问接口。\n\narray_traits: 提供访问数组属性的接口。\n\ntraits技术通过模板和泛型编程实现代码复用,将类型的属性和功能分离,提高了扩展性和复用性。这些traits为STL提供了跨平台和可扩展的实现基础。\n\n总体来说,traits在STL中发挥了参数化和重用的作用,是STL设计的重要组成部分。\n```\n\n\nvector使用的注意点及其原因,频繁对vector调用push_back()对性能的影响和原因。\n\n```\n频繁调用push_back会触发vector扩容,影响性能。\nvector在元素数量达到容量上限时会触发扩容,每次典型扩容为当前容量的2倍,这会重新分配内存和复制元素,影响效率。\n\n如果知道元素数量,可以通过reserve()预先分配容量,减少扩容次数。\n\n避免在vector中间插入删除元素。\nvector使用连续存储,中间插入删除效率低,需要移动大量元素。应该仅在末尾进行 push/pop 操作。\n\n如果需要频繁随机访问,vector效率很高。\nvector使用连续存储,支持通过下标快速随机访问任意元素。\n\n传入vector的对象类型应该轻量,避免大对象拷贝。\nvector在扩容时需要拷贝所有元素,如果元素较大会带来不必要开销。\n\n可以通过数据结构组合得到需要的性能,例如vector+list。\n综上,使用vector时根据场景选择合适的使用方式很重要,才能发挥其性能优势。\n```\n\nC++中的重载和重写的区别\n\nC++内存管理,内存池技术(热门问题),与csapp中几种内存分配方式对比学习加深理解\n\n介绍面向对象的三大特性,并且举例说明每一个\n\nC++多态的实现\n\nC++虚函数相关(虚函数表,虚函数指针),虚函数的实现原理(包括单一继承,多重继承等)(拓展问题:为什么基类指针指向派生类对象时可以调用派生类成员函数,基类的虚函数存放在内存的什么区,虚函数表指针vptr的初始化时间)\n\n```\n虚函数表(Virtual Table)\n每个类中都有一个虚函数表,存放类中所有虚函数的函数指针。虚函数表是个静态数组,由编译器生成。\n\n虚函数指针(vptr)\n每个类实例中都有一个虚函数指针,指向该类的虚函数表,用于动态找到正确的虚函数。\n\n单继承\n对于单继承,派生类的虚函数表中包含自己的虚函数和从基类继承的虚函数。基类指针可以调用派生类虚函数,是通过虚函数表指针找到派生类的虚函数实现。\n\n多继承\n多继承时,每个派生类有多个虚函数表指针,指向不同基类的虚函数表,用于区分不同基类的虚函数。\n\n虚函数表位于程序只读数据区,不允许修改。\n\n虚函数表指针vptr是类的数据成员,在构造函数中初始化。\n\n基类的虚函数存放在内存的只读数据区(read-only data)。\n\nC++中的虚函数表(Virtual 
Table)以及虚函数指针(vptr)都是存放在程序的只读数据区的,不允许修改。\n\n只读数据区主要包括程序代码段和常量数据,通常是可共享的内存区域。把虚函数表放在这里可以让程序中的多个实例对象共享使用同一份虚函数表,而不需要每一个实例都复制一份虚函数表。\n\n另外,只读数据区由操作系统管理,有较高的访问效率。放在这里可以加速虚函数的访问。\n\n所以为了节省内存空间,以及加速访问速度,C++的虚函数和虚函数表都是放在程序只读数据区的。\n\n\n```\n\n\nC++中类的数据成员和成员函数内存分布情况\n\n```\n在C++中,类的数据成员和成员函数的内存分布情况如下:\n\n数据成员:\n非静态数据成员存放在每个类实例的内存空间中。\n静态数据成员存放在全局静态存储区,所有实例共享一份。\n成员函数:\n非虚成员函数直接存放在代码段中。\n虚函数的函数体存放在代码段中,虚函数表存放在只读数据段中。\n对于每个类实例:\n在栈上分配实例本身所需的内存。\n在堆上通过new分配的数据成员内存。\n包含一个this指针,指向实例本身。\n包含一个虚函数表指针 vptr,如果有虚函数的话。\n所以简单来说,数据成员和this指针在实例内存中,代码在代码段中,虚函数表在只读数据段中。成员的布局由编译器控制。\n```\n\nthis指针\n析构函数一般写成虚函数的原因\n\n```\n析构函数之所以通常声明为虚函数,主要有以下原因:\n\n允许正确调用派生类的析构函数\n如果基类析构函数是非虚函数,那么通过基类指针删除派生类对象时,只会调用基类析构函数,而不会调用派生类的析构函数,可能导致资源泄露。\n\n实现运行时多态\n虚析构函数可以与虚函数配合实现运行时多态,正确析构对象。\n\n维护类的继承体系\n如果基类析构函数不是虚函数,那么继承体系中所有的析构函数都需要是虚函数,否则删除基类指针时可能导致未定义行为。\n\n符合用户的预期\n用户通常会认为使用基类指针删除对象会调用相应的析构函数,这与虚析构函数的语义一致。\n\n提高复用性\n如果基类析构函数是虚函数,那么用户不需要关心具体调用哪个类的析构函数,可以复用基类指针安全删除对象。\n\n所以虚析构函数可以增强程序的健壮性,使得类继承体系中的析构行为更加可预测,也更符合用户的直觉。这是虚析构函数的重要意义。\n```\n\n构造函数、拷贝构造函数和赋值操作符的区别\n构造函数:对象不存在,没用别的对象初始化\n\n拷贝构造函数:对象不存在,用别的对象初始化\n\n赋值运算符:对象存在,用别的对象给它赋值\n\n构造函数声明为explicit\n构造函数为什么一般不定义为虚函数\n构造函数的几种关键字(default delete 0)\n= default:将拷贝控制成员定义为=default显式要求编译器生成合成的版本\n\n= delete:将拷贝构造函数和拷贝赋值运算符定义删除的函数,阻止拷贝(析构函数不能是删除的函数 C++Primer P450)\n\n= 0:将虚函数定义为纯虚函数(纯虚函数无需定义,= 0只能出现在类内部虚函数的声明语句处;当然,也可以为纯虚函数提供定义,不过函数体必须定义在类的外部)\n\n构造函数或者析构函数中调用虚函数会怎样\n纯虚函数\n静态类型和动态类型,静态绑定和动态绑定的介绍\n引用是否能实现动态绑定,为什么引用可以实现\n深拷贝和浅拷贝的区别(举例说明深拷贝的安全性)\n对象复用的了解,零拷贝的了解\n介绍C++所有的构造函数\n什么情况下会调用拷贝构造函数(三种情况)\n结构体内存对齐方式和为什么要进行内存对齐?\n内存泄露的定义,如何检测与避免?\n手写智能指针的实现(shared_ptr和weak_ptr实现的区别)\n智能指针的循环引用\n遇到coredump要怎么调试\n内存检查工具的了解\n模板的用法与适用场景\n成员初始化列表的概念,为什么用成员初始化列表会快一些(性能优势)?\n用过C++ 11吗,知道C++ 11哪些新特性?\nC++的调用惯例(简单一点C++函数调用的压栈过程)\nC++的四种强制转换\nstatic_cast\n\ndynamic_cast\n\nconst_cast\n\nreinterpret_cast\n\nC++中将临时变量作为返回值的时候的处理过程(栈上的内存分配、拷贝过程)\nC++的异常处理\nvolatile关键字\n优化程序的几种方法\npublic,protected和private访问权限和继承\nclass和struct的区别\ndecltype()和auto\ninline和宏定义的区别\nC++和C的类型安全" }, { 
"alpha_fraction": 0.5229166746139526, "alphanum_fraction": 0.53125, "avg_line_length": 24.289474487304688, "blob_id": "5da46e45afcf9e642b1300de83c62bf616e4f586", "content_id": "5bfac2e407a2e912ad62828d64eca1186963294d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 960, "license_type": "no_license", "max_line_length": 77, "num_lines": 38, "path": "/interview/HashTables.py", "repo_name": "Alwaysproblem/simplecode", "src_encoding": "UTF-8", "text": "#!/bin/python3\n\nimport math\nimport os\nimport random\nimport re\nimport sys\n\n# https://www.hackerrank.com/challenges/ctci-ice-cream-parlor/problem\n# Complete the whatFlavors function below.\ndef whatFlavors(cost, money):\n hashDic = {}\n for i, c in enumerate(cost, 1):\n if c in hashDic:\n hashDic[c].append(i)\n else:\n hashDic[c] = [i]\n \n for ind, c in enumerate(cost, 1):\n if 2 * c == money and len(hashDic[c]) == 2:\n print(\" \".join(map(str, hashDic[c])))\n return\n elif money - c in hashDic:\n if len(hashDic[money - c]) == 1 and ind != hashDic[money - c][0]:\n print(ind, hashDic[money - c][0], sep=\" \")\n return\n\nif __name__ == '__main__':\n t = int(input())\n\n for t_itr in range(t):\n money = int(input())\n\n n = int(input())\n\n cost = list(map(int, input().rstrip().split()))\n\n whatFlavors(cost, money)" }, { "alpha_fraction": 0.3679369390010834, "alphanum_fraction": 0.41787123680114746, "avg_line_length": 24.383333206176758, "blob_id": "aab19da539a1ae7a45fa0c2aafc26a0879e988f6", "content_id": "90908dac7e012f6433a105f0c4821ad841f598cb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1530, "license_type": "no_license", "max_line_length": 68, "num_lines": 60, "path": "/leetcode/python/2.两数相加.py", "repo_name": "Alwaysproblem/simplecode", "src_encoding": "UTF-8", "text": "#\n# @lc app=leetcode.cn id=2 lang=python3\n#\n# [2] 两数相加\n#\n\n# Definition for singly-linked list.\nclass ListNode:\n 
def __init__(self, val=0, next=None):\n self.val = val\n self.next = next\n# @lc code=start\nclass Solution:\n def addTwoNumbers(self, l1: ListNode, l2: ListNode) -> ListNode:\n l1p = l1\n l2p = l2\n\n l3 = ListNode()\n l3p = l3\n last = l3\n flag = 0\n\n while l1p != None or l2p != None:\n if l1p is None:\n l3p.val += l2p.val\n if l3p.val >= 10:\n flag = 1\n l3p.val -= 10\n else:\n flag = 0\n \n\n elif l2p is None:\n l3p.val += l1p.val\n if l3p.val >= 10:\n flag = 1\n l3p.val -= 10\n else:\n flag = 0\n\n elif l2p.val + l1p.val + flag >= 10:\n l3p.val += l2p.val + l1p.val - 10\n flag = 1\n else:\n l3p.val += l2p.val + l1p.val\n flag = 0\n\n l1p = l1p.next if l1p != None else None\n l2p = l2p.next if l2p != None else None\n l3p.next = ListNode(val=flag)\n last = l3p\n l3p = l3p.next\n if last.next is not None and last.next.val == 0:\n last.next = None\n return l3\n# @lc code=end\n\nif __name__ == \"__main__\":\n s = Solution()\n s.addTwoNumbers([9,9,9,9,9,9,9], [9,9,9,9])" }, { "alpha_fraction": 0.7253628969192505, "alphanum_fraction": 0.7261743545532227, "avg_line_length": 69.20252990722656, "blob_id": "17db1c7c986164af5b79f26eaf3653b5b45d6174", "content_id": "e2ffd6bf1f197f0f21dee61ef49d572bc56b5e02", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 15395, "license_type": "no_license", "max_line_length": 101, "num_lines": 158, "path": "/labuladong/cpp/todo.md", "repo_name": "Alwaysproblem/simplecode", "src_encoding": "UTF-8", "text": "### [第零章、核心框架汇总](https://labuladong.github.io/algo/)\n* [x] [学习算法和刷题的框架思维](https://labuladong.github.io/article/fname.html?fname=学习数据结构和算法的高效方法)\n* [ ] [我的刷题心得](https://labuladong.github.io/article/fname.html?fname=算法心得)\n* [ ] [双指针技巧秒杀七道链表题目](https://labuladong.github.io/article/fname.html?fname=链表技巧)\n* [ ] [双指针技巧秒杀七道数组题目](https://labuladong.github.io/article/fname.html?fname=双指针技巧)\n* [x] [东哥带你刷二叉树(纲领篇)](https://labuladong.github.io/article/fname.html?fname=二叉树总结)\n* [x] 
[动态规划解题套路框架](https://labuladong.github.io/article/fname.html?fname=动态规划详解进阶)\n* [x] [回溯算法解题套路框架](https://labuladong.github.io/article/fname.html?fname=回溯算法详解修订版)\n* [x] [回溯算法秒杀所有排列/组合/子集问题](https://labuladong.github.io/article/fname.html?fname=子集排列组合)\n* [x] [BFS 算法解题套路框架](https://labuladong.github.io/article/fname.html?fname=BFS框架)\n* [x] [我写了首诗,把二分搜索算法变成了默写题](https://labuladong.github.io/article/fname.html?fname=二分查找详解)\n* [x] [我写了首诗,把滑动窗口算法变成了默写题](https://labuladong.github.io/article/fname.html?fname=滑动窗口技巧进阶)\n* [ ] [一个方法团灭 LeetCode 股票买卖问题](https://labuladong.github.io/article/fname.html?fname=团灭股票问题)\n* [ ] [一个方法团灭 LeetCode 打家劫舍问题](https://labuladong.github.io/article/fname.html?fname=抢房子)\n* [ ] [一个方法团灭 nSum 问题](https://labuladong.github.io/article/fname.html?fname=nSum)\n* [ ] [算法时空复杂度分析实用指南](https://labuladong.github.io/article/fname.html?fname=时间复杂度)\n* [x] [算法笔试「骗分」套路](https://labuladong.github.io/article/fname.html?fname=刷题技巧)\n\n\n### [第一章、手把手刷数据结构](https://labuladong.github.io/algo/)\n* [手把手刷链表算法](https://labuladong.github.io/algo/)\n * [ ] [双指针技巧秒杀七道链表题目](https://labuladong.github.io/article/fname.html?fname=链表技巧)\n * [x] [递归魔法:反转单链表](https://labuladong.github.io/article/fname.html?fname=递归反转链表的一部分)\n * [x] [如何 K 个一组反转链表](https://labuladong.github.io/article/fname.html?fname=k个一组反转链表)\n * [x] [如何判断回文链表](https://labuladong.github.io/article/fname.html?fname=判断回文链表)\n\n* [手把手刷数组算法](https://labuladong.github.io/algo/)\n * [ ] [双指针技巧秒杀七道数组题目](https://labuladong.github.io/article/fname.html?fname=双指针技巧)\n * [x] [小而美的算法技巧:前缀和数组](https://labuladong.github.io/article/fname.html?fname=前缀和技巧)\n * [x] [小而美的算法技巧:差分数组](https://labuladong.github.io/article/fname.html?fname=差分技巧)\n * [ ] [二维数组的花式遍历技巧](https://labuladong.github.io/article/fname.html?fname=花式遍历)\n * [ ] [我写了首诗,把滑动窗口算法算法变成了默写题](https://labuladong.github.io/article/fname.html?fname=滑动窗口技巧进阶)\n * [x] [滑动窗口算法延伸:Rabin Karp 字符匹配算法](https://labuladong.github.io/article/fname.html?fname=rabinkarp)\n * [x] 
[我写了首诗,让你闭着眼睛也能写对二分搜索](https://labuladong.github.io/article/fname.html?fname=二分查找详解)\n * [x] [带权重的随机选择算法](https://labuladong.github.io/article/fname.html?fname=随机权重)\n * [ ] [二分搜索怎么用?我又总结了套路](https://labuladong.github.io/article/fname.html?fname=二分运用)\n * [ ] [田忌赛马背后的算法决策](https://labuladong.github.io/article/fname.html?fname=田忌赛马)\n * [x] [常数时间删除/查找数组中的任意元素](https://labuladong.github.io/article/fname.html?fname=随机集合)\n * [ ] [一道数组去重的算法题把我整不会了](https://labuladong.github.io/article/fname.html?fname=单调栈去重)\n\n* [手把手刷二叉树算法](https://labuladong.github.io/algo/)\n * [x] [东哥带你刷二叉树(纲领篇)](https://labuladong.github.io/article/fname.html?fname=二叉树总结)\n * [x] [东哥带你刷二叉树(思路篇)](https://labuladong.github.io/article/fname.html?fname=二叉树系列1)\n * [x] [东哥带你刷二叉树(构造篇)](https://labuladong.github.io/article/fname.html?fname=二叉树系列2)\n * [x] [东哥带你刷二叉树(序列化篇)](https://labuladong.github.io/article/fname.html?fname=二叉树的序列化)\n * [x] [东哥带你刷二叉树(后序篇)](https://labuladong.github.io/article/fname.html?fname=二叉树系列3)\n * [ ] [归并排序详解及应用](https://labuladong.github.io/article/fname.html?fname=归并排序)\n * [x] [东哥带你刷二叉搜索树(特性篇)](https://labuladong.github.io/article/fname.html?fname=BST1)\n * [x] [东哥带你刷二叉搜索树(基操篇)](https://labuladong.github.io/article/fname.html?fname=BST2)\n * [x] [东哥带你刷二叉搜索树(构造篇)](https://labuladong.github.io/article/fname.html?fname=BST3)\n * [ ] [快速排序详解及应用](https://labuladong.github.io/article/fname.html?fname=快速排序)\n * [ ] [题目不让我干什么,我偏要干什么](https://labuladong.github.io/article/fname.html?fname=nestInteger)\n * [x] [Git原理之最近公共祖先](https://labuladong.github.io/article/fname.html?fname=公共祖先)\n * [x] [如何计算完全二叉树的节点数](https://labuladong.github.io/article/fname.html?fname=完全二叉树节点数)\n\n* [手把手刷图算法](https://labuladong.github.io/algo/)\n * [ ] [图论基础及遍历算法](https://labuladong.github.io/article/fname.html?fname=图)\n * [ ] [环检测及拓扑排序算法](https://labuladong.github.io/article/fname.html?fname=拓扑排序)\n * [ ] [二分图判定算法](https://labuladong.github.io/article/fname.html?fname=二分图)\n * [ ] 
[并查集(Union-Find)算法](https://labuladong.github.io/article/fname.html?fname=UnionFind算法详解)\n * [ ] [Kruskal 最小生成树算法](https://labuladong.github.io/article/fname.html?fname=kruskal)\n * [ ] [Prim 最小生成树算法](https://labuladong.github.io/article/fname.html?fname=prim算法)\n * [ ] [Dijkstra 算法模板及应用](https://labuladong.github.io/article/fname.html?fname=dijkstra算法)\n * [ ] [众里寻他千百度:名流问题](https://labuladong.github.io/article/fname.html?fname=名人问题)\n\n* [手把手设计数据结构](https://labuladong.github.io/algo/)\n * [-] [算法就像搭乐高:带你手撸 LRU 算法](https://labuladong.github.io/article/fname.html?fname=LRU算法)\n * [-] [算法就像搭乐高:带你手撸 LFU 算法](https://labuladong.github.io/article/fname.html?fname=LFU)\n * [ ] [前缀树算法模板秒杀五道算法题](https://labuladong.github.io/article/fname.html?fname=trie)\n * [ ] [一道求中位数的算法题把我整不会了](https://labuladong.github.io/article/fname.html?fname=数据流中位数)\n * [ ] [单调栈结构解决三道算法题](https://labuladong.github.io/article/fname.html?fname=单调栈)\n * [ ] [单调队列结构解决滑动窗口问题](https://labuladong.github.io/article/fname.html?fname=单调队列)\n * [ ] [二叉堆详解实现优先级队列](https://labuladong.github.io/article/fname.html?fname=二叉堆详解实现优先级队列)\n * [ ] [队列实现栈以及栈实现队列](https://labuladong.github.io/article/fname.html?fname=队列实现栈栈实现队列)\n * [ ] [设计朋友圈时间线功能](https://labuladong.github.io/article/fname.html?fname=设计Twitter)\n\n### [第二章、手把手刷动态规划](https://labuladong.github.io/algo/)\n* [动态规划基本技巧](https://labuladong.github.io/algo/)\n * [x] [动态规划解题套路框架](https://labuladong.github.io/article/fname.html?fname=动态规划详解进阶)\n * [x] [动态规划设计:最长递增子序列](https://labuladong.github.io/article/fname.html?fname=动态规划设计:最长递增子序列)\n * [x] [最优子结构原理和 dp 数组遍历方向](https://labuladong.github.io/article/fname.html?fname=最优子结构)\n * [x] [base case 和备忘录的初始值怎么定?](https://labuladong.github.io/article/fname.html?fname=备忘录等基础)\n * [x] [对动态规划进行降维打击](https://labuladong.github.io/article/fname.html?fname=状态压缩技巧)\n\n* [子序列类型问题](https://labuladong.github.io/algo/)\n * [x] [经典动态规划:编辑距离](https://labuladong.github.io/article/fname.html?fname=编辑距离)\n * [x] 
[动态规划设计:最长递增子序列](https://labuladong.github.io/article/fname.html?fname=动态规划设计:最长递增子序列)\n * [x] [动态规划设计:最大子数组](https://labuladong.github.io/article/fname.html?fname=最大子数组)\n * [x] [经典动态规划:最长公共子序列](https://labuladong.github.io/article/fname.html?fname=LCS)\n * [ ] [动态规划之子序列问题解题模板](https://labuladong.github.io/article/fname.html?fname=子序列问题模板)\n\n* [背包类型问题](https://labuladong.github.io/algo/)\n * [x] [经典动态规划:0-1 背包问题](https://labuladong.github.io/article/fname.html?fname=背包问题)\n * [x] [经典动态规划:子集背包问题](https://labuladong.github.io/article/fname.html?fname=背包子集)\n * [x] [经典动态规划:完全背包问题](https://labuladong.github.io/article/fname.html?fname=背包零钱)\n * [x] [动态规划和回溯算法到底谁是谁爹?](https://labuladong.github.io/article/fname.html?fname=targetSum)\n\n* [用动态规划玩游戏](https://labuladong.github.io/algo/)\n * [ ] [动态规划之最小路径和](https://labuladong.github.io/article/fname.html?fname=最小路径和)\n * [ ] [动态规划帮我通关了《魔塔》](https://labuladong.github.io/article/fname.html?fname=魔塔)\n * [ ] [动态规划帮我通关了《辐射4》](https://labuladong.github.io/article/fname.html?fname=转盘)\n * [ ] [旅游省钱大法:加权最短路径](https://labuladong.github.io/article/fname.html?fname=旅行最短路径)\n * [ ] [经典动态规划:正则表达式](https://labuladong.github.io/article/fname.html?fname=动态规划之正则表达)\n * [ ] [经典动态规划:高楼扔鸡蛋](https://labuladong.github.io/article/fname.html?fname=高楼扔鸡蛋问题)\n * [ ] [经典动态规划:戳气球](https://labuladong.github.io/article/fname.html?fname=扎气球)\n * [ ] [经典动态规划:博弈问题](https://labuladong.github.io/article/fname.html?fname=动态规划之博弈问题)\n * [ ] [经典动态规划:四键键盘](https://labuladong.github.io/article/fname.html?fname=动态规划之四键键盘)\n * [ ] [一个方法团灭 LeetCode 打家劫舍问题](https://labuladong.github.io/article/fname.html?fname=抢房子)\n * [ ] [一个方法团灭 LeetCode 股票买卖问题](https://labuladong.github.io/article/fname.html?fname=团灭股票问题)\n * [ ] [有限状态机之 KMP 字符匹配算法](https://labuladong.github.io/article/fname.html?fname=动态规划之KMP字符匹配算法)\n\n* [贪心类型问题](https://labuladong.github.io/algo/)\n * [ ] [贪心算法之区间调度问题](https://labuladong.github.io/article/fname.html?fname=贪心算法之区间调度问题)\n * [ ] 
[扫描线技巧:安排会议室](https://labuladong.github.io/article/fname.html?fname=安排会议室)\n * [ ] [剪视频剪出一个贪心算法](https://labuladong.github.io/article/fname.html?fname=剪视频)\n * [ ] [如何运用贪心思想玩跳跃游戏](https://labuladong.github.io/article/fname.html?fname=跳跃游戏)\n\n\n### [第三章、必知必会算法技巧](https://labuladong.github.io/algo/)\n* [暴力搜索算法](https://labuladong.github.io/algo/)\n * [ ] [回溯算法解题套路框架](https://labuladong.github.io/article/fname.html?fname=回溯算法详解修订版)\n * [ ] [经典回溯算法:集合划分问题](https://labuladong.github.io/article/fname.html?fname=集合划分)\n * [ ] [回溯算法秒杀所有排列/组合/子集问题](https://labuladong.github.io/article/fname.html?fname=子集排列组合)\n * [ ] [一文秒杀所有岛屿题目](https://labuladong.github.io/article/fname.html?fname=岛屿题目)\n * [ ] [回溯算法最佳实践:解数独](https://labuladong.github.io/article/fname.html?fname=sudoku)\n * [ ] [回溯算法最佳实践:括号生成](https://labuladong.github.io/article/fname.html?fname=合法括号生成)\n * [ ] [BFS 算法解题套路框架](https://labuladong.github.io/article/fname.html?fname=BFS框架)\n * [ ] [如何用 BFS 算法秒杀各种智力题](https://labuladong.github.io/article/fname.html?fname=BFS解决滑动拼图)\n\n* [数学运算技巧](https://labuladong.github.io/algo/)\n * [ ] [谈谈游戏中的随机算法](https://labuladong.github.io/article/fname.html?fname=随机算法)\n * [ ] [常用的位操作](https://labuladong.github.io/article/fname.html?fname=常用的位操作)\n * [ ] [讲两道常考的阶乘算法题](https://labuladong.github.io/article/fname.html?fname=阶乘题目)\n * [ ] [如何高效寻找素数](https://labuladong.github.io/article/fname.html?fname=打印素数)\n * [ ] [如何高效进行模幂运算](https://labuladong.github.io/article/fname.html?fname=superPower)\n * [ ] [如何同时寻找缺失和重复的元素](https://labuladong.github.io/article/fname.html?fname=缺失和重复的元素)\n * [ ] [一行代码就能解决的算法题](https://labuladong.github.io/article/fname.html?fname=一行代码解决的智力题)\n * [ ] [几个反直觉的概率问题](https://labuladong.github.io/article/fname.html?fname=几个反直觉的概率问题)\n\n* [经典面试题](https://labuladong.github.io/algo/)\n * [ ] [分治算法详解:运算优先级](https://labuladong.github.io/article/fname.html?fname=分治算法)\n * [ ] [一个方法解决三道区间问题](https://labuladong.github.io/article/fname.html?fname=区间问题合集)\n * [ ] 
[谁能想到,斗地主也能玩出算法](https://labuladong.github.io/article/fname.html?fname=斗地主)\n * [ ] [烧饼排序算法](https://labuladong.github.io/article/fname.html?fname=烧饼排序)\n * [ ] [字符串乘法计算](https://labuladong.github.io/article/fname.html?fname=字符串乘法)\n * [ ] [如何实现一个计算器](https://labuladong.github.io/article/fname.html?fname=实现计算器)\n * [ ] [如何高效解决接雨水问题](https://labuladong.github.io/article/fname.html?fname=接雨水)\n * [ ] [如何解决括号相关的问题](https://labuladong.github.io/article/fname.html?fname=括号插入)\n * [ ] [如何判定完美矩形](https://labuladong.github.io/article/fname.html?fname=完美矩形)\n * [ ] [如何调度考生的座位](https://labuladong.github.io/article/fname.html?fname=座位调度)\n * [ ] [二分查找高效判定子序列](https://labuladong.github.io/article/fname.html?fname=二分查找判定子序列)\n\n\n并查集\n最小生成树\n线段树\n树状数组\n字典树" }, { "alpha_fraction": 0.5556628108024597, "alphanum_fraction": 0.5768983364105225, "avg_line_length": 33.921348571777344, "blob_id": "4bf0e5af900f9f0b8a48a1120f236f7d447bf0f7", "content_id": "872f459e11f1057b279043020eb28162b5768155", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3108, "license_type": "no_license", "max_line_length": 102, "num_lines": 89, "path": "/UNSWquiz/Week 6 - quiz_5_sol.py", "repo_name": "Alwaysproblem/simplecode", "src_encoding": "UTF-8", "text": "# Randomly generates a grid of 0s and 1s and determines\n# the maximum number of \"spikes\" in a shape.\n# A shape is made up of 1s connected horizontally or vertically (it can contain holes).\n# A \"spike\" in a shape is a 1 that is part of this shape and \"sticks out\".\n#\n# Written by Eric Martin for COMP9021\n\n\nfrom random import seed, randrange\nimport sys\n\n\ndim = 10\n\n\ndef display_grid():\n for i in range(dim):\n print(' ', end = '')\n for j in range(dim):\n print(' 1', end = '') if grid[i][j] else print(' 0', end = '')\n print()\n print()\n\n# Returns the number of shapes we have discovered and \"coloured\".\n# We \"colour\" the first shape we find by replacing all the 1s that make 
it with 2.\n# We \"colour\" the second shape we find by replacing all the 1s that make it with 3.\ndef colour_shapes():\n colour = 1\n for i in range(dim):\n for j in range(dim):\n if grid[i][j] == 1:\n colour += 1\n colour_shape_starting_from(i, j, colour)\n return colour - 1\n\n# We have found a 1 at location grid[i][j], hence part of a shape we have not coloured.\n# We \"paint\" this part and then recursively the whole shape with \"colour\".\ndef colour_shape_starting_from(i, j, colour):\n grid[i][j] = colour\n if i and grid[i - 1][j] == 1:\n colour_shape_starting_from(i - 1, j, colour)\n if i < dim - 1 and grid[i + 1][j] == 1:\n colour_shape_starting_from(i + 1, j, colour)\n if j and grid[i][j - 1] == 1:\n colour_shape_starting_from(i, j - 1, colour)\n if j < dim - 1 and grid[i][j + 1] == 1:\n colour_shape_starting_from(i, j + 1, colour)\n\ndef max_number_of_spikes(nb_of_shapes):\n max_nb_of_spikes = 0\n for colour in range(2, nb_of_shapes + 2):\n nb_of_spikes = nb_of_spikes_for_shape(colour)\n max_nb_of_spikes = max(max_nb_of_spikes, nb_of_spikes)\n return max_nb_of_spikes\n\ndef nb_of_spikes_for_shape(colour):\n nb_of_spikes = 0\n for i in range(dim):\n for j in range(dim):\n nb_of_surrounding_0s = 0\n if grid[i][j] != colour:\n continue\n if i == 0 or grid[i - 1][j] == 0:\n nb_of_surrounding_0s += 1\n if i == dim - 1 or grid[i + 1][j] == 0:\n nb_of_surrounding_0s += 1\n if j == 0 or grid[i][j - 1] == 0:\n nb_of_surrounding_0s += 1\n if j == dim - 1 or grid[i][j + 1] == 0:\n nb_of_surrounding_0s += 1\n if nb_of_surrounding_0s > 2:\n nb_of_spikes += 1\n return nb_of_spikes\n\ntry:\n for_seed, n = [int(i) for i in\n input('Enter two integers, the second one being strictly positive: ').split()]\n if n <= 0:\n raise ValueError\nexcept ValueError:\n print('Incorrect input, giving up.')\n sys.exit()\n\nseed(for_seed)\ngrid = [[randrange(n) != 0 for _ in range(dim)] for _ in range(dim)]\nprint('Here is the grid that has been 
generated:')\ndisplay_grid()\nnb_of_shapes = colour_shapes()\nprint(f'The maximum number of spikes of some shape is equal to {max_number_of_spikes(nb_of_shapes)}')\n" }, { "alpha_fraction": 0.507056474685669, "alphanum_fraction": 0.5272177457809448, "avg_line_length": 21.044445037841797, "blob_id": "4ff696485bffe64309053ce70240f748c79d4a15", "content_id": "b4ec97fac4309163ce1b09b5d83a1786e722b313", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1024, "license_type": "no_license", "max_line_length": 75, "num_lines": 45, "path": "/leetcode/cpp/minInsertions/main.cpp", "repo_name": "Alwaysproblem/simplecode", "src_encoding": "UTF-8", "text": "/*\n * @lc app=leetcode.cn id=1312 lang=cpp\n *\n * [1312] 让字符串成为回文串的最少插入次数\n */\n#include <fmt/format.h>\n#include <fmt/ranges.h>\n\n#include <iostream>\n#include <unordered_map>\n#include <vector>\n\nusing namespace std;\n\n// @lc code=start\nclass Solution {\n public:\n int minInsertions(string s) {\n if (!s.size()) return 0;\n vector<vector<int>> dp(s.size(), vector<int>(s.size()));\n for (int i = 0; i < s.size(); i++) {\n dp[i][i] = 0;\n }\n\n for (int start = s.size() - 1; start >= 0; start--) {\n for (int end = start + 1; end < s.size(); end++) {\n if (s[start] == s[end])\n dp[start][end] = dp[start + 1][end - 1];\n else\n dp[start][end] = min(dp[start + 1][end], dp[start][end - 1]) + 1;\n }\n }\n // fmt::print(\"{}\\n\", fmt::join(dp.begin(), dp.end(), \"\\n\"));\n return dp.front().back();\n }\n};\n// @lc code=end\n\nint main() {\n string s = \"leetcode\";\n Solution sol;\n int v = sol.minInsertions(s);\n fmt::print(\"{}\\n\", v);\n return 0;\n}\n" }, { "alpha_fraction": 0.75, "alphanum_fraction": 0.7628205418586731, "avg_line_length": 77, "blob_id": "187fab98a344d47008baf2a3f5f6045621d2b384", "content_id": "ec6287df44a0afe750577b62e7eb93883635a1d3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 156, 
"license_type": "no_license", "max_line_length": 78, "num_lines": 2, "path": "/leetcode/cpp/.envsetup.sh", "repo_name": "Alwaysproblem/simplecode", "src_encoding": "UTF-8", "text": "# source /root/anaconda3/etc/profile.d/conda.sh && conda activate <env_name>\nsource ~/opt/anaconda3/etc/profile.d/conda.sh && conda activate cpp-simplecode\n" }, { "alpha_fraction": 0.41771581768989563, "alphanum_fraction": 0.4258093535900116, "avg_line_length": 23.439559936523438, "blob_id": "deeaf7b1aac37137d293f202b04943151f5b989f", "content_id": "9a7c0f43b6a063b73580dbac471770927e083788", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 2224, "license_type": "no_license", "max_line_length": 80, "num_lines": 91, "path": "/CHack/max_logic.c", "repo_name": "Alwaysproblem/simplecode", "src_encoding": "UTF-8", "text": "#include <stdio.h>\n#include <string.h>\n#include <math.h>\n#include <stdlib.h>\n//Complete the following function.\n#define INPUTBUFFER 256\n#define MAXSEG 255\n#define SEGM (MAXSEG - 1)\n#define DEFAULT_DIGITS 2\n\n#define type(var, TYPE) (__builtin_types_compatible_p(typeof(var), TYPE))\n#define len(array, element) (sizeof(array) / sizeof(element))\n\ntypedef char *string;\n\n// we define the char *seg[N] like:\n// the seg[N-1] need to be set NULL.\n\nvoid printChar(char c, const string end) {\n fflush(stdout);\n putchar(c);\n printf(\"%s\", end);\n}\n\nvoid printStr(string var, const string end) {\n fflush(stdout);\n printf(\"%s\", var);\n printf(\"%s\", end);\n}\n\nvoid printInt(int num, const string end) {\n fflush(stdout);\n printf(\"%d\", num);\n printf(\"%s\", end);\n}\n\nvoid printF(double num, const string end) {\n fflush(stdout);\n printf(\"%.*f\", DEFAULT_DIGITS, num);\n printf(\"%s\", end);\n}\n\n#define print(X, sep) \\\n do { \\\n _Generic((X), char \\\n : printChar, int \\\n : printInt, float \\\n : printF, string \\\n : printStr)(X, sep); \\\n } while (0)\n\n\nvoid calculate_the_maximum(int n, int k) 
{\n //Write your code here.\n int a = 1;\n int b = 1;\n int max_and = 0;\n int max_or = 0;\n int max_xor = 0;\n int tmp = 0;\n\n for(; a < n; a++){\n for (b = a + 1; b <= n; b++){\n tmp = a & b;\n if (tmp > max_and && tmp < k){\n max_and = tmp;\n }\n tmp = a | b;\n if (tmp > max_or && tmp < k) {\n max_or = tmp;\n }\n tmp = a ^ b;\n if (tmp > max_xor && tmp < k) {\n max_xor = tmp;\n }\n }\n }\n print(max_and, \"\\n\");\n print(max_or, \"\\n\");\n print(max_xor, \"\\n\");\n \n}\n\nint main() {\n int n, k;\n \n scanf(\"%d %d\", &n, &k);\n calculate_the_maximum(n, k);\n \n return 0;\n}\n" }, { "alpha_fraction": 0.5873320698738098, "alphanum_fraction": 0.6007677316665649, "avg_line_length": 16.399999618530273, "blob_id": "78b16285b863dcf0ee3621958dac10f304ee9874", "content_id": "71c7ea82866e0b48575b5d410f54942170fd2513", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 521, "license_type": "no_license", "max_line_length": 79, "num_lines": 30, "path": "/interview/anagram.py", "repo_name": "Alwaysproblem/simplecode", "src_encoding": "UTF-8", "text": "#!/bin/python3\n\nimport math\nimport os\nimport random\nimport re\nimport sys\n\n# https://www.hackerrank.com/challenges/anagram/problem\n\n# Complete the anagram function below.\n# hanming distance\nfrom collections import Counter\ndef anagram(s):\n if len(s) % 2 == 0:\n return sum((Counter(s[0:len(s)//2]) - Counter(s[len(s)//2:])).values())\n else:\n return \"-1\"\n\n\nif __name__ == '__main__':\n\n q = int(input())\n\n for q_itr in range(q):\n s = input()\n\n result = anagram(s)\n\n print(result)" }, { "alpha_fraction": 0.5513196587562561, "alphanum_fraction": 0.5689149498939514, "avg_line_length": 13.208333015441895, "blob_id": "e1f19558bf9f92048d9808ac5be1da4518fabaa4", "content_id": "4f6e9e845bae8e1f3277a4072203770e601fc8d7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 341, "license_type": "no_license", 
"max_line_length": 55, "num_lines": 24, "path": "/leetcode/cpp/temp/template/main.cpp", "repo_name": "Alwaysproblem/simplecode", "src_encoding": "UTF-8", "text": "/*\n * @lc app=leetcode.cn id=* lang=cpp\n *\n * [*] ******\n */\n\n#include <fmt/format.h>\n#include <fmt/ranges.h>\n\n#include <iostream>\n#include <unordered_map>\n#include <vector>\n\nusing namespace std;\n\n// @lc code=start\n\n// @lc code=end\n\nint main() {\n unordered_map<int, int> v{{1, 3}, {2, 4}, {3, NULL}};\n fmt::print(\"{}\\n\", v);\n return 0;\n}\n" }, { "alpha_fraction": 0.6179560422897339, "alphanum_fraction": 0.6332378387451172, "avg_line_length": 21.276596069335938, "blob_id": "1b8df9580627ae21a83d979315d121f783c87fca", "content_id": "e3f3e6c9a08ab7744d8e33226dad1ebe9d83dce7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1047, "license_type": "no_license", "max_line_length": 64, "num_lines": 47, "path": "/labuladong/cpp/countNodes/main.cpp", "repo_name": "Alwaysproblem/simplecode", "src_encoding": "UTF-8", "text": "#include <fmt/format.h>\n#include <fmt/ranges.h>\n\n#include <iostream>\n#include <queue>\n#include <string>\n#include <unordered_set>\n#include <vector>\n\n#include \"common_types/TreeNode/BinaryTreeNode.h\"\n#include \"utils/debug_recursive.hpp\"\n\nusing namespace std;\nusing TreeNode = BinaryTree::BinaryTreeNode<int>;\nusing BinaryTree::BuildBinaryTree;\nusing BinaryTree::showBinaryTree;\n\nclass Solution {\n public:\n int level = 0;\n int countNodes(TreeNode* root) {\n if (!root) return 0;\n int hl = 0;\n int hr = 0;\n TreeNode* cur = root;\n for (hl = 0; cur; cur = cur->left, hl++);\n\n cur = root;\n for (hr = 0; cur; cur = cur->right, hr++);\n if (hr == hl)\n return (int) pow(2, hr) - 1;\n\n return countNodes(root->left) + countNodes(root->right) + 1;\n }\n};\n\nint main() {\n const int null = BinaryTree::null<int>();\n vector<int> a{1, 2, 3, 4, 5, 6};\n TreeNode* root = BuildBinaryTree<int>(a);\n 
showBinaryTree<int>(root);\n Solution sol;\n fmt::print(\"\\n\");\n int v = sol.countNodes(root);\n fmt::print(\"{}\\n\", v);\n return 0;\n}\n" }, { "alpha_fraction": 0.6600790619850159, "alphanum_fraction": 0.6739130616188049, "avg_line_length": 21.488889694213867, "blob_id": "fffb0018ae4d6b31ae7261347ec6b8b276ce7f73", "content_id": "132c1c06c1680724eaf37e6b8f5f14998739486b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1030, "license_type": "no_license", "max_line_length": 60, "num_lines": 45, "path": "/leetcode/cpp/searchBST/main.cpp", "repo_name": "Alwaysproblem/simplecode", "src_encoding": "UTF-8", "text": "/*\n * @lc app=leetcode.cn id=700 lang=cpp\n *\n * [700] 二叉搜索树中的搜索\n */\n#include <fmt/format.h>\n#include <fmt/ranges.h>\n\n#include <iostream>\n#include <queue>\n#include <string>\n#include <unordered_set>\n#include <vector>\n\n#include \"common_types/TreeNode/BinaryTreeNode.h\"\n\nusing namespace std;\nusing TreeNode = BinaryTree::BinaryTreeNode<int>;\nusing BinaryTree::BuildBinaryTree;\nusing BinaryTree::showBinaryTree;\n\n// @lc code=start\nclass Solution {\n public:\n TreeNode* searchBST(TreeNode* root, int val) {\n if (!root) return nullptr;\n if (root->val == val) return root;\n if (val < root->val) return searchBST(root->left, val);\n if (val > root->val) return searchBST(root->right, val);\n return nullptr;\n }\n};\n// @lc code=end\n\nint main() {\n const int null = BinaryTree::null<int>();\n vector<int> a{4, 2, 7, 1, 3};\n TreeNode* root = BuildBinaryTree<int>(a);\n showBinaryTree<int>(root);\n int val = 12;\n Solution sol;\n TreeNode* v = sol.searchBST(root, val);\n showBinaryTree<int>(v);\n return 0;\n}\n" }, { "alpha_fraction": 0.45438283681869507, "alphanum_fraction": 0.4669051766395569, "avg_line_length": 14.527777671813965, "blob_id": "4b57a64f5fc97c3477596ff58c12cf13b0015816", "content_id": "750233f475625e527cded1fc333b2f25da9667d1", "detected_licenses": [], "is_generated": false, 
"is_vendor": false, "language": "Python", "length_bytes": 559, "license_type": "no_license", "max_line_length": 38, "num_lines": 36, "path": "/interview/Candies.py", "repo_name": "Alwaysproblem/simplecode", "src_encoding": "UTF-8", "text": "#!/bin/python3\n\nimport math\nimport os\nimport random\nimport re\nimport sys\n\n# Complete the candies function below.\ndef candies(n, arr):\n c = [1]\n d = 1\n for i, v in enumerate(arr):\n if i > 0:\n if v <= arr[i - 1]:\n d = 1\n else:\n d += 1\n c.append(d)\n \n print(c)\n return sum(c)\n\nif __name__ == '__main__':\n\n n = int(input())\n\n arr = []\n\n for _ in range(n):\n arr_item = int(input())\n arr.append(arr_item)\n\n result = candies(n, arr)\n\n print(result)\n" }, { "alpha_fraction": 0.43473193049430847, "alphanum_fraction": 0.441724956035614, "avg_line_length": 17.673913955688477, "blob_id": "fe4205c915ea7d511970d68b31c938a4e5f57068", "content_id": "d33e8ed632fa283ef0127d1fc71ad9297413895b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 858, "license_type": "no_license", "max_line_length": 48, "num_lines": 46, "path": "/NewCoder/wangyi13.py", "repo_name": "Alwaysproblem/simplecode", "src_encoding": "UTF-8", "text": "import math\nimport os\nimport random\nimport re\nimport sys\n\n# debug = True\nfrom itertools import accumulate\ndef SearchX(N, m):\n NN = list(accumulate(N, lambda x, y: x + y))\n l = 0\n r = len(N) - 1\n\n while l < r:\n mid = (l + r) // 2\n if NN[mid] > m:\n r = mid \n elif NN[mid] < m:\n l = mid + 1\n else:\n return mid + 1\n else:\n if l == r:\n return l + 1\n\ndef main():\n # input\n n = int(input())\n N= list(map(int, input().split()))\n m = int(input())\n M= list(map(int, input().split()))\n\n # solution\n for mm in M:\n result = SearchX(N, mm)\n print(result)\n\nif __name__ == \"__main__\":\n # if not debug:\n # try:\n # while True:\n # main()\n # except EOFError:\n # exit()\n # else:\n main()" }, { "alpha_fraction": 
0.5915080308914185, "alphanum_fraction": 0.6046851873397827, "avg_line_length": 23.39285659790039, "blob_id": "e68c22648a0a78c7f01476e55ab20bc06e6acf55", "content_id": "85a71803f409c5d96b95fddc2fdf7effec50a795", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1366, "license_type": "no_license", "max_line_length": 76, "num_lines": 56, "path": "/labuladong/cpp/minDepth/main.cpp", "repo_name": "Alwaysproblem/simplecode", "src_encoding": "UTF-8", "text": "#include <fmt/format.h>\n#include <fmt/ranges.h>\n\n#include <iostream>\n#include <queue>\n#include <string>\n#include <unordered_set>\n#include <vector>\n\n#include \"common_types/TreeNode/BinaryTreeNode.h\"\n\nusing namespace std;\nusing TreeNode = BinaryTree::BinaryTreeNode<int>;\nusing BinaryTree::BuildBinaryTree;\nusing BinaryTree::showBinaryTree;\n\nclass Solution {\n public:\n int minDepth(TreeNode *root) {\n if (root == nullptr) return 0;\n if (root->left == nullptr && root->right == nullptr) return 1;\n\n queue<TreeNode *> q;\n q.push(root);\n TreeNode *cur = nullptr;\n\n int depth = 0;\n while (!q.empty()) {\n int sz = q.size();\n for (int ix = 0; ix < sz; ++ix) {\n cur = q.front();\n q.pop();\n if (cur->left == nullptr && cur->right == nullptr) {\n return depth + 1;\n }\n if (cur->left != nullptr) q.push(cur->left);\n if (cur->right != nullptr) q.push(cur->right);\n }\n depth++;\n }\n return depth;\n }\n};\n\nint main(int argc, char **argv) {\n const int null = BinaryTree::null<int>();\n auto tree = BuildBinaryTree<int>({2, null, 3, null, 4, null, 5, null, 6});\n // auto tree = BuildBinaryTree<int>({3,9,20,null,null,15,7});\n showBinaryTree<int>(tree);\n\n Solution solution;\n auto minDepth = solution.minDepth(tree);\n fmt::print(\"The min depth is: {}\\n\", minDepth);\n\n return 0;\n}\n" }, { "alpha_fraction": 0.5197505354881287, "alphanum_fraction": 0.5343035459518433, "avg_line_length": 20.909090042114258, "blob_id": 
"8d93f6dbd3f6884c3c9f1f5b7e0eb8830cb12757", "content_id": "e1871147c7738fb7d4921a88654d550a335d0b98", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 489, "license_type": "no_license", "max_line_length": 64, "num_lines": 22, "path": "/leetcode/python/1.两数之和.py", "repo_name": "Alwaysproblem/simplecode", "src_encoding": "UTF-8", "text": "#\n# @lc app=leetcode.cn id=1 lang=python3\n#\n# [1] 两数之和\n#\nfrom typing import List\n# @lc code=start\nclass Solution:\n def twoSum(self, nums: List[int], target: int) -> List[int]:\n\n ind_table = dict()\n\n for ind, v in enumerate(nums):\n if target - v in ind_table:\n return [ind_table[target - v], ind]\n ind_table[v] = ind\n return []\n# @lc code=end\n\nif __name__ == \"__main__\":\n s = Solution()\n print(s.twoSum([3,2,4], 6))" }, { "alpha_fraction": 0.5831533670425415, "alphanum_fraction": 0.591792643070221, "avg_line_length": 17.520000457763672, "blob_id": "e61ac68cacc80d9d183e8268360713233233ae18", "content_id": "b1cb9d6897e3b368ded608c22728461cdfd9728d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 463, "license_type": "no_license", "max_line_length": 50, "num_lines": 25, "path": "/interview/JumpOnClouds.py", "repo_name": "Alwaysproblem/simplecode", "src_encoding": "UTF-8", "text": "#!/bin/python3\n\nimport math\nimport os\nimport random\nimport re\nimport sys\n\n# Complete the jumpingOnClouds function below.\ndef jumpingOnClouds(c):\n s = \"\".join(map(str, c))\n S = s.split(\"1\")\n num = sum([len(i)//2 for i in S]) + len(S) - 1\n return num\n\nif __name__ == '__main__':\n # fptr = open(os.environ['OUTPUT_PATH'], 'w')\n\n n = int(input())\n\n c = list(map(int, input().rstrip().split()))\n\n result = jumpingOnClouds(c)\n\n print(result)\n" }, { "alpha_fraction": 0.3702479302883148, "alphanum_fraction": 0.4082644581794739, "avg_line_length": 16.285715103149414, "blob_id": 
"d3d494be89f2bb9b60714f64fa839b13672d80b0", "content_id": "1dfef601cea1741d48287ae169178984c8024b91", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 605, "license_type": "no_license", "max_line_length": 39, "num_lines": 35, "path": "/leetcode/python/findNumersfromdisorderarray.py", "repo_name": "Alwaysproblem/simplecode", "src_encoding": "UTF-8", "text": "def find(arr, target):\n if len(arr) == 0:\n return -1\n if len(arr) == 1:\n if arr[0] != target:\n return -1\n else:\n return 0\n\n l = 0\n r = len(arr) - 1\n mid = (l + r) // 2\n \n while l <= r:\n if arr[mid] == target:\n return mid\n \n if arr[l] <= arr[mid]:\n if arr[l] <= target < arr[mid]:\n \tr = mid\n else:\n \tl = mid + 1\n \n elif arr[r] > arr[mid]: \n if arr[mid] <= target <= arr[r]:\n l = mid + 1\n else:\n r = mid\n \n mid = (l + r) // 2\n \n return -1\n\nif __name__ == \"__main__\":\n print(find([5,6,7,8,1,2,3,4,6], 8))\n" }, { "alpha_fraction": 0.3969933092594147, "alphanum_fraction": 0.4526726007461548, "avg_line_length": 23.283782958984375, "blob_id": "02d25de49a16a963a9a6e743b523c22218f21eee", "content_id": "f722ed519950a287269934ee62fe7c627d20ff4c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1796, "license_type": "no_license", "max_line_length": 56, "num_lines": 74, "path": "/leetcode/python/NumberDiceRollsWithTargetSum.py", "repo_name": "Alwaysproblem/simplecode", "src_encoding": "UTF-8", "text": "import math\nimport os\nimport random\nimport re\nimport sys\n\ndef numRollsToTarget(d: int, f: int, target: int):\n '''\n\n >>> d = 1; f = 6;target = 3\n >>> numRollsToTarget(d, f, target)\n 1\n >>> d = 2; f = 6;target = 7\n >>> numRollsToTarget(d, f, target)\n 6\n >>> d = 2; f = 5;target = 10\n >>> numRollsToTarget(d, f, target)\n 1\n >>> d = 1; f = 2;target = 3\n >>> numRollsToTarget(d, f, target)\n 0\n >>> d = 30; f = 30;target = 500\n >>> numRollsToTarget(d, f, target)\n 
222616187\n '''\n # if d * f < target:\n # return 0\n # elif target <= 0:\n # return 0\n # elif d == 1 and f >= target:\n # return 1\n # else:\n # cnt = 0\n # # res = [[0] * (d + 1) for _ in range(f + 1)]\n # for k in range(1, f + 1):\n # a = numRollsToTarget(d - 1, f, target - k)\n # b = numRollsToTarget(d - 1, f, k)\n # cnt += a*b\n if d * f < target:\n return 0\n elif target <= 0:\n return 0\n elif d == 1 and f >= target:\n return 1\n else:\n res = [[0] * (target + 1) for i in range(d + 1)]\n\n dd = min(target, f)\n for face in range(1, dd+1):\n res[1][face] = 1\n \n for D in range(2, d + 1):\n for t in range(1, target + 1):\n for k in range(1, min(f+1, t)):\n res[D][t] += res[D - 1][t - k]\n\n cnt = res[D][target]\n\n return cnt % (10**9 + 7)\n\n\nif __name__ == \"__main__\":\n import doctest\n doctest.testmod()\n # d = 30\n # f = 30\n # target = 500\n # print(numRollsToTarget(d, f, target))\n # a1 = list(map(int, input().split()))\n # a2 = list(map(int, input().split()))\n # a1 = [2,3,1,3,2,4,6,7,9,2,19]\n # a2 = [2,1,4,3,9,6]\n\n # print(numRollsToTarget(a1, a2))" }, { "alpha_fraction": 0.5433526039123535, "alphanum_fraction": 0.5684008002281189, "avg_line_length": 14.264705657958984, "blob_id": "9fe47ca2701c50c568a81c822b39950ad2785108", "content_id": "2a358ae01bd327b0818389f2299fb421e16a21d1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 539, "license_type": "no_license", "max_line_length": 43, "num_lines": 34, "path": "/leetcode/cpp/trailingZeroes/main.cpp", "repo_name": "Alwaysproblem/simplecode", "src_encoding": "UTF-8", "text": "/*\n * @lc app=leetcode.cn id=172 lang=cpp\n *\n * [172] 完全二叉树的节点个数\n */\n#include <fmt/format.h>\n#include <fmt/ranges.h>\n\n#include <iostream>\n#include <unordered_map>\n#include <vector>\n\nusing namespace std;\n\n// @lc code=start\nclass Solution {\n public:\n int trailingZeroes(int n) {\n int res = 0;\n for (int d = n; d / 5 > 0; d = d / 5) {\n res += d / 5;\n }\n 
return res;\n }\n};\n// @lc code=end\n\nint main() {\n int n = 5;\n Solution sol;\n int v = sol.trailingZeroes(n);\n fmt::print(\"{}\\n\", v);\n return 0;\n}\n" }, { "alpha_fraction": 0.5314009785652161, "alphanum_fraction": 0.554347813129425, "avg_line_length": 20.230770111083984, "blob_id": "bede393af0b7ca91522f0a0752a0bf940e9ef3d1", "content_id": "205871620f23723750aa60ba5aef3431dd4844b6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 828, "license_type": "no_license", "max_line_length": 61, "num_lines": 39, "path": "/labuladong/cpp/carPooling/main.cpp", "repo_name": "Alwaysproblem/simplecode", "src_encoding": "UTF-8", "text": "#include <fmt/format.h>\n#include <fmt/ranges.h>\n\n#include <iostream>\n#include <unordered_map>\n#include <vector>\n\nusing namespace std;\n\nclass Solution {\n public:\n bool carPooling(vector<vector<int>>& trips, int capacity) {\n int nums_pass = 0, from_ = 0, to_ = 0;\n vector<int> diff(1001);\n for (vector<int>& tr : trips) {\n nums_pass = tr[0];\n from_ = tr[1];\n to_ = tr[2];\n diff[from_] += nums_pass;\n diff[to_] += -nums_pass;\n }\n fmt::print(\"{}\\n\", diff);\n int total_sum = 0;\n for (int& x : diff) {\n total_sum += x;\n if (total_sum > capacity) return false;\n }\n return true;\n }\n};\n\nint main() {\n vector<vector<int>> trips = {{2, 1, 5}, {3, 5, 7}};\n int capacity = 3;\n Solution sol;\n bool v = sol.carPooling(trips, capacity);\n fmt::print(\"{}\\n\", v);\n return 0;\n}\n" }, { "alpha_fraction": 0.523432731628418, "alphanum_fraction": 0.5642117857933044, "avg_line_length": 31.235294342041016, "blob_id": "3cc87dd1a061b4b7a69250401d6eb7e9f19df8d2", "content_id": "357ed7302b60e401505909242081b831d26a562e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1643, "license_type": "no_license", "max_line_length": 99, "num_lines": 51, "path": "/Graphcore/getMroup-subarray-2.py", "repo_name": 
"Alwaysproblem/simplecode", "src_encoding": "UTF-8", "text": "from statistics import mean, stdev\nfrom math import fabs\nfrom typing import List\nfrom copy import deepcopy\n\ndef variances(former: List[int], latter: List[int], num_group: int):\n former_mean = (sum(former) + sum(latter)) / num_group\n latter_mean = (1 - 1 / num_group) * (sum(former) + sum(latter))\n return fabs(sum(former) - former_mean) + fabs(sum(latter) - latter_mean) - len(former)\n\ndef citation(*group):\n sum_over_group = []\n for g in group:\n sum_over_group.append(sum(g))\n return stdev(sum_over_group) if len(sum_over_group) > 1 else 0\n\n\ndef find_opt(arr, num_group):\n\n res_set = []\n\n def divide_and_conquer_helper(arr, num_group, res):\n if not arr or not num_group:\n return\n\n if num_group == 1:\n res.append(arr.copy())\n res_set.append(deepcopy(res))\n res.pop()\n return\n\n scores_for_all = [ (i, variances(arr[:i], arr[i:], num_group)) for i in range(1, len(arr))]\n miniumn_score = min([ score for _, score in scores_for_all ])\n scores_for_min = [ (sep, score) for sep, score in scores_for_all if score == miniumn_score]\n\n for sep, _ in scores_for_min:\n res.append(arr[:sep])\n divide_and_conquer_helper(arr[sep:], num_group - 1, res)\n res.pop()\n\n divide_and_conquer_helper(arr, num_group, [])\n return res_set\n\n\nif __name__ == '__main__':\n arr = [1000, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1200, 0, 0, 0, 0, 0, 0, 34, 56, 74, 39, 26, 49]\n # arr = [10, 5, 5, 40, 20, 10, 10]\n # arr = [1, 2, 3, 4, 5, 6, 5, 4, 3, 2, 1]\n\n res = find_opt(arr, 4)\n print(*res, sep='\\n')" }, { "alpha_fraction": 0.4212893545627594, "alphanum_fraction": 0.4527736008167267, "avg_line_length": 23.740739822387695, "blob_id": "729f7c51b70318a3ea7ff5173be04ca128a3cd2b", "content_id": "db450bb63c53dfacc17bbbf74a56042692fa7c04", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 681, "license_type": "no_license", "max_line_length": 68, "num_lines": 27, "path": 
"/leetcode/python/11.盛最多水的容器.py", "repo_name": "Alwaysproblem/simplecode", "src_encoding": "UTF-8", "text": "#\n# @lc app=leetcode.cn id=11 lang=python3\n#\n# [11] 盛最多水的容器\n#\nfrom typing import * \n# @lc code=start\nclass Solution:\n def maxArea(self, height: List[int]) -> int:\n left = 0\n right = len(height) - 1\n\n area_max = 0\n\n while left <= right:\n area_max = max(area_max, \n min(height[left], \n height[right]) * (right - left))\n if height[left] > height[right]:\n right -= 1\n else:\n left += 1\n\n return area_max\n# @lc code=end\nif __name__ == \"__main__\":\n print(Solution().maxArea([10, 9, 8, 7, 6, 5, 4, 3, 2, 1]))" }, { "alpha_fraction": 0.4936548173427582, "alphanum_fraction": 0.5164974331855774, "avg_line_length": 21.855072021484375, "blob_id": "809ac99d000ea41c081759e82c7bc78d1caad427", "content_id": "424b46241b8fe24b446b3a812314c6e5eb7d3557", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1576, "license_type": "no_license", "max_line_length": 99, "num_lines": 69, "path": "/interview/substringcount.py", "repo_name": "Alwaysproblem/simplecode", "src_encoding": "UTF-8", "text": "#!/bin/python3\n#%%\nimport math\nimport os\nimport random\nimport re\nimport sys\n\n#O(n^3)\n# def checksame(s):\n# return 1 if len(set(s)) == 1 else 0\n\n# #%%\n# def findallsubstring(s):\n# sub = []\n# for ind in range(len(s)):\n# for indc in range(ind, len(s)):\n# slice_str = s[ind: indc + 1]\n# sub.append(slice_str)\n# return sub\n\n# #%%\n# # Complete the substrCount function below.\n# def substrCount(n, s):\n# sub = findallsubstring(s)\n# count = 0\n# for i in sub:\n# if len(i) == 1:\n# count += 1\n# elif len(i) % 2 == 0:\n# count += checksame(i)\n# elif len(i) % 2 != 0:\n# ss = i[:len(i)//2] + i[len(i)//2+1:]\n# count += checksame(ss)\n# return count\n\ndef triangular_number(n):\n return (n ** 2 + n - 2 * n)//2\n\n# Complete the substrCount function below.\ndef substrCount(n, s):\n # every single letter 
should be counted\n count = len(s)\n\n #find the \"xx.xx\" \n exp1 = r'(([a-z])\\2*)(?!\\1)(?=[a-z]\\1)'\n m = re.finditer(exp1,s)\n count += sum([len(x.group(0)) for x in m])\n\n # find the \"xxxx\" and if \"aaaa\" then \"a\":4 \"aa\":3 \"aaa\":2 \"aaaa\":1. and \"a\" is already counted.\n # so we need to minus 4. the formular is n(n + 1) / 2 - n\n exp2 = r'([a-z])\\1+'\n m = re.finditer(exp2,s)\n count += sum([triangular_number(len(x.group(0))) for x in m])\n\n return count\n\n\n#%%\nif __name__ == '__main__':\n # fptr = open(os.environ['OUTPUT_PATH'], 'w')\n\n n = int(input())\n\n s = input()\n\n result = substrCount(n, s)\n\n print(result)" }, { "alpha_fraction": 0.671480119228363, "alphanum_fraction": 0.6967508792877197, "avg_line_length": 18.785715103149414, "blob_id": "435e414459f46572fefd430f868c2ab29b52290e", "content_id": "1ffa93e98e98cccc0537d592d07b7fa3471c00f6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 293, "license_type": "no_license", "max_line_length": 51, "num_lines": 14, "path": "/leetcode/python/136.只出现一次的数字.py", "repo_name": "Alwaysproblem/simplecode", "src_encoding": "UTF-8", "text": "#\n# @lc app=leetcode.cn id=136 lang=python3\n#\n# [136] 只出现一次的数字\n#\nfrom typing import List\n# @lc code=start\nfrom functools import reduce\nfrom operator import xor\n\nclass Solution:\n def singleNumber(self, nums: List[int]) -> int:\n return reduce(xor, nums)\n# @lc code=end\n" }, { "alpha_fraction": 0.620413601398468, "alphanum_fraction": 0.6310873627662659, "avg_line_length": 22.421875, "blob_id": "78b4c80d7cb43c291ae9b684016be24bbc8fde96", "content_id": "732bb24bac9fda96da5d644bcce9effd8fedda0c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1507, "license_type": "no_license", "max_line_length": 75, "num_lines": 64, "path": "/leetcode/cpp/robbt/main.cpp", "repo_name": "Alwaysproblem/simplecode", "src_encoding": "UTF-8", "text": "/*\n * 
@lc app=leetcode.cn id=337 lang=cpp\n *\n * [337] 打家劫舍 III\n */\n#include <fmt/format.h>\n#include <fmt/ranges.h>\n\n#include <iostream>\n#include <queue>\n#include <string>\n#include <unordered_map>\n#include <unordered_set>\n#include <vector>\n\n#include \"common_types/TreeNode/BinaryTreeNode.h\"\n\nusing namespace std;\nusing TreeNode = BinaryTree::BinaryTreeNode<int>;\nusing BinaryTree::BuildBinaryTree;\nusing BinaryTree::showBinaryTree;\n\n// @lc code=start\nclass Solution {\n public:\n unordered_map<TreeNode*, int> mem{};\n int backtrack(TreeNode* root) {\n if (!root) return 0;\n if (!root->left && !root->right) return root->val;\n if (mem.count(root)) return mem[root];\n\n // if not choose root\n int max_val = 0;\n max_val = max(max_val, backtrack(root->left) + backtrack(root->right));\n\n // if choose root;\n int sums = 0;\n if (root->left) {\n sums += backtrack(root->left->left) + backtrack(root->left->right);\n }\n if (root->right) {\n sums += backtrack(root->right->left) + backtrack(root->right->right);\n }\n sums += root->val;\n\n // find max value\n max_val = max(max_val, sums);\n mem[root] = max_val;\n return max_val;\n }\n int rob(TreeNode* root) { return backtrack(root); }\n};\n// @lc code=end\n\nint main() {\n const int null = BinaryTree::null<int>();\n vector<int> a{3, 4, 5, 1, 3, null, 1};\n TreeNode* root = BuildBinaryTree<int>(a);\n showBinaryTree<int>(root);\n Solution sol;\n int v = sol.rob(root);\n fmt::print(\"{}\\n\", v);\n return 0;\n}\n" }, { "alpha_fraction": 0.42954933643341064, "alphanum_fraction": 0.45179691910743713, "avg_line_length": 24.02857208251953, "blob_id": "2c1d3cf3866081d8f183c2d6d9185d15581de8e9", "content_id": "fcf5450c2a891d020e8408377131cfa298fff517", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1753, "license_type": "no_license", "max_line_length": 75, "num_lines": 70, "path": "/tmp/tmp.py", "repo_name": "Alwaysproblem/simplecode", "src_encoding": "UTF-8", 
"text": "# import math\n# from typing import List\n\n# def isPrime(n): \n# if n <= 1: \n# return False\n# for i in range(2, int(math.sqrt(n)) + 1): \n# if n % i == 0: \n# return False\n# return True\n\n# def findColor(arr: List[int], K: int):\n# if 1 in arr: return 1\n# table = { i: 0 for i in range(100) if isPrime(i) }\n\n# cunt = 0\n# for a in arr:\n# for t in table:\n# if a % t == 0:\n# if table[t] == 0:\n# cunt += 1\n# table[t] += 1\n# return cunt\n\n# if __name__ == '__main__':\n# print(findColor([4, 8, 12], 4))\n\ndef removeKDigits(num, k):\n n = len(num)\n if n <= k: return num\n stack = []\n for i in num:\n if not stack:\n stack.append(i)\n continue\n if k>0:\n while k>0 and stack:\n if stack[-1]>i:\n stack.pop()\n k-=1\n else:\n break\n stack.append(i)\n if k!=0:\n return ''.join(stack[:-k])\n return ''.join(stack)\n\nif __name__ == '__main__':\n minNum = removeKDigits('2615371',4)\n print(minNum)\n\n# def removeKMinNumber(num, k):\n# if k == 0:\n# return num\n# if k == len(num):\n# return 0\n# StringBuffer nums = new StringBuffer().append(num);\n# Stack<Character> stack = new Stack<>();\n\n# for (int j = 0; j < nums.length(); j++) {\n# while (!stack.isEmpty() && stack.peek() > nums.charAt(j) && k>0){\n# stack.pop();\n# k--;\n# }\n# stack.push(nums.charAt(j));\n# }\n# nums.delete(0,nums.length());\n# while (!stack.isEmpty())\n# nums.insert(0,stack.pop());\n# return Integer.parseInt(nums.toString());\n\n" }, { "alpha_fraction": 0.4910179674625397, "alphanum_fraction": 0.4910179674625397, "avg_line_length": 15.800000190734863, "blob_id": "ef8f39310628751cb492e182b919a4600357dc7f", "content_id": "c42d5f135fc2db0513eff9dadcc4d0b94fbc0baa", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 233, "license_type": "no_license", "max_line_length": 27, "num_lines": 10, "path": "/leetcode/cpp/note_template/backTrack.py", "repo_name": "Alwaysproblem/simplecode", "src_encoding": "UTF-8", "text": "result = []\ndef 
backtrack(路径, 选择列表):\n if 满足结束条件:\n result.add(路径)\n return\n \n for 选择 in 选择列表:\n 做选择\n backtrack(路径, 选择列表)\n 撤销选择" }, { "alpha_fraction": 0.46112117171287537, "alphanum_fraction": 0.5587703585624695, "avg_line_length": 31.52941131591797, "blob_id": "27a91000e1abc4a9884952a1e6236894a63ca3ea", "content_id": "373651c8f07f9a30d9a5db22951e67a1cd7486e5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 553, "license_type": "no_license", "max_line_length": 73, "num_lines": 17, "path": "/interview/ipv4Adress.py", "repo_name": "Alwaysproblem/simplecode", "src_encoding": "UTF-8", "text": "\n#%%\ndef validateAddress(string):\n import re\n from collections import Counter\n if Counter(string)['.'] > 3:\n return False\n else:\n l = re.findall(r\"([0-9]+)\\.([0-9]+)\\.([0-9]+)\\.([0-9]+)\", string)\n tmp = [i for i in list(map(int, l[0])) if 0<=i<256]\n return len(tmp) == 4\n\n\nif __name__ == \"__main__\":\n assert(validateAddress(\"127.0.0.1\") == True)\n assert(validateAddress(\"123.24.34.53.2\") == False)\n assert(validateAddress(\"23.34.5.2\") == True)\n assert(validateAddress(\"1234.23.521.2234\") == False)" }, { "alpha_fraction": 0.40981340408325195, "alphanum_fraction": 0.4174153506755829, "avg_line_length": 22.338708877563477, "blob_id": "7eb56c115afc4594e432401a06631a6cadfeef5f", "content_id": "f3a9c6a60b8351e315a034e6d53e9760a6c6e7a4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 1447, "license_type": "no_license", "max_line_length": 66, "num_lines": 62, "path": "/leetcode/cpp/build_and_run.sh", "repo_name": "Alwaysproblem/simplecode", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\n_target=\"all\"\n_build_dir=\"manual_build\"\n_debug=\"\"\n\nfunction print_popconda_helper_msg(){\ncat <<\"EOF\"\nusage:\n for example:\n $ build_and_run -t 2_sum -- 3 4\n\n -h, --help: print helpful message\n\n -t, --target: choose build target and run target\n\n -g, 
--debug: add debug info to programme.\n\nEOF\n}\n\n\nfunction parse_args_from_console() {\n local prompt_str=$1\n\n while [ \"$1\" != \"--\" ] && [[ $# -gt 0 ]]; do\n case $1 in\n -t | --target) shift\n _target=$1\n ;;\n -g | --debug) shift\n _debug=\"True\"\n ;;\n -h | --help) print_popconda_helper_msg\n exit 0\n ;;\n *) print_popconda_helper_msg\n exit 0\n ;;\n esac\n shift\n done\n shift\n _ARGUMENT=$@\n}\n\nparse_args_from_console $@\n\nrm -rf ${_build_dir}\nmkdir -p ${_build_dir}\n\nif [[ ${_debug} == \"True\" ]];then\n cmake -GNinja -S `pwd` -B ${_build_dir} -DCMAKE_BUILD_TYPE=Debug\nelse\n cmake -GNinja -S `pwd` -B ${_build_dir}\nfi\n\ncmake --build ${_build_dir} --target ${_target} || exit 1\n\nif [[ $_target != \"all\" ]]; then\n `pwd`/${_build_dir}/${_target}/${_target} ${_ARGUMENT}\nfi\n" }, { "alpha_fraction": 0.52925705909729, "alphanum_fraction": 0.5378040671348572, "avg_line_length": 21.04347801208496, "blob_id": "32aa9b33da720d43561deb127a41f8bdcde9a5e0", "content_id": "189ca9168bb8c15f3b12a9ca31092377c600433b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1521, "license_type": "no_license", "max_line_length": 65, "num_lines": 69, "path": "/labuladong/cpp/getRandom_LL/main.cpp", "repo_name": "Alwaysproblem/simplecode", "src_encoding": "UTF-8", "text": "#include <fmt/format.h>\n#include <fmt/ranges.h>\n\n#include <iostream>\n#include <unordered_map>\n#include <vector>\n\n#include \"common_types/LinkedList/LinkedList.h\"\n\nusing namespace std;\nusing ListNode = LinkedListNode<int>;\n\nclass Solution {\n public:\n int getRandom(ListNode *head) {\n int i = 0, res = 0;\n for (ListNode *cur = head; cur != nullptr; cur = cur->next) {\n i ++;\n if (0 == (rand() % i)) {\n res = cur->val;\n }\n }\n return res;\n }\n vector<int> getKRandom(ListNode *head, int k) {\n vector<int> res(k);\n ListNode *p = head;\n for (int i = 0; i < k; i++){\n res[i] = p->val;\n p = p->next;\n }\n\n int i = 0, j = 0;\n for 
(ListNode *cur = head; cur != nullptr; cur = cur->next) {\n i ++;\n j = rand() % i;\n if (k > j) {\n res[j++] = cur->val;\n }\n }\n return res;\n }\n};\n\nint main() {\n vector<int> v = {1, 2, 3, 4, 5};\n ListNode *head = BuildLinkedlist<int>(v);\n showLinkedList<int>(head);\n Solution sol;\n int r = sol.getRandom(head);\n fmt::print(\"{}\\n\", r);\n r = sol.getRandom(head);\n fmt::print(\"{}\\n\", r);\n r = sol.getRandom(head);\n fmt::print(\"{}\\n\", r);\n r = sol.getRandom(head);\n fmt::print(\"{}\\n\", r);\n int k = 3;\n vector<int> res = sol.getKRandom(head, k);\n fmt::print(\"{}\\n\", res);\n res = sol.getKRandom(head, k);\n fmt::print(\"{}\\n\", res);\n res = sol.getKRandom(head, k);\n fmt::print(\"{}\\n\", res);\n res = sol.getKRandom(head, k);\n fmt::print(\"{}\\n\", res);\n DestroyLinkedlist<int>(head);\n return 0;\n}\n" }, { "alpha_fraction": 0.437426894903183, "alphanum_fraction": 0.476023405790329, "avg_line_length": 27.5, "blob_id": "00d2abb4ea1900d5efd5f2f213ac9ba51f5ad39a", "content_id": "5252924545b3631bb6d8f83b2c5043d97044a386", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 883, "license_type": "no_license", "max_line_length": 71, "num_lines": 30, "path": "/leetcode/python/KthMagicNumber.py", "repo_name": "Alwaysproblem/simplecode", "src_encoding": "UTF-8", "text": "\ndef getKthMagicNumber(k: int) -> int:\n p3 = p5 = p7 = 0\n state = [1] + [0] * (k - 1)\n\n for i in range(1, k):\n state[i] = min(state[p3] * 3, state[p5] * 5, state[p7] * 7)\n if 3 * state[p3] == state[i]: p3 += 1\n if 5 * state[p5] == state[i]: p5 += 1\n if 7 * state[p7] == state[i]: p7 += 1\n return state[-1]\n\nfrom heapq import heappop, heappush\ndef getKthMagicNumber_heap(self, k: int) -> int:\n heap = [1]\n numbers = set()\n # 每次从小顶堆取一个, 取 k 次即可\n cur = 0\n while k:\n cur = heappop(heap)\n if cur not in numbers:\n k -= 1\n heappush(heap, cur * 3)\n heappush(heap, cur * 5)\n heappush(heap, cur * 7)\n numbers.add(cur)\n 
return cur\n\nif __name__ == '__main__':\n k = int(input())\n print(getKthMagicNumber(k))" }, { "alpha_fraction": 0.4898785352706909, "alphanum_fraction": 0.5060728788375854, "avg_line_length": 16.23255729675293, "blob_id": "678985d6bf46075363c624694232033f4fca4f2e", "content_id": "9feade3682e0bc7df307563124bec258ca5fa278", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 749, "license_type": "no_license", "max_line_length": 44, "num_lines": 43, "path": "/leetcode/cpp/countPrimes/main.cpp", "repo_name": "Alwaysproblem/simplecode", "src_encoding": "UTF-8", "text": "/*\n * @lc app=leetcode.cn id=204 lang=cpp\n *\n * [204] 计数质数\n */\n#include <fmt/format.h>\n#include <fmt/ranges.h>\n\n#include <iostream>\n#include <unordered_map>\n#include <vector>\n\nusing namespace std;\n\n// @lc code=start\nclass Solution {\n public:\n int countPrimes(int n) {\n vector<bool> isPrime(n, true);\n int res = 0;\n for (int i = 2; i * i < n; i++) {\n if (isPrime[i]) {\n for (int j = i * i; j < n; j += i) {\n isPrime[j] = false;\n }\n }\n }\n // fmt::print(\"{}\\n\", isPrime);\n for (int i = 2; i < n; i++) {\n if (isPrime[i]) res += 1;\n }\n return res;\n }\n};\n// @lc code=end\n\nint main() {\n int n = 2;\n Solution sol;\n int v = sol.countPrimes(n);\n fmt::print(\"{}\\n\", v);\n return 0;\n}\n" }, { "alpha_fraction": 0.40830451250076294, "alphanum_fraction": 0.439446359872818, "avg_line_length": 25.363636016845703, "blob_id": "42ee419197c5df25105009d57cadebb65c18030b", "content_id": "db851c46b13f413a39f76b841900787254795683", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 289, "license_type": "no_license", "max_line_length": 50, "num_lines": 11, "path": "/leetcode/python/uniquePaths.py", "repo_name": "Alwaysproblem/simplecode", "src_encoding": "UTF-8", "text": "def uniquePaths(m: int, n: int) -> int:\n uP = [[1 for j in range(n)] for i in range(m)]\n\n for r in range(1, m):\n 
for c in range(1, n): \n uP[r][c] = uP[r - 1][c] + uP[r][c - 1]\n\n return uP[m-1][n-1]\n\nif __name__ == \"__main__\":\n uniquePaths(3, 2)" }, { "alpha_fraction": 0.4944670796394348, "alphanum_fraction": 0.503203272819519, "avg_line_length": 20.462499618530273, "blob_id": "983958cb3feb44e0746ad7ad424a497f816a5406", "content_id": "6d9b6d4f0c9e555fee00a373347667711dbddeb4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1717, "license_type": "no_license", "max_line_length": 66, "num_lines": 80, "path": "/interview/posionousPlants.py", "repo_name": "Alwaysproblem/simplecode", "src_encoding": "UTF-8", "text": "#!/bin/python3\n\nimport math\nimport os\nimport random\nimport re\nimport sys\n\n# https://www.hackerrank.com/challenges/poisonous-plants/problem\n\n#%%\n\"\"\"this is the answer.\"\"\"\nfrom IPython.display import Image, display\n\nfor img in [\"img/posion_plants_1.png\", \"img/posion_plants_2.png\"]:\n display(Image(filename=img))\n\n#%%\nclass Stack(object):\n def __init__(self, data = None):\n if data == None:\n self.data = []\n else:\n self.data = data\n \n def push(self, data):\n self.data.append(data)\n\n def pop(self):\n return self.data.pop()\n\n @property\n def top(self):\n return self.data[-1]\n\n def isEmpty(self):\n return len(self.data) == 0\n\n def __repr__(self):\n return \"Stack(\" + ','.join(map(str, self.data)) + \")\"\n\n# Complete the poisonousPlants function below.\ndef poisonousPlants(plants):\n s = Stack()\n mDay = 0\n\n for p in plants:\n if s.isEmpty():\n s.push((p, 0))\n elif s.top[0] < p:\n s.push((p, 1))\n mDay = max(mDay, 1)\n else:\n cur_day = s.top[1]\n while not s.isEmpty() and s.top[0] >= p:\n _, n_day = s.pop()\n cur_day = max(cur_day, n_day)\n else:\n if s.isEmpty():\n s.push((p, 0))\n else:\n s.push((p, cur_day + 1))\n mDay = max(mDay, cur_day + 1)\n\n return mDay\n\nif __name__ == '__main__':\n # fptr = open(os.environ['OUTPUT_PATH'], 'w')\n\n n = int(input())\n\n p 
= list(map(int, input().rstrip().split()))\n\n result = poisonousPlants(p)\n\n print(result)\n\n # fptr.write(str(result) + '\\n')\n\n # fptr.close()\n" }, { "alpha_fraction": 0.4668094217777252, "alphanum_fraction": 0.4903640151023865, "avg_line_length": 17.719999313354492, "blob_id": "7378dbb34caf072c9e48e1f6fce2af16f24f173c", "content_id": "e2f104da09aabc07d1258429ad7d99c454855f9e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 489, "license_type": "no_license", "max_line_length": 55, "num_lines": 25, "path": "/leetcode/python/80.删除排序数组中的重复项-ii.py", "repo_name": "Alwaysproblem/simplecode", "src_encoding": "UTF-8", "text": "#\n# @lc app=leetcode.cn id=80 lang=python3\n#\n# [80] 删除排序数组中的重复项 II\n#\nfrom typing import List\n# @lc code=start\nclass Solution:\n def removeDuplicates(self, nums: List[int]) -> int:\n i = 0\n K = 2\n\n for n in nums:\n if i < K or n != nums[i-K]:\n nums[i] = n\n i += 1\n return i\n\n# @lc code=end\n\nif __name__ == \"__main__\":\n s= Solution()\n a = [1,1, 1,]\n print(s.removeDuplicates(a))\n print(a)" }, { "alpha_fraction": 0.5084566473960876, "alphanum_fraction": 0.5153276920318604, "avg_line_length": 24.741497039794922, "blob_id": "002ca378172c65f4ab0fd57074bcce54b5ce5c57", "content_id": "64a7de86250840c78d05d2e5ab51b6bc693f35c9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3784, "license_type": "no_license", "max_line_length": 72, "num_lines": 147, "path": "/interview/largestRectangle.py", "repo_name": "Alwaysproblem/simplecode", "src_encoding": "UTF-8", "text": "#!/bin/python3\n\nimport math\nimport os\nimport random\nimport re\nimport sys\n\n# https://www.hackerrank.com/challenges/largest-rectangle/problem\nclass Stack(object):\n def __init__(self, data = None):\n if data == None:\n self.data = []\n else:\n self.data = data\n \n def push(self, data):\n self.data.append(data)\n\n def pop(self):\n return 
self.data.pop()\n\n @property\n def top(self):\n return self.data[-1]\n\n def isEmpty(self):\n return len(self.data) == 0\n\n def __repr__(self):\n return \"Stack(\" + ','.join(map(str, self.data)) + \")\"\n\n\n# Complete the largestRectangle function below.\ndef largestRectangle(h):\n s = Stack()\n max_area = 0\n ind = 0\n\n while ind < len(h):\n if s.isEmpty() or h[s.top] <= h[ind]:\n s.push(ind)\n ind += 1\n else:\n top = s.pop()\n left = 0 if s.isEmpty() else s.top + 1\n max_area = max(max_area, (ind - left) * h[top])\n\n while not s.isEmpty():\n top = s.pop()\n left = 0 if s.isEmpty() else s.top + 1\n max_area = max(max_area, (ind - left) * h[top])\n\n return max_area\n\n\nif __name__ == '__main__':\n # fptr = open(os.environ['OUTPUT_PATH'], 'w')\n\n n = int(input())\n\n h = list(map(int, input().rstrip().split()))\n\n result = largestRectangle(h)\n\n print(result)\n\n# Python3 program to find maximum \n# rectangular area in linear time \n \n# def max_area_histogram(histogram): \n \n# # This function calulates maximum \n# # rectangular area under given \n# # histogram with n bars \n \n# # Create an empty stack. The stack \n# # holds indexes of histogram[] list. \n# # The bars stored in the stack are \n# # always in increasing order of \n# # their heights. 
\n# stack = list() \n \n# max_area = 0 # Initalize max area \n \n# # Run through all bars of \n# # given histogram \n# index = 0\n# while index < len(histogram): \n \n# # If this bar is higher \n# # than the bar on top \n# # stack, push it to stack \n \n# if (not stack) or (histogram[stack[-1]] <= histogram[index]): \n# stack.append(index) \n# index += 1\n \n# # If this bar is lower than top of stack, \n# # then calculate area of rectangle with \n# # stack top as the smallest (or minimum \n# # height) bar.'i' is 'right index' for \n# # the top and element before top in stack \n# # is 'left index' \n# else: \n# # pop the top \n# top_of_stack = stack.pop() \n \n# # Calculate the area with \n# # histogram[top_of_stack] stack \n# # as smallest bar \n# area = (histogram[top_of_stack] * \n# ((index - stack[-1] - 1) \n# if stack else index)) \n \n# # update max area, if needed \n# max_area = max(max_area, area) \n \n# # Now pop the remaining bars from \n# # stack and calculate area with \n# # every popped bar as the smallest bar \n# while stack: \n \n# # pop the top \n# top_of_stack = stack.pop() \n \n# # Calculate the area with \n# # histogram[top_of_stack] \n# # stack as smallest bar \n# area = (histogram[top_of_stack] * \n# ((index - stack[-1] - 1) \n# if stack else index)) \n \n# # update max area, if needed \n# max_area = max(max_area, area) \n \n# # Return maximum area under \n# # the given histogram \n# return max_area \n \n# Driver Code \n# hist = [6, 2, 5, 4, 5, 1, 6] \n# print(\"Maximum area is\", \n# max_area_histogram(hist)) \n \n# This code is contributed \n# by Jinay Shah " }, { "alpha_fraction": 0.6790123581886292, "alphanum_fraction": 0.6878306865692139, "avg_line_length": 18.55172348022461, "blob_id": "9497369c68109b3f22e2c5bae3034608eb6c4f41", "content_id": "d8f65c6ddd2dd9cd6fda3b6a61136ba3345cf9a1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 567, "license_type": "no_license", 
"max_line_length": 160, "num_lines": 29, "path": "/interview/Alternating Characters.py", "repo_name": "Alwaysproblem/simplecode", "src_encoding": "UTF-8", "text": "#!/bin/python3\n\nimport math\nimport os\nimport random\nimport re\nimport sys\n\n# https://www.hackerrank.com/challenges/alternating-characters/problem?h_l=interview&playlist_slugs%5B%5D=interview-preparation-kit&playlist_slugs%5B%5D=strings\n\nfrom itertools import groupby\n# Complete the alternatingCharacters function below.\ndef alternatingCharacters(s):\n return len(s) - len([*groupby(s)])\n\n\nif __name__ == '__main__':\n q = int(input())\n\n for q_itr in range(q):\n s = input()\n\n result = alternatingCharacters(s)\n\n print(str(result))\n\n#%%\n\n#%%\n" }, { "alpha_fraction": 0.492000013589859, "alphanum_fraction": 0.5099999904632568, "avg_line_length": 22.809524536132812, "blob_id": "6ac8a3821e2e05c04f0fe00b77660de0e5377e39", "content_id": "5a61a1aefd52045691c4612d932b2d0ce0d2d0dd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 500, "license_type": "no_license", "max_line_length": 59, "num_lines": 21, "path": "/leetcode/python/getKthPermutation.py", "repo_name": "Alwaysproblem/simplecode", "src_encoding": "UTF-8", "text": "from math import factorial\nfrom math import ceil\n\ndef getPermutation(n: int, k: int):\n cans = [ str(i) for i in range(1, n + 1)]\n\n def backtrace(cans, n, k, com ):\n if len(cans) == 0:\n return \"\"\n if len(cans) == 1:\n return cans[0]\n div = com // n\n i = ceil(k / div) - 1 \n s = cans.pop(i)\n return s + backtrace(cans, n - 1, k - div * i, div)\n \n res = backtrace(cans, n, k, factorial(n))\n\n return res\n\ngetPermutation(4, 9)\n" }, { "alpha_fraction": 0.46092185378074646, "alphanum_fraction": 0.5020040273666382, "avg_line_length": 22.20930290222168, "blob_id": "3ffdd52c58b8df20cc5309359e512507c886c5dd", "content_id": "aaea0030ab785d31491d8170af33e23825439818", "detected_licenses": [], "is_generated": 
false, "is_vendor": false, "language": "C++", "length_bytes": 998, "license_type": "no_license", "max_line_length": 56, "num_lines": 43, "path": "/labuladong/cpp/rob213/main.cpp", "repo_name": "Alwaysproblem/simplecode", "src_encoding": "UTF-8", "text": "#include <fmt/format.h>\n#include <fmt/ranges.h>\n\n#include <iostream>\n#include <unordered_map>\n#include <vector>\n\nusing namespace std;\n\nclass Solution {\n public:\n int robRange(vector<int>& nums, int start, int end) {\n vector<int> dp(nums.size() + 1);\n dp[0] = 0;\n for (int i = start + 1; i <= end; i++) {\n if (i - 2 > 0)\n dp[i] = max(dp[i - 1], dp[i - 2] + nums[i - 1]);\n else\n dp[i] = max(dp[i - 1], dp[0] + nums[i - 1]);\n }\n fmt::print(\"{}\\n\", dp);\n return dp[end];\n }\n\n int rob(vector<int>& nums) {\n int n = nums.size();\n if (n == 1) return nums[0];\n int s1 = robRange(nums, 1, n - 1);\n int s2 = robRange(nums, 1, n - 2) + nums[n - 1];\n int s3 = robRange(nums, 2, n - 1) + nums[0];\n fmt::print(\"{}, {}, {}\\n\", s1, s2, s3);\n return max(max(s1, s2), s3);\n }\n};\n\nint main() {\n vector<int> nums = {1, 2, 3, 1};\n // vector<int> nums = {2, 7, 9, 3, 1};\n Solution sol;\n int v = sol.rob(nums);\n fmt::print(\"{}\\n\", v);\n return 0;\n}\n" }, { "alpha_fraction": 0.46823838353157043, "alphanum_fraction": 0.4878847301006317, "avg_line_length": 18.576923370361328, "blob_id": "509a4384ae2cfda399cafa1732af0a9e805b7383", "content_id": "68045b897417d1aa3a07902066bd6a61b21a09ee", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1527, "license_type": "no_license", "max_line_length": 62, "num_lines": 78, "path": "/interview/MinMaxRiddle.py", "repo_name": "Alwaysproblem/simplecode", "src_encoding": "UTF-8", "text": "#!/bin/python3\n\nimport math\nimport os\nimport random\nimport re\nimport sys\n\n# https://www.hackerrank.com/challenges/min-max-riddle/problem\n\n# O(n)\n# def WindowSlider(seq, n=2):\n# it = iter(seq)\n# win = list((next(it, None) 
for _ in range(n)))\n# yield win.copy()\n# for e in it:\n# win.pop(0)\n# win.append(e)\n# yield win.copy()\n\n\n# # Complete the riddle function below.\n# def riddle(arr):\n\n# res = []\n# for i in range(1, len(arr) + 1):\n# lista = [min(j) for j in window(arr, i)]\n# res.append(max(lista))\n\n# return res\n\n# O(n)\n# see the minmaxriddle.png in the img folder.\ndef riddle(arr):\n n=len(arr)\n res=[0]*n\n S=[0]*n\n\n win=[0]\n for i in range(1,n):\n while len(win)>0 and arr[win[-1]]>=arr[i]:\n win.pop()\n if len(win)==0:\n S[i]=i+1-1\n else:\n S[i]=i-win[-1]-1\n win.append(i)\n\n win=[n-1]\n for i in range(n-2,-1,-1):\n while len(win)>0 and arr[i]<=arr[win[-1]]:\n win.pop()\n if len(win)==0:\n S[i]+=n-1-i\n else:\n S[i]+=win[-1]-i-1\n win.append(i)\n\n for i in range(n):\n if res[S[i]]<arr[i]:\n res[S[i]]=arr[i]\n\n for j in range(n-2,-1,-1):\n res[j]=max(res[j],res[j+1])\n\n return res\n\n\nif __name__ == '__main__':\n # fptr = open(os.environ['OUTPUT_PATH'], 'w')\n\n n = int(input())\n\n arr = list(map(int, input().rstrip().split()))\n\n res = riddle(arr)\n\n print(\" \".join(map(str, res)))\n" }, { "alpha_fraction": 0.46266233921051025, "alphanum_fraction": 0.5990259647369385, "avg_line_length": 27.045454025268555, "blob_id": "854d2204d9085cf389be3a1fd8ffa299e05a162d", "content_id": "2873e7cc45ec837de7b565551aaf9383e49ccd45", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 616, "license_type": "no_license", "max_line_length": 74, "num_lines": 22, "path": "/PythonHack/tax.py", "repo_name": "Alwaysproblem/simplecode", "src_encoding": "UTF-8", "text": "# calculate the Tax from the table\n\ndef calculateTax(income):\n # Write your code here\n if income <= 18200:\n return 0.0\n elif income <= 37000:\n return str((income - 18200) * 0.19 + float(calculateTax(18200)))\n elif income <= 87000:\n return str((income - 37000) * 0.325 + float(calculateTax(37000)))\n elif income <= 180000:\n return str((income - 87000) 
* 0.37 + float(calculateTax(87000)))\n else:\n return str((income - 180000) * 0.45 + float(calculateTax(180000)))\n\ndef main():\n a = calculateTax(180001)\n print(type(a))\n\n\nif __name__ == \"__main__\":\n main()" }, { "alpha_fraction": 0.5263158082962036, "alphanum_fraction": 0.5621052384376526, "avg_line_length": 28.6875, "blob_id": "16955643ea8b5a4b7527f4ac648f15b241638c6e", "content_id": "4646bd9fc8ab5c614252eb501623db27ac16e8f2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 950, "license_type": "no_license", "max_line_length": 67, "num_lines": 32, "path": "/interview/LargestContiguousSubarraySum.py", "repo_name": "Alwaysproblem/simplecode", "src_encoding": "UTF-8", "text": "# Python program to find maximum contiguous subarray \n# Function to find the maximum contiguous subarray \nfrom math import inf \ndef maxSubArraySum(a, size): \n\n max_so_far = -inf - 1\n max_ending_here = [0] * (size + 1)\n\n optsuffix = [0] * (size + 1)\n\n for i in range(0, size): \n max_ending_here[i + 1] = max_ending_here[i] + a[i]\n\n # if max_so_far < max_ending_here:\n # max_so_far = max_ending_here\n \n if max_ending_here[i + 1] <= 0: \n max_ending_here[i + 1] = a[i]\n optsuffix[i + 1] = i + 1\n else:\n optsuffix[i + 1] = optsuffix[i]\n \n print(optsuffix)\n print(max_ending_here)\n return max_so_far\n\n# Driver function to check the above function \n# a = [-13, -3, -25, -20, -3, -16, -23, -12, -5, -22, -15, -4, -7] \na = list(map(int, input().split()))\nprint(\"Maximum contiguous sum is\", maxSubArraySum(a, len(a))) \n \n#This code is contributed by _Devesh Agrawal_ " }, { "alpha_fraction": 0.4117647111415863, "alphanum_fraction": 0.4296987056732178, "avg_line_length": 22.627119064331055, "blob_id": "7c359e6bfe29613026b2365b0954e8be55576d59", "content_id": "b9207eff6866a79d233575d562ead1f1152c9782", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1414, 
"license_type": "no_license", "max_line_length": 86, "num_lines": 59, "path": "/labuladong/cpp/isMatch/main.cpp", "repo_name": "Alwaysproblem/simplecode", "src_encoding": "UTF-8", "text": "#include <fmt/format.h>\n#include <fmt/ranges.h>\n\n#include <iostream>\n#include <unordered_map>\n#include <vector>\n\nusing namespace std;\n\nclass Solution {\n public:\n bool match(const string &s, const string &p, int i, int j) {\n if (i == 0) {\n return false;\n }\n if (p[j - 1] == '.') {\n return true;\n }\n return s[i - 1] == p[j - 1];\n }\n\n /*\n f[i][j]=\n if (p[j] != ‘*’)\n f[i−1][j−1] matches(s[i],p[j])\n false otherwise\n else\n f[i−1][j] (match many times) or f[i][j−2] (match 0 times) matches(s[i],p[j−1])\n f[i][j−2] otherwise\n */\n\n bool isMatch(string s, string p) {\n vector<vector<bool>> dp(s.size() + 1, vector<bool>(p.size() + 1, false));\n dp[0][0] = true;\n for (int i = 0; i <= s.size(); i++) {\n for (int j = 1; j <= p.size(); j++) {\n if (p[j - 1] != '*')\n if (match(s, p, i, j))\n dp[i][j] = dp[i - 1][j - 1];\n else\n dp[i][j] = false;\n else if (match(s, p, i, j - 1))\n dp[i][j] = dp[i][j - 2] || dp[i - 1][j];\n else\n dp[i][j] = dp[i][j - 2];\n }\n }\n fmt::print(\"{}\\n\", fmt::join(dp.begin(), dp.end(), \"\\n\"));\n return dp.back().back();\n }\n};\n\nint main() {\n string s = \"aa\", p = \"a*\";\n Solution sol;\n bool v = sol.isMatch(s, p);\n fmt::print(\"{}\\n\", v);\n return 0;\n}\n" }, { "alpha_fraction": 0.8387096524238586, "alphanum_fraction": 0.8387096524238586, "avg_line_length": 61, "blob_id": "2aa497685dd0f0c4e803cf7e603ae6890ea4d007", "content_id": "5dca17a99a8a4bafb8e6c0cd2a80c2e6c7a131d0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "CMake", "length_bytes": 62, "license_type": "no_license", "max_line_length": 61, "num_lines": 1, "path": "/leetcode/cpp/common_types/CMakeLists.txt", "repo_name": "Alwaysproblem/simplecode", "src_encoding": "UTF-8", "text": "add_library(common_type 
common_types/LinkedList/LinkedList.h)\n" }, { "alpha_fraction": 0.4528301954269409, "alphanum_fraction": 0.4708777666091919, "avg_line_length": 22.901960372924805, "blob_id": "85f0767cb1877ec260472dedb8d74200b204a0cb", "content_id": "6d47fffe6e24e52a01cf5793b534f96a0a0ee4e4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1219, "license_type": "no_license", "max_line_length": 154, "num_lines": 51, "path": "/interview/Abbreviation.py", "repo_name": "Alwaysproblem/simplecode", "src_encoding": "UTF-8", "text": "#!/bin/python3\n\nimport math\nimport os\nimport random\nimport re\nimport sys\n\n# https://www.hackerrank.com/challenges/abbr/problem?h_l=interview&playlist_slugs%5B%5D=interview-preparation-kit&playlist_slugs%5B%5D=dynamic-programming\n\n# Complete the abbreviation function below.\ndef abbreviation(a:str, b:str):\n n, m = len(a) + 1, len(b) + 1\n res = [[False] * n for _ in range(m)]\n\n res[0][0] = True\n AA = a.upper()\n for i, ac in enumerate(AA, 1):\n if ac == b[0]:\n res[1][i] = True\n\n for i, ac in enumerate(a, 1):\n for j, bc in enumerate(b, 1):\n if res[j][i] == True:\n continue\n elif ac.islower():\n Cc = ac.upper()\n if Cc != bc:\n res[j][i] = res[j][i - 1]\n else:\n res[j][i] = any([res[j][i - 1], res[j - 1][i - 1]])\n pass\n else:\n if ac == bc:\n res[j][i] = res[j - 1][i - 1]\n \n return \"YES\" if res[-1][-1] else \"NO\"\n\n\nif __name__ == '__main__':\n\n q = int(input())\n\n for q_itr in range(q):\n a = input()\n\n b = input()\n\n result = abbreviation(a, b)\n\n print(result)\n" }, { "alpha_fraction": 0.4892703890800476, "alphanum_fraction": 0.4892703890800476, "avg_line_length": 14.064516067504883, "blob_id": "f7b3aca617a9227f808843f15bfe4f45b9e4536d", "content_id": "312bf3ecc9fed0ebb9a0034d7afd122d4585e5bf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 466, "license_type": "no_license", "max_line_length": 42, "num_lines": 
31, "path": "/NewCoder/NCtemplate.py", "repo_name": "Alwaysproblem/simplecode", "src_encoding": "UTF-8", "text": "import math\nimport os\nimport random\nimport re\nimport sys\n\n# debug = True\n\ndef Solution(s, k):\n return \"None\"\n\ndef main():\n # input\n _, k = list(map(int, input().split()))\n Cards = input()\n\n # solution\n result = Solution(Cards, k)\n\n #print\n print(result)\n\nif __name__ == \"__main__\":\n # if not debug:\n # try:\n # while True:\n # main()\n # except EOFError:\n # exit()\n # else:\n main()" }, { "alpha_fraction": 0.6436681151390076, "alphanum_fraction": 0.6820960640907288, "avg_line_length": 29.157894134521484, "blob_id": "5e2bfd92b4844915a09c73d1d1df15bb4f9b2f0c", "content_id": "b74bd650d49cd87bb5fdd860fdec9d5247b5ba59", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1149, "license_type": "no_license", "max_line_length": 216, "num_lines": 38, "path": "/interview/minimumSwap.md", "repo_name": "Alwaysproblem/simplecode", "src_encoding": "UTF-8", "text": "# Minimum number of swaps required to sort an array\n\nGiven an array of $n$ distinct elements, find the minimum number of swaps required to sort the array.\n\nExamples\n\n```\nInput : {4, 3, 2, 1}\nOutput : 2\nExplanation : Swap index 0 with 3 and 1 with 2 to \n form the sorted array {1, 2, 3, 4}.\n\nInput : {1, 5, 4, 3, 2}\nOutput : 2\n```\n\nThis can be easily done by visualizing the problem as a graph. We will have $n$ nodes and an edge directed from node $i$ to node $j$ if the element at $i$’th index must be present at $j$’th index in the sorted array.\n\nGraph for {4, 3, 2, 1}\n\n![img](../img/a1.png)\n\nThe graph will now contain many non-intersecting cycles. 
Now a cycle with 2 nodes will only require 1 swap to reach the correct ordering, similarly a cycle with 3 nodes will only require 2 swap to do so.\n\nGraph for {4, 5, 2, 1, 5}\n\n![img](../img/b1.png)\n\nHence,\n- $ans = \\sum_{i = 1}^{k} (CycleSize - 1)$ \n\nwhere $k$ is the number of cycles\n\n**Time Complexity**: $O(n log n)$\n\n**Auxiliary Space**: $O(n)$\n\n[**Reference**](https://stackoverflow.com/questions/20990127/sorting-a-sequence-by-swapping-adjacent-elements-using-minimum-swaps)" }, { "alpha_fraction": 0.5006775259971619, "alphanum_fraction": 0.5250677466392517, "avg_line_length": 21.707693099975586, "blob_id": "2e87adbeac046c7f0ad9b763596bf63ee9dbca56", "content_id": "4f68b3977f63eaea68301a989df84d53d2b2083a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1482, "license_type": "no_license", "max_line_length": 65, "num_lines": 65, "path": "/leetcode/cpp/findTargetSumWays/main.cpp", "repo_name": "Alwaysproblem/simplecode", "src_encoding": "UTF-8", "text": "/*\n * @lc app=leetcode.cn id=494 lang=cpp\n *\n * [494] 目标和\n */\n#include \"utils/print_2d.hpp\"\n#include <fmt/format.h>\n#include <fmt/ranges.h>\n\n#include <iostream>\n#include <unordered_map>\n#include <vector>\n\nusing namespace std;\n\n// @lc code=start\nclass Solution {\n public:\n /*\n assume that\n set A include all positive.\n set B include all negative.\n sum(A) - sum(B) == target\n sum(A) - sum(B) - sum(B) == target - sum(B)\n - 2 * sum(B) = target - sum(B) -sum(A)\n 2 * sum(B) = sum(nums) - target\n */\n int pack(vector<int>& nums, int target) {\n int n = nums.size();\n vector<vector<int>> dp(n + 1, vector<int>(target + 1));\n\n for (int k = 0; k <= n; k++) dp[k][0] = 1;\n\n for (int i = 1; i <= n; i++) {\n for (int j = 0; j <= target; j++) {\n if (j >= nums[i - 1])\n dp[i][j] = dp[i - 1][j - nums[i - 1]] + dp[i - 1][j];\n else\n dp[i][j] = dp[i - 1][j];\n }\n }\n // print2D(dp);\n // fmt::print(\"{}\\n\", fmt::join(dp.begin(), 
dp.end(), \"\\n\"));\n return dp.back().back();\n }\n\n int findTargetSumWays(vector<int>& nums, int target) {\n int total_sum = 0;\n for (int x : nums) {\n total_sum += x;\n }\n if (total_sum < target || (total_sum - target) % 2) return 0;\n return pack(nums, (total_sum - target) / 2);\n }\n};\n// @lc code=end\n\nint main() {\n vector<int> nums = {100, 100};\n int target = -400;\n Solution sol;\n int v = sol.findTargetSumWays(nums, target);\n fmt::print(\"{}\\n\", v);\n return 0;\n}\n" }, { "alpha_fraction": 0.4683626890182495, "alphanum_fraction": 0.47749510407447815, "avg_line_length": 20.899999618530273, "blob_id": "53c9a5bd3fef3fc8203071708748bde6c6b39cb2", "content_id": "0f22332a98e0ed4148def653b4ccf1edef68cd9b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1545, "license_type": "no_license", "max_line_length": 53, "num_lines": 70, "path": "/leetcode/cpp/minWindow/main.cpp", "repo_name": "Alwaysproblem/simplecode", "src_encoding": "UTF-8", "text": "/*\n * @lc app=leetcode.cn id=76 lang=cpp\n *\n * [76] 最小覆盖子串\n */\n#include <fmt/format.h>\n#include <fmt/ranges.h>\n\n#include <iostream>\n#include <unordered_map>\n#include <vector>\n\nusing namespace std;\n\n// @lc code=start\nclass Solution {\n public:\n string minWindow(string s, string t) {\n if (s.size() == 0 || t.size() == 0) return \"\";\n unordered_map<char, int> need{}, window{};\n const int INTMAX = s.size() + 1;\n for (char c : t) need[c]++;\n int left = 0, right = 0;\n char c = 0;\n char d = 0;\n int start = 0, len = INTMAX;\n int valid = 0;\n while (right < s.size()) {\n c = s[right];\n right++;\n\n // window.add(right)\n if (need.count(c)) {\n window[c]++;\n if (window[c] == need[c]) valid++;\n }\n\n // fmt::print(\"({}, {})\\n\", left, right);\n\n while (valid == need.size()) {\n if (len > right - left) {\n start = left;\n len = right - left;\n }\n d = s[left];\n left++;\n // window.remove(left);\n if (need.count(d)) {\n if (window[d] == 
need[d]) valid--;\n window[d]--;\n }\n }\n }\n // fmt::print(\"len: {} start: {}\\n\", len, start);\n return len == INTMAX ? \"\" : s.substr(start, len);\n }\n};\n// @lc code=end\n\nint main() {\n Solution sol;\n // string s = \"b\", t = \"b\";\n // string s = \"abc\", t = \"b\";\n // string s = \"ADOBECODEBANC\", t = \"ABC\";\n // string s = \"bba\", t = \"ab\";\n string s = \"aa\", t = \"aa\";\n string v = sol.minWindow(s, t);\n fmt::print(\"{}\\n\", v);\n return 0;\n}\n" }, { "alpha_fraction": 0.5151807069778442, "alphanum_fraction": 0.5253012180328369, "avg_line_length": 21.55434799194336, "blob_id": "5f7879c675cef4296622579bf1d451decf60b393", "content_id": "fa24c1ccdecc9a8a2f8746a9ddfb1f59c4060235", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 2085, "license_type": "no_license", "max_line_length": 78, "num_lines": 92, "path": "/leetcode/cpp/findRepeatedDnaSequences/main.cpp", "repo_name": "Alwaysproblem/simplecode", "src_encoding": "UTF-8", "text": "/*\n * @lc app=leetcode.cn id=187 lang=cpp\n *\n * [187] 重复的DNA序列\n */\n#include <fmt/format.h>\n#include <fmt/ranges.h>\n\n#include <iostream>\n#include <unordered_map>\n#include <vector>\n\nusing namespace std;\n\n// @lc code=start\nclass Solution {\n public:\n int StoN(char c) {\n int res = -1;\n switch (c) {\n case 'A':\n res = 0;\n break;\n case 'G':\n res = 1;\n break;\n case 'C':\n res = 2;\n break;\n case 'T':\n res = 3;\n break;\n\n default:\n cout << \"error\" << endl;\n }\n return res;\n }\n\n int addChar(vector<int> &arr, int windowHash, int right, int R) {\n return R * windowHash + arr[right];\n }\n\n int removeChar(vector<int> &arr, int windowHash, int left, int R, int len) {\n return windowHash - arr[left] * (int)pow(R, len - 1);\n }\n\n vector<string> findRepeatedDnaSequences(string &s) {\n vector<int> arr;\n vector<string> res;\n unordered_map<string, int> res_hash;\n string temp{};\n for (char c : s) arr.push_back(StoN(c));\n // 
fmt::print(\"{}\\n\", arr);\n unordered_map<int, int> seen{}, window{};\n int left = 0, right = 0, windowHash = 0, R = 4, L = 10;\n while (right < s.size()) {\n // window.add();\n windowHash = addChar(arr, windowHash, right, R);\n right++;\n\n // fmt::print(\"({}, {})\\n\", left, right);\n while (right - left == L) {\n // window.remove\n if (seen.count(windowHash)) {\n // fmt::print(\"already seen {}\\n\", windowHash);\n temp = s.substr(left, L);\n if (!res_hash.count(temp)) {\n res_hash[temp] = 1;\n res.push_back(temp);\n }\n } else {\n seen[windowHash] = 1;\n }\n windowHash = removeChar(arr, windowHash, left, R, L);\n left++;\n }\n }\n // fmt::print(\"{}\\n\", res);\n return res;\n }\n};\n// @lc code=end\n\nint main() {\n string s = \"AAAAAAAAAAAAA\";\n // string s = \"AAAAACCCCCAAAAACCCCCCAAAAAGGGTTT\";\n Solution sol;\n vector<string> v = sol.findRepeatedDnaSequences(s);\n fmt::print(\"{}\\n\", v);\n return 0;\n}\n" }, { "alpha_fraction": 0.4918878972530365, "alphanum_fraction": 0.5081120729446411, "avg_line_length": 20.1875, "blob_id": "aaf38b518e9f9c92380fac261e4a1d90fee53f57", "content_id": "6d34506cf712b068b27a99154b0e4e124fb92ea2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1356, "license_type": "no_license", "max_line_length": 53, "num_lines": 64, "path": "/labuladong/cpp/preimageSizeFZF/main.cpp", "repo_name": "Alwaysproblem/simplecode", "src_encoding": "UTF-8", "text": "#include <fmt/format.h>\n#include <fmt/ranges.h>\n\n#include <climits>\n#include <iostream>\n#include <unordered_map>\n#include <vector>\n\nusing namespace std;\n\nclass Solution {\n public:\n long trailingZeroes(long n) {\n long res = 0;\n for (long d = n; d / 5 > 0; d = d / 5) {\n res += d / 5;\n }\n return res;\n }\n\n long left_bound(int target) {\n long lo = 0, hi = LONG_MAX - 1, mid = 0;\n while (lo <= hi) {\n mid = lo + (hi - lo) / 2;\n if (trailingZeroes(mid) == target) {\n hi = mid - 1;\n } else if (trailingZeroes(mid) 
< target) {\n lo = mid + 1;\n } else if (trailingZeroes(mid) > target) {\n hi = mid - 1;\n }\n }\n return lo;\n }\n\n long right_bound(int target) {\n long lo = 0, hi = LONG_MAX - 1, mid = 0;\n while (lo <= hi) {\n mid = lo + (hi - lo) / 2;\n if (trailingZeroes(mid) == target) {\n lo = mid + 1;\n } else if (trailingZeroes(mid) < target) {\n lo = mid + 1;\n } else if (trailingZeroes(mid) > target) {\n hi = mid - 1;\n }\n }\n return hi;\n }\n\n int preimageSizeFZF(int K) {\n fmt::print(\"{}\\n\", right_bound(K));\n fmt::print(\"{}\\n\", left_bound(K));\n return (int)(right_bound(K) - left_bound(K) + 1);\n }\n};\n\nint main() {\n int k = 0;\n Solution sol;\n int v = sol.preimageSizeFZF(k);\n fmt::print(\"{}\\n\", v);\n return 0;\n}\n" }, { "alpha_fraction": 0.4399075508117676, "alphanum_fraction": 0.4609013795852661, "avg_line_length": 29.180233001708984, "blob_id": "6ef8d05b6e01c6d754694cb544a6548dba2fb0ac", "content_id": "e07c2a01facf9b973d8e276074ace2cb67b73772", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5296, "license_type": "no_license", "max_line_length": 132, "num_lines": 172, "path": "/practice/NmaxMultiply.py", "repo_name": "Alwaysproblem/simplecode", "src_encoding": "UTF-8", "text": "\n\n#%%[markdown]\n# 给定一个无序数组,包含正数、负数和0,要求从中找出k个数的乘积,使得乘积最大,要求时间复杂度:O(n),空间复杂度:O(1)\n# $A = \\{a_i\\}_{1}^{N}$\n# ### let $f(k)$ is the best selection of k number that maximize the products.\n# ### we considered the $f(k+2)$:\n# ### $f(k + 2) = max\\{f(k) \\times max(b{i} \\times b_{i+1}), f(k) \\times min(b{i} \\times b_{i+1})\\}$\n# ### where the $b_i$ is selected from $A$ \\ $\\{b_j\\}_1^k$.\n\n#%%\nfrom functools import reduce\n\nn = int(input())\na = list(map(int, input().split()))\n\ndef Top2Number(b, mode):\n b = b.copy()\n if mode == 'max':\n if all([i < 0 for i in b]):\n top1 = min(b)\n b.remove(top1)\n top2 = min(b)\n else:\n top1 = max(b)\n b.remove(top1)\n top2 = max(b)\n elif mode == 'min':\n if all([i 
< 0 for i in b]):\n top1 = max(b)\n b.remove(top1)\n top2 = max(b)\n else:\n top1 = min(b)\n b.remove(top1)\n top2 = min(b)\n\n return top1, top2\n\n\ndef FindTwoProducts(a: list, mode = 'max')-> tuple:\n maxProd, twoNum = 0, ()\n # two number are both pos\n\n pos = [i for i in a if i > 0]\n # two number are both neg\n neg = [i for i in a if i < 0]\n\n if len(pos) < 2:\n top1, top2 = Top2Number(neg, mode)\n maxProd = top1 * top2\n twoNum = (top1, top2)\n elif len(neg) < 2:\n top1, top2 = Top2Number(pos, mode)\n maxProd = top1 * top2\n twoNum = (top1, top2)\n else:\n Ptop1, Ptop2 = Top2Number(pos, mode)\n Ntop1, Ntop2 = Top2Number(neg, mode)\n if mode == 'max':\n maxProd = max([Ptop1 * Ptop2, Ntop1 * Ntop2])\n elif mode == 'min':\n maxProd = min([Ptop1 * Ntop2, Ntop1 * Ptop2])\n \n if maxProd == Ptop1 * Ptop2:\n twoNum = (Ptop1, Ptop2)\n else:\n twoNum = (Ntop1, Ntop2)\n\n return maxProd, twoNum\n\n\ndef NmaxMultiply(a, n = 3, debug = False):\n res = [0] * (n + 1)\n rest_elem = [0] # occupy position to convert index starting with 1\n del_elem = [0]\n\n for i in range(1, n + 1):\n if i == 1:\n b = a.copy()\n res[i] = max(a)\n b.remove(res[i])\n rest_elem.append(tuple(b))\n del_elem.append((res[i],))\n elif i == 2:\n b = a.copy()\n res[i], T = FindTwoProducts(a, 'max')\n del_elem.append(T)\n for t in T:\n b.remove(t)\n rest_elem.append(tuple(b))\n elif i > 2:\n b = list(rest_elem[i - 2])\n\n multmin, Tmin = FindTwoProducts(b, 'min')\n multmax, Tmax = FindTwoProducts(b, 'max')\n\n T = Tmin if res[i - 2] * multmin > multmax * res[i - 2] else Tmax\n res[i] = max(res[i - 2] * multmin, multmax * res[i - 2])\n\n del_elem.append(del_elem[i - 2] + T)\n\n for t in T:\n b.remove(t)\n rest_elem.append(tuple(b))\n\n if debug == True:\n print(del_elem[-1])\n\n return res[-1]\n\nprint(NmaxMultiply(a, n, True))\n\n\n#%%[markdown]\n# ### the second answer: $O(kn)$ k is the number of collection\n# ### $f(k+1) = max\\{f{k} \\times b_i\\}$\n# ### $= max\\{max\\{f(k)\\} \\times max(a_i), 
max\\{f(k)\\} \\times min(a_i), min\\{f(k)\\} \\times max(a_i), min\\{f(k)\\} \\times min(a_i) \\}$\n#%%\nn = int(input())\na = list(map(int, input().split()))\n\ndef Remove(a:list, l:int):\n b = a.copy()\n b.remove(l)\n return b\n\n\ndef NmaxMultiply2(a, n = 3):\n min_l = [None] * (n + 1)\n max_l = [None] * (n + 1)\n\n rest_elem = [None] * (n + 1)\n del_elem = [None] * (n + 1)\n\n for i in range(n + 1):\n if i == 1:\n b = a.copy()\n max_l[i] = max(a)\n min_l[i] = min(a)\n rest_elem[i] = {\"min\": tuple(Remove(b, min_l[i])),\n \"max\": tuple(Remove(b, max_l[i]))}\n del_elem[i] = {\"min\": (min_l[i],),\n \"max\": (max_l[i],)}\n elif i > 1:\n max_set = list(rest_elem[i - 1][\"max\"])\n min_set = list(rest_elem[i - 1][\"min\"])\n t = [\n max_l[i - 1] * max(max_set),\n max_l[i - 1] * min(max_set),\n min_l[i - 1] * max(min_set),\n min_l[i - 1] * min(min_set)\n ]\n opt = [max(max_set), min(max_set), max(min_set), min(min_set)]\n max_l[i] = max(t)\n min_l[i] = min(t)\n\n # memorize the rest element set where the next selection sample.\n rest_elem[i] = {\"min\": tuple(Remove(min_set if t.index(min_l[i]) > 1 else max_set, opt[t.index(min_l[i])])),\n \"max\": tuple(Remove(min_set if t.index(max_l[i]) > 1 else max_set, opt[t.index(max_l[i])]))}\n \n # cache selection items.\n max_del = del_elem[i-1][\"max\"]\n min_del = del_elem[i-1][\"min\"]\n del_elem[i] = {\"min\": (min_del if t.index(min_l[i]) > 1 else max_del) + (opt[t.index(min_l[i])],),\n \"max\": (min_del if t.index(max_l[i]) > 1 else max_del) + (opt[t.index(max_l[i])],)}\n\n print(del_elem[-1][\"max\"])\n # print(rest_elem[-1][\"max\"])\n\n return max_l[-1]\n\n\nprint(NmaxMultiply2(a, n))" }, { "alpha_fraction": 0.6286370754241943, "alphanum_fraction": 0.6347626447677612, "avg_line_length": 26.20833396911621, "blob_id": "0833c0318a49a6b60d5f185b9828b545505d8fcc", "content_id": "bd4353779da99543dade807862cfd8eaadc76ea2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", 
"length_bytes": 1306, "license_type": "no_license", "max_line_length": 57, "num_lines": 48, "path": "/labuladong/cpp/deleteNode/main.cpp", "repo_name": "Alwaysproblem/simplecode", "src_encoding": "UTF-8", "text": "#include <fmt/format.h>\n#include <fmt/ranges.h>\n\n#include <iostream>\n#include <queue>\n#include <string>\n#include <unordered_set>\n#include <vector>\n\n#include \"common_types/TreeNode/BinaryTreeNode.h\"\n\nusing namespace std;\nusing TreeNode = BinaryTree::BinaryTreeNode<int>;\nusing BinaryTree::BuildBinaryTree;\nusing BinaryTree::showBinaryTree;\n\nclass Solution {\n public:\n TreeNode* deleteNode(TreeNode* root, int key) {\n if (!root) return root;\n if (root->val == key) {\n if (!root->left && !root->right) return nullptr;\n if (!root->left && root->right) return root->right;\n if (!root->right && root->left) return root->left;\n TreeNode* min_n = root->right;\n while (min_n->left != nullptr) min_n = min_n->left;\n root->val = min_n->val;\n root->right = deleteNode(root->right, min_n->val);\n } else if (root->val > key) {\n root->left = deleteNode(root->left, key);\n } else if (root->val < key) {\n root->right = deleteNode(root->right, key);\n }\n return root;\n }\n};\n\nint main() {\n const int null = BinaryTree::null<int>();\n vector<int> a{5, 3, 6, 2, 4, null, 7};\n TreeNode* root = BuildBinaryTree<int>(a);\n showBinaryTree<int>(root);\n int key = 3;\n Solution sol;\n TreeNode* r = sol.deleteNode(root, key);\n showBinaryTree<int>(r);\n return 0;\n}\n" }, { "alpha_fraction": 0.47120919823646545, "alphanum_fraction": 0.49040305614471436, "avg_line_length": 21.65217399597168, "blob_id": "feb70e60545c3354768999a294b42d3884cb0a41", "content_id": "248f1fb0af2a016111cd7c7a7be4bc0e9cbaac8b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1066, "license_type": "no_license", "max_line_length": 74, "num_lines": 46, "path": "/leetcode/cpp/maxCoins/main.cpp", "repo_name": "Alwaysproblem/simplecode", 
"src_encoding": "UTF-8", "text": "/*\n * @lc app=leetcode.cn id=312 lang=cpp\n *\n * [312] 你可以获得的最大硬币数目\n */\n#include <fmt/format.h>\n#include <fmt/ranges.h>\n\n#include <iostream>\n#include <unordered_map>\n#include <vector>\n\nusing namespace std;\n\n// @lc code=start\nclass Solution {\n public:\n int maxCoins(vector<int>& nums) {\n int n = nums.size();\n vector<int> scores(nums.size() + 2, 1);\n for (int i = 1; i <= nums.size(); i++) {\n scores[i] = nums[i - 1];\n }\n\n vector<vector<int>> dp(scores.size(), vector<int>(scores.size(), 0));\n for (int i = n; i >= 0; i--) {\n for (int j = i + 1; j < n + 2; j++) {\n for (int k = i + 1; k < j; k++) {\n dp[i][j] = max(dp[i][j], dp[i][k] + dp[k][j] +\n scores[i] * scores[k] * scores[j]);\n }\n }\n }\n // fmt::print(\"{}\\n\", fmt::join(dp.begin(), dp.end(), \"\\n\"));\n return dp.front().back();\n }\n};\n// @lc code=end\n\nint main() {\n vector<int> nums = {3, 1, 5, 8};\n Solution sol;\n int v = sol.maxCoins(nums);\n fmt::print(\"{}\\n\", v);\n return 0;\n}\n" }, { "alpha_fraction": 0.5502958297729492, "alphanum_fraction": 0.6508875489234924, "avg_line_length": 32.79999923706055, "blob_id": "064a1ae281833653f8389515cc1d964162dbd53e", "content_id": "1d799f28d141da6e18b525892c86193ce177d59a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 169, "license_type": "no_license", "max_line_length": 118, "num_lines": 5, "path": "/labuladong/cpp/ReadMe.md", "repo_name": "Alwaysproblem/simplecode", "src_encoding": "UTF-8", "text": "# ENV with conda (GLIBCXX >= 3.4.29)\n\n```bash\nconda create -n cpp-simplecode python=3.10 gcc=12.* gdb=12.* gxx_linux-64==12.* cmake ninja fmt=10.* -y -c conda-forge\n```\n" }, { "alpha_fraction": 0.45419102907180786, "alphanum_fraction": 0.4658869504928589, "avg_line_length": 15.580645561218262, "blob_id": "9fc43dcf4eab155b5ca1098a8d24bc4977e52ecb", "content_id": "7c58942b565b4d811f01453b84fde7481293e850", "detected_licenses": [], 
"is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 513, "license_type": "no_license", "max_line_length": 51, "num_lines": 31, "path": "/NewCoder/wangyi11.py", "repo_name": "Alwaysproblem/simplecode", "src_encoding": "UTF-8", "text": "import math\nimport os\nimport random\nimport re\nimport sys\n\ndef Score(c, n):\n sc = 0\n S = [[] for _ in range(n)]\n for i in c:\n S[i - 1].append(i)\n if len([0 for s in S if len(s) != 0]) == n:\n sc += 1\n for s in S:\n s.pop(0)\n\n return sc\n\ndef main():\n # input\n n, m = list(map(int, input().split()))\n c = list(map(int, input().split()))\n\n # solution\n result = Score(c, n)\n\n #print\n print(result)\n\nif __name__ == \"__main__\":\n main()" }, { "alpha_fraction": 0.5855925679206848, "alphanum_fraction": 0.5964368581771851, "avg_line_length": 21.64912223815918, "blob_id": "6ccd0e983cc4b0c6d9c65c23c91e4907e5e0ab6d", "content_id": "c2828c2f5ca05951a61a23609dc092a1ddd27d5e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1299, "license_type": "no_license", "max_line_length": 61, "num_lines": 57, "path": "/leetcode/cpp/detectCycle/main.cpp", "repo_name": "Alwaysproblem/simplecode", "src_encoding": "UTF-8", "text": "/*\n * @lc app=leetcode.cn id=141 lang=cpp\n *\n * [141] 环形链表\n */\n#include <fmt/format.h>\n#include <fmt/ranges.h>\n\n#include <iostream>\n#include <unordered_map>\n#include <vector>\n\n#include \"common_types/LinkedList/LinkedList.h\"\n\nusing namespace std;\nusing ListNode = LinkedListNode<int>;\n\n// @lc code=start\nclass Solution {\n public:\n ListNode *hasCycle(ListNode *head) {\n if (head == nullptr) return nullptr;\n ListNode *slow = head, *fast = head;\n while (slow->next != nullptr && fast->next != nullptr &&\n fast->next->next != nullptr) {\n slow = slow->next;\n fast = fast->next->next;\n if (slow == fast) {\n break;\n }\n }\n\n if (slow->next == nullptr) return nullptr;\n if (fast->next == nullptr) return nullptr;\n 
if (fast->next->next == nullptr) return nullptr;\n\n ListNode *res = head;\n while (res != slow) {\n res = res->next;\n slow = slow->next;\n }\n return res;\n }\n};\n// @lc code=end\n\nint main() {\n Solution sol;\n vector<int> v = {1, 2, 3, 4, 5};\n ListNode *head = BuildCycleLinkedlist<int>(v, 1);\n // showLinkedList<int>(head);\n fmt::print(\"the head of cycle is {}\\n\",\n sol.hasCycle(head) == nullptr ? -1 : head->val);\n DestroyCycleLinkedlist<int>(head);\n showLinkedList<int>(head);\n return 0;\n}\n" }, { "alpha_fraction": 0.4475955665111542, "alphanum_fraction": 0.49445128440856934, "avg_line_length": 18.309524536132812, "blob_id": "cf63ed45f68f0a71563f9a9010628c49c3511bc5", "content_id": "4dbd82938d67f72d390d3fedb1e3c8275ac27211", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 811, "license_type": "no_license", "max_line_length": 56, "num_lines": 42, "path": "/leetcode/python/MergeSortedArray.py", "repo_name": "Alwaysproblem/simplecode", "src_encoding": "UTF-8", "text": "#%%\nfrom collections import deque\nfrom statistics import median\n\ndef MergeSortedArray(nums1: list, nums2: list) -> float:\n n1 = deque(nums1)\n n2 = deque(nums2)\n result = []\n\n while not len(n1) == 0 and not len(n2) == 0:\n t1 = n1.popleft()\n t2 = n2.popleft()\n\n if t1 > t2:\n result.append(t2)\n n1.appendleft(t1)\n elif t1 < t2:\n result.append(t1)\n n2.appendleft(t2)\n else:\n result.append(t1)\n result.append(t2)\n else:\n while not len(n1) == 0:\n result.append(n1.popleft())\n \n while not len(n2) == 0:\n result.append(n2.popleft())\n \n return result\n\n\n\n\nif __name__ == \"__main__\":\n a = [1,3,5]\n b = [2,4,6]\n\n print(MergeSortedArray(a, b))\n\n\n#%%\n" }, { "alpha_fraction": 0.3540031313896179, "alphanum_fraction": 0.37048664689064026, "avg_line_length": 22.18181800842285, "blob_id": "f3406281509869d989ef73993a0c7ba724a6c7c6", "content_id": "e9056fa8d6b2d1e5c70329ab70c09c76d2e27fef", "detected_licenses": [], 
"is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1286, "license_type": "no_license", "max_line_length": 74, "num_lines": 55, "path": "/leetcode/python/130.被围绕的区域.py", "repo_name": "Alwaysproblem/simplecode", "src_encoding": "UTF-8", "text": "#\n# @lc app=leetcode.cn id=130 lang=python3\n#\n# [130] 被围绕的区域\n#\nfrom typing import *\n# @lc code=start\nclass Solution:\n def solve(self, board: List[List[str]]) -> None:\n \"\"\"\n Do not return anything, modify board in-place instead.\n \"\"\"\n\n if len(board) <= 2 or len(board[0]) <= 2:\n return\n\n row = len(board)\n col = len(board[0])\n\n def dfs(i, j):\n if i < 0 or i>=row or j < 0 or j >= col or board[i][j] != \"O\":\n return\n\n board[i][j] = \"A\"\n\n dfs(i - 1, j)\n dfs(i + 1, j)\n dfs(i, j - 1)\n dfs(i, j + 1)\n \n for i in range(row):\n dfs(i, 0)\n dfs(i, col - 1)\n \n for j in range(col):\n dfs(0, j)\n dfs(row - 1, j)\n \n for i in range(row):\n\n for j in range(col):\n if board[i][j] == \"O\":\n board[i][j] = \"X\"\n elif board[i][j] == \"A\":\n board[i][j] = \"O\"\n\n# @lc code=end\n\nif __name__ == '__main__':\n board = [['X', 'X', 'X', 'X'], \n ['X', 'O', 'O', 'X'], \n ['X', 'X', 'O', 'X'], \n ['X', 'O', 'X', 'X']]\n Solution().solve(board)\n print(board)" }, { "alpha_fraction": 0.4639175236225128, "alphanum_fraction": 0.47766321897506714, "avg_line_length": 21.423076629638672, "blob_id": "47f45fe54bea93c4d9d8c14a34fccd5e366f27ac", "content_id": "699460924e1a66d48fffa1a3057f0ebd20ba7cd5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 582, "license_type": "no_license", "max_line_length": 55, "num_lines": 26, "path": "/leetcode/python/combinationSum.py", "repo_name": "Alwaysproblem/simplecode", "src_encoding": "UTF-8", "text": "from typing import List\ndef combinationSum(candidates: List[int], target: int):\n if len(candidates) == 0:\n return []\n \n n = len(candidates) - 1\n com = []\n coms = []\n\n def 
backtrace(com):\n\n if sum(com) == target:\n coms.append(com.copy())\n return\n if sum(com) < target:\n for i in candidates:\n if len(com) == 0 or i >= com[-1]:\n com.append(i)\n backtrace(com)\n com.pop()\n\n backtrace(com)\n\n return coms\n\nprint(combinationSum([2,3,5], 8))" }, { "alpha_fraction": 0.4849624037742615, "alphanum_fraction": 0.5350877046585083, "avg_line_length": 23.9375, "blob_id": "b4864eabb6ff83ac216e958209eba3611409d150", "content_id": "ae912375d4342d2eabf937fd70dde830a5828a3f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 798, "license_type": "no_license", "max_line_length": 78, "num_lines": 32, "path": "/python_Interview/minDistance.py", "repo_name": "Alwaysproblem/simplecode", "src_encoding": "UTF-8", "text": "import math\nimport os\nimport random\nimport re\nimport sys\n\n# find the minimum index distance between number 1 and number 2.\ndef minDistance(arr, num1, num2):\n if arr == None or len(arr) <= 0:\n print(\"there is something wrong with input parameter.\")\n return math.inf\n lastpos1 = -1\n lastpos2 = -1\n minDis = math.inf\n\n for i, a in enumerate(arr):\n if a == num1:\n lastpos1 = i\n if lastpos2 >= 0:\n minDis = min(minDis, lastpos1 - lastpos2)\n \n if a == num2:\n lastpos2 = i\n if lastpos1 >= 0:\n minDis = min(minDis, lastpos2 - lastpos1)\n\n return minDis\n\n\nif __name__ == \"__main__\":\n res = minDistance([4, 5, 6, 4, 7, 4, 6, 4, 7, 8, 5, 6, 4, 3, 10, 8], 4, 8)\n print(res)\n" }, { "alpha_fraction": 0.5252525210380554, "alphanum_fraction": 0.5303030014038086, "avg_line_length": 12.266666412353516, "blob_id": "67f3449e06d6d3b0bcbf1e11d6ab827b5eb7657e", "content_id": "2a98a84dd934a6d392c67aa2a2236fdc5094be65", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 198, "license_type": "no_license", "max_line_length": 49, "num_lines": 15, "path": "/PythonHack/dot_and_cross.py", "repo_name": 
"Alwaysproblem/simplecode", "src_encoding": "UTF-8", "text": "import numpy as np\n\nN = int(input())\n\nl = []\n\nfor _ in range(N * 2):\n l.append([float(i) for i in input().split()])\n\nT = np.array(l)\n\nA = T[:N, :]\nB = T[N:, :]\n\nprint(np.array(A@B, dtype=np.int))" }, { "alpha_fraction": 0.42686566710472107, "alphanum_fraction": 0.44477611780166626, "avg_line_length": 15.699999809265137, "blob_id": "e76d05eda93806535ef490e32b5e46ef8ff54212", "content_id": "3b86711f99d4f061ab8d7c86166b6fe6bdfae6fa", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 335, "license_type": "no_license", "max_line_length": 38, "num_lines": 20, "path": "/leetcode/python/Subset.py", "repo_name": "Alwaysproblem/simplecode", "src_encoding": "UTF-8", "text": "def Subset(nums: list):\n if len(nums) == 0:\n return [set()]\n if len(nums) == 1:\n return [set(), set([nums[0]])]\n k = nums.pop()\n res = Subset(nums)\n\n res_ = []\n for i in res:\n co = i.copy()\n co.add(k)\n res_.append(co)\n \n res_ += res\n\n return res_\n\n\nprint(Subset([1,2,3]))\n\n" }, { "alpha_fraction": 0.8478473424911499, "alphanum_fraction": 0.8527397513389587, "avg_line_length": 18.854368209838867, "blob_id": "a931d347db40c986c894b70a82e9eea16878c9f3", "content_id": "d0e18175340d1eca0ce737f77f49ea7707573ff7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 4528, "license_type": "no_license", "max_line_length": 109, "num_lines": 103, "path": "/labuladong/cpp/cpp.md", "repo_name": "Alwaysproblem/simplecode", "src_encoding": "UTF-8", "text": "C++常见面试问题汇总\n指针和引用的区别\n堆和栈的区别\nnew和delete是如何实现的,new 与 malloc的异同处\nC和C++的区别\nC++、Java的联系与区别,包括语言特性、垃圾回收、应用场景等(java的垃圾回收机制)\nStruct和class的区别\ndefine 和const的区别(编译阶段、安全性、内存占用等)\n在C++中const和static的用法(定义,用途)\nconst和static在类中使用的注意事项(定义、初始化和使用)\nC++中的const类成员函数(用法和意义),以及和非const成员函数的区别\nC++的顶层const和底层const\nfinal和override关键字\n拷贝初始化和直接初始化,初始化和赋值的区别\nextern 
\"C\"的用法\n模板函数和模板类的特例化\nC++的STL源码(这个系列也很重要,建议侯捷老师的STL源码剖析书籍与视频),其中包括内存池机制,各种容器的底层实现机制,算法的实现原理等)\nSTL源码中的hashtable的实现\nSTL中unordered_map和map的区别和应用场景\nSTL中vector的实现\nSTL容器的几种迭代器以及对应的容器(输入迭代器,输出迭代器,前向迭代器,双向迭代器,随机访问迭代器)\n顺序容器:vector,deque是随机访问迭代器;list是双向迭代器\n\n容器适配器:stack,queue,priority_queue没有迭代器\n\n关联容器:set,map,multiset,multimap是双向迭代器\n\nunordered_set,unordered_map,unordered_multiset,unordered_multimap是前向迭代器\n\nSTL中的traits技法\ntype_traits\n\niterator_traits\n\nchar traits\n\nallocator_traits\n\npointer_traits\n\narray_traits\n\nvector使用的注意点及其原因,频繁对vector调用push_back()对性能的影响和原因。\nC++中的重载和重写的区别\nC++内存管理,内存池技术(热门问题),与csapp中几种内存分配方式对比学习加深理解\n介绍面向对象的三大特性,并且举例说明每一个\nC++多态的实现\nC++虚函数相关(虚函数表,虚函数指针),虚函数的实现原理(包括单一继承,多重继承等)(拓展问题:为什么基类指针指向派生类对象时可以调用派生类成员函数,基类的虚函数存放在内存的什么区,虚函数表指针vptr的初始化时间)\nC++中类的数据成员和成员函数内存分布情况\nthis指针\n析构函数一般写成虚函数的原因\n构造函数、拷贝构造函数和赋值操作符的区别\n构造函数:对象不存在,没用别的对象初始化\n\n拷贝构造函数:对象不存在,用别的对象初始化\n\n赋值运算符:对象存在,用别的对象给它赋值\n\n构造函数声明为explicit\n构造函数为什么一般不定义为虚函数\n构造函数的几种关键字(default delete 0)\n= default:将拷贝控制成员定义为=default显式要求编译器生成合成的版本\n\n= delete:将拷贝构造函数和拷贝赋值运算符定义删除的函数,阻止拷贝(析构函数不能是删除的函数 C++Primer P450)\n\n= 0:将虚函数定义为纯虚函数(纯虚函数无需定义,= 0只能出现在类内部虚函数的声明语句处;当然,也可以为纯虚函数提供定义,不过函数体必须定义在类的外部)\n\n构造函数或者析构函数中调用虚函数会怎样\n纯虚函数\n静态类型和动态类型,静态绑定和动态绑定的介绍\n引用是否能实现动态绑定,为什么引用可以实现\n深拷贝和浅拷贝的区别(举例说明深拷贝的安全性)\n对象复用的了解,零拷贝的了解\n介绍C++所有的构造函数\n什么情况下会调用拷贝构造函数(三种情况)\n结构体内存对齐方式和为什么要进行内存对齐?\n内存泄露的定义,如何检测与避免?\n手写智能指针的实现(shared_ptr和weak_ptr实现的区别)\n智能指针的循环引用\n遇到coredump要怎么调试\n内存检查工具的了解\n模板的用法与适用场景\n成员初始化列表的概念,为什么用成员初始化列表会快一些(性能优势)?\n用过C++ 11吗,知道C++ 11哪些新特性?\nC++的调用惯例(简单一点C++函数调用的压栈过程)\nC++的四种强制转换\nstatic_cast\n\ndynamic_cast\n\nconst_cast\n\nreinterpret_cast\n\nC++中将临时变量作为返回值的时候的处理过程(栈上的内存分配、拷贝过程)\nC++的异常处理\nvolatile关键字\n优化程序的几种方法\npublic,protected和private访问权限和继承\nclass和struct的区别\ndecltype()和auto\ninline和宏定义的区别\nC++和C的类型安全" }, { "alpha_fraction": 0.6719883680343628, "alphanum_fraction": 0.6763425469398499, "avg_line_length": 18.714284896850586, "blob_id": "7e7fbd409e3bdb3faacb5e43d94e589c8b9d0918", "content_id": 
"7983268740d2928ba9e42c03a4ecd54a3d932da4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 689, "license_type": "no_license", "max_line_length": 64, "num_lines": 35, "path": "/COPInterview/MaxNumIceCream.py", "repo_name": "Alwaysproblem/simplecode", "src_encoding": "UTF-8", "text": "#!/bin/python3\n\nimport math\nimport os\nimport random\nimport re\nimport sys\n\nimport operator\n\n#%%\n# description\nfrom IPython.display import Image, display\n\nfor f in ['img/MaxNumIceCream1.jpg', 'img/MaxNumIceCream2.jpg']:\n display(Image(filename=f))\n\n#%%\n# solution\n\nfrom IPython.display import Image\nImage('img/solution_maxIcecream.png')\n\n\n#%%\n\ndef maxNumofIceCream(money, storage, cost):\n tmp = money + sum(map(operator.mul, storage, cost))\n return math.floor(tmp / sum(cost))\n\nif __name__ == '__main__':\n n, m = list(map(int, input().split()))\n storage = list(map(int, input().split()))\n cost = list(map(int, input().split()))\n print(maxNumofIceCream(m, storage, cost))" }, { "alpha_fraction": 0.6183035969734192, "alphanum_fraction": 0.6272321343421936, "avg_line_length": 20.85365867614746, "blob_id": "9104f9fe9229758217b9a4fe54fa2cb802868483", "content_id": "329cca512af837e2407d5e061123d293b327a2c1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 896, "license_type": "no_license", "max_line_length": 71, "num_lines": 41, "path": "/labuladong/cpp/LL_getKthFromEnd/main.cpp", "repo_name": "Alwaysproblem/simplecode", "src_encoding": "UTF-8", "text": "#include <fmt/format.h>\n#include <fmt/ranges.h>\n\n#include <iostream>\n#include <unordered_map>\n#include <vector>\n\n#include \"common_types/LinkedList/LinkedList.h\"\n\nusing namespace std;\nusing ListNode = LinkedListNode<int>;\n\nclass Solution {\n public:\n ListNode* getKthFromEnd(ListNode* head, int k) {\n if (!head) return nullptr;\n if (!k) return nullptr;\n ListNode *slow = head, *fast = head;\n 
int count = 0;\n while (fast != nullptr && count < k) {\n fast = fast->next;\n count++;\n }\n if (!fast) return head;\n\n for (; fast->next != nullptr; slow = slow->next, fast = fast->next)\n ;\n return slow->next;\n }\n};\n\nint main() {\n Solution s;\n vector<int> v = {1, 2, 3, 4, 5};\n ListNode* head = BuildLinkedlist<int>(v);\n showLinkedList<int>(head);\n ListNode* k = s.getKthFromEnd(head, 2);\n showLinkedList<int>(k);\n DestroyLinkedlist<int>(head);\n return 0;\n}\n" }, { "alpha_fraction": 0.44759824872016907, "alphanum_fraction": 0.48471614718437195, "avg_line_length": 24.5, "blob_id": "be22cb2727337e6db3a1cdcfaf9d475239e14a30", "content_id": "5759e923214f8e2a89a6552295763eb4430cb044", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 458, "license_type": "no_license", "max_line_length": 78, "num_lines": 18, "path": "/leetcode/python/mergeInterval.py", "repo_name": "Alwaysproblem/simplecode", "src_encoding": "UTF-8", "text": "def mergeInterval(interval: list()):\n if len(interval) <= 1:\n return interval\n\n n = len(interval)\n inc = sorted(interval, key = lambda x: x[0])\n\n res = []\n for i in range(n):\n if len(res) != 0 and inc[i][0] <= res[-1][1]:\n res[-1] = [min(inc[i][0], res[-1][0]), max(inc[i][1], res[-1][1])]\n else:\n res.append(inc[i])\n\n return res\n\nif __name__ == \"__main__\":\n print(mergeInterval([[1,4],[4,5]]))" }, { "alpha_fraction": 0.55030357837677, "alphanum_fraction": 0.5576756000518799, "avg_line_length": 35.60317611694336, "blob_id": "92c0c86beb89d231021c9561101f00daba7d6c29", "content_id": "d912c871ad5873f5520d50bf3fef3fd3ee2f06dc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2306, "license_type": "no_license", "max_line_length": 114, "num_lines": 63, "path": "/COPInterview/BProP.py", "repo_name": "Alwaysproblem/simplecode", "src_encoding": "UTF-8", "text": "#!/bin/python3\nimport math\nimport os\nimport 
random\nimport re\nimport sys\nimport ast\nimport numpy as np\n\nclass OpConv2D:\n def __init__(self, filters_count, kernel_size, inputs):\n self.inputs = inputs\n self.kernel_size = kernel_size\n \n # Shape of the input feature map\n input_height = inputs.shape[0]\n input_width = inputs.shape[1]\n input_channels = inputs.shape[2]\n \n # Shape of this layer's output feature map\n self.output_height = input_height - kernel_size + 1\n self.output_width = input_width - kernel_size + 1\n self.output_channels = filters_count\n \n # self.weights contains filters_count filters of shape: kernel_size x kernel_size x input_channels\n self.weights = np.random.normal(size=(filters_count, kernel_size, kernel_size, input_channels), scale=0.1)\n \n def forward(self):\n Z = np.zeros((self.output_height, self.output_width, self.output_channels), dtype=np.float32)\n\n for h in range(self.output_height):\n for w in range(self.output_width):\n # Apply all filters\n for c in range(self.output_channels):\n for i in range(self.kernel_size):\n for j in range(self.kernel_size):\n for k in range(self.weights.shape[-1]):\n z += self.inputs[h + i, w + j, k] * self.weights[c][i][j][k]\n Z[h, w, c] = z\n return Z\n\n # dZ is the derivate of the loss with respect to the output of this layer's forward propagation.\n def backward(self, dZ):\n dW = np.zeros(self.weights.shape, dtype=np.float32)\n dA_prev = np.zeros(self.inputs.shape, dtype=np.float32)\n\n X, W = self.inputs, self.weights\n\n ks = self.kernel_size\n\n height, width, channel = dZ.shape\n #enter you code here\n for h in range(height):\n for w in range(width):\n for c in range(self.output_channels):\n for i in range(self.weights.shape[-1]):\n dA_prev[h:h + ks, w:w + ks, i] += W[c, :, :, i] * dZ[h, w, c]\n dW += X[h:h + ks, w:w + ks, i] * dZ[h, w, c]\n \n return (dW, dA_prev)\n\na = eval(input())\na = np.array(a)\n" }, { "alpha_fraction": 0.48521286249160767, "alphanum_fraction": 0.5261618494987488, "avg_line_length": 27.89622688293457, 
"blob_id": "d704dfba6ff0c57c03b3386a1679be1895709b55", "content_id": "ea69fc16c3c41d3bb8b1940e155843819e86c299", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3077, "license_type": "no_license", "max_line_length": 105, "num_lines": 106, "path": "/UNSWquiz/Week 4 - quiz_3_sol.py", "repo_name": "Alwaysproblem/simplecode", "src_encoding": "UTF-8", "text": "# Randomly fills an array of size 10x10 with 0s and 1s, and outputs the size of\n# the largest parallelogram with horizontal sides.\n# A parallelogram consists of a line with at least 2 consecutive 1s,\n# with below at least one line with the same number of consecutive 1s,\n# all those lines being aligned vertically in which case the parallelogram\n# is actually a rectangle, e.g.\n# 111\n# 111\n# 111\n# 111\n# or consecutive lines move to the left by one position, e.g.\n# 111\n# 111\n# 111\n# 111\n# or consecutive lines move to the right by one position, e.g.\n# 111\n# 111\n# 111\n# 111\n#\n# Written by Eric Martin for COMP9021\n\n\nfrom random import seed, randrange\nimport sys\n\n\ndim = 10\n\n\ndef display_grid():\n for i in range(dim):\n print(' ', end = '')\n for j in range(dim):\n print(' 1', end = '') if grid[i][j] else print(' 0', end = '')\n print()\n print()\n\n\ndef size_of_largest_parallelogram():\n max_size = 0\n for i in range(dim - 1):\n for j1 in range(dim - 1):\n if not grid[i][j1]:\n continue\n for j2 in range(j1 + 1, dim):\n if not grid[i][j2]:\n break\n max_size = max(max_size,\n size_of_largest_parallelogram_with_given_top_side(i, j1, j2, 'straight'),\n size_of_largest_parallelogram_with_given_top_side(i, j1, j2, 'left'),\n size_of_largest_parallelogram_with_given_top_side(i, j1, j2, 'right'))\n return max_size\n\ndef size_of_largest_parallelogram_with_given_top_side(i1, j1, j2, dir):\n length = j2 - j1 + 1\n i2 = i1\n if dir == 'straight':\n while i2 + 1 < dim:\n i2 += 1\n if any(not grid[i2][j] for j in range(j1, j2 + 1)):\n break\n 
else:\n i2 += 1\n elif dir == 'left':\n while i2 + 1 < dim and j1 - 1 >= 0:\n i2 += 1\n j1 -= 1\n j2 -= 1\n if any(not grid[i2][j] for j in range(j1, j2 + 1)):\n break\n else:\n i2 += 1\n else:\n while i2 + 1 < dim and j2 + 1 < dim:\n i2 += 1\n j1 += 1\n j2 += 1\n if any(not grid[i2][j] for j in range(j1, j2 + 1)):\n break\n else:\n i2 += 1\n if i2 == i1 + 1:\n return 0\n return (i2 - i1) * length\n\n\ntry:\n for_seed, n = [int(i) for i in\n input('Enter two integers, the second one being strictly positive: ').split()]\n if n <= 0:\n raise ValueError\nexcept ValueError:\n print('Incorrect input, giving up.')\n sys.exit()\n\nseed(for_seed)\ngrid = [[randrange(n) for _ in range(dim)] for _ in range(dim)]\nprint('Here is the grid that has been generated:')\ndisplay_grid()\nsize = size_of_largest_parallelogram()\nif size:\n print('The largest parallelogram with horizontal sides has a size of', size, end = '.\\n')\nelse:\n print('There is no parallelogram with horizontal sides.')\n \n\n" }, { "alpha_fraction": 0.824999988079071, "alphanum_fraction": 0.824999988079071, "avg_line_length": 39, "blob_id": "be6c755a201987d4f83fe3865c1f1219e55f6cfd", "content_id": "b404dc2c5f0cb2d3967bd8a2b089ab1c5ee67f02", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "CMake", "length_bytes": 40, "license_type": "no_license", "max_line_length": 39, "num_lines": 1, "path": "/leetcode/cpp/permute_unique/CMakeLists.txt", "repo_name": "Alwaysproblem/simplecode", "src_encoding": "UTF-8", "text": "add_executable(permute_unique main.cpp)\n" }, { "alpha_fraction": 0.6154791116714478, "alphanum_fraction": 0.624078631401062, "avg_line_length": 19.871795654296875, "blob_id": "151e54267254e2c50c0a508c0d1e9ac97d7606b0", "content_id": "d2ae102c8075d3806fd04eeb2a2002feb036c297", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 814, "license_type": "no_license", "max_line_length": 168, "num_lines": 39, "path": 
"/interview/RansomNote.py", "repo_name": "Alwaysproblem/simplecode", "src_encoding": "UTF-8", "text": "#!/bin/python3\n\nimport math\nimport os\nimport random\nimport re\nimport sys\n\n# https://www.hackerrank.com/challenges/ctci-ransom-note/problem?h_l=interview&playlist_slugs%5B%5D=interview-preparation-kit&playlist_slugs%5B%5D=dictionaries-hashmaps\n\nfrom collections import Counter\n\n# Complete the checkMagazine function below.\ndef checkMagazine(magazine, note):\n cm = Counter(magazine)\n nm = Counter(note)\n for k in nm.keys():\n try:\n if cm[k] < nm[k]:\n raise ValueError\n except (KeyError, ValueError):\n print(\"No\")\n return\n print(\"Yes\")\n\n\n\nif __name__ == '__main__':\n mn = input().split()\n\n m = int(mn[0])\n\n n = int(mn[1])\n\n magazine = input().rstrip().split()\n\n note = input().rstrip().split()\n\n checkMagazine(magazine, note)\n" }, { "alpha_fraction": 0.49865108728408813, "alphanum_fraction": 0.5125899314880371, "avg_line_length": 23.72222137451172, "blob_id": "3dfac752b699bf2ebe62bc7a70f1df5d4ee28bd6", "content_id": "2f8ced7630a42a98db71bc36d962b456ef5b3f27", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 2224, "license_type": "no_license", "max_line_length": 73, "num_lines": 90, "path": "/CHack/strsort.c", "repo_name": "Alwaysproblem/simplecode", "src_encoding": "UTF-8", "text": "#include <stdio.h>\n#include <stdlib.h>\n#include <string.h>\n\nint lexicographic_sort(const void *a, const void *b) {\n const char *aa = *(const char **)a;\n const char *bb = *(const char **)b;\n return strcmp(aa, bb);\n}\n\nint lexicographic_sort_reverse(const void *a, const void *b) {\n const char *aa = *(const char **)a;\n const char *bb = *(const char **)b;\n return strcmp(bb, aa);\n}\n\nint sort_by_number_of_distinct_characters(const void *a, const void *b) {\n const char *aa = *(const char **)a;\n const char *bb = *(const char **)b;\n char hashaa[26];\n char hashbb[26];\n for (int ia = 0; aa[ia] != 
'\\0'; ia += 1) {\n hashaa[aa[ia] - 'a'] = 1;\n }\n for (int ib = 0; bb[ib] != '\\0'; ib += 1) {\n hashbb[bb[ib] - 'a'] = 1;\n }\n int lena = 0;\n int lenb = 0;\n for (int i = 0; i < 26; i++) {\n if (hashaa[i] != 0)\n lena += 1;\n if (hashbb[i] != 0)\n lenb += 1;\n }\n if (lena == lenb) {\n return strcmp(aa, bb);\n } else\n return (lena - lenb);\n}\n\nint sort_by_length(const void *a, const void *b) {\n const char *aa = *(const char **)a;\n const char *bb = *(const char **)b;\n if (strlen(aa) == strlen(bb)){\n return strcmp(aa, bb);\n }else \n return (strlen(aa) - strlen(bb));\n}\n\nvoid string_sort(char **arr, const int len,\n int (*cmp_func)(const void *a, const void *b)) {\n qsort(arr, len, sizeof(char *), cmp_func);\n}\n\nint main() \n{\n int n;\n scanf(\"%d\", &n);\n \n char** arr;\n\tarr = (char**)malloc(n * sizeof(char*));\n \n for(int i = 0; i < n; i++){\n *(arr + i) = malloc(1024 * sizeof(char));\n scanf(\"%s\", *(arr + i));\n *(arr + i) = realloc(*(arr + i), strlen(*(arr + i)) + 1);\n }\n\n printf(\"\\n\");\n string_sort(arr, n, lexicographic_sort);\n for(int i = 0; i < n; i++)\n printf(\"%s\\n\", arr[i]);\n printf(\"\\n\");\n\n string_sort(arr, n, lexicographic_sort_reverse);\n for(int i = 0; i < n; i++)\n printf(\"%s\\n\", arr[i]); \n printf(\"\\n\");\n\n string_sort(arr, n, sort_by_length);\n for(int i = 0; i < n; i++)\n printf(\"%s\\n\", arr[i]); \n printf(\"\\n\");\n\n string_sort(arr, n, sort_by_number_of_distinct_characters);\n for(int i = 0; i < n; i++)\n printf(\"%s\\n\", arr[i]); \n printf(\"\\n\");\n}" }, { "alpha_fraction": 0.5120174884796143, "alphanum_fraction": 0.5185724496841431, "avg_line_length": 19.176469802856445, "blob_id": "3482de6f5356345cf94a703ea20e4f37da3e96c5", "content_id": "dc28e6b40de30c56ec0b82acf701191626991862", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1373, "license_type": "no_license", "max_line_length": 81, "num_lines": 68, "path": 
"/interview/ReverseShuffleMerge.py", "repo_name": "Alwaysproblem/simplecode", "src_encoding": "UTF-8", "text": "#!/bin/python3\n\nimport math\nimport os\nimport random\nimport re\nimport sys\n#%%\n#%%\n\n# https://www.hackerrank.com/challenges/reverse-shuffle-merge/problem\n\n\nclass Stack(object):\n def __init__(self, data = None):\n if data == None:\n self.data = []\n else:\n self.data = data\n \n def push(self, data):\n self.data.append(data)\n\n def pop(self):\n return self.data.pop()\n\n @property\n def top(self):\n return self.data[-1]\n\n def isEmpty(self):\n return len(self.data) == 0\n\n def __repr__(self):\n return \"Stack(\" + ','.join(map(str, self.data)) + \")\"\n\nfrom collections import Counter\n# Complete the reverseShuffleMerge function below.\ndef reverseShuffleMerge(s):\n Cnt_A = {}\n Cnt_S = Counter(s)\n S_list = list(s)\n ss = Stack()\n assume = Cnt_S.copy()\n\n for i in Counter(s):\n Cnt_A[i] = Cnt_S[i] // 2\n \n S_list.reverse()\n for c in S_list:\n assume[c] -= 1\n if Cnt_A[c] == 0:\n continue\n \n while not ss.isEmpty() and ss.top > c and assume[ss.top] > Cnt_A[ss.top]:\n a = ss.pop()\n Cnt_A[a] += 1\n \n if Cnt_A[c] != 0:\n ss.push(c)\n Cnt_A[c] -= 1\n\n return ''.join(ss.data)\n\nif __name__ == '__main__':\n s = input()\n result = reverseShuffleMerge(s)\n print(result)\n\n" }, { "alpha_fraction": 0.4797891080379486, "alphanum_fraction": 0.49384886026382446, "avg_line_length": 17.899999618530273, "blob_id": "fa61bfee1df8fd01867d5d8041340dda728ec783", "content_id": "2b11567aa0d5fa7df5a1e1af75f2edcc6873508b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 569, "license_type": "no_license", "max_line_length": 46, "num_lines": 30, "path": "/interview/countValley.py", "repo_name": "Alwaysproblem/simplecode", "src_encoding": "UTF-8", "text": "import math\nimport os\nimport random\nimport re\nimport sys\n\n# Complete the countingValleys function below.\ndef countingValleys(n, s):\n countV 
= 0\n level_h, level = 0, 0\n for c in s:\n if c == \"U\":\n level_h = level\n level += 1\n elif c == \"D\":\n level_h = level\n level -= 1\n else:\n pass\n if level_h < 0 and level == 0:\n countV += 1\n return countV\n\nif __name__ == '__main__':\n n = int(input())\n\n s = input()\n\n result = countingValleys(n, s)\n print(result)\n\n\n" }, { "alpha_fraction": 0.4254143536090851, "alphanum_fraction": 0.4377976655960083, "avg_line_length": 23.872037887573242, "blob_id": "a274a7661d418364327068c79193b40ddb5a4d55", "content_id": "baed03777eadc4ad6ecd7e1e9916279331d28e24", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 5249, "license_type": "no_license", "max_line_length": 87, "num_lines": 211, "path": "/CHack/tmp.c", "repo_name": "Alwaysproblem/simplecode", "src_encoding": "UTF-8", "text": "#include <stdio.h>\n#include <string.h>\n#include <math.h>\n#include <stdlib.h>\n\n// int main(){\n \n// char s[100];\n// scanf(\"%[^\\n]%*c\", &s);\n// printf(\"Hello, World!\\n\");\n// printf(\"%s\", s);\n// /* Enter your code here. Read input from STDIN. Print output to STDOUT\n// */\n// return 0;\n// }\n\n// int main() {\n\n// /* Enter your code here. Read input from STDIN. 
Print output to STDOUT */ \n// int n;\n// scanf(\"%d\", &n);\n// int *arr = (int *) malloc(n * sizeof(int));\n// for(int i = 0; i < n; i += 1){\n// scanf(\"%d%*c\", &arr[i]);\n// }\n// int sum = 0;\n// for (int i = 0; i < n; i += 1) {\n// sum += arr[i];\n// }\n\n// free(arr);\n// return 0;\n// }\n\n// void swap(int *p, int * q){\n// int t = *p;\n// *p = *q;\n// *q = t;\n// }\n\n\nint main()\n{\n int hash[26] = {};\n for(int i = 0; i< 26; i++){\n printf(\"%d\", hash[i]);\n }\n // int num, *arr, i;\n // scanf(\"%d\", &num);\n // arr = (int*) malloc(num * sizeof(int));\n // for(i = 0; i < num; i++) {\n // scanf(\"%d\", arr + i);\n // }\n\n // for (i = 0; i < num / 2 ; i += 1) {\n // swap(&arr[i], &arr[num - i - 1]);\n // }\n\n// /* Write the logic to reverse the array. */\n\n// for(i = 0; i < num; i++)\n// printf(\"%d \", *(arr + i));\n// free(arr);\n// return 0;\n}\n\n// typedef char * string;\n\n// int lenstrarray(string * s){\n// int len = 0;\n// for(;s[len] != NULL; len++);\n// return len;\n// }\n\n// void split(string str, string *strtoken, const string sep) {\n// // automatic ignore the space\n// // int init_size = strlen(str);\n// string ptr = strtok(str, sep);\n// string *tmp = strtoken;\n\n// int len = 0;\n// while (ptr != NULL) {\n// *tmp = ptr;\n// ptr = strtok(NULL, sep);\n// tmp += 1;\n// len += 1;\n// }\n// *tmp = ptr;\n// }\n\n// int main() {\n// string *chop = (string *) malloc(sizeof(string) * 1024);\n// char *s;\n// s = malloc(1024 * sizeof(char));\n// scanf(\"%[^\\n]\", s);\n// s = realloc(s, strlen(s) + 1);\n// //Write your logic to print the tokens of the sentence here.\n// split(s, chop, \" \");\n// int lens = lenstrarray(chop);\n// string *tmp = (string *)realloc(chop, sizeof(string) * (lens + 1));\n// if (tmp == NULL){\n// printf(\"fuck me\");\n// return 0;\n// }\n// chop = tmp;\n// for(int len =0; len < lens; len++){\n// printf(\"%s\\n\", chop[len]);\n// }\n\n\n// return 0;\n// }\n\n\n// int main() {\n// int *count = (int *)calloc(10, 
sizeof(int));\n// char s[1000];\n// scanf(\"%[^\\n]%*c\", s);\n// for(int ind = 0; s[ind] != '\\0'; ind++){\n// if(s[ind] >= '0' && s[ind] =< '9'){\n// count[s[ind] - '0'] += 1;\n// }\n// }\n\n// for(int i = 0; i<10; i += 1){\n// printf(\"%d \", count[i]);\n// }\n\n\n// free(count);\n// /* Enter your code here. Read input from STDIN. Print output to STDOUT */ \n// return 0;\n// }\n\n\n/*\n * This stores the total number of books in each shelf.\n */\n// int* total_number_of_books;\n\n// /*\n// * This stores the total number of pages in each book of each shelf.\n// * The rows represent the shelves and the columns represent the books.\n// */\n// int** total_number_of_pages;\n\n\n\n// int main()\n// {\n// int total_number_of_shelves;\n// scanf(\"%d\", &total_number_of_shelves);\n \n// int total_number_of_queries;\n// scanf(\"%d\", &total_number_of_queries);\n\n// total_number_of_books = (int *) calloc(total_number_of_shelves, sizeof(int));\n// total_number_of_pages = (int **) calloc(total_number_of_shelves, sizeof(int *));\n// for (int ind = 0; ind < total_number_of_shelves; ind += 1){\n// total_number_of_pages[ind] =\n// (int *)malloc(sizeof(int));\n// }\n\n// while (total_number_of_queries--) {\n// int type_of_query;\n// scanf(\"%d\", &type_of_query);\n \n// if (type_of_query == 1) {\n// /*\n// * Process the query of first type here.\n// */\n// int x, y;\n// scanf(\"%d %d\", &x, &y);\n// total_number_of_books[x] += 1;\n// int *p = \n// (int *)realloc(total_number_of_pages[x],\n// total_number_of_books[x]);\n// if (p == NULL){\n// printf(\"fuck\");\n// return -9;\n// }\n// total_number_of_pages[x] = p;\n// total_number_of_pages[x][total_number_of_books[x] - 1] = y;\n\n// } else if (type_of_query == 2) {\n// int x, y;\n// scanf(\"%d %d\", &x, &y);\n// printf(\"%d\\n\", *(*(total_number_of_pages + x) + y));\n// } else {\n// int x;\n// scanf(\"%d\", &x);\n// printf(\"%d\\n\", *(total_number_of_books + x));\n// }\n// }\n\n// if (total_number_of_books) {\n// 
free(total_number_of_books);\n// }\n \n// for (int i = 0; i < total_number_of_shelves; i++) {\n// if (*(total_number_of_pages + i)) {\n// free(*(total_number_of_pages + i));\n// }\n// }\n \n// if (total_number_of_pages) {\n// free(total_number_of_pages);\n// }\n \n// return 0;\n// }\n\n" }, { "alpha_fraction": 0.4992416501045227, "alphanum_fraction": 0.5184530019760132, "avg_line_length": 29.66666603088379, "blob_id": "35be05d99c98c27334798d6966f7b36e72497040", "content_id": "1ec705d56fa118a1df33e0eb8fe3a4080d29c71e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 3964, "license_type": "no_license", "max_line_length": 79, "num_lines": 129, "path": "/leetcode/cpp/minDistance/main.cpp", "repo_name": "Alwaysproblem/simplecode", "src_encoding": "UTF-8", "text": "/*\n * @lc app=leetcode.cn id=72 lang=cpp\n *\n * [72] 编辑距离\n */\n#include <fmt/format.h>\n#include <fmt/ranges.h>\n\n#include <algorithm>\n#include <iostream>\n#include <unordered_map>\n#include <utility>\n#include <vector>\n\nusing namespace std;\n\nvoid print_solution(const string word1, const string word2,\n vector<vector<int>>& action);\n\n// @lc code=start\nclass Solution {\n public:\n enum Choice { INSERT = 0, REPLACE, DELETE, SKIP };\n\n int record_action(int deletion, int insertion, int replacement) {\n Choice c = Choice::DELETE;\n if (deletion > insertion) {\n c = Choice::INSERT;\n }\n if (deletion > replacement) {\n c = Choice::REPLACE;\n }\n return c;\n }\n\n int minDistance(string word1, string word2) {\n vector<vector<int>> dp(word2.size() + 1, vector<int>(word1.size() + 1));\n vector<vector<int>> actions(word2.size() + 1,\n vector<int>(word1.size() + 1));\n Choice c;\n for (int i = 1; i <= word1.size(); i++) {\n dp[0][i] = i;\n actions[0][i] = Choice::DELETE;\n }\n for (int i = 1; i <= word2.size(); i++) {\n dp[i][0] = i;\n actions[i][0] = Choice::INSERT;\n }\n for (int row = 1; row <= word2.size(); row++) {\n for (int col = 1; col <= 
word1.size(); col++) {\n if (word2[row - 1] == word1[col - 1]) {\n dp[row][col] = dp[row - 1][col - 1]; // skip\n actions[row][col] = Choice::SKIP; // skip\n } else {\n dp[row][col] = min(min(dp[row - 1][col], // insertion\n dp[row - 1][col - 1]), // replacement\n dp[row][col - 1] // deletion\n ) +\n 1;\n\n actions[row][col] = record_action(dp[row][col - 1], dp[row - 1][col],\n dp[row - 1][col - 1]);\n }\n }\n }\n // print_solution(word1, word2, actions);\n return dp.back().back();\n }\n};\n// @lc code=end\n\nvoid print_solution(const string word1, const string word2,\n vector<vector<int>>& action) {\n unordered_map<int, string> action_print_table{\n {Solution::Choice::INSERT, \"INSERT\"},\n {Solution::Choice::REPLACE, \"REPLACE\"},\n {Solution::Choice::DELETE, \"DELETE\"},\n {Solution::Choice::SKIP, \"SKIP\"}};\n fmt::print(\"from {} to {}: \\n\", word1, word2);\n int row = word2.size();\n int col = word1.size();\n vector<pair<string, char>> action_order{};\n string a = \"\";\n while (row > 0 && col > 0) {\n if (action[row][col] == Solution::Choice::SKIP) {\n // fmt::print(\"{} \", action_print_table[action[row][col]]);\n row--;\n col--;\n } else if (action[row][col] == Solution::Choice::REPLACE) {\n // fmt::print(\"{} \", action_print_table[action[row][col]]);\n action_order.push_back(make_pair(\"replace\", word1[col - 1]));\n row--;\n col--;\n } else if (action[row][col] == Solution::Choice::DELETE) {\n // fmt::print(\"{} \", action_print_table[action[row][col]]);\n action_order.push_back(make_pair(\"delete\", word1[col - 1]));\n col--;\n } else if (action[row][col] == Solution::Choice::INSERT) {\n // fmt::print(\"{} \", action_print_table[action[row][col]]);\n action_order.push_back(make_pair(\"insert\", word1[col - 1]));\n row--;\n }\n // fmt::print(\"{}\\n\", word1[col]);\n }\n if (row == 0 && col != 0) {\n while (col > 0) {\n action_order.push_back(make_pair(\"delete\", word1[col - 1]));\n col--;\n }\n } else if (row != 0 && col == 0) {\n while (row > 0) {\n 
action_order.push_back(make_pair(\"insert\", word2[row - 1]));\n row--;\n }\n }\n\n reverse(action_order.begin(), action_order.end());\n for (auto& p : action_order) {\n fmt::print(\"action: {}, target: {}\\n\", p.first, p.second);\n }\n}\n\nint main() {\n string word1 = \"fucking\", word2 = \"funk\";\n Solution sol;\n int v = sol.minDistance(word1, word2);\n fmt::print(\"{}\\n\", v);\n return 0;\n}\n" }, { "alpha_fraction": 0.28862276673316956, "alphanum_fraction": 0.3461077809333801, "avg_line_length": 25.09375, "blob_id": "3a210ecd3a925ec9764f70976267e4a7fe0dba20", "content_id": "ab3ae1cdf2c3e58532cdf233aa74b186b5163360", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 843, "license_type": "no_license", "max_line_length": 57, "num_lines": 32, "path": "/leetcode/python/91.解码方法.py", "repo_name": "Alwaysproblem/simplecode", "src_encoding": "UTF-8", "text": "#\n# @lc app=leetcode.cn id=91 lang=python3\n#\n# [91] 解码方法\n#\n\n# @lc code=start\nclass Solution:\n def numDecodings(self, s: str) -> int:\n dp = [ 0 ] * (len(s) + 2)\n dp[0] = 1\n dp[1] = 1\n dp[2] = 1 if s[0] != \"0\" else 0\n\n for i in range(1, len(s)):\n if s[i] == '0':\n if s[i - 1] in (\"1\", \"2\"):\n dp[i + 2] = dp[i + 2 - 2]\n else:\n return 0\n elif s[i - 1] == '1':\n dp[i + 2] = dp[i + 2 - 1] + dp[i + 2 - 2]\n elif s[i - 1] == '2' and 1 <= int(s[i]) <= 6:\n dp[i + 2] = dp[i + 2 - 1] + dp[i + 2 - 2]\n else:\n dp[i + 2] = dp[i + 2 - 1]\n \n return dp[-1]\n # @lc code=end\n\nif __name__ == \"__main__\":\n print(Solution().numDecodings(\"123\"))\n" }, { "alpha_fraction": 0.4798000752925873, "alphanum_fraction": 0.4935443699359894, "avg_line_length": 20.061403274536133, "blob_id": "1a703c1f52963ad6fb06142df3065045563483fc", "content_id": "fc7020a292dad97357ebc68e39857ad0992531e8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 4802, "license_type": "no_license", "max_line_length": 73, 
"num_lines": 228, "path": "/CHack/printd.c", "repo_name": "Alwaysproblem/simplecode", "src_encoding": "UTF-8", "text": "#include <stdio.h>\n#include <string.h>\n#include <math.h>\n#include <stdlib.h>\n\n#define INPUTBUFFER 256\n#define MAXSEG 255\n#define SEGM (MAXSEG - 1)\n#define DEFAULT_DIGITS 2\n\n#define type(var, TYPE) (__builtin_types_compatible_p(typeof(var), TYPE))\n#define len(array, element) (sizeof(array) / sizeof(element))\n\ntypedef char * string;\n\n// we define the char *seg[N] like:\n// the seg[N-1] need to be set NULL.\n\nvoid printChar(char c, const string end)\n{\n // fflush(stdout);\n putchar(c);\n printf(\"%s\", end);\n}\n\nvoid printStr(string var, const string end)\n{\n // fflush(stdout);\n printf(\"%s\", var);\n printf(\"%s\", end);\n}\n\nvoid printInt(int num, const string end)\n{\n // fflush(stdout);\n printf(\"%d\", num);\n printf(\"%s\", end);\n}\n\nvoid printF(double num, const string end)\n{\n // fflush(stdout);\n printf(\"%.*f\", DEFAULT_DIGITS, num);\n printf(\"%s\", end);\n}\n\n#define print(X, sep) do{ \\\n _Generic((X), \\\n char: printChar, \\\n int: printInt, \\\n float: printF, \\\n string : printStr)(X, sep); \\\n }while(0)\n\nint Int(string str)\n{\n return strtol(str, NULL, 10);\n}\n\ndouble Float(string str)\n{\n return strtof(str, NULL);\n}\n\nvoid float2str(string dest, double number){\n sprintf(dest, \"%*f\", DEFAULT_DIGITS, number);\n}\n\nvoid Int2str(string dest, int number){\n sprintf(dest, \"%d\", number);\n}\n\n#define str(dest, number) do{ \\\n _Generic((number), \\\n int: Int2str, \\\n float: float2str)(dest, number); \\\n }while(0)\n\n\ndouble roundF(double number, int ndigits){\n double pow10 = pow(10, (double) ndigits);\n double tmp = number * pow10;\n tmp = round(tmp);\n return tmp / pow10;\n}\n\nvoid split(string str, string *strtoken, const string sep)\n{\n // automatic ignore the space\n // int init_size = strlen(str);\n string ptr = strtok(str, sep);\n string *tmp = strtoken;\n\n int len = 0;\n while (ptr 
!= NULL)\n {\n *tmp = ptr;\n ptr = strtok(NULL, sep);\n tmp += 1;\n len += 1;\n }\n *tmp = ptr;\n}\n\nvoid join(string sep, string dest, string str1, string str2)\n{\n sprintf(dest, \"%s%s\", str1, sep);\n sprintf(dest, \"%s%s\", dest, str2);\n}\n\nvoid strjoin(string sep, string dest, string * s){\n int i = 0;\n strcpy(dest, s[0]);\n for(i = 1; s[i] != NULL; i++){\n join(sep, dest, dest, s[i]);\n }\n}\n\nvoid input(string InputBuffer)\n{\n scanf(\"%[^\\n]%*c\", InputBuffer);\n}\n\n\n// void printF(double num, int ndigits, const string end)\n// {\n// fflush(stdout);\n// printf(\"%.*f\", ndigits, num);\n// printf(\"%s\", end);\n// }\n \n// Merges two subarrays of arr[]. \n// First subarray is arr[l..m] \n// Second subarray is arr[m+1..r] \nvoid merge(int arr[], int l, int m, int r) \n{ \n int i, j, k; \n int n1 = m - l + 1; \n int n2 = r - m; \n \n /* create temp arrays */\n int L[n1], R[n2]; \n \n /* Copy data to temp arrays L[] and R[] */\n for (i = 0; i < n1; i++) \n L[i] = arr[l + i]; \n for (j = 0; j < n2; j++) \n R[j] = arr[m + 1+ j]; \n \n /* Merge the temp arrays back into arr[l..r]*/\n i = 0; // Initial index of first subarray \n j = 0; // Initial index of second subarray \n k = l; // Initial index of merged subarray \n while (i < n1 && j < n2) \n { \n if (L[i] <= R[j]) \n { \n arr[k] = L[i]; \n i++; \n } \n else\n { \n arr[k] = R[j]; \n j++; \n } \n k++; \n } \n \n /* Copy the remaining elements of L[], if there \n are any */\n while (i < n1) \n { \n arr[k] = L[i]; \n i++; \n k++; \n } \n \n /* Copy the remaining elements of R[], if there \n are any */\n while (j < n2) \n { \n arr[k] = R[j]; \n j++; \n k++; \n } \n} \n \n/* l is for left index and r is right index of the \n sub-array of arr to be sorted */\nvoid mergeSort(int arr[], int l, int r) \n{ \n if (l < r) \n { \n // Same as (l+r)/2, but avoids overflow for \n // large l and h \n int m = l+(r-l)/2; \n \n // Sort first and second halves \n mergeSort(arr, l, m); \n mergeSort(arr, m+1, r); \n 
merge(arr, l, m, r); \n } \n} \n\nint main()\n{\n int a = 0;\n int b = 0;\n float x = 0.0;\n float y = 0.0;\n char InputBuffer[INPUTBUFFER];\n string Seg[SEGM];\n input(InputBuffer);\n split(InputBuffer, Seg, \" \");\n a = Int(Seg[0]);\n b = Int(Seg[1]);\n input(InputBuffer);\n split(InputBuffer, Seg, \" \");\n x = Float(Seg[0]);\n y = Float(Seg[1]);\n\n print(a + b, \" \");\n print(a - b, \"\\n\");\n print(x + y, \" \");\n print(x - y, \" \");\n\n return 0;\n}\n" }, { "alpha_fraction": 0.5064338445663452, "alphanum_fraction": 0.5211396813392639, "avg_line_length": 19.528301239013672, "blob_id": "c2c2bb56af42c94f2d26d62faf013ca9675e1c9e", "content_id": "2a42c176caa2a80005623a0b842dc2b17e12231d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1088, "license_type": "no_license", "max_line_length": 49, "num_lines": 53, "path": "/labuladong/cpp/strStr/main.cpp", "repo_name": "Alwaysproblem/simplecode", "src_encoding": "UTF-8", "text": "#include <fmt/format.h>\n#include <fmt/ranges.h>\n\n#include <iostream>\n#include <unordered_map>\n#include <vector>\n\nusing namespace std;\n\nclass Solution {\n public:\n string pat{};\n vector<vector<int>> dp{};\n void KMP(const string &pattern) {\n int all_char_len = 256;\n pat = pattern;\n int M = pat.size();\n dp.assign(M, vector<int>(all_char_len, 0));\n dp[0][pat[0]] = 1;\n int X = 0;\n for (int j = 1; j < M; j++) {\n for (int c = 0; c < all_char_len; c++)\n dp[j][c] = dp[X][c];\n dp[j][pat.at(j)] = j + 1;\n X = dp[X][pat.at(j)];\n }\n // fmt::print(\"{}\\n\", dp);\n }\n\n int search(const string &txt){\n int N = txt.size();\n int M = pat.size();\n int j = 0;\n for (int i = 0; i < N;i++ ){\n j = dp[j][txt.at(i)];\n if (j == M) return i - M + 1;\n }\n return -1;\n }\n\n int strStr(string &haystack, string &needle) { \n KMP(needle);\n return search(haystack);\n }\n};\n\nint main() {\n string haystack = \"leetcode\", needle = \"leet\";\n Solution s;\n int res = 
s.strStr(haystack, needle);\n fmt::print(\"{}\\n\", res);\n return 0;\n}\n" }, { "alpha_fraction": 0.5540540814399719, "alphanum_fraction": 0.5635135173797607, "avg_line_length": 19, "blob_id": "2300fe491d7044c3917ec0d484c99d28b97500bb", "content_id": "7422bd9bdbfd67cf2321e6d2bcf95ac055d12a65", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 740, "license_type": "no_license", "max_line_length": 65, "num_lines": 37, "path": "/interview/LuckBalance.py", "repo_name": "Alwaysproblem/simplecode", "src_encoding": "UTF-8", "text": "#!/bin/python3\n\nimport math\nimport os\nimport random\nimport re\nimport sys\n\n# https://www.hackerrank.com/challenges/luck-balance/problem\n# Complete the luckBalance function below.\ndef luckBalance(k, contests):\n L = [i for i, _ in contests]\n T = [j for _, j in contests]\n\n if k >= len([i for i in T if i != 0]):\n return sum(L)\n else:\n imp = [L[j] for j in range(len(L)) if T[j] != 0]\n imp.sort()\n return sum(L) - 2 * sum(imp[0:len(imp) - k])\n\n\nif __name__ == '__main__':\n nk = input().split()\n\n n = int(nk[0])\n\n k = int(nk[1])\n\n contests = []\n\n for _ in range(n):\n contests.append(list(map(int, input().rstrip().split())))\n\n result = luckBalance(k, contests)\n\n print(result)\n" }, { "alpha_fraction": 0.4834437072277069, "alphanum_fraction": 0.4892384111881256, "avg_line_length": 20.571428298950195, "blob_id": "72972be9ed5fcd616834f08bd231fd8acdb44812", "content_id": "2e4e2c526293e52728644d5353ca32b594860e4d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1208, "license_type": "no_license", "max_line_length": 56, "num_lines": 56, "path": "/labuladong/cpp/findAnagrams/main.cpp", "repo_name": "Alwaysproblem/simplecode", "src_encoding": "UTF-8", "text": "#include <fmt/format.h>\n#include <fmt/ranges.h>\n\n#include <iostream>\n#include <unordered_map>\n#include <vector>\n\nusing namespace std;\n\nclass Solution 
{\n public:\n vector<int> findAnagrams(string s, string p) {\n if (!s.size() || !p.size()) return vector<int>();\n if (s.size() < p.size()) return vector<int>();\n unordered_map<char, int> need{}, window{};\n for (char t : p) need[t]++;\n int left = 0, right = 0;\n char c = '0', d = '0';\n int start = 0, valid = 0;\n vector<int> starts{};\n while (right < s.size()) {\n c = s[right];\n right++;\n\n if (need.count(c)) {\n window[c]++;\n if (need[c] == window[c]) valid++;\n }\n\n fmt::print(\"({}, {})\\n\", left, right);\n\n while (left < right && right - left == p.size()) {\n if (valid == need.size()) {\n start = left;\n starts.push_back(start);\n }\n\n d = s[left];\n left++;\n if (need.count(d)) {\n if (need[d] == window[d]) valid--;\n window[d]--;\n }\n }\n }\n return starts;\n }\n};\n\nint main() {\n Solution sol;\n string s = \"abab\", p = \"ab\";\n vector<int> v = sol.findAnagrams(s, p);\n fmt::print(\"{}\\n\", v);\n return 0;\n}\n" }, { "alpha_fraction": 0.532608687877655, "alphanum_fraction": 0.5681818127632141, "avg_line_length": 21, "blob_id": "14b435e81c017baf01e169902a8338bff99208cd", "content_id": "fc99a2a341df1d470315c629e64c648697da2fa3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1012, "license_type": "no_license", "max_line_length": 64, "num_lines": 46, "path": "/python_Interview/DuplicatedElement.py", "repo_name": "Alwaysproblem/simplecode", "src_encoding": "UTF-8", "text": "import math\nimport os\nimport random\nimport re\nimport sys\n\nimport unittest\n\n# first thought\ndef FindDuplication(W):\n if W == []:\n return -1\n HashT = {}\n for i in W:\n try:\n HashT[i] += 1\n except:\n HashT[i] = 1\n \n for i in HashT:\n if HashT[i] > 1:\n return i\n\n return -1\n\n# Second thought\nfrom operator import xor\nfrom functools import reduce\ndef FindDuplication2(W):\n if len(W) == 0:\n return -1\n a = reduce(xor, W) ^ reduce(xor, [k for k in set(W)])\n return a if a != 0 else -1\n\n\nclass 
Test(unittest.TestCase):\n def testFindDuplication(self):\n self.assertEqual(FindDuplication([1, 1, 2, 3, 4]), 1)\n self.assertEqual(FindDuplication([1, 1]), 1)\n self.assertEqual(FindDuplication([1, 2, 3, 4]), -1)\n self.assertEqual(FindDuplication([1, 3, 4, 2, 5, 3]), 3)\n self.assertEqual(FindDuplication([]), -1)\n\nif __name__ == \"__main__\":\n unittest.main()\n # FindDuplication([1, 2, 3, 4])\n" }, { "alpha_fraction": 0.4293680191040039, "alphanum_fraction": 0.4479553997516632, "avg_line_length": 20.559999465942383, "blob_id": "e51f68274d1b10e81c3e5683ae86b15b9cb69166", "content_id": "aae3a97f8b8233890b54e3a3ab316a39c1d2f01f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 548, "license_type": "no_license", "max_line_length": 51, "num_lines": 25, "path": "/leetcode/python/131.分割回文串.py", "repo_name": "Alwaysproblem/simplecode", "src_encoding": "UTF-8", "text": "#\n# @lc app=leetcode.cn id=131 lang=python3\n#\n# [131] 分割回文串\n#\nfrom typing import * \n# @lc code=start\nclass Solution:\n def partition(self, s: str) -> List[List[str]]:\n res = []\n\n def helper(s, tmp):\n if not s:\n res.append(tmp)\n \n for i in range(1, len(s) + 1):\n if s[:i] == s[:i][::-1]:\n helper(s[i:], tmp + [s[:i]])\n helper(s, [])\n\n return res\n# @lc code=end\n\nif __name__ == \"__main__\":\n print(Solution().partition(\"aab\"))" }, { "alpha_fraction": 0.5214484930038452, "alphanum_fraction": 0.5325905084609985, "avg_line_length": 20.117647171020508, "blob_id": "30f8d717f80d978e95bf41d1df9c5c1efb5d8ea5", "content_id": "4d758f15d9af9dd6edf81fc0132e631e5da9709c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1807, "license_type": "no_license", "max_line_length": 69, "num_lines": 85, "path": "/leetcode/cpp/getRandom_LL/main.cpp", "repo_name": "Alwaysproblem/simplecode", "src_encoding": "UTF-8", "text": "/*\n * @lc app=leetcode.cn id=382 lang=cpp\n *\n * [382] 链表随机节点\n 
*/\n#include <fmt/format.h>\n#include <fmt/ranges.h>\n\n#include <iostream>\n#include <unordered_map>\n#include <vector>\n\n#include \"common_types/LinkedList/LinkedList.h\"\n\nusing namespace std;\nusing ListNode = LinkedListNode<int>;\n\n// @lc code=start\nclass Solution {\n public:\n ListNode *head_;\n Solution(ListNode *head) : head_(head) {}\n\n int getRandom(ListNode *cur_head = nullptr) {\n if (!cur_head) {\n cur_head = head_;\n }\n int i = 0, res = 0;\n for (ListNode *cur = cur_head; cur != nullptr; cur = cur->next) {\n i++;\n if (0 == (rand() % i)) {\n res = cur->val;\n }\n }\n return res;\n }\n vector<int> getKRandom(int k = 1, ListNode *cur_head = nullptr) {\n if (!cur_head) {\n cur_head = head_;\n }\n vector<int> res(k);\n ListNode *p = cur_head;\n for (int i = 0; i < k; i++) {\n res[i] = p->val;\n p = p->next;\n }\n\n int i = 0, j = 0;\n for (ListNode *cur = cur_head; cur != nullptr; cur = cur->next) {\n i++;\n j = rand() % i;\n if (k > j) {\n res[j++] = cur->val;\n }\n }\n return res;\n }\n};\n// @lc code=end\n\nint main() {\n vector<int> v = {1, 2, 3, 4, 5};\n ListNode *head = BuildLinkedlist<int>(v);\n showLinkedList<int>(head);\n Solution sol(head);\n int r = sol.getRandom();\n fmt::print(\"{}\\n\", r);\n r = sol.getRandom();\n fmt::print(\"{}\\n\", r);\n r = sol.getRandom();\n fmt::print(\"{}\\n\", r);\n r = sol.getRandom();\n fmt::print(\"{}\\n\", r);\n int k = 3;\n vector<int> res = sol.getKRandom(k);\n fmt::print(\"{}\\n\", res);\n res = sol.getKRandom(k);\n fmt::print(\"{}\\n\", res);\n res = sol.getKRandom(k);\n fmt::print(\"{}\\n\", res);\n res = sol.getKRandom(k);\n fmt::print(\"{}\\n\", res);\n DestroyLinkedlist<int>(head);\n return 0;\n}\n" }, { "alpha_fraction": 0.4058009088039398, "alphanum_fraction": 0.46586301922798157, "avg_line_length": 28.7308292388916, "blob_id": "68bc7517361947117f04552b953dfa13817fb844", "content_id": "cd7177a81851bd794806ca3d791d21838e7bbf26", "detected_licenses": [], "is_generated": false, "is_vendor": false, 
"language": "Python", "length_bytes": 18997, "license_type": "no_license", "max_line_length": 79, "num_lines": 639, "path": "/AGIHack/SearchPack.py", "repo_name": "Alwaysproblem/simplecode", "src_encoding": "UTF-8", "text": "# assume the graph is like 2 patterns.\n\n# vertex vertex weight\n\n# directly matrix\n# from collections import deque\n\nfrom functools import partial\nfrom math import inf, sqrt\n\n\n#%%\nclass Stack(object):\n\n def __init__(self, data=None):\n if data == None:\n self.data = []\n else:\n self.data = data\n\n def push(self, data):\n self.data.append(data)\n\n def pop(self):\n return self.data.pop()\n\n @property\n def top(self):\n return self.data[-1]\n\n def isEmpty(self):\n return len(self.data) == 0\n\n def __repr__(self):\n return \"Stack(\" + ', '.join(map(str, self.data)) + \")\"\n\n def __str__(self):\n return \"Stack(\" + ', '.join(map(str, self.data)) + \")\"\n\n def __iter__(self):\n return self\n\n def __next__(self):\n if self.isEmpty():\n raise StopIteration\n else:\n return self.pop()\n\n def __contains__(self, item):\n return item in self.data\n\n\n#%%\nclass Queue(object):\n\n def __init__(self, data=None):\n if data == None:\n self.data = []\n else:\n self.data = data\n\n def Enqueue(self, item):\n self.data.append(item)\n\n def Dequeue(self):\n return self.data.pop(0)\n\n def qsize(self):\n return len(self.data)\n\n def isEmpty(self):\n return len(self.data) == 0\n\n def __repr__(self):\n return \"Queue(\" + ', '.join(map(str, self.data)) + \")\"\n\n def __str__(self):\n return \"Queue(\" + ', '.join(map(str, self.data)) + \")\"\n\n def __iter__(self):\n return self\n\n def __next__(self):\n if self.isEmpty():\n raise StopIteration\n else:\n return self.Dequeue()\n\n def __contains__(self, item):\n return item in self.data\n\n def __len__(self):\n return len(self.data)\n\n\n#%%\nimport bisect\n\n\nclass PriorityQueue(object):\n\n def __init__(self, key=lambda x: x, reverses=False):\n self.key = key\n self.data = []\n 
self.reverses = reverses\n\n def Enqueue(self, item):\n # heappush(self.data, (self.key(item), item))\n bisect.insort(self.data, (self.key(item) if not self.reverses else -1 *\n self.key(item), item))\n\n def Dequeue(self):\n # return heappop(self.data)[1]\n return self.data.pop(0)[1]\n\n def isEmpty(self):\n return len(self.data) == 0\n\n def qsize(self):\n return len(self.data)\n\n def __repr__(self):\n return \"PriorityQueue(\" + ', '.join(map(\n str, [i[1] for i in self.data])) + \")\"\n\n def __str__(self):\n return \"PriorityQueue(\" + ', '.join(map(\n str, [i[1] for i in self.data])) + \")\"\n\n def __iter__(self):\n return self\n\n def __next__(self):\n if self.isEmpty():\n raise StopIteration\n else:\n return self.Dequeue()\n\n def __contains__(self, item):\n return any([item == pair[1] for pair in self.data])\n\n def __getitem__(self, key):\n for _, item in self.data:\n if item == key:\n return item\n\n def __delitem__(self, key):\n for i, (_, item) in enumerate(self.data):\n if item == key:\n self.data.pop(i)\n\n def __len__(self):\n return len(self.data)\n\n\n# %%\nclass Vertex(object):\n\n def __init__(self, state=None):\n self.state = state\n\n\nclass Node(Vertex):\n # just for the search\n def __init__(self, idx, cost=0, prev='', state=None):\n super().__init__(state)\n self.index = idx\n self.cost = cost\n self.prev = prev\n\n def __repr__(self):\n return \"Node(\" + str(self.index) + \")\"\n\n def __lt__(self, other):\n return self.index < other.index\n\n def __eq__(self, other):\n return self.index == other.index\n\n def __hash__(self):\n return hash(self.index)\n\n def __index__(self):\n return self.index\n\n\n#%%\nclass Graph():\n\n def __init__(self, g, nv, mode=None, Direct=True, optdic={}, valuedic={}):\n self.Direct = Direct\n self.mode = mode\n self.numV = nv\n self.mapVertices = [\n Vertex(state=valuedic[i] if valuedic != {} else None)\n for i in range(self.numV)\n ]\n self.g = self.encode(g)\n self.opdic = optdic\n\n def encode(self, 
g):\n if self.mode == None:\n return g\n\n elif self.mode == 'Alist':\n graph = {ve: [] for ve in range(self.numV)}\n for v1, v2, weight in g:\n if self.Direct == True:\n graph[v1].append((v2, weight))\n else:\n graph[v1].append((v2, weight))\n graph[v2].append((v1, weight))\n\n return graph\n\n elif self.mode == 'Amatrix':\n graph = [[0] * self.numV for _ in range(self.numV)]\n for v1, v2, weight in g:\n if self.Direct == True:\n graph[v1][v2] = weight\n else:\n graph[v1][v2] = weight\n graph[v2][v1] = weight\n\n return graph\n\n def isCycle(self, start):\n # if self.mode == \"Alist\":\n color = {v: \"white\" for v in self.mapVertices}\n\n def backtrack(u, color):\n\n if color[u] == \"gray\":\n return True\n\n color[u] = \"gray\"\n\n for v in self.g[u]:\n if color[v] == \"gray\":\n return True\n elif color[v] == \"white\" and backtrack(v, color) == True:\n return True\n\n color[u] == \"black\"\n return False\n\n return backtrack(start, color)\n\n def isadjacent(self, v1, v2):\n if self.mode == \"Amatrix\":\n return self.g[v1][v2] != 0\n else:\n return any([i[0] == v2 for i in self.g[v1]])\n\n def __repr__(self):\n s = ''\n if self.mode == \"Amatrix\":\n for i in self.g:\n s += str(i) + '\\n'\n return s\n elif self.mode == \"Alist\":\n for i in self.g:\n s += str(i) + \": \" + str(self.g[i]) + '\\n'\n return s\n else:\n raise NotImplementedError()\n\n\n#%%\nclass Search(object):\n\n def __init__(self, g: Graph, src: Node, dest: Node):\n self.graph = g\n self.src = Node(src, state=self.graph.mapVertices[src].state)\n self.dest = Node(dest, state=self.graph.mapVertices[dest].state)\n self.visited = set()\n self.solfound = False\n self.path = Stack()\n\n def extend(self, node: Node):\n next_nodes = []\n if self.graph.mode == 'Alist':\n for v2, weight in self.graph.g[node.index]:\n next_nodes.append(\n Node(v2,\n cost=node.cost + weight,\n prev=node,\n state=self.graph.mapVertices[v2].state))\n elif self.graph.mode == 'Amatrix':\n nex = [\n v for v in 
range(self.graph.numV)\n if self.graph.isadjacent(node.index, v)\n ]\n for v2 in nex:\n weight = self.graph.g[node][v2]\n next_nodes.append(\n Node(v2,\n cost=node.cost + weight,\n prev=node,\n state=self.graph.mapVertices[v2].state))\n else:\n raise NotImplementedError()\n return next_nodes\n\n def search(self):\n pass\n\n def findPath(self):\n p = self.dest\n while p.prev != \"\":\n self.path.push(p)\n p = p.prev\n else:\n self.path.push(self.src)\n\n def displayPath(self):\n print(self,\n \"->\".join(map(str, [ve for ve in self.path])),\n f\"Path Cost:{self.dest.cost}\",\n sep=\" \")\n\n\nclass BreadthFirstSearch(Search):\n\n def __init__(self, g, src, dest):\n super().__init__(g, src, dest)\n self.queue = Queue()\n\n def search(self):\n # self.pathIndexTable[self.src.index] = self.src\n self.queue.Enqueue(self.src)\n\n while not self.queue.isEmpty() and self.solfound is False:\n cur = self.queue.Dequeue()\n self.visited.add(cur.index)\n\n if self.dest.index == cur.index:\n self.dest = cur\n # self.pathIndexTable[cur.index] = cur\n self.solfound = True\n else:\n for n_ in self.extend(cur):\n if n_.index not in self.visited and n_ not in self.queue:\n self.queue.Enqueue(n_)\n\n def __repr__(self):\n return \"BreadthFirstSearch: \"\n\n\nclass BestFirstSearch(BreadthFirstSearch):\n\n def __init__(self, g, src, dest):\n super().__init__(g, src, dest)\n self.queue = PriorityQueue(key=self.ScoreFun)\n\n def search(self):\n # self.pathIndexTable[self.src.index] = self.src\n self.queue.Enqueue(self.src)\n\n while not self.queue.isEmpty() and self.solfound is False:\n cur = self.queue.Dequeue()\n self.visited.add(cur.index)\n\n if self.dest.index == cur.index:\n self.dest = cur\n self.solfound = True\n else:\n for n_ in self.extend(cur):\n if n_.index not in self.visited and n_ not in self.queue:\n self.queue.Enqueue(n_)\n elif n_ in self.queue:\n incumbent = self.queue[n_]\n if self.ScoreFun(n_) < self.ScoreFun(incumbent):\n del self.queue[incumbent]\n 
self.queue.Enqueue(n_)\n\n def ScoreFun(self, src):\n raise NotImplementedError()\n\n\nclass UniformCost(BestFirstSearch):\n\n def __init__(self, g, src, dest):\n super().__init__(g, src, dest)\n # self.queue = PriorityQueue(key = self.key)\n\n def ScoreFun(self, src: Node):\n return src.cost\n\n def __repr__(self):\n return \"UniformCost: \"\n\n\nclass BestGreedySearch(BestFirstSearch):\n\n def __init__(self, g, src, dest, criterion):\n super().__init__(g, src, dest)\n self.criterion = criterion\n\n def ScoreFun(self, src: Node):\n return self.criterion(src, self.dest)\n\n def __repr__(self):\n return \"BestGreedySearch: \"\n\n\nclass Astar(BestFirstSearch):\n\n def __init__(self, g, src, dest, criterion):\n super().__init__(g, src, dest)\n self.criterion = criterion\n\n def ScoreFun(self, src: Node):\n return src.cost + self.criterion(src, self.dest)\n\n def __repr__(self):\n return \"Astar: \"\n\n\nclass DepthFirstSearch(Search):\n\n def __init__(self, g, src, dest, key=lambda x: x, order='min'):\n '''\n sorted key for extension. 
reverse is need \n '''\n super().__init__(g, src, dest)\n self.stack = Stack()\n self.key = key\n self.reverse = True if order == 'min' else False\n\n def extend(self, node: Node):\n next_nodes = []\n if self.graph.mode == 'Alist':\n for v2, weight in self.graph.g[node.index]:\n next_nodes.append(\n Node(v2,\n cost=node.cost + weight,\n prev=node,\n state=self.graph.mapVertices[v2].state))\n elif self.graph.mode == 'Amatrix':\n nex = [\n v for v in range(self.graph.numV)\n if self.graph.isadjacent(node.index, v)\n ]\n for v2 in nex:\n weight = self.graph.g[node][v2]\n next_nodes.append(\n Node(v2,\n cost=node.cost + weight,\n prev=node,\n state=self.graph.mapVertices[v2].state))\n else:\n raise NotImplementedError()\n return sorted(next_nodes, key=self.key, reverse=self.reverse)\n\n def search(self):\n # self.pathIndexTable[self.src.index] = self.src\n self.stack.push(self.src)\n\n while not self.stack.isEmpty() and self.solfound is False:\n cur = self.stack.pop()\n self.visited.add(cur.index)\n\n if self.dest.index == cur.index:\n self.dest = cur\n # self.pathIndexTable[cur.index] = cur\n self.solfound = True\n else:\n for n_ in self.extend(cur):\n if n_.index not in self.visited and n_ not in self.stack:\n self.stack.push(n_)\n\n def __repr__(self):\n return \"DepthFirstSearch: \"\n\n\nclass Dijkstra(Search):\n\n def __init__(self, g, src, dest):\n # TODO setting up a table with the node.index\n super().__init__(g, src, dest)\n self.pathTable = [\n Node(i, cost=inf, state=self.graph.mapVertices[i].state)\n for i in range(self.graph.numV)\n ]\n self.queue = Queue()\n\n def relax(self, parent: int, child: int, weight):\n self.pathTable[child].cost = self.pathTable[parent].cost + weight\n self.pathTable[child].prev = self.pathTable[parent]\n\n def extend(self, nodeIdx: int):\n if self.graph.mode == 'Alist':\n return self.graph.g[nodeIdx]\n\n elif self.graph.mode == 'Amatrix':\n return [(v, self.graph.g[nodeIdx][v])\n for v in range(self.graph.numV)\n if 
self.graph.isadjacent(nodeIdx, v)]\n else:\n raise NotImplementedError()\n\n def search(self):\n self.pathTable[self.src].prev = ''\n self.pathTable[self.src].cost = 0\n self.visited.add(self.src.index)\n self.queue.Enqueue(self.src.index)\n\n while len(self.visited) < self.graph.numV and not self.queue.isEmpty():\n cur_indx = self.queue.Dequeue()\n self.visited.add(cur_indx)\n for n_, weight in self.extend(cur_indx):\n if (self.pathTable[cur_indx].cost + weight\n < self.pathTable[n_].cost):\n self.relax(cur_indx, n_, weight)\n self.queue.Enqueue(n_)\n\n self.dest = self.pathTable[self.dest]\n\n def __repr__(self):\n return \"Dijkstra: \"\n\n\nclass MAP(Graph):\n\n def __init__(self, g, nv, pos, mode=None, Direct=True):\n super().__init__(g, nv, mode, Direct, valuedic=pos)\n\n\nif __name__ == \"__main__\":\n nV = 8\n\n V = {\n 0: (0, 0),\n 1: (1, 1),\n 2: (2, 2),\n 3: (1, 0),\n 4: (5, 5),\n 5: (3, 2),\n 6: (2, 1),\n 7: (3, 3)\n }\n\n B = [[0, 1, 1], [0, 2, 1], [0, 3, 1], [1, 2, 1], [1, 4, 1], [2, 4, 1],\n [2, 5, 1], [2, 3, 1], [3, 6, 1], [6, 7, 1]]\n\n W = [[0, 1, 2], [0, 2, 5], [0, 3, 1], [1, 2, 2], [1, 4, 6], [2, 4, 7],\n [2, 5, 1], [2, 3, 4], [3, 6, 3], [6, 7, 4]]\n\n m = MAP(W, nV, V, mode=\"Alist\")\n # m = Graph(B, nV, mode=\"Alist\")\n # print(m)\n src = 0\n dest = 5\n # W = [[15, 8, 1], [7, 10, 41], [7, 9, 34], [9, 4, 31], [12, 13, 50],\n # [14, 3, 52], [4, 11, 99], [4, 7, 86], [10, 13, 57], [9, 6, 10],\n # [1, 7, 51], [7, 15, 38], [1, 9, 11], [12, 7, 94], [9, 13, 34],\n # [11, 7, 79], [7, 6, 28], [5, 3, 34], [2, 6, 97], [14, 1, 97],\n # [6, 10, 90], [12, 10, 37], [13, 3, 73], [11, 14, 7], [15, 1, 39],\n # [6, 5, 90], [13, 6, 43], [6, 9, 32], [4, 6, 45], [11, 10, 2],\n # [2, 13, 4], [14, 15, 29], [1, 14, 88], [14, 6, 19], [6, 2, 29],\n # [3, 14, 72], [1, 15, 4], [11, 5, 2], [6, 7, 56], [8, 7, 88],\n # [13, 14, 70], [14, 12, 58], [14, 2, 86], [11, 3, 57], [5, 2, 56],\n # [3, 10, 26], [2, 11, 21], [14, 5, 54], [5, 12, 40], [14, 4, 81],\n # [15, 2, 99], [5, 7, 
57], [13, 12, 5], [4, 9, 60], [12, 15, 48],\n # [6, 14, 1], [9, 7, 44], [13, 7, 69], [5, 13, 42], [4, 1, 7],\n # [11, 9, 76], [8, 1, 76], [5, 14, 29], [2, 3, 69], [7, 3, 23],\n # [12, 14, 28], [11, 4, 85], [10, 1, 10], [15, 12, 36], [1, 11, 69],\n # [15, 10, 96], [11, 13, 69], [7, 12, 49], [1, 2, 95], [6, 4, 46],\n # [8, 12, 94], [12, 4, 93], [13, 5, 31], [12, 2, 60], [6, 1, 87],\n # [4, 14, 20], [5, 11, 89], [4, 15, 88], [4, 10, 21], [1, 6, 5],\n # [10, 8, 26], [8, 2, 51], [3, 15, 23], [7, 2, 12], [11, 1, 47],\n # [2, 1, 75], [3, 8, 63], [8, 10, 19], [6, 8, 18], [4, 2, 55],\n # [14, 11, 80], [10, 3, 73], [3, 5, 22], [12, 3, 61], [1, 13, 33],\n # [9, 3, 98], [9, 12, 69], [15, 9, 6], [7, 13, 76], [11, 12, 22],\n # [11, 15, 51], [13, 15, 46], [5, 10, 58], [1, 10, 26], [13, 4, 85],\n # [7, 14, 58], [5, 8, 46], [11, 6, 32], [10, 9, 41], [9, 14, 35],\n # [14, 13, 60], [3, 9, 97], [2, 5, 39], [7, 11, 19], [1, 12, 27],\n # [7, 5, 13], [8, 4, 34], [9, 15, 25], [5, 1, 93], [15, 13, 97],\n # [14, 9, 35], [8, 6, 67], [9, 5, 39], [13, 11, 35], [7, 4, 21],\n # [12, 9, 64], [14, 8, 8], [10, 12, 94], [8, 9, 76], [8, 5, 71],\n # [2, 9, 64], [10, 14, 59], [1, 4, 74], [7, 1, 69], [15, 5, 55],\n # [6, 15, 80], [13, 8, 84], [8, 13, 63], [8, 3, 91], [10, 4, 87],\n # [1, 5, 39], [8, 11, 0], [1, 3, 79], [4, 5, 82], [4, 12, 87],\n # [3, 11, 29], [7, 8, 92], [10, 7, 77], [6, 12, 42], [13, 2, 40],\n # [9, 10, 13], [4, 13, 65], [2, 4, 34], [3, 13, 44], [2, 14, 69],\n # [3, 4, 42], [5, 15, 98], [14, 7, 6], [15, 3, 94], [10, 2, 37],\n # [15, 11, 7], [9, 2, 15], [13, 9, 66], [4, 8, 83], [8, 15, 23],\n # [13, 1, 50], [6, 13, 57], [2, 10, 37], [10, 6, 38], [2, 7, 45],\n # [9, 8, 8], [3, 12, 28], [3, 2, 83], [2, 12, 75], [1, 8, 91],\n # [4, 3, 70], [12, 6, 48], [3, 1, 13], [5, 6, 42], [6, 11, 96],\n # [3, 6, 22], [15, 6, 34], [11, 8, 43], [15, 7, 40], [9, 11, 57],\n # [11, 2, 11], [2, 8, 22], [9, 1, 73], [2, 15, 40], [12, 11, 10],\n # [15, 4, 78], [12, 8, 75], [10, 15, 37], [13, 10, 44], [8, 14, 
33],\n # [3, 7, 82], [5, 4, 46], [12, 5, 79], [15, 14, 43], [10, 5, 65],\n # [5, 9, 34], [12, 1, 54], [6, 3, 16], [14, 10, 83], [10, 11, 67]]\n # m = Graph(W, 16, mode=\"Amatrix\")\n # src = 11\n # dest = 3\n\n bfs = BreadthFirstSearch(m, src, dest)\n bfs.search()\n bfs.findPath()\n bfs.displayPath()\n\n dfs = DepthFirstSearch(m, src, dest, lambda x: x.cost)\n dfs.search()\n dfs.findPath()\n dfs.displayPath()\n\n ufc = UniformCost(m, src, dest)\n ufc.search()\n ufc.findPath()\n ufc.displayPath()\n\n Djik = Dijkstra(m, src, dest)\n Djik.search()\n Djik.findPath()\n Djik.displayPath()\n\n def criterion(src: Node, dest: Node):\n xs, ys = src.state\n xd, yd = dest.state\n if src == dest:\n return 0\n else:\n return sqrt((xs - xd)**2 + (ys - yd)**2)\n\n bgs = BestGreedySearch(m, src, dest, criterion)\n bgs.search()\n bgs.findPath()\n bgs.displayPath()\n\n a = Astar(m, src, dest, criterion)\n a.search()\n a.findPath()\n a.displayPath()\n\n#%%" }, { "alpha_fraction": 0.47662976384162903, "alphanum_fraction": 0.5006150007247925, "avg_line_length": 23.26865577697754, "blob_id": "85b8e1020d39c4baf0cc1d59144427b54e30bd0a", "content_id": "87c14335aa33d72fb1bdf623bcf85067b3a62bb8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1640, "license_type": "no_license", "max_line_length": 75, "num_lines": 67, "path": "/leetcode/cpp/lengthOfLIS/main.cpp", "repo_name": "Alwaysproblem/simplecode", "src_encoding": "UTF-8", "text": "/*\n * @lc app=leetcode.cn id=300 lang=cpp\n *\n * [300] 最长递增子序列\n */\n#include <fmt/format.h>\n#include <fmt/ranges.h>\n\n#include <algorithm>\n#include <iostream>\n#include <unordered_map>\n#include <vector>\n\nusing namespace std;\n// @lc code=start\nclass Solution {\n public:\n int lengthOfLIS(vector<int>& nums) {\n vector<int> dp(nums.size() + 1, 1);\n for (int i = 0; i < nums.size(); i++) {\n for (int j = 0; j <= i; j++) {\n if (nums[j] < nums[i]) {\n dp[i] = max(dp[i], dp[j] + 1);\n }\n }\n }\n 
return *max_element(dp.begin(), dp.end());\n }\n int lengthOfLIS_nlogn(vector<int>& nums) {\n // vector<int> top(nums.size());\n vector<vector<int>> top(nums.size(), vector<int>(nums.size(), 123456));\n // top.reserve(nums.size());\n int piles = 0;\n for (int i = 0; i < nums.size(); i++) {\n int poker = nums[i];\n\n int left = 0, right = piles;\n while (right > 0 && left < right) {\n int mid = left + (right - left) / 2;\n if (top[mid][0] == poker) {\n right = mid;\n } else if (top[mid][0] > poker) {\n right = mid;\n } else if (top[mid][0] < poker) {\n left = mid + 1;\n }\n }\n\n if (left == piles) piles++;\n // top[left] = poker;\n top[left].push_back(poker);\n push_heap(top[left].begin(), top[left].end(),\n [](int x, int y) { return x > y; });\n }\n // fmt::print(\"{}\", top);\n return piles;\n }\n};\n// @lc code=end\n\nint main() {\n Solution sol;\n vector<int> nums = {10, 9, 2, 5, 3, 7, 101, 18};\n int v = sol.lengthOfLIS_nlogn(nums);\n fmt::print(\"{}\\n\", v);\n return 0;\n}\n" }, { "alpha_fraction": 0.4833451509475708, "alphanum_fraction": 0.5010630488395691, "avg_line_length": 22.147541046142578, "blob_id": "12064e6f132d31d65d82d89b47e7f5176c993cb3", "content_id": "9044828db3eb20ea742c63f18e1b36d019080216", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1411, "license_type": "no_license", "max_line_length": 152, "num_lines": 61, "path": "/interview/minimumSwap.py", "repo_name": "Alwaysproblem/simplecode", "src_encoding": "UTF-8", "text": "#!/bin/python3\n\nimport math\nimport os\nimport random\nimport re\nimport sys\n\n# https://www.hackerrank.com/challenges/minimum-swaps-2/problem?h_l=interview&playlist_slugs%5B%5D=interview-preparation-kit&playlist_slugs%5B%5D=arrays\n\n# Complete the minimumSwaps function below.\ndef minimumSwaps(arr):\n vis = {k:False for k in range(1, len(arr) + 1)}\n arrops = [(i, v) for i, v in enumerate(arr, 1)]\n arrops.sort(key = lambda x: x[1])\n ans = 0\n for i in 
range(1, n):\n ind, val = arrops[i - 1]\n if ind != val and vis[i] == False:\n j = i\n cycle_size = 0\n while not vis[j]:\n ind, val = arrops[j - 1]\n vis[val] = True\n j = ind\n cycle_size += 1\n ans += cycle_size - 1\n return ans\n\n#my own code\n# def minimumSwaps(arr):\n# b = arr.copy()\n# b.sort()\n# cnt = 0\n# s = []\n\n# g = dict(zip(b, arr))\n\n# unvisit = set(arr)\n\n# while len(unvisit) != 0:\n# t = unvisit.pop()\n# s = [t]\n# while len(s) != 0 and g[s[-1]] != s[0]:\n# unvisit.remove(g[s[-1]])\n# s.append(g[s[-1]])\n# cnt += 1\n \n# return cnt\n\n\nif __name__ == '__main__':\n # fptr = open(os.environ['OUTPUT_PATH'], 'w')\n\n n = int(input())\n\n arr = list(map(int, input().rstrip().split()))\n\n res = minimumSwaps(arr)\n\n print(res)" }, { "alpha_fraction": 0.36631184816360474, "alphanum_fraction": 0.39323732256889343, "avg_line_length": 20.58108139038086, "blob_id": "317d46d2537ff5757f2900a4b779b424ad3bf55d", "content_id": "dcc73c390d8883251f63af7c09bba39f37c03cd4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1607, "license_type": "no_license", "max_line_length": 60, "num_lines": 74, "path": "/interview/Common_Child.py", "repo_name": "Alwaysproblem/simplecode", "src_encoding": "UTF-8", "text": "#!/bin/python3\n\nimport math\nimport os\nimport random\nimport re\nimport sys\n\n# https://www.hackerrank.com/challenges/common-child/problem\n\ndef printarr(s):\n m, n = len(s), len(s[0])\n for i in range(m):\n for j in range(n):\n print(s[i][j], end = ' ')\n print()\n\ndef commonChild(s1, s2, show = False):\n m, n = len(s1), len(s2)\n c = [[0 for i in range(n + 1)] for j in range(m + 1)]\n d = [['0' for i in range(n + 1)] for j in range(m + 1)]\n setl = []\n\n for i, c1 in enumerate(s1, 1):\n for j, c2 in enumerate(s2, 1):\n if c1 == c2:\n c[i][j] = c[i - 1][j - 1] + 1\n d[i][j] = \"↖\"\n else:\n c[i][j] = max(c[i - 1][j], c[i][j - 1])\n if c[i - 1][j] >= c[i][j - 1]:\n d[i][j] = \"↑\"\n else:\n 
d[i][j] = \"←\"\n\n if show == True:\n printarr(c)\n print()\n printarr(d)\n print(''.join(setl))\n a = m\n b = n\n while a > 0 and b > 0:\n if d[a][b] == \"↖\":\n setl.append(s1[a-1])\n a -= 1\n b -= 1\n elif d[a][b] == \"↑\":\n a -= 1\n else:\n b -= 1\n setl.reverse()\n print(''.join(setl))\n return ''.join(setl)\n\n return c[-1][-1]\n\nif __name__ == '__main__':\n\n s1 = input()\n\n s2 = input()\n\n result = commonChild(s1, s2, True)\n\n print(result)\n # fptr.write(str(result) + '\\n')\n\n # fptr.close()\n\n#%%\n# c = [[None for i in range(3)] for j in range(4)]\n\n#%%\n" }, { "alpha_fraction": 0.6070685982704163, "alphanum_fraction": 0.6174635887145996, "avg_line_length": 21.372093200683594, "blob_id": "c7721473b44eb0746823f5a3db91dba55c7ed2ac", "content_id": "2285374120ff99e0c2f9fc79b921a5f2d2b8ad2a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1932, "license_type": "no_license", "max_line_length": 70, "num_lines": 86, "path": "/leetcode/cpp/reverseList/main.cpp", "repo_name": "Alwaysproblem/simplecode", "src_encoding": "UTF-8", "text": "/*\n * @lc app=leetcode.cn id=206 lang=cpp\n *\n * [206] 反转链表\n */\n#include <fmt/format.h>\n#include <fmt/ranges.h>\n\n#include <iostream>\n#include <unordered_map>\n#include <vector>\n\n#include \"common_types/LinkedList/LinkedList.h\"\n\nusing namespace std;\nusing ListNode = LinkedListNode<int>;\n\n// @lc code=start\nclass Solution {\n public:\n // method 1\n ListNode* reverseList(ListNode* head) {\n if (!head) return nullptr;\n if (!head->next) {\n return head;\n }\n ListNode* tail = reverseList(head->next);\n head->next->next = head;\n head->next = nullptr;\n return tail;\n }\n\n // method 2\n // `ListNode* &tail_ptr` This is the reference of ListNode ptr.\n // with this `tail_ptr = head;` can be worked or every time the code\n // use the `tail_ptr` will be a copy of value not reference.\n ListNode* reverseList1_helper(ListNode* head, ListNode*& tail_ptr) {\n if 
(!head) return nullptr;\n if (!head->next) {\n tail_ptr = head;\n return head;\n }\n ListNode* tail = reverseList1_helper(head->next, tail_ptr);\n tail->next = head;\n head->next = nullptr;\n return head;\n }\n\n ListNode* reverseList1(ListNode* head) {\n ListNode* tail_ptr = nullptr;\n reverseList1_helper(head, tail_ptr);\n return tail_ptr;\n }\n\n // method 3\n ListNode* reverseList2(ListNode* head) {\n if (!head) return nullptr;\n if (!head->next) {\n return head;\n }\n ListNode* cur = head;\n ListNode* nex = nullptr;\n ListNode* pre = nullptr;\n while (cur->next) {\n nex = cur->next;\n cur->next = pre;\n pre = cur;\n cur = nex;\n }\n cur->next = pre;\n return cur;\n }\n};\n// @lc code=end\n\nint main() {\n vector<int> v = {1, 2, 3, 4, 5};\n ListNode* head = BuildLinkedlist<int>(v);\n showLinkedList<int>(head);\n Solution sol;\n ListNode* r = sol.reverseList(head);\n showLinkedList<int>(r);\n DestroyLinkedlist<int>(head);\n // DestroyLinkedlist<int>(r);\n return 0;\n}\n" }, { "alpha_fraction": 0.6456758975982666, "alphanum_fraction": 0.6599496006965637, "avg_line_length": 23.8125, "blob_id": "90916c7dffd33889727e2870071b1d857864eebc", "content_id": "5ab2cac1a13f9481ad4b677955d6b5592bd9ce48", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1205, "license_type": "no_license", "max_line_length": 72, "num_lines": 48, "path": "/leetcode/cpp/isValidBST/main.cpp", "repo_name": "Alwaysproblem/simplecode", "src_encoding": "UTF-8", "text": "/*\n * @lc app=leetcode.cn id=98 lang=cpp\n *\n * [98] 验证二叉搜索树\n */\n#include <fmt/format.h>\n#include <fmt/ranges.h>\n\n#include <iostream>\n#include <queue>\n#include <string>\n#include <unordered_set>\n#include <vector>\n\n#include \"common_types/TreeNode/BinaryTreeNode.h\"\n\nusing namespace std;\nusing TreeNode = BinaryTree::BinaryTreeNode<int>;\nusing BinaryTree::BuildBinaryTree;\nusing BinaryTree::showBinaryTree;\n\n// @lc code=start\nclass Solution {\n public:\n bool 
isValidBST_helper(TreeNode *root, TreeNode *min, TreeNode *max) {\n if (!root) return true;\n if (min && min->val >= root->val) return false;\n if (max && max->val <= root->val) return false;\n return isValidBST_helper(root->left, min, root) &&\n isValidBST_helper(root->right, root, max);\n }\n bool isValidBST(TreeNode *root) {\n return isValidBST_helper(root, nullptr, nullptr);\n }\n};\n// @lc code=end\n\nint main() {\n const int null = BinaryTree::null<int>();\n vector<int> a{5, 4, 6, null, null, 3, 7};\n // vector<int> a{3, 7, 15, null, null, 9, 20};\n TreeNode *root = BuildBinaryTree<int>(a);\n showBinaryTree<int>(root);\n Solution sol;\n bool v = sol.isValidBST(root);\n fmt::print(\"{}\\n\", v);\n return 0;\n}\n" }, { "alpha_fraction": 0.35310935974121094, "alphanum_fraction": 0.37741243839263916, "avg_line_length": 24.454545974731445, "blob_id": "64bf2cc2671988e4ae8b7720d73945c5da6361d1", "content_id": "9585c4ecc082ba0daf016f84935b2a1eb64c8011", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1415, "license_type": "no_license", "max_line_length": 73, "num_lines": 55, "path": "/leetcode/python/150.逆波兰表达式求值.py", "repo_name": "Alwaysproblem/simplecode", "src_encoding": "UTF-8", "text": "#\n# @lc app=leetcode.cn id=150 lang=python3\n#\n# [150] 逆波兰表达式求值\n#\nfrom typing import *\n# @lc code=start\nfrom operator import add, sub, mul, truediv\nclass Solution:\n def evalRPN(self, tokens: List[str]) -> int:\n\n if not tokens:\n return 0\n cur = tokens.copy()\n op = {\n \"+\": add, \n \"-\": sub, \n \"*\": mul, \n \"/\": truediv\n }\n\n sta = []\n\n while cur:\n \n if len(sta) == 0 or sta[-1] not in (\"/\", \"*\", \"+\", \"-\"):\n sta.append(cur.pop(0))\n \n elif sta[-1] in (\"/\", \"*\", \"+\", \"-\"):\n ops = sta.pop()\n num2 = int(sta.pop())\n num1 = int(sta.pop())\n ans = str(int(op[ops](num1, num2)))\n # if ops == \"/\" and num1 * num2 < 0 and num1 % num2 != 0:\n # ans = str(op[ops](num1, num2) + 1)\n 
sta.append(ans)\n else:\n if len(sta) == 3:\n ops = sta.pop()\n num2 = int(sta.pop())\n num1 = int(sta.pop())\n sta.append(str(int(op[ops](num1, num2))))\n \n if len(sta) != 1:\n return 0\n else:\n ans = sta.pop()\n return int(ans)\n\n\n\n# @lc code=end\n\nif __name__ == \"__main__\":\n print(Solution().evalRPN([\"0\",\"3\",\"/\"]))" }, { "alpha_fraction": 0.5188405513763428, "alphanum_fraction": 0.5373913049697876, "avg_line_length": 24, "blob_id": "3c8f56403bc5d3261ab8de33068073a9ba6cb926", "content_id": "77fa9424c9cd1d5cee85e38d045408708ef73d4b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1725, "license_type": "no_license", "max_line_length": 168, "num_lines": 69, "path": "/interview/CountTriplets.py", "repo_name": "Alwaysproblem/simplecode", "src_encoding": "UTF-8", "text": "#!/bin/python3\n#%%\nimport math\nimport os\nimport random\nimport re\nimport sys\n\n# https://www.hackerrank.com/challenges/count-triplets-1/problem?h_l=interview&playlist_slugs%5B%5D=interview-preparation-kit&playlist_slugs%5B%5D=dictionaries-hashmaps\n\nfrom collections import Counter\n\ndef countTriplets(arr, r):\n # r2 = Counter()\n # r3 = Counter()\n r2 = {}\n r3 = {}\n count = 0\n \n for v in arr:\n # the new value is the third position of the triples:\n # optimal + $r^3[v]$ is Count number.\n if v in r3:\n try:\n count += r3[v]\n except KeyError:\n count = r3[v]\n \n # the new value is the second position of the triples:\n # the possible way increase by r^2[v] which is already found the count number when this value is in second position\n if v in r2:\n try:\n r3[v*r] += r2[v]\n except KeyError:\n r3[v*r] = r2[v]\n\n # the new value is the first position of the triples\n # the possible way increase by 1, which means that it is start by 1 path.\n try:\n r2[v*r] += 1\n except KeyError:\n r2[v*r] = 1\n\n return count\n\n# def countTriplets(arr, r):\n# c = 0\n# for ind, v in enumerate(arr):\n# if v % r == 0:\n# lefti = 
Counter(arr[:ind])\n# righti = Counter(arr[ind + 1:])\n# c += lefti[v/r] * righti[v*r]\n# return c\n\n\nif __name__ == '__main__':\n # fptr = open(os.environ['OUTPUT_PATH'], 'w')\n\n nr = input().rstrip().split()\n\n n = int(nr[0])\n\n r = int(nr[1])\n\n arr = list(map(int, input().rstrip().split()))\n\n ans = countTriplets(arr, r)\n\n print(ans)\n" }, { "alpha_fraction": 0.4805653691291809, "alphanum_fraction": 0.49293285608291626, "avg_line_length": 20.80769157409668, "blob_id": "9264edd238dfedeb653704cecc1fb24a329e9ff5", "content_id": "d7f48a6fed3db0acdc077bbc17dece2b1c14d877", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 586, "license_type": "no_license", "max_line_length": 58, "num_lines": 26, "path": "/leetcode/python/3.无重复字符的最长子串.py", "repo_name": "Alwaysproblem/simplecode", "src_encoding": "UTF-8", "text": "#\n# @lc app=leetcode.cn id=3 lang=python3\n#\n# [3] 无重复字符的最长子串\n#\n\n# @lc code=start\nclass Solution:\n def lengthOfLongestSubstring(self, s: str) -> int:\n windows = []\n maxLen = 0\n n = len(s)\n i = 0\n while i < n:\n if s[i] not in windows:\n windows.append(s[i])\n maxLen = max(maxLen, len(windows))\n i += 1\n else:\n windows.pop(0)\n return maxLen\n\n# @lc code=end\n\nif __name__ == '__main__':\n print(Solution().lengthOfLongestSubstring(\"abcabcbb\"))" }, { "alpha_fraction": 0.6079664826393127, "alphanum_fraction": 0.6205450892448425, "avg_line_length": 21.714284896850586, "blob_id": "4ab0cbf04dcaeb6fc7067655fdf7783d46b04719", "content_id": "4a0b31bd732a0dd88cc3fca761b9fd97e0669c59", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 954, "license_type": "no_license", "max_line_length": 158, "num_lines": 42, "path": "/interview/MinimumTimeRequired.py", "repo_name": "Alwaysproblem/simplecode", "src_encoding": "UTF-8", "text": "#!/bin/python3\n\nimport math\nimport os\nimport random\nimport re\nimport sys\n\n# Complete the minTime 
function below.\n# https://www.hackerrank.com/challenges/minimum-time-required/problem?h_l=interview&playlist_slugs%5B%5D=interview-preparation-kit&playlist_slugs%5B%5D=search\ndef totalProducts(numD, machines):\n return sum(map(lambda x: numD // x, machines))\n\ndef minTime(machines, goal):\n machines.sort()\n\n upper = goal // (len(machines) / machines[-1]) + 1\n lower = (goal // (len(machines) / machines[0]))\n\n while lower < upper:\n numD = (upper + lower) // 2\n total = totalProducts(numD, machines)\n if total < goal:\n lower = numD + 1\n else:\n upper = numD\n\n return int(lower)\n\nif __name__ == '__main__':\n\n nGoal = input().split()\n\n n = int(nGoal[0])\n\n goal = int(nGoal[1])\n\n machines = list(map(int, input().rstrip().split()))\n\n ans = minTime(machines, goal)\n\n print(ans)\n" }, { "alpha_fraction": 0.4784688949584961, "alphanum_fraction": 0.5023923516273499, "avg_line_length": 19.950000762939453, "blob_id": "2a7dcac00e1b3af61be46ec1453b4bde7f3e5a88", "content_id": "4377605e71f19a6171a7a19977ac7fc7d73dfd23", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 418, "license_type": "no_license", "max_line_length": 67, "num_lines": 20, "path": "/leetcode/python/rotate.py", "repo_name": "Alwaysproblem/simplecode", "src_encoding": "UTF-8", "text": "from collections import Counter, defaultdict\nfrom typing import List\n\ndef rotate(matrix):\n n = len(matrix)\n for i in range(n):\n for j in range(i, n):\n matrix[i][j], matrix[j][i] = matrix[j][i], matrix[i][j]\n for m in matrix:\n m.reverse()\n\nif __name__ == \"__main__\":\n matrix = [\n [1,2,3],\n [4,5,6],\n [7,8,9]\n ]\n \n rotate(matrix)\n print(matrix[::-1])" }, { "alpha_fraction": 0.42391303181648254, "alphanum_fraction": 0.45652174949645996, "avg_line_length": 17.399999618530273, "blob_id": "dcf2af05115410e96522d299dc17b98576cc28e6", "content_id": "37a1597b7895e38079b7fbb66bf1cde35cbb1c65", "detected_licenses": [], "is_generated": false, 
"is_vendor": false, "language": "Python", "length_bytes": 276, "license_type": "no_license", "max_line_length": 37, "num_lines": 15, "path": "/leetcode/python/canJump.py", "repo_name": "Alwaysproblem/simplecode", "src_encoding": "UTF-8", "text": "\ndef canJump(nums) -> bool:\n maxs = 0\n n = len(nums)\n\n for i in range(n - 1):\n if maxs < i:\n return False\n maxs = max(maxs, nums[i] + i)\n\n if maxs >= n - 1:\n return True\n\n return maxs >= n - 1\n\nprint(canJump([2,3,1,1,4]))" }, { "alpha_fraction": 0.5230769515037537, "alphanum_fraction": 0.5353845953941345, "avg_line_length": 18.5, "blob_id": "1ebcc0ff3edab3c253b82a8b9fa5468bf417de5a", "content_id": "ca2e9081f1d758ccaae931398a16c64a51e9cb81", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 995, "license_type": "no_license", "max_line_length": 67, "num_lines": 50, "path": "/leetcode/cpp/lengthOfLongestSubstring/main.cpp", "repo_name": "Alwaysproblem/simplecode", "src_encoding": "UTF-8", "text": "/*\n * @lc app=leetcode.cn id=3 lang=cpp\n *\n * [3] 无重复字符的最长子串\n */\n#include <fmt/format.h>\n#include <fmt/ranges.h>\n\n#include <iostream>\n#include <unordered_map>\n#include <vector>\n\nusing namespace std;\n\n// @lc code=start\nclass Solution {\n public:\n int lengthOfLongestSubstring(string s) {\n if (!s.size()) return 0;\n const int _INT_MIN_ = 0;\n unordered_map<char, int> window{};\n int left = 0, right = 0;\n char c = 0, d = 0;\n int start = 0, len = _INT_MIN_;\n while (right < s.size()) {\n char c = s[right];\n right++;\n window[c]++;\n if (window[c] == 1 && right - left > len) len = right - left;\n\n // fmt::print(\"({}, {})\", left, right);\n\n while (left < right && window[c] > 1) {\n char d = s[left];\n left++;\n window[d]--;\n }\n }\n return len;\n }\n};\n// @lc code=end\n\nint main() {\n Solution sol;\n string s = \"pwwkew\";\n int v = sol.lengthOfLongestSubstring(s);\n fmt::print(\"{}\\n\", v);\n return 0;\n}\n" }, { "alpha_fraction": 
0.44431817531585693, "alphanum_fraction": 0.45681819319725037, "avg_line_length": 24.852941513061523, "blob_id": "57cd2df7f9f7b5ce10a05d7d91752cea33b853ec", "content_id": "a3d6e979ae64831f76ad3be76b0890bb570e7f9c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 888, "license_type": "no_license", "max_line_length": 66, "num_lines": 34, "path": "/leetcode/python/75.颜色分类.py", "repo_name": "Alwaysproblem/simplecode", "src_encoding": "UTF-8", "text": "#\n# @lc app=leetcode.cn id=75 lang=python3\n#\n# [75] 颜色分类\n#\nfrom typing import List\n# note Counting sort\n# @lc code=start\nclass Solution:\n def sortColors(self, nums: List[int]) -> None:\n \"\"\"\n Do not return anything, modify nums in-place instead.\n \"\"\"\n def CountSort(nums):\n # if min(nums) > 0:\n # cnt_list = [0] * max(nums)\n # else:\n cnt_list = [0] * (abs(max(nums)) + abs(min(nums)) + 1)\n delta = min(nums)\n res = []\n\n for i in nums:\n cnt_list[i - delta] += 1\n \n for idx, cnt in enumerate(cnt_list):\n if cnt != 0:\n res += [idx + delta] * cnt\n \n return res\n res = CountSort(nums)\n for idx, value in enumerate(res):\n nums[idx] = value\n\n# @lc code=end\n\n" }, { "alpha_fraction": 0.3374301791191101, "alphanum_fraction": 0.3631284832954407, "avg_line_length": 23.16216278076172, "blob_id": "fc711de0d0dd9fab43688ab102f93552383fe0c6", "content_id": "b48c134f11e120758305175c7ad38aef18ac6237", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 895, "license_type": "no_license", "max_line_length": 73, "num_lines": 37, "path": "/CHack/printsqrt.c", "repo_name": "Alwaysproblem/simplecode", "src_encoding": "UTF-8", "text": "#include <stdio.h>\n#include <string.h>\n#include <math.h>\n#include <stdlib.h>\n\ntypedef char * string;\n\nvoid printInt(int num, const string end) {\n printf(\"%d\", num);\n printf(\"%s\", end);\n}\n\nint main() \n{\n\n int n;\n int i,j;\n scanf(\"%d\", &n);\n \t\n for(i=0; 
i<= 2*n-1; i+=1){\n for(j=0; j< 2*n-1; j+=1){\n if(i <= n - 1){\n if(j < i) printInt(n - j, \" \");\n else if (j >= i && j < 2*n - i - 1) printInt(n - i, \" \");\n else if (j >= 2*n - i - 1) printInt(j + 2 - n, \" \");\n }else if (i > n)\n {\n int t = 2 * n - i - 1;\n if(j < t) printInt(n - j, \" \");\n else if (j >= t && j < 2*n - t - 1) printInt(n - t, \" \");\n else if (j >= 2*n - t - 1) printInt(j + 2 - n, \" \");\n }\n }\n if(i != n - 1) printf(\"\\n\");\n }\n return 0;\n}\n\n" }, { "alpha_fraction": 0.4549819827079773, "alphanum_fraction": 0.46818727254867554, "avg_line_length": 15.038461685180664, "blob_id": "70e1a4219ba60bea1226209d8664acf1cd6e6eeb", "content_id": "ede82447fe4487f86017452660b8ec577c7e5d51", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 833, "license_type": "no_license", "max_line_length": 39, "num_lines": 52, "path": "/NewCoder/wangyi6.py", "repo_name": "Alwaysproblem/simplecode", "src_encoding": "UTF-8", "text": "import math\nimport os\nimport random\nimport re\nimport sys\n\ndebug = True\n\ndef swap(s):\n k = s.copy()\n for ind, v in enumerate(k):\n if v == s[0] and ind > 0:\n break\n \n k.pop(ind)\n k.pop(0)\n return k, ind - 1\n\ndef MinSwap(s):\n if len(s) % 2 != 0:\n return \"Error\"\n elif len(s) == 0:\n return 0\n elif s[1] == s[0]:\n return MinSwap(s[2:])\n else:\n k, times = swap(s)\n return times + MinSwap(k)\n\n return \n\ndef main():\n # input\n n = int(input())\n a = list(map(int, input().split()))\n # Cards = input()\n\n # solution\n result = MinSwap(a)\n\n #print\n print(result)\n\nif __name__ == \"__main__\":\n if not debug:\n try:\n while True:\n main()\n except EOFError:\n exit()\n else:\n main()" }, { "alpha_fraction": 0.32831326127052307, "alphanum_fraction": 0.34638553857803345, "avg_line_length": 26.70833396911621, "blob_id": "4ca8667f853071d0671c4e28ff5a82a9d448a887", "content_id": "40c845d6c956088a4afffec06cfea743bd88a0a2", "detected_licenses": [], 
"is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 664, "license_type": "no_license", "max_line_length": 47, "num_lines": 24, "path": "/COPInterview/maxWater.py", "repo_name": "Alwaysproblem/simplecode", "src_encoding": "UTF-8", "text": "class Solution:\n def maxWater(self , arr ):\n # write code here\n n = len(arr) - 1\n i, j = 0, n\n left, right = arr[0], arr[n]\n sum_val = 0\n while i < j:\n if left < right:\n i += 1\n if left >= arr[i]:\n sum_val += (left - arr[i])\n else:\n left = arr[i]\n else:\n j -= 1\n if right >= arr[j]:\n sum_val += (right - arr[j])\n else:\n right = arr[j]\n return sum_val\n\nif __name__ == \"__main__\":\n print(Solution().maxWater([3,1,2,5,2,4]))" }, { "alpha_fraction": 0.4765840172767639, "alphanum_fraction": 0.5096418857574463, "avg_line_length": 17.615385055541992, "blob_id": "67aabcb0379302f987fd00ea55a2492c5a32957d", "content_id": "eca297800e027874a58d9c2986ae8c92b7e49a59", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 734, "license_type": "no_license", "max_line_length": 56, "num_lines": 39, "path": "/leetcode/cpp/rob/main.cpp", "repo_name": "Alwaysproblem/simplecode", "src_encoding": "UTF-8", "text": "/*\n * @lc app=leetcode.cn id=198 lang=cpp\n *\n * [198] 打家劫舍\n */\n#include <fmt/format.h>\n#include <fmt/ranges.h>\n\n#include <iostream>\n#include <unordered_map>\n#include <vector>\n\nusing namespace std;\n\n// @lc code=start\nclass Solution {\n public:\n int rob(vector<int>& nums) {\n vector<int> dp(nums.size() + 1);\n dp[0] = 0;\n for (int i = 1; i <= nums.size(); i++) {\n if (i - 2 > 0)\n dp[i] = max(dp[i - 1], dp[i - 2] + nums[i - 1]);\n else\n dp[i] = max(dp[i - 1], dp[0] + nums[i - 1]);\n }\n // fmt::print(\"{}\\n\", dp);\n return dp.back();\n }\n};\n// @lc code=end\n\nint main() {\n vector<int> nums = {2, 7, 9, 3, 1};\n Solution sol;\n int v = sol.rob(nums);\n fmt::print(\"{}\\n\", v);\n return 0;\n}\n" }, { "alpha_fraction": 
0.6029684543609619, "alphanum_fraction": 0.6178107857704163, "avg_line_length": 19.339622497558594, "blob_id": "b90a04a80fff96ea1c6b46fac75bf20f6368279b", "content_id": "f8244dee63339cb28b01d486bd31f8fdc11cd485", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1086, "license_type": "no_license", "max_line_length": 47, "num_lines": 53, "path": "/leetcode/cpp/isPalindrome_LL/main.cpp", "repo_name": "Alwaysproblem/simplecode", "src_encoding": "UTF-8", "text": "/*\n * @lc app=leetcode.cn id=234 lang=cpp\n *\n * [234] 回文链表\n */\n#include <fmt/format.h>\n#include <fmt/ranges.h>\n\n#include <iostream>\n#include <unordered_map>\n#include <vector>\n\n#include \"common_types/LinkedList/LinkedList.h\"\n\nusing namespace std;\nusing ListNode = LinkedListNode<int>;\n\n// @lc code=start\nclass Solution {\n public:\n ListNode *comp = nullptr;\n int f = 0, b = 0;\n ListNode *reversed(ListNode *head, int b) {\n if (!head->next) return head;\n b++;\n ListNode *tail = reversed(head->next, b);\n if (!tail) return nullptr;\n if (f > b) return tail;\n if (comp->val != tail->val) return nullptr;\n comp = comp->next;\n f++;\n return head;\n }\n\n bool isPalindrome(ListNode *head) {\n comp = head;\n ListNode *p = reversed(head, 0);\n if (!p) return false;\n return true;\n }\n};\n// @lc code=end\n\nint main() {\n vector<int> v = {1, 2, 4, 3, 2, 1};\n ListNode *head = BuildLinkedlist<int>(v);\n showLinkedList<int>(head);\n Solution sol;\n bool r = sol.isPalindrome(head);\n fmt::print(\"{}\\n\", r);\n DestroyLinkedlist<int>(head);\n return 0;\n}\n" }, { "alpha_fraction": 0.49233129620552063, "alphanum_fraction": 0.5092024803161621, "avg_line_length": 21.101694107055664, "blob_id": "af1035fd3dbb0a66b10ecb0864e39ae0c4abe9cf", "content_id": "21f303a88b352078a692fa18aed70fece8189c7b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1304, "license_type": "no_license", "max_line_length": 
67, "num_lines": 59, "path": "/labuladong/cpp/maxEnvelopes/main.cpp", "repo_name": "Alwaysproblem/simplecode", "src_encoding": "UTF-8", "text": "#include <fmt/format.h>\n#include <fmt/ranges.h>\n\n#include <algorithm>\n#include <iostream>\n#include <unordered_map>\n#include <vector>\n\nusing namespace std;\n\nclass Solution {\n public:\n int lengthOfLIS(vector<int>& nums) {\n vector<int> top(nums.size());\n\n int piles = 0;\n for (int i = 0; i < nums.size(); i++) {\n int poker = nums[i];\n\n int left = 0, right = piles;\n while (right > 0 && left < right) {\n int mid = left + (right - left) / 2;\n if (top[mid] >= poker) {\n right = mid;\n } else if (top[mid] < poker) {\n left = mid + 1;\n }\n }\n\n if (left == piles) piles++;\n top[left] = poker;\n fmt::print(\"{}\\n\", top);\n }\n return piles;\n }\n\n int maxEnvelopes(vector<vector<int>>& envelopes) {\n sort(envelopes.begin(), envelopes.end(),\n [](vector<int>& x, vector<int>& y) {\n if (x[0] < y[0]) return true;\n if (x[0] == y[0]) return x[1] > y[1];\n return false;\n });\n\n vector<int> lis = {};\n for (auto& v : envelopes) {\n lis.push_back(v[1]);\n }\n return lengthOfLIS(lis);\n }\n};\n\nint main() {\n Solution sol;\n vector<vector<int>> envelopes = {{5, 4}, {6, 4}, {6, 7}, {2, 3}};\n int v = sol.maxEnvelopes(envelopes);\n fmt::print(\"{}\\n\", v);\n return 0;\n}\n" }, { "alpha_fraction": 0.5, "alphanum_fraction": 0.5215053558349609, "avg_line_length": 17.600000381469727, "blob_id": "aa14bcacc0f36845fd31ee3f7f3d89c9da012dc5", "content_id": "faea8807f33027c991a2a48e7a85add176df6b1f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 372, "license_type": "no_license", "max_line_length": 44, "num_lines": 20, "path": "/leetcode/python/lengthOfLongestSubstring.py", "repo_name": "Alwaysproblem/simplecode", "src_encoding": "UTF-8", "text": "from collections import defaultdict\n\ndef lengthOfLongestSubstring(s: str) -> int:\n l = 0\n r = 0\n ans = 0\n counter = 
defaultdict(int)\n\n for c in s:\n while counter[c] != 0:\n counter[s[l]] -= 1\n l += 1\n r += 1\n counter[c] += 1\n\n ans = max(ans, r - l)\n return ans\n\n\nprint(lengthOfLongestSubstring(\"pwwkew\"))\n" }, { "alpha_fraction": 0.837837815284729, "alphanum_fraction": 0.837837815284729, "avg_line_length": 36, "blob_id": "fe17f1ca51d38937d4ed67808d59b70a057b2487", "content_id": "7ef2ca601282cee30907c04d5045c89dbe710684", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "CMake", "length_bytes": 37, "license_type": "no_license", "max_line_length": 36, "num_lines": 1, "path": "/leetcode/cpp/permutation/CMakeLists.txt", "repo_name": "Alwaysproblem/simplecode", "src_encoding": "UTF-8", "text": "add_executable(permutation main.cpp)\n" }, { "alpha_fraction": 0.5828220844268799, "alphanum_fraction": 0.5889570713043213, "avg_line_length": 19.4375, "blob_id": "32781f6f5cc0215c6e2600c076a460b2dd388f48", "content_id": "8b22748eee0fc7ebedaede7938aed899f495ea97", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 326, "license_type": "no_license", "max_line_length": 43, "num_lines": 16, "path": "/interview/sockpair.py", "repo_name": "Alwaysproblem/simplecode", "src_encoding": "UTF-8", "text": "from collections import Counter\n\n# Complete the sockMerchant function below.\ndef sockMerchant(n, ar):\n C = Counter(ar)\n pair_num = 0\n for num in C.values():\n pair_num += num // 2\n\n print(pair_num)\n\n\nif __name__ == \"__main__\":\n N = int(input())\n arr = map(int, input().split())\n sockMerchant(N, arr)" }, { "alpha_fraction": 0.5380116701126099, "alphanum_fraction": 0.5473684072494507, "avg_line_length": 19.35714340209961, "blob_id": "4f5778d03623800b0cfb2a23c33112e56e63a60d", "content_id": "bc11538c0e4d6561e47487d0d9a63e5b351015a4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 855, "license_type": "no_license", "max_line_length": 
161, "num_lines": 42, "path": "/interview/balanced.py", "repo_name": "Alwaysproblem/simplecode", "src_encoding": "UTF-8", "text": "#!/bin/python3\n\nimport math\nimport os\nimport random\nimport re\nimport sys\n\n\n# https://www.hackerrank.com/challenges/balanced-brackets/problem?h_l=interview&playlist_slugs%5B%5D=interview-preparation-kit&playlist_slugs%5B%5D=stacks-queues\n\n# Complete the isBalanced function below.\ndef isBalanced(s):\n stack = []\n opdic = {\n '}':'{',\n ']':'[',\n ')':'('\n }\n for i in s:\n if len(stack) != 0 and (i in opdic.keys()) and stack[-1] == opdic[i]:\n stack.pop()\n else:\n stack.append(i)\n \n return \"YES\" if len(stack) == 0 else \"NO\"\n\n\nif __name__ == '__main__':\n # fptr = open(os.environ['OUTPUT_PATH'], 'w')\n\n t = int(input())\n\n for t_itr in range(t):\n s = input()\n\n result = isBalanced(s)\n\n print(result)\n # fptr.write(result + '\\n')\n\n # fptr.close()\n" }, { "alpha_fraction": 0.42411738634109497, "alphanum_fraction": 0.4598807990550995, "avg_line_length": 24.658823013305664, "blob_id": "18a1254f4c46473dd2f3a418d5a35502ebe0d435", "content_id": "a5909125b49cdd9d2c4a3bed8ff610c0a109616a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2181, "license_type": "no_license", "max_line_length": 77, "num_lines": 85, "path": "/interview/All_K_size_subarray_sum.py", "repo_name": "Alwaysproblem/simplecode", "src_encoding": "UTF-8", "text": "#!/bin/python3\n\nimport math\nimport os\nimport random\nimport re\nimport sys\n\n\n# accumulate can be used in the dynamic programming (1-D) f(n + 1) = f(n) + n\n\n# #O(n^2)\n# from itertools import accumulate\n# import operator\n# # Complete the maximumSum function below.\n# def All_K_Size_SubarraySum(a, k = None):\n# if k == None:\n# k = len(a)\n \n# S = [0] * (k + 1)\n# for i, _ in enumerate(a, 1):\n# if i == 1:\n# S[i] = a[0]\n# elif i > 1:\n# b = a[:i]\n# # # first idea O(n)\n# # b.reverse()\n# # S[i] = S[i - 1] + 
sum(accumulate(b))\n\n# # second thought O(n)\n# weight = [i for i in range(1, k + 1)]\n# S[i] = S[i - 1] + sum(map(operator.mul, weight, b))\n\n# return S[-1]\n\n# O(n)\ndef All_K_Size_SubarraySum(a, k = None):\n \"\"\"\n arr[] = [1, 2, 3], n = 3\n All subarrays : [1], [1, 2], [1, 2, 3], \n [2], [2, 3], [3]\n here first element 'arr[0]' appears 3 times \n second element 'arr[1]' appears 4 times \n third element 'arr[2]' appears 3 times\n\n Every element arr[i] appears in two types of subsets:\n i) In subarrays beginning with arr[i]. There are \n (n-i) such subsets. For example [2] appears\n in [2] and [2, 3].\n ii) In (n-i)*i subarrays where this element is not\n first element. For example [2] appears in \n [1, 2] and [1, 2, 3].\n\n Total of above (i) and (ii) = (n-i) + (n-i)*i \n = (n-i)(i+1)\n \n For arr[] = {1, 2, 3}, sum of subarrays is:\n arr[0] * ( 0 + 1 ) * ( 3 - 0 ) + \n arr[1] * ( 1 + 1 ) * ( 3 - 1 ) +\n arr[2] * ( 2 + 1 ) * ( 3 - 2 ) \n\n = 1*3 + 2*4 + 3*3 \n = 20\n \"\"\"\n if k == None:\n k = len(a)\n \n result = [0] * (k + 1)\n \n # computing sum of subarray \n # using formula \n for i in range(0, k):\n result[i + 1] = result[i] + (a[i] * (i+1) * (k-i))\n \n # return all subarray sum \n return result[-1]\n\n\nif __name__ == '__main__':\n\n a = list(map(int, input().rstrip().split()))\n\n result = All_K_Size_SubarraySum(a)\n\n print(result)\n" }, { "alpha_fraction": 0.44870349764823914, "alphanum_fraction": 0.4599774479866028, "avg_line_length": 16.41176414489746, "blob_id": "1d55f8307a06adb8fd68d687f5204fa9631bf43a", "content_id": "0582196dfe0eb524efa5cb2f1f9d1f42cd3e6f80", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 887, "license_type": "no_license", "max_line_length": 40, "num_lines": 51, "path": "/NewCoder/wangyi3.py", "repo_name": "Alwaysproblem/simplecode", "src_encoding": "UTF-8", "text": "import math\nimport os\nimport random\nimport re\nimport sys\n\ndebug = True\n\nfrom collections 
import Counter\ndef Solution(a):\n cnt = Counter(a)\n ele = sorted(list(cnt))\n\n if len(ele) <= 1:\n return \"Yes\"\n if len(ele) > 3:\n return \"No\"\n elif len(ele) == 2:\n return \"Yes\"\n # if sum(ele) % 2 == 0:\n # return \"Yes\"\n # else:\n # return \"No\"\n elif len(ele) == 3:\n if ele[1]*2 == ele[0] + ele[-1]:\n return \"Yes\"\n else:\n return \"No\" \n \n\ndef main():\n # input\n k = int(input())\n n = int(input())\n a = list(map(int, input().split()))\n\n # solution\n result = Solution(a)\n\n #print\n print(result)\n\nif __name__ == \"__main__\":\n if not debug:\n try:\n while True:\n main()\n except EOFError:\n exit()\n else:\n main()" }, { "alpha_fraction": 0.6158192157745361, "alphanum_fraction": 0.6158192157745361, "avg_line_length": 16.799999237060547, "blob_id": "b7423e8b8be51665e802b2d4783e8f18347fb2fc", "content_id": "b315801c90acde39cb1d49acf75a31baae147690", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 201, "license_type": "no_license", "max_line_length": 30, "num_lines": 10, "path": "/leetcode/cpp/note_template/TreeTravel.cpp", "repo_name": "Alwaysproblem/simplecode", "src_encoding": "UTF-8", "text": "void traverse(TreeNode root) {\n if (root == null) {\n return;\n }\n // 前序位置 code here\n traverse(root.left);\n // 中序位置 code here\n traverse(root.right);\n // 后序位置 code here\n}" }, { "alpha_fraction": 0.5065104365348816, "alphanum_fraction": 0.5169270634651184, "avg_line_length": 20.91428565979004, "blob_id": "9ac94e9c7682cd2b8e4c78b791af0d3e36f1c59e", "content_id": "8cf2318a778c36d58e9f2e4eadd1e3ac7d1e96b1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 786, "license_type": "no_license", "max_line_length": 60, "num_lines": 35, "path": "/leetcode/python/147.对链表进行插入排序.py", "repo_name": "Alwaysproblem/simplecode", "src_encoding": "UTF-8", "text": "#\n# @lc app=leetcode.cn id=147 lang=python3\n#\n# [147] 对链表进行插入排序\n#\nfrom typing 
import *\nclass ListNode:\n def __init__(self, val=0, next=None):\n self.val = val\n self.next = next\n# @lc code=start\n# Definition for singly-linked list.\nclass Solution:\n def insertionSortList(self, head: ListNode) -> ListNode:\n\n def insert(node, pos):\n tmp = pos.next\n pos.next = node\n node.next = tmp\n \n ans = ListNode(float(\"-inf\"))\n\n while head:\n callme = head.next\n cur = ans\n\n while cur.next and cur.next.val <= head.val:\n cur = cur.next\n \n insert(head, cur)\n head = callme\n \n return ans.next\n\n# @lc code=end\n\n" }, { "alpha_fraction": 0.5480769276618958, "alphanum_fraction": 0.5649038553237915, "avg_line_length": 14.407407760620117, "blob_id": "00fafbe8e031a18306d1eceac9e8829fe57d056e", "content_id": "80493f0afc7dd768e0d7dce697f296b04e6c0ef0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 416, "license_type": "no_license", "max_line_length": 43, "num_lines": 27, "path": "/labuladong/cpp/trailingZeroes/main.cpp", "repo_name": "Alwaysproblem/simplecode", "src_encoding": "UTF-8", "text": "#include <fmt/format.h>\n#include <fmt/ranges.h>\n\n#include <iostream>\n#include <unordered_map>\n#include <vector>\n\nusing namespace std;\n\nclass Solution {\n public:\n int trailingZeroes(int n) {\n int res = 0;\n for (int d = n; d / 5 > 0; d = d / 5) {\n res += d / 5;\n }\n return res;\n }\n};\n\nint main() {\n int n = 5;\n Solution sol;\n int v = sol.trailingZeroes(n);\n fmt::print(\"{}\\n\", v);\n return 0;\n}\n" }, { "alpha_fraction": 0.5318647027015686, "alphanum_fraction": 0.5468135476112366, "avg_line_length": 19.5, "blob_id": "b78b6b895d60f6ddc2f530bc207cf7409a952100", "content_id": "0c6180de9206bb928a7abc0fdc77b2726b5e5a83", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1285, "license_type": "no_license", "max_line_length": 58, "num_lines": 62, "path": "/leetcode/cpp/maxSlidingWindow/main.cpp", "repo_name": 
"Alwaysproblem/simplecode", "src_encoding": "UTF-8", "text": "/*\n * @lc app=leetcode.cn id=239 lang=cpp\n *\n * [239] 滑动窗口最大值\n */\n#include <fmt/format.h>\n#include <fmt/ranges.h>\n\n#include <deque>\n#include <iostream>\n#include <unordered_map>\n#include <vector>\n\nusing namespace std;\n\n// @lc code=start\ntemplate <typename T>\nclass MonotonicQueue {\n public:\n deque<T> q;\n MonotonicQueue() : q(){};\n ~MonotonicQueue() = default;\n // void Print() const { fmt::print(\"{}\\n\", q); }\n void push(T& elem) {\n while (!q.empty() && q.back() < elem) q.pop_back();\n q.push_back(elem);\n }\n void pop(T& elem) {\n if (q.front() == elem) q.pop_front();\n }\n T& max() { return q.front(); }\n};\n\nclass Solution {\n public:\n vector<int> maxSlidingWindow(vector<int>& nums, int k) {\n MonotonicQueue<int> window;\n vector<int> res;\n for (int i = 0; i < nums.size(); i++) {\n if (i < k - 1) {\n window.push(nums[i]);\n } else {\n // window.Print();\n window.push(nums[i]);\n res.push_back(window.max());\n window.pop(nums[i - k + 1]);\n }\n }\n // fmt::print(\"{}\\n\", res);\n return res;\n }\n};\n// @lc code=end\n\nint main() {\n vector<int> nums = {-7, -8, 7, 5, 7, 1, 6, 0};\n int k = 4;\n Solution sol;\n vector<int> v = sol.maxSlidingWindow(nums, k);\n fmt::print(\"{}\\n\", v);\n return 0;\n}\n" } ]
238
challahc/harvestautofill
https://github.com/challahc/harvestautofill
a4ab2474dec641649caf290897a8b2490cacdf43
b041e6aa46457215b99ce562db24a488bb7a39c5
97a578e73f592a40a6edf54b89b4f216395608fc
refs/heads/master
2021-01-17T05:19:57.360604
2013-03-18T17:57:48
2013-03-18T17:59:24
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6616456508636475, "alphanum_fraction": 0.7055168747901917, "avg_line_length": 30.619047164916992, "blob_id": "698a69dd4259177a0847256777b89d1ae4d8586e", "content_id": "d1a26034de84ae7495d25d8058e7471e72e66cac", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5311, "license_type": "no_license", "max_line_length": 115, "num_lines": 168, "path": "/harvestAutoFill.py", "repo_name": "challahc/harvestautofill", "src_encoding": "UTF-8", "text": "#\\\n# Created with PyCharm.\n# User: mkelly\n# Date: 3/15/13\n# Time: 4:19 PM\n# \n# Description: Autopopulates Harvest Timesheet for current day.\n#\n#/\n__author__ = 'mkelly'\n\n\nimport harvest\nimport random\n\nDEBUG=False\n\nTasks = {\n 'Architecture':'1842336',\n 'Documentation':'1842326',\n 'Research': '1842317',\n 'Strategy':'1842352',\n 'Admin':'1819029',\n 'Planning':'1842319',\n 'Vendor Management':'1842320'\n}\n\n#Project ID for Cloud Platform (Shared Platforms)\nProjectID = '3339844'\n\nMaxHoursPerDay=8\nHarvestURL=\"https://scrippsnetworksdigital.harvestapp.com\"\nHarvestEmail=\"scrippsemail@scrippsnetworks.com\"\nHarvestPassword=\"password\"\n\nValidTimeEntries = [0, .5, 1, 1.5, 2, 2.5, 3, 3.5, 4, 4.5, 5, 5.5, 6, 6.5, 7, 7.5, 8]\n\ntry:\n mytime=harvest.Harvest(HarvestURL,HarvestEmail,HarvestPassword)\n print mytime.status()\nexcept:\n print \"Cannot connect to Harvest\"\n\n#Sample add line:\n#data = {\"notes\":\"test note\", \"project_id\":\"3339844\",\"hours\":\"1.0\", \"task_id\": \"1842326\",\"spent_at\": \"2013-03-14\" }\n\n# data = {\"project_id\":\"3339844\",\"hours\":\"1.0\", \"task_id\": \"1842326\"}\n# if DEBUG is False:\n# success=mytime.add(data)\n# if success=None:\n# print \"Error Adding data to Harvest\"\n# print success\n\n#Determine Split for the day\n# set .5 hours to admin time\n#\n#Total for the Day should be this:\nTotalTime=8\nAdminTime=.5\n#Track hours allocated so 
far\nMaxUsedTime=0+AdminTime\nAvailableTime=TotalTime-AdminTime\n\ndata = {\"project_id\":\"3339844\",\"hours\":str(AdminTime), \"task_id\": Tasks['Admin']}\nif DEBUG is False:\n success=mytime.add(data)\n if success is None:\n print \"Error Adding data to Harvest\"\n print success\nprint data\n#Documentation\n# Set it to be 0 - 1 hours for now\nDocumentationRange=random.randint(0, 2)\n#Maximum possible = 1.5 hours so far\nDocumentationTime=ValidTimeEntries[DocumentationRange]\nAvailableTime=AvailableTime-DocumentationTime\ndata = {\"project_id\":\"3339844\",\"hours\":str(DocumentationTime), \"task_id\": Tasks['Documentation']}\nif DEBUG is False and DocumentationTime > 0:\n success=mytime.add(data)\n if success is None:\n print \"Error Adding data to Harvest\"\n print success\nprint data\n# Architecture\n# Set it to be .5 - 1.5 hours for now\nArchitectureRange=random.randint(1, 3)\n#Maximum possible = 3 hours so far\nArchitectureTime=ValidTimeEntries[ArchitectureRange]\nAvailableTime=AvailableTime-ArchitectureTime\ndata = {\"project_id\":\"3339844\",\"hours\":str(ArchitectureTime), \"task_id\": Tasks['Architecture']}\nif DEBUG is False and ArchitectureTime > 0:\n success=mytime.add(data)\n if success is None:\n print \"Error Adding data to Harvest\"\n print success\nprint data\n# Planning\n# Set it to be 1 - 3 hours for now\nPlanningRange=random.randint(2, 6)\n#Maximum possible = 6 hours so far\nPlanningTime=ValidTimeEntries[PlanningRange]\nAvailableTime=AvailableTime-PlanningTime\ndata = {\"project_id\":\"3339844\",\"hours\":str(PlanningTime), \"task_id\": Tasks['Planning']}\nif DEBUG is False and PlanningTime > 0:\n success=mytime.add(data)\n if success is None:\n print \"Error Adding data to Harvest\"\n print success\nprint data\n# Strategy\n# Set it to be .5 - 2 hours for now\nStrategyRange=random.randint(1, 4)\n#Maximum possible = 8 hours so far\nStrategyTime=ValidTimeEntries[StrategyRange]\nAvailableTime=AvailableTime-StrategyTime\ndata = 
{\"project_id\":\"3339844\",\"hours\":str(StrategyTime), \"task_id\": Tasks['Strategy']}\nif DEBUG is False and StrategyTime > 0:\n success=mytime.add(data)\n if success is None:\n print \"Error Adding data to Harvest\"\n print success\nprint data\nif AvailableTime>0:\n # Research\n # Set it to be .5 - 2 hours for now\n ResearchRange=random.randint(1, 4)\n #Maximum possible = 11 hours so far\n ResearchTime=ValidTimeEntries[ResearchRange]\n if ResearchTime>AvailableTime:\n ResearchTime=AvailableTime\n AvailableTime=AvailableTime-ResearchTime\n data = {\"project_id\":\"3339844\",\"hours\":str(ResearchTime), \"task_id\": Tasks['Research']}\n if DEBUG is False and ResearchTime > 0:\n success=mytime.add(data)\n if success is None:\n print \"Error Adding data to Harvest\"\n print success\n print data\nif AvailableTime>0:\n # Vendor Management\n # Set it to be 0 - 2 hours for now\n VendorManagementRange=random.randint(0, 4)\n #Maximum possible = 13 hours so far\n VendorManagementTime=ValidTimeEntries[VendorManagementRange]\n if VendorManagementTime>AvailableTime:\n VendorManagementTime=AvailableTime\n AvailableTime=AvailableTime-VendorManagementTime\n\n data = {\"project_id\":\"3339844\",\"hours\":str(VendorManagementTime), \"task_id\": Tasks['Vendor Management']}\n if DEBUG is False and VendorManagementTime > 0:\n success=mytime.add(data)\n if success is None:\n print \"Error Adding data to Harvest\"\n print success\n print data\n\n# All Tasks have been used, now need to check and see if any left over time\nif AvailableTime>0:\n #Let's add left over time to Architecture for the day\n data = {\"project_id\":\"3339844\",\"hours\":str(AvailableTime), \"task_id\": Tasks['Architecture']}\n if DEBUG is False and AvailableTime > 0:\n success=mytime.add(data)\n if success is None:\n print \"Error Adding data to Harvest\"\n print success\n print data\nif DEBUG is True:\n print AvailableTime" }, { "alpha_fraction": 0.73758864402771, "alphanum_fraction": 0.73758864402771, 
"avg_line_length": 16.625, "blob_id": "de915558a2f59b025dad79716bc8565383471b84", "content_id": "b5e1ed8b9cec102cbceb5d2523f7fbe94f230632", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 141, "license_type": "no_license", "max_line_length": 48, "num_lines": 8, "path": "/README.md", "repo_name": "challahc/harvestautofill", "src_encoding": "UTF-8", "text": "harvestautofill\n===============\n\nHarvest Timesheet Autofill\n\nrelies on Harvest python API:\n\nhttps://github.com/aurorasoftware/python-harvest\n" } ]
2
UniversidadeDeVassouras/labproginter-2020.2-AntonioTadeuBerardinelli-p2
https://github.com/UniversidadeDeVassouras/labproginter-2020.2-AntonioTadeuBerardinelli-p2
afc60229b78476c3c3056d593221991bce3c5ca1
253700d96f46dd902f7e132421aa05f2a024b77d
230499bfd1f019a93af5575d31c1ce59d8805bbf
refs/heads/master
2023-03-22T06:16:13.514857
2020-12-24T00:07:22
2020-12-24T00:07:22
348,104,578
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6471816301345825, "alphanum_fraction": 0.6471816301345825, "avg_line_length": 26.200000762939453, "blob_id": "0b02f3a6b8a317a9e97e96988758bdcf622175b1", "content_id": "dbfe1ea029bec63318557f5919f8a52b89fae5a1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 958, "license_type": "no_license", "max_line_length": 121, "num_lines": 35, "path": "/application/model/entity/Produto.py", "repo_name": "UniversidadeDeVassouras/labproginter-2020.2-AntonioTadeuBerardinelli-p2", "src_encoding": "UTF-8", "text": "class Produto:\n\n def __init__(self, id, imagemProduto, nome, descricao, precoOficial, precoLancamento, valorParcela, numeroParcelas):\n self._id = id \n self._imagemProduto = imagemProduto\n self._nome = nome\n self._descricao = descricao\n self._precoOficial = precoOficial\n self._precoLancamento = precoLancamento \n self._valorParcela = valorParcela\n self._numeroParcelas = numeroParcelas\n\n def get_id(self):\n return self._id\n\n def get_imagemProduto(self):\n return self._imagemProduto\n\n def get_nome(self):\n return self._nome\n\n def get_descricao(self):\n return self._descricao\n\n def get_precoOficial(self):\n return self._precoOficial\n\n def get_precoLancamento(self):\n return self._precoLancamento\n\n def get_valorParcela(self):\n return self._valorParcela\n\n def get_numeroParcelas(self):\n return self._numeroParcelas\n\n\n\n\n\n\n" }, { "alpha_fraction": 0.6200608015060425, "alphanum_fraction": 0.6231002807617188, "avg_line_length": 41.20000076293945, "blob_id": "bac3e22c9922f16e98d9d060700106e61bb85c7b", "content_id": "b03f1f08690dea9947e4f99859b14fd414f9c5cd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 658, "license_type": "no_license", "max_line_length": 227, "num_lines": 15, "path": "/application/model/dao/produto_dao.py", "repo_name": 
"UniversidadeDeVassouras/labproginter-2020.2-AntonioTadeuBerardinelli-p2", "src_encoding": "UTF-8", "text": "from application.model.entity.Produto import Produto\nimport json \n\n\n\nclass ProdutoDAO:\n def __init__(self):\n self._produto_list=[] \n\n def buscar_todos(self):\n with open('C:\\\\Users\\\\anton\\\\Desktop\\\\trabalho p1\\\\T-ssio-p2-\\\\products.json') as product_file:\n product_list = json.load(product_file)\n for product in product_list:\n self._produto_list.append(Produto(product['id'], product['image'], product['name'], product['description'], product['oldPrice'], product['price'], product['installments']['value'], product['installments'][\"count\"]))\n return self._produto_list\n \n\n\n \n\n \n\n" }, { "alpha_fraction": 0.7568305730819702, "alphanum_fraction": 0.7568305730819702, "avg_line_length": 32.272727966308594, "blob_id": "701ee8493cc2b84100d02e92be82ba909ff00984", "content_id": "de326774eebc9f6a5a104730d88746474ee90a0f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 366, "license_type": "no_license", "max_line_length": 71, "num_lines": 11, "path": "/application/controller/index_controller.py", "repo_name": "UniversidadeDeVassouras/labproginter-2020.2-AntonioTadeuBerardinelli-p2", "src_encoding": "UTF-8", "text": "from flask import render_template, request\nfrom application import app\nfrom application.model.entity.Produto import Produto\nfrom application.model.dao.produto_dao import ProdutoDAO\n\n\n@app.route ('/')\ndef home():\n produto_dao = ProdutoDAO() \n produto_lista = produto_dao.buscar_todos()\n return render_template(\"index.html\", listaProdutos = produto_lista)\n" }, { "alpha_fraction": 0.7819767594337463, "alphanum_fraction": 0.7848837375640869, "avg_line_length": 342, "blob_id": "40d8d24ba6e6096c86c8b5e98bcde65c96ad6eed", "content_id": "c664a5e904e1b769ee2dd04096e83ec43e458251", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": 
"Markdown", "length_bytes": 356, "license_type": "no_license", "max_line_length": 342, "num_lines": 1, "path": "/README.md", "repo_name": "UniversidadeDeVassouras/labproginter-2020.2-AntonioTadeuBerardinelli-p2", "src_encoding": "UTF-8", "text": "No projeto da p2 da disciplina de laboratório de programação de interface com o usúario ensinada pelo professor Tássio foi feita o Front end e o back end. No caso do back end só não foi feita a parte do Ajax, pois eu não sabia como fazer e no caso do front end só não foi feita a parte do corte header, pois não consegui colocá la no projeto. \n" } ]
4
dag-hammarskjold-library/generate_SC_tables
https://github.com/dag-hammarskjold-library/generate_SC_tables
cb0bb2e697278a6554b402d07da9f6a86bad7387
6b8afe064931fa4ea6f91ea41dd2994f4d5a5ec2
411543a46fbe8428d048b1f256b6fdd3b1e9def8
refs/heads/master
2021-06-18T20:29:31.460894
2019-12-03T20:19:24
2019-12-03T20:19:24
178,901,593
0
0
null
2019-04-01T16:17:51
2019-12-03T20:19:27
2021-03-20T00:48:41
HTML
[ { "alpha_fraction": 0.6539379358291626, "alphanum_fraction": 0.6539379358291626, "avg_line_length": 17.2608699798584, "blob_id": "1e69fbcc3e667181ad13d9e66c9fceb924815833", "content_id": "6b8778bb82d5b3453a9f3efecef8ce746f6635f1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 419, "license_type": "no_license", "max_line_length": 72, "num_lines": 23, "path": "/generate_SC_table.py", "repo_name": "dag-hammarskjold-library/generate_SC_tables", "src_encoding": "UTF-8", "text": "from flask import Flask\nfrom flask import render_template\nimport json\nfrom config import Config\nimport pymongo\n\nSC = Config.SC\n\napp = Flask(__name__)\n\n@app.route('/')\ndef sc():\n return 'Hello World!'\n\n@app.route('/<int:year>')\ndef show_year(year):\n\n results = SC.find({'year': year})\n\n return render_template('sc_table.html', results=results, year=year) \n\nif __name__ == '__main__':\n app.run(debug=True)" }, { "alpha_fraction": 0.5843373537063599, "alphanum_fraction": 0.608433723449707, "avg_line_length": 22.785715103149414, "blob_id": "90facd84b0f96667732d3ac5266be74d48d8fc9b", "content_id": "a6af84989b8987cebe03fd670b0af52475349695", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 332, "license_type": "no_license", "max_line_length": 45, "num_lines": 14, "path": "/config_sample.py", "repo_name": "dag-hammarskjold-library/generate_SC_tables", "src_encoding": "UTF-8", "text": "from pymongo import MongoClient\n\nclass Config(object):\n DB_CLIENT = MongoClient(\n 'localhost',\n port=27017,\n username='username',\n password='password',\n authSource='authentication database',\n authMechanism='SCRAM-SHA-256'\n )\n\n DB = DB_CLIENT['database']\n SC = DB['collection']" }, { "alpha_fraction": 0.7636363506317139, "alphanum_fraction": 0.7636363506317139, "avg_line_length": 49.09090805053711, "blob_id": "dc8dc155912d492a27358f41cca0b06d2274c6bb", "content_id": 
"0636b70a66e1a175e91f2a1415d7026861c79f39", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 550, "license_type": "no_license", "max_line_length": 94, "num_lines": 11, "path": "/README.MD", "repo_name": "dag-hammarskjold-library/generate_SC_tables", "src_encoding": "UTF-8", "text": "To run this after downloading it, you will have to take the following steps:\n\n>> Install your virtual environment, e.g.: virtualenv venv\n>> Activate your virtual environment. \n On Windows: .\\venv\\Scripts\\activate\n>> Install your dependencies from the included requirements file: \n pip install -r requirements.txt\n>> Copy config_sample.py to config.py and edit it to include your database connection details.\n>> Set your local FLASK_APP environment variable. On Windows:\n set:FLASK_APP=\"generate_SC_table.py\"\n>> Run the application: flask run" }, { "alpha_fraction": 0.4655870497226715, "alphanum_fraction": 0.6761133670806885, "avg_line_length": 14.5, "blob_id": "2dfdad37b51cd1f4046cc8e97d086a88237022c3", "content_id": "a37963a89f6a946dfd3637a3c0d8910bc1501de2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 247, "license_type": "no_license", "max_line_length": 24, "num_lines": 16, "path": "/requirements.txt", "repo_name": "dag-hammarskjold-library/generate_SC_tables", "src_encoding": "UTF-8", "text": "astroid==2.2.5\nClick==7.0\ncolorama==0.4.1\nFlask==1.0.2\nisort==4.3.16\nitsdangerous==1.1.0\nJinja2==2.10\nlazy-object-proxy==1.3.1\nMarkupSafe==1.1.1\nmccabe==0.6.1\npylint==2.3.1\npymongo==3.7.2\nsix==1.12.0\ntyped-ast==1.3.1\nWerkzeug==0.15.3\nwrapt==1.11.1" } ]
4
khushisahni/background-matters-project
https://github.com/khushisahni/background-matters-project
72319f2868780b15971b21bf8beaf3dee405a376
df978e91da075b42e678449e2a78a750da529207
b87b0729823ed6a7b9346d127586a7829cd594f5
refs/heads/main
2023-08-04T20:08:04.149305
2021-09-29T04:35:01
2021-09-29T04:35:01
411,530,825
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.8571428656578064, "alphanum_fraction": 0.8571428656578064, "avg_line_length": 28, "blob_id": "dc13049d1dd1a14dc70b311831dcd1f6dd0cadc2", "content_id": "ae083d5471032fb0fad33fc36e3175c841492228", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 28, "license_type": "no_license", "max_line_length": 28, "num_lines": 1, "path": "/README.md", "repo_name": "khushisahni/background-matters-project", "src_encoding": "UTF-8", "text": "# background-matters-project" }, { "alpha_fraction": 0.586712658405304, "alphanum_fraction": 0.6609145998954773, "avg_line_length": 22.1875, "blob_id": "491b4d42919d1994ab030e60389fbc9a2f401061", "content_id": "ed6663d9660f5327b5bb6f7e2379e22b8b492574", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1159, "license_type": "no_license", "max_line_length": 74, "num_lines": 48, "path": "/code.py", "repo_name": "khushisahni/background-matters-project", "src_encoding": "UTF-8", "text": "import cv2\r\nimport time\r\nimport numpy as np\r\n\r\nfourcc = cv2.VideoWriter_fourcc(*\"XVID\")\r\noutput_file = cv2.VideoWriter(\"Output.avi\",fourcc,20.0,(640,480))\r\ncap = cv2.VideoCapture(0)\r\ntime.sleep(2)\r\nbg = 0\r\n\r\nfor i in range(60):\r\n ret,bg=cap.read()\r\n\r\nbg=np.flip(bg,axis=1)\r\n\r\nwhile(cap.isOpened()):\r\n ret,img=cap.read()\r\n if not ret:\r\n break\r\n img = np.flip(img,axis=1)\r\n\r\nlower_black = np.array([30,30,0])\r\nupper_black = np.array([104,153,70])\r\n\r\nmask_1 = cv2.inRange(hsv,lower_black,upper_black)\r\n\r\nlower_black = np.array([30,30,0])\r\nupper_black = np.array([104,153,70])\r\n\r\nmask_2 = cv2.inRange(hsv,lower_black,upper_black)\r\n\r\nmask_1 = mask_1 + mask_2\r\n\r\nmask_1 = cv2.morphologyEx(mask_1,cv2.MORPH_OPEN,np.ones((3,3),np.uint8))\r\nmask_1 = cv2.morphologyEx(mask_1,cv2.MORPH_DILATE,np.ones((3,3),np.uint8))\r\n\r\nmask_2 = cv2.bitwise_not(mask_1)\r\n\r\nres_1 = 
cv2.bitwise_and(img,img,mask=mask_2)\r\nres_2 = cv2.bitwise_and(bg,bg,mask=mask_1)\r\n\r\nfinal_output = cv2.addweighted(res_1,1,res_2,1,0)\r\noutput_file.write(final_output)\r\ncv2.imShow('The Occult !!! Be AWARE',final_output)\r\ncv2.waitK(1)\r\ncap.release()\r\nout.release()\r\ncv2.destroyAllWindows()" } ]
2
drfuze/classroom
https://github.com/drfuze/classroom
81dc6b22d82c34a6463e1413788c4bd36d7ca3ac
9161be003ba09cdd215393d741a56ca397685db9
d612b81879f4a06fedcdfc444fb824e9555b66b9
refs/heads/master
2020-12-24T20:00:12.907070
2017-03-26T13:07:58
2017-03-26T13:07:58
86,225,099
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.644831120967865, "alphanum_fraction": 0.6673490405082703, "avg_line_length": 19.787233352661133, "blob_id": "aaebfa69db9e486251504e8e935cdf91df3e9545", "content_id": "43cffe7f94f1aa879ba5809d2ba11fec4e6238c6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1205, "license_type": "no_license", "max_line_length": 83, "num_lines": 47, "path": "/guess.py", "repo_name": "drfuze/classroom", "src_encoding": "GB18030", "text": "# -*- coding: cp936 -*-\nfrom random import randint\n\nf=file('game.txt')\nscore=f.read().split()\n#print score\ngame_times=int(score[0])\nmin_times=int(score[1])\ntotal_times=int(score[2])\n\nif game_times>0:\n avg_times=float(total_times)/game_times\nelse:\n avg_times=0\n\nprint '游戏说明:从1-100之间随机产生一个数,大逗逗来猜,电脑会提示猜大了还是小了。如果不愿意继续猜,随便输入小于零的数就会退出。电脑会自动保存游戏记录。‘\n\n\nprint '你已经玩了%d次,最少%d轮猜出答案,平均%.2f轮猜出答案'%(game_times,min_times,avg_times)\n\nnum=randint(1,100)\ntimes=0\nprint 'Guess what I think?'\nbingo=False\nwhile bingo==False:\n times+=1\n answer=input()\n if answer<0:\n print 'you quit this game'\n break\n if answer<num:\n print 'too small'\n if answer>num:\n print 'too big'\n if answer==num:\n bingo=True\n print 'bingo!'\n#第一次玩或轮数比最小轮数小,就更新最小轮数\nif game_times==0 or times<min_times:\n min_times=times\ntotal_times+=times\ngame_times+=1\nresult='%d %d %d' %(game_times,min_times,total_times)\n\nf=open('game.txt', 'w')\nf.write(result)\nf.close\n" } ]
1
romintomasetti/ContinuumMechanics2018
https://github.com/romintomasetti/ContinuumMechanics2018
058cdf159ea7a0ddfc898f055fc57dc5881a0002
180b81d7ad07490541dddc3388f2885639aabd76
632a55b24ef6d55427412e0486f77f68203816a0
refs/heads/master
2020-04-09T19:41:37.696897
2018-04-20T12:23:41
2018-04-20T12:23:41
124,241,514
2
0
null
null
null
null
null
[ { "alpha_fraction": 0.47386759519577026, "alphanum_fraction": 0.516629695892334, "avg_line_length": 27.196428298950195, "blob_id": "f4153616d09d69a7df77a5ab787f36f207cba324", "content_id": "9eb388334aeb31fe4cb3f9cfad252073d305f473", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3157, "license_type": "no_license", "max_line_length": 125, "num_lines": 112, "path": "/My_trusses/Part_2/UPDATED_NORMAL_PLANE/group_C_updated_normal_plane.py", "repo_name": "romintomasetti/ContinuumMechanics2018", "src_encoding": "UTF-8", "text": "import sys\nsys.path.insert(0, '../')\nfrom node import *\nfrom bar import *\nfrom truss import *\nfrom nonLinAlgo import *\n\nimport matplotlib.pyplot as plt\n\nfrom optparse import OptionParser\nimport vtk\nimport math\n\ndef main(nogui):\n \n # Geometry:\n a = 0.75\n b = 0.25\n alpha = 30*math.pi/180\n \n # Material:\n E_1 = 70e9\n E_2 = 70e9\n A_1 = 25e-4\n A_2 = 25e-4\n \n # Misc.\n l0 = math.sqrt(a**2 + b**2)\n toll = 1e-6\n nItMax = 60\n \n #Nodes\n node1 = Node(1 ,0.0, 0.0)\n node2 = Node(2 ,a*math.cos(alpha) , a*math.sin(alpha))\n node3 = Node(3, a*math.cos(alpha)-b , a*math.sin(alpha))\n node4 = Node(4, 2*a*math.cos(alpha)-b , 0.0 )\n \n #Bars\n bar1 = Bar(1, [node1, node2], E_1, A_1)\n bar2 = Bar(2, [node2, node3], E_2, A_2)\n bar3 = Bar(3, [node3, node4], E_1, A_1)\n \n #Truss\n truss = Truss()\n truss.addNode(node1)\n truss.addNode(node2)\n truss.addNode(node3)\n truss.addNode(node4)\n truss.addBar(bar1)\n truss.addBar(bar2)\n truss.addBar(bar3)\n truss.setNodeRows() # ATTENTION: this line is mandatory at the end of the definition of the truss!\n \n #BCs\n truss.fix(node1,'x')\n truss.fix(node1,'y')\n truss.fix(node4,'x')\n truss.fix(node4,'y')\n \n #Critical load\n qcr = 25*(math.sqrt(3)*E_2*A_2*(b**3))/(9.0*(l0**3)) #It's the critical load divided by 2 since we consider just one bar!\n\n #Loads\n node2.applyLoad('y', -1.5*qcr)\n node3.applyLoad('y', -1.5*qcr)\n \n #Use a 
copy of the truss.\n truss_test = truss;\n \n #Non-linear algorithm\n dlambda_0 = 1e-3\n Id_0 = 10\n psi_0 = 1e-10\n while dlambda_0 <= 1e-2:\n Id = Id_0\n while Id <= 1000:\n psi = psi_0\n while psi <= 1:\n dlamda0 = dlambda_0\n algo = UpdatedNormalPlaneArcLengthAlgorithm(truss_test, toll, nItMax,dlamda0, psi, Id)\n #algo = NewtonRaphsonAlgorithm(truss,toll,nItMax,dlamda0)\n #algo = ArcLengthAlgorithm(truss,toll,nItMax,dlamda0,psi,Id)\n #algo = IncrementalAlgorithm(truss,dlamda0)\n print \"Starting with (\",dlamda0,\",\",Id,\",\",psi,\").\"\n returned_value = algo.run()\n if not truss.nodes[0].x == 0:\n sys.exit(\"The algo modified the initial truss!\")\n if returned_value == -7:\n print \"It failed with (\",dlamda0,\",\",Id,\",\",psi,\").\"\n else:\n print \"Success with (\",dlamda0,\",\",Id,\",\",psi,\").\"\n sys.exit(\"SUCCESS\")\n psi *= 10\n sys.exit()\n #End of loop on psi\n Id += 10\n #End of loop on Id\n dlambda_0 *= 10\n #End of loop on dlambda\n \n\nif __name__ == '__main__':\n \n parser=OptionParser()\n parser.add_option(\"--nogui\", action=\"store_true\",\n help=\"Specify if we need to use the GUI\", dest=\"nogui\", default=False)\n\n (options, args)=parser.parse_args()\n \n nogui = options.nogui\n \n main(nogui)" }, { "alpha_fraction": 0.8205128312110901, "alphanum_fraction": 0.8205128312110901, "avg_line_length": 39, "blob_id": "8883a9c3dac55e324743c26274c99842ad55cbf2", "content_id": "31e02d3fd5f1f4319c36eb2e976fa49c11ac4bbd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 39, "license_type": "no_license", "max_line_length": 39, "num_lines": 1, "path": "/My_trusses/tests/README.md", "repo_name": "romintomasetti/ContinuumMechanics2018", "src_encoding": "UTF-8", "text": "Create a separate folder for each test!" 
}, { "alpha_fraction": 0.8285714387893677, "alphanum_fraction": 0.8285714387893677, "avg_line_length": 69, "blob_id": "1ef6d5c557bd3384bbefcfb6bd88d4cf91a72f6d", "content_id": "28ec12f28eeda24458e6fc6cd817b41d881f5da6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 141, "license_type": "no_license", "max_line_length": 129, "num_lines": 2, "path": "/My_trusses/README.md", "repo_name": "romintomasetti/ContinuumMechanics2018", "src_encoding": "UTF-8", "text": "# Trusses\nSmall python non-linear solver for general truss structures made of bars - University of Liège Continuum Mechanics course project\n" }, { "alpha_fraction": 0.4625743627548218, "alphanum_fraction": 0.4736378788948059, "avg_line_length": 41.584598541259766, "blob_id": "457430678eb3138861676f33741ce62a3b1189af", "content_id": "fe3e0f5870436c41dad1f90ac3446e43889cb3c0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 42030, "license_type": "no_license", "max_line_length": 211, "num_lines": 987, "path": "/My_trusses/nonLinAlgo.py", "repo_name": "romintomasetti/ContinuumMechanics2018", "src_encoding": "UTF-8", "text": "import sys, os\nimport numpy as np\nfrom truss import *\nimport matplotlib.pyplot as plt\n\nclass NonLinearAlgorithm: # Father class for all the non-linear algorithms\n \n def __init__(self, _truss, _toll, _nItMax,_computeTangentMethod):\n self.truss = _truss # The truss\n self.toll = _toll # Tolerance used to assess convergence\n self.nItMax = _nItMax # Maximum number of iterations\n self.lamdaMax = 1.0 # Maximum value for lamda (a priori, it can be different from 1)\n self.stopit = False # Only used by the GUI\n self.cleanWorkspace() # Run at the beginning of the simulation to clean the workspace: all the old result files will be deleted\n self.computeTangentMethod = _computeTangentMethod # Or 'numeric' -> Used in bar.buildKt()\n self.dhook = None # Only used by the GUI\n 
\n def display(self, step, lamda):\n if self.dhook:\n self.dhook.display(step, lamda)\n self.dhook.refresh()\n \n def archive(self, lamda, nIt): # Writes the files containing the results. Just the main information is archived, but you can enrich it if you want...\n f1 = open(('Lambda.ascii'),'a')\n f1.write(str(lamda)+'\\n')\n f1.close()\n f2 = open(('Iterations.ascii'),'a')\n f2.write(str(nIt)+'\\n')\n f2.close()\n for node in self.truss.nodes:\n f3 = open(('Node_'+str(node.nb)+'_POSITION.ascii'),'a')\n f3.write(str(node.x)+' '+str(node.y)+'\\n')\n f3.close()\n f4 = open(('Node_'+str(node.nb)+'_DISPLACEMENTS.ascii'),'a')\n f4.write(str(node.u)+' '+str(node.v)+'\\n')\n f4.close()\n f5 = open(('Node_'+str(node.nb)+'_F_EXT.ascii'),'a')\n f5.write(str(node.Fx)+' '+str(node.Fy)+'\\n')\n f5.close()\n \n def archive_internal_forces(self):\n #Stores the internal forces inside each bar:\n for bar in self.truss.bars:\n f1 = open(('Bar_'+str(bar.nb)+'_internal_force.ascii'),'a')\n i_f = bar.getFint()\n f1.write(str(i_f[0])+' '+str(i_f[1])+' '+str(i_f[2])+' '+str(i_f[3])+'\\n')\n f1.close()\n \n def computeError(self,g,lambda_): # Computes the error used to assess convergence\n error = np.linalg.norm(self.truss.getOOBF())*self.toll\n return error\n \n def cleanWorkspace(self):\n dir_name = os.getcwd()\n test = os.listdir(dir_name)\n \n for item in test:\n if item.endswith(\".ascii\"):\n os.remove(os.path.join(dir_name, item))\n \nclass IncrementalAlgorithm(NonLinearAlgorithm): # A simple explicit solver for non-linear problems\n \n def __init__(self, _truss, _dlamda): \n \n NonLinearAlgorithm.__init__(self, _truss, 0., 0)\n self.dlamda = _dlamda\n \n def run(self):\n lamda = 0.\n lamda0 = 0.\n dlamda = self.dlamda\n step = 0\n while lamda < self.lamdaMax and not self.stopit:\n step+=1\n lamda = lamda0 + dlamda\n Delta_u = np.zeros((2*len(self.truss.nodes),1))\n \n print '\\n--- Incremental method - Load level, lambda =', lamda, ' ---'\n \n self.truss.applyNodalLoads(lamda)\n 
\n Kt = self.truss.buildKt(self.computeTangentMethod)\n g = self.truss.getOOBF()\n du = np.linalg.solve(Kt, -g)\n \n Delta_u+=du\n self.truss.incrementPositions(Delta_u)\n \n self.truss.update()\n self.archive(lamda, 0)\n self.display(step, lamda)\n lamda0 = lamda\n\nclass NewtonRaphsonAlgorithm(NonLinearAlgorithm): # Newton-Raphson method\n \n def __init__(self, _truss, _toll, _nItMax, _dlamda): \n \n NonLinearAlgorithm.__init__(self, _truss, _toll, _nItMax)\n self.dlamda = _dlamda\n \n def run(self):\n lamda = 0.\n lamda0 = 0.\n dlamda = self.dlamda\n step = 0\n print \"\"\n while lamda < self.lamdaMax and not self.stopit:\n step+=1\n lamda = lamda0 + dlamda\n Delta_u = np.zeros((2*len(self.truss.nodes),1)) \n \n print '>>> Newton Raphson - Load level, lambda = ', lamda, ' --- dlambda = ',dlamda,' \\r',\n \n #Apply loads:\n self.truss.applyNodalLoads(lamda)\n \n Kt = self.truss.buildKt(self.computeTangentMethod)\n #Calcule la difference entre les forces internes et externes\n g = self.truss.getOOBF() \n # solve a linear matrix equation\n du = np.linalg.solve(Kt, -g) \n \n Delta_u+=du\n self.truss.incrementPositions(Delta_u)\n #Compute the error:\n error = self.computeError(g, lamda)\n #Apply the corrector phase until tolerance is satisfied:\n current_iteration = 0\n while error > self.toll and current_iteration < self.nItMax:\n #Build the stiffness matrix \n Kt = self.truss.buildKt(self.computeTangentMethod)\n #Compute the out of balance forces:\n g_oof = self.truss.getOOBF()\n #Solve the system:\n du = np.linalg.solve(Kt, -g_oof)\n \n Delta_u+=du\n self.truss.incrementPositions(Delta_u)\n error = self.computeError(g, lamda)\n current_iteration +=1\n \n self.truss.update()\n self.archive(lamda, current_iteration)\n self.display(step, lamda)\n lamda0 = lamda\n return 1\n\nclass QuasiNewtonAlgorithm(NonLinearAlgorithm): # Quasi-Newton method\n \n def __init__(self, _truss, _toll, _nItMax, _dlamda, _Kt_method): \n \n NonLinearAlgorithm.__init__(self, _truss, _toll, 
_nItMax,_Kt_method)\n self.dlamda = _dlamda\n \n def run(self):\n lamda = 0.\n lamda0 = 0.\n dlamda = self.dlamda\n step = 0\n while lamda < self.lamdaMax and not self.stopit:\n step+=1\n lamda = lamda0 + dlamda\n Delta_u = np.zeros((2*len(self.truss.nodes),1)) #*2 pour les 2 directions x et y \n \n print '\\n--- Newton Raphson - Load level, lambda =', lamda, ' ---'\n \n self.truss.applyNodalLoads(lamda)\n \n Kt = self.truss.buildKt(self.computeTangentMethod)\n g = self.truss.getOOBF()\n try:\n Kt_inv = np.linalg.inv(Kt)\n except np.linalg.LinAlgError:\n print \"Matrix Kt not invertible\"\n print(Kt)\n sys.exit()\n else:\n du = np.dot(Kt_inv, -g)\n Delta_u+=du\n self.truss.incrementPositions(Delta_u)\n error = 1.0\n error = self.computeError(g, lamda)\n NbIter = 0\n while error > self.toll and NbIter < self.nItMax: \n g = self.truss.getOOBF()\n du = np.dot(Kt_inv, -g) \n \n Delta_u+=du\n self.truss.incrementPositions(Delta_u)\n error = self.computeError(g, lamda)\n NbIter +=1\n \n self.truss.update()\n self.archive(lamda, NbIter)\n self.display(step, lamda)\n lamda0 = lamda\n return 1\n \nclass NewtonRaphsonAlgorithm_inTime(NonLinearAlgorithm):\n #Constructor:\n def __init__(self, _truss, _toll, _nItMax, _dt, _t_max,_F_max, _period):\n NonLinearAlgorithm.__init__(self, _truss, _toll, _nItMax,'')\n #Time step:\n self.dt = _dt\n #Maximum time before stopping:\n self.t_max = _t_max\n #Maximum force:\n self.F_max = _F_max\n #Period of the signal:\n self.period = _period\n \n #Main function, only valid for a truss with 2 nodes !\n #All loads are applied on the node 2 ! 
In the Y direction.\n # DDL(X,Y) are fixed for node 1.\n # DDL(X) is fixed for node 2.\n def run(self):\n #Check the time step that is given to be sure we go through all minimas and maximas\n # of the sawtooth loading:\n if not self.period/self.dt == math.floor(self.period/self.dt):\n sys.exit('The provided time step is not correct.')\n \n #Initialize the time:\n t = 0.\n \n #Initialize nodal loads:\n self.truss.nodes[1].applyTimeLoad('y', self.F_sawtooth)\n \n dt = self.dt\n step = 0\n #Loop on time:\n while t < self.t_max:\n #Increment the step:\n step+=1\n \n #Increment time:\n t+=dt\n \n #Allocated space:\n Delta_u = np.zeros((2*len(self.truss.nodes),1))\n \n print '>>> Newton Raphson in time - Load time, t =', t, ' --- \\r',\n \n #Apply loads:\n self.truss.applyNodalTimeLoads(t)\n #Build the tangent stiffness matrix:\n Kt = self.truss.buildKt(self.computeTangentMethod)\n #Compute the out of balance forces:\n g = self.truss.getOOBF()\n #Solve the linear system:\n du = np.linalg.solve(Kt, -g)\n #Update positions:\n Delta_u+=du\n self.truss.incrementPositions(Delta_u)\n #Compute the error:\n error = self.computeError_time(g, t)\n \n #Usual N.-R. step:\n current_iteration = 0\n while error > self.toll :\n if current_iteration > self.nItMax: \n print \"\"\n print \">>> N.-R. 
in time failed at converging.\"\n sys.exit()\n #Compute the tangent stiffness matrix:\n Kt = self.truss.buildKt(self.computeTangentMethod)\n #Compute the out of balance forces:\n g = self.truss.getOOBF()\n #Solve the linear system:\n du = np.linalg.solve(Kt, -g)\n #Update positions:\n Delta_u+=du\n self.truss.incrementPositions(Delta_u)\n #Compute the error:\n error = self.computeError_time(g, t)\n current_iteration += 1\n \n self.truss.update()\n self.archive(t, current_iteration)\n self.archive_some_time_infos(t)\n self.archive_PK2(t)\n self.display(step, t)\n \n return 1\n \n #Archive Pk2 stress in the bars:\n def archive_PK2(self,t):\n f1 = open('NR_in_time_PK2.ascii','a')\n f1.write('t=' + str(t))\n for bar in self.truss.bars:\n pk2 = bar.getPK2Stress()\n f1.write(';PK2_bar' + str(bar.nb) + '=' + str(pk2))\n f1.write('\\n')\n f1.close()\n \n #Compute a saw-tooth-like loading:\n def F_sawtooth(self, t):\n t_equ = t/self.period - math.floor(t/self.period) # t_equ is the equivalent time which has the same force value.\n if(t_equ <= 0.25):\n F = 4.0*self.F_max*t_equ\n elif (t_equ > 0.25 and t_equ <= 0.75):\n F = 2.0*self.F_max*(-2.0*t_equ+1.0)\n else:\n F = 4.0*self.F_max*(t_equ-1.0)\n return F\n \n def archive_some_time_infos(self,t):\n f1 = open('Some_more_infos.ascii','a')\n f1.write('t=' + str(t) + ';dt=' + str(self.dt) + ';F_max=' + str(self.F_max))\n f1.write(';T=' + str(self.period) + '\\r')\n f1.close()\n \n def computeError_time(self, g, t):\n \n error = abs(np.linalg.norm(g))*self.toll\n\n return error\n\nclass ArcLengthAlgorithm(NonLinearAlgorithm): # Arc-length method\n \n def __init__(self, _truss, _toll, _nItMax, _dlamda, _psi, _Id, _applyCorrectiveMethod,_computeTangentMethod): \n \n NonLinearAlgorithm.__init__(self, _truss, _toll, _nItMax,_computeTangentMethod)\n self.dlamda = _dlamda\n self.psi = _psi\n self.Id = _Id # Ideal number of iterations per step\n self.nItMax = _nItMax\n self.applyCorrectiveMethod = _applyCorrectiveMethod\n \n def 
archive_truss_(self,step,lamda):\n str_ = '_'+str(self.dlamda)+'_'+str(self.Id)+'_'+str(self.psi)\n str_ = str_+'_'+'_'+str(self.nItMax)+'_'+str(self.toll)\n \n f1 = open(('Lambda'+str_+'.ascii'),'a')\n f1.write(str(lamda)+'\\n')\n f1.close()\n for node in self.truss.nodes:\n f3 = open(('Node_'+str(node.nb)+'_POSITION'+str_+'.ascii'),'a')\n f3.write(str(node.x)+' '+str(node.y)+'\\n')\n f3.close()\n f4 = open(('Node_'+str(node.nb)+'_DISPLACEMENTS'+str_+'.ascii'),'a')\n f4.write(str(node.u)+' '+str(node.v)+'\\n')\n f4.close()\n f5 = open(('Node_'+str(node.nb)+'_F_EXT'+str_+'.ascii'),'a')\n f5.write(str(node.Fx)+' '+str(node.Fy)+'\\n')\n f5.close()\n \n def run(self):\n lamda = 0.\n lamda0 = 0.\n dlamda = self.dlamda\n step = 0 \n DeltaL = 0.\n DeltaL0 = 0.\n current_iteration = 0\n # The first step is a call to the Newton-Raphson algorithm:\n \n step+=1\n lamda = lamda0 + dlamda\n Delta_u = np.zeros((2*len(self.truss.nodes),1)) \n \n print '\\n--- ArcLengthAlgorithm :: Newton-Raphson step - Load level, lambda =', lamda, ' ---'\n \n #Apply loads:\n self.truss.applyNodalLoads(lamda)\n #Build the tangent stiffness matrix:\n Kt = self.truss.buildKt(self.computeTangentMethod)\n #Get the out of balance forces:\n g = self.truss.getOOBF()\n #Solve the linear system for du:\n du = np.linalg.solve(Kt, -g)\n #Assemble the global loads vector:\n qef = self.truss.get_qef()\n #Increment du:\n Delta_u+=du\n #Update positions:\n self.truss.incrementPositions(Delta_u)\n #Compute the error:\n error = self.computeError(g, lamda)\n #While loop of the Newton-Raphson step:\n while error > self.toll and current_iteration < self.nItMax: \n Kt = self.truss.buildKt(self.computeTangentMethod)\n g = self.truss.getOOBF()\n du = np.linalg.solve(Kt, -g) \n Delta_u+=du\n self.truss.incrementPositions(Delta_u)\n error = self.computeError(g, lamda)\n current_iteration +=1\n if current_iteration > self.nItMax:\n print \">>> N.-R. 
step didn't converge in the spherical arc-length method.\"\n sys.exit()\n #Get the initial arc-length:\n DeltaL0 = math.sqrt(np.transpose(Delta_u).dot(Delta_u) + self.psi**2*lamda**2*np.transpose(qef).dot(qef)) # !!!!!!!!!!!!!! A refaire !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n DeltaL = DeltaL0\n lamda0 = lamda\n #Update the truss:\n self.truss.update()\n self.archive(lamda, current_iteration)\n self.display(step, lamda) \n self.archive_truss_(step,lamda)\n \n #########################################\n ## PREDICTOR PHASE ##\n #####################\n \n #We want to track oscillations:\n counter_oscillating = 0\n boolean_oscillating = 0\n counter_applied_corrective = 0\n val = 0\n bool_corr = 0\n \n RESTART_ = 0\n \n while lamda < self.lamdaMax and not self.stopit:\n if step == 6e3:\n return -7\n \n #Not yet implemented !\n if RESTART_ == 1:\n self.truss.resetPositions()\n print ''\n print 'truss has been reset to previous !',DeltaL,current_iteration\n \n dlamda_previous = dlamda\n \n step+=1\n #Apply loads:\n self.truss.applyNodalLoads(lamda)\n \n current_iteration = 0\n\n Delta_u = np.zeros((2*len(self.truss.nodes),1))\n #Build the tangent stiffness matrix:\n Kt = self.truss.buildKt(self.computeTangentMethod)\n #Get the out of balance forces:\n g = self.truss.getOOBF()\n #Get the global loads vector:\n qef = self.truss.get_qef() \n #Try to invert Kt:\n try:\n Kt_inv = np.linalg.inv(Kt)\n except np.linalg.LinAlgError:\n # Not invertible. Skip this one.\n print \"Kt is not invertible\"\n print(Kt)\n sys.exit()\n else:\n deltaP = - Kt_inv.dot(g) \n deltaPt = Kt_inv.dot(qef)\n #Compute deltaLambda_p:\n Ct1 = np.transpose(deltaP).dot(deltaP)*np.transpose(deltaPt).dot(deltaPt)+ (DeltaL**2-np.transpose(deltaP).dot(deltaP))*(np.transpose(deltaPt).dot(deltaPt)+self.psi**2*np.transpose(qef).dot(qef))\n if Ct1 < 0:\n print \"Computation of Ct1 leads to a negative number. 
We can't compute its square root.\"\n sys.exit()\n else:\n Ct1 = math.sqrt(Ct1)\n\n # Verify positive-definiteness of the tangent stiffness matrix:\n if np.all(np.linalg.eigvals(Kt) > 0):\n #Take the positive sign '+':\n dlamda = (-np.transpose(deltaP).dot(deltaPt) + Ct1) / (np.transpose(deltaPt).dot(deltaPt) + self.psi**2 * np.transpose(qef).dot(qef)) \n else:\n dlamda = (-np.transpose(deltaP).dot(deltaPt) - Ct1) / (np.transpose(deltaPt).dot(deltaPt) + self.psi**2 * np.transpose(qef).dot(qef)) \n \n ##############################\n ## APPLY CORRECTIVE PROCESS ##\n ##############################\n MAXX = 300\n if self.applyCorrectiveMethod == 1 and counter_oscillating > 10 and counter_applied_corrective < MAXX:\n counter_applied_corrective += 1\n if counter_applied_corrective == 1:\n du_corr = np.linalg.solve(Kt_inv, -g);\n val = qef.T.dot(du_corr)\n if val < 0:\n dlamda = (-np.transpose(deltaP).dot(deltaPt) + Ct1) / (np.transpose(deltaPt).dot(deltaPt) + self.psi**2 * np.transpose(qef).dot(qef))\n else:\n dlamda = (-np.transpose(deltaP).dot(deltaPt) - Ct1) / (np.transpose(deltaPt).dot(deltaPt) + self.psi**2 * np.transpose(qef).dot(qef))\n bool_corr = 1\n \n #Update du:\n du = deltaP + dlamda*deltaPt\n Delta_u+=du\n lamda = lamda0 + dlamda\n self.truss.incrementPositions(Delta_u)\n error = self.computeError(g, Kt)\n \n #####################\n ## CORRECTOR PHASE ##\n #####################\n \n while error > self.toll and current_iteration < self.nItMax:\n #Apply loads:\n self.truss.applyNodalLoads(lamda)\n #Build the tangent stiffness matrix:\n Kt = self.truss.buildKt(self.computeTangentMethod)\n #Get the initial out of balance forces:\n g0 = self.truss.getOOBF()\n #Get the globl loads vector:\n qef = self.truss.get_qef()\n try:\n Kt_inv = np.linalg.inv(Kt)\n except numpy.linalg.LinAlgError:\n print \"Kt is not invertible\"\n printf(Kt) \n sys.exit()\n else:\n deltaP = - Kt_inv.dot(g0) \n deltaPt = Kt_inv.dot(qef)\n #Compute coefficients of the equation (15):\n a1 = 
np.transpose(deltaPt).dot(deltaPt) + self.psi**2*np.transpose(qef).dot(qef)\n a2 = 2 * (np.transpose(deltaPt).dot(Delta_u+deltaP) + dlamda*self.psi**2*np.transpose(qef).dot(qef))\n a3 = np.transpose(Delta_u+deltaP).dot(Delta_u+deltaP) + dlamda**2 *self.psi**2*np.transpose(qef).dot(qef) - DeltaL**2\n #Compute the roots of the second order equation (15):\n try:\n lamda1 = ( -a2 + math.sqrt(a2**2 - 4*a1*a3)) / (2*a1)\n lamda2 = ( -a2 - math.sqrt(a2**2 - 4*a1*a3)) / (2*a1)\n except:\n print a1 , a2 , a3, a2**2 - 4*a1*a3, lamda\n sys.exit()\n du1 = deltaP + lamda1*deltaPt\n du2 = deltaP + lamda2*deltaPt\n #Compute the cosinus of the angle:\n cTheta1 = (np.transpose(Delta_u).dot(Delta_u+du1) + self.psi**2 * np.transpose(qef).dot(qef) * dlamda * (lamda1+dlamda)) \n cTheta2 = (np.transpose(Delta_u).dot(Delta_u+du2) + self.psi**2 * np.transpose(qef).dot(qef) * dlamda * (lamda2+dlamda))\n #Choose the smallest:\n if cTheta1>cTheta2:\n dlamda += lamda1\n du = du1\n else:\n dlamda += lamda2\n du = du2\n #Update:\n Delta_u += du\n lamda = lamda0 + dlamda \n self.truss.incrementPositions(Delta_u)\n #Compute the error:\n error = self.computeError(g0, lamda)\n current_iteration += 1\n \n # if error > self.toll:\n # RESTART_ = 1\n # else:\n # RESTART_ = 0\n \n if current_iteration != 0 :#and RESTART_ == 0: \n #Update deltaL:\n DeltaL = DeltaL0*math.sqrt(float(self.Id)/current_iteration) \n\n else:#if RESTART_ == 0:\n #update the deltaL variable with a default step:\n DeltaL = DeltaL0*math.sqrt(float(self.Id)/1.2)\n #Note: 1.2 is chosen quite arbitrarily.\n # else:\n # DeltaL = 0.8*DeltaL\n \n if RESTART_ == 0:\n print \">> Sph. arc-len. :: pred. 
at step \",step,\" ----- lambda: \",lamda,\" --- dlambda: \",dlamda,\"--- dlambda_previous: \",dlamda_previous,\" \\r\",\n #Update the truss:\n self.truss.update()\n self.archive(lamda[0,0], current_iteration)\n self.display(step, lamda)\n self.archive_truss_(step,lamda)\n #self.archive_internal_forces()\n lamda0 = lamda\n \n #Check that we don't oscillate between two values of dlamda:\n if abs(abs(dlamda_previous)-abs(dlamda)) < 1e-5:\n #print \"Oscillating!\"\n counter_oscillating += 1\n # Check the signs were opposite!\n if dlamda_previous < 0 and dlamda < 0:\n counter_oscillating -= 1\n if dlamda_previous > 0 and dlamda > 0:\n counter_oscillating -= 1\n if counter_oscillating > 50 and not self.applyCorrectiveMethod == 1:\n #boolean_oscillating = 1\n counter_oscillating = 0\n print \"\"\n print \">>> Oscillations.\"\n return -77\n else:\n #print \" >>> In undef. : \",abs(abs(dlamda_previous_at_beginning_of_pred)-abs(dlamda))\n if not self.applyCorrectiveMethod == 1:\n counter_oscillating = 0\n boolean_oscillating = 0\n \n if step > 10000 and self.applyCorrectiveMethod == 1:\n sys.exit(\"Je quitte a 10000 !\")\n \n return 1\n \n\nclass UpdatedNormalPlaneArcLengthAlgorithm(ArcLengthAlgorithm): # Updated normal plane arc-length method\n \n def __init__(self, _truss, _toll, _nItMax, _dlamda, _psi, _Id):\n ArcLengthAlgorithm.__init__(self, _truss, _toll, _nItMax, _dlamda, _psi, _Id)\n \n def run(self):\n lamda = 0.\n lamda0 = 0.\n dlamda = self.dlamda\n step = 0 \n DeltaL = 0.\n DeltaL0 = 0.\n current_iteration = 0\n #################################\n ## FIRST STEP: N.-R. 
ALGORITHM ##\n #################################\n \n step+=1\n lamda = lamda0 + dlamda\n Delta_u = np.zeros((2*len(self.truss.nodes),1))\n \n #print '\\n---UpdatedNormalPlane :: Newton Raphson - Load level, lambda =', lamda, ' ---'\n \n #Apply loads:\n self.truss.applyNodalLoads(lamda)\n #Build the tangent stiffness matrix:\n Kt = self.truss.buildKt(self.computeTangentMethod)\n #Compute the out of balance forces:\n g = self.truss.getOOBF()\n #Solve the linear system:\n du = np.linalg.solve(Kt, -g)\n #Get the global loads vector:\n qef = self.truss.get_qef()\n \n Delta_u+=du\n #Update positions:\n self.truss.incrementPositions(Delta_u)\n #Compute the error:\n error = self.computeError(g, lamda)\n current_iteration = 0\n #Newton-Raphson loop:\n while error > self.toll and current_iteration < self.nItMax:\n #Compute the tangent stiffness matrix:\n Kt = self.truss.buildKt(self.computeTangentMethod)\n #Get the out of balance forces:\n g = self.truss.getOOBF()\n #Solve the system:\n du = np.linalg.solve(Kt, -g)\n #Update positions:\n Delta_u+=du\n self.truss.incrementPositions(Delta_u)\n #Compute the error:\n error = self.computeError(g, lamda)\n current_iteration +=1\n if current_iteration > self.nItMax:\n print \"Update normal plane::Newton-Raphson step didn't converge.\"\n return -7\n #sys.exit()\n #Compute deltaL:\n DeltaL0 = math.sqrt(np.transpose(Delta_u).dot(Delta_u) + self.psi**2*lamda**2*np.transpose(qef).dot(qef)) # !!!!!!!!!!!!!! 
A refaire !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n DeltaL = DeltaL0\n lamda0 = lamda\n #Update the truss:\n self.truss.update()\n self.archive(lamda,current_iteration)\n self.display(step, lamda) \n \n boolean_oscillating = 0\n counter_oscillating = 0\n \n ####################\n ## PREDICTOR STEP ##\n ####################\n while lamda < self.lamdaMax and not self.stopit:\n dlamda_previous_at_beginning_of_pred = dlamda\n \n step+=1\n #Apply loads:\n self.truss.applyNodalLoads(lamda)\n \n current_iteration = 0\n Delta_u = np.zeros((2*len(self.truss.nodes),1))\n #Compute the tangent stiffness matrix:\n Kt = self.truss.buildKt(self.computeTangentMethod)\n #Get the out of balance forces:\n g = self.truss.getOOBF()\n qef = self.truss.get_qef()\n #Try to invert the tangent stiffness matrix:\n try:\n Kt_inv = np.linalg.inv(Kt)\n except numpy.linalg.LinAlgError:\n # Not invertible. Skip this one.\n #print \"Kt is not invertible\"\n print(Kt)\n #sys.exit()\n return -7\n else:\n deltaP = - Kt_inv.dot(g)\n deltaPt = Kt_inv.dot(qef)\n \n Ct1 = np.transpose(deltaP).dot(deltaP)*np.transpose(deltaPt).dot(deltaPt)+ (DeltaL**2-np.transpose(deltaP).dot(deltaP))*(np.transpose(deltaPt).dot(deltaPt)+self.psi**2*np.transpose(qef).dot(qef))\n if Ct1 < 0:\n print \"Ct1 is negative. Can't take its square root.\"\n return -7\n #sys.exit()\n else:\n Ct1 = math.sqrt(Ct1)\n try:\n if np.all(np.linalg.eigvals(Kt) > 0) or (step > 45 and step < 60) :\n if boolean_oscillating == 0:\n dlamda = (-np.transpose(deltaP).dot(deltaPt) + Ct1) / (np.transpose(deltaPt).dot(deltaPt) + self.psi**2 * np.transpose(qef).dot(qef))\n #print \"def. pos. -- usual\"\n else:\n dlamda = (-np.transpose(deltaP).dot(deltaPt) - Ct1) / (np.transpose(deltaPt).dot(deltaPt) + self.psi**2 * np.transpose(qef).dot(qef))\n #print \"def. pos. 
-- changed\"\n elif np.all(np.linalg.eigvals(Kt) < 0):\n dlamda = (-np.transpose(deltaP).dot(deltaPt) - Ct1) / (np.transpose(deltaPt).dot(deltaPt) + self.psi**2 * np.transpose(qef).dot(qef))\n print \"Def. neg.\"\n return -7\n #sys.exit()\n else:\n if boolean_oscillating == 0:\n dlamda = (-np.transpose(deltaP).dot(deltaPt) - Ct1) / (np.transpose(deltaPt).dot(deltaPt) + self.psi**2 * np.transpose(qef).dot(qef))\n #print \"undef. -- usual\"\n else:\n dlamda = (-np.transpose(deltaP).dot(deltaPt) + Ct1) / (np.transpose(deltaPt).dot(deltaPt) + self.psi**2 * np.transpose(qef).dot(qef))\n #print \"undef. -- changed\"\n except:\n print(Kt)\n fig = plt.figure()\n plt.imshow(Kt,interpolation='none')\n plt.show()\n print \"Unexpected error:\", sys.exc_info()[0]\n #sys.exit()\n return -7\n \n du = deltaP + dlamda*deltaPt\n Delta_u+=du\n lamda = lamda0 + dlamda\n #Increment position:\n self.truss.incrementPositions(Delta_u)\n #Compute error:\n error = self.computeError(g, Kt)\n \n #####################\n ## CORRECTOR PHASE ##\n #####################\n while error > self.toll and current_iteration < self.nItMax:\n #Apply loads:\n #print \"Corrector\",du\n self.truss.applyNodalLoads(lamda)\n #Compute the tangent stiffness matrix:\n Kt = self.truss.buildKt(self.computeTangentMethod)\n g0 = self.truss.getOOBF()\n qef = self.truss.get_qef()\n try:\n Kt_inv = np.linalg.inv(Kt)\n except numpy.linalg.LinAlgError:\n print \"Kt is not invertible\"\n print(Kt)\n print \"Unexpected error:\", sys.exc_info()[0]\n #sys.exit()\n return -7\n else:\n deltaP = - Kt_inv.dot(g0)\n deltaPt = Kt_inv.dot(qef)\n\n a1 = dlamda*self.psi**2*np.transpose(qef).dot(qef)\n a1 = a1 + np.transpose(deltaPt).dot(Delta_u)\n a2 = np.transpose(Delta_u).dot(deltaP)\n \n #print \"a1 \",a1,\"a2 \",a2,\"ratio \",a2/a1,\"deltzPt \",deltaPt\n\n lamda1 = -a2/a1\n dlamda += lamda1\n\n du = deltaP + lamda1*deltaPt\n\n Delta_u += du\n lamda = lamda0 + dlamda\n #Increment positions:\n self.truss.incrementPositions(Delta_u)\n 
#Compute the error:\n error = self.computeError(g0, lamda)\n current_iteration += 1\n \n \n if current_iteration != 0: \n DeltaL = DeltaL0*math.sqrt(float(self.Id)/current_iteration)\n\n else :\n print \"Don't go over here !!\"\n sys.exit(\"Don't go over here!\")\n return -7\n #sys.exit()\n \n \n self.truss.update()\n self.archive(lamda[0,0], current_iteration)\n self.display(step, lamda)\n lamda0 = lamda\n print \"- Update normal plane :: The predictor at step \",step,\" ------- lambda: \",lamda,\"---- dlambda: \",dlamda,\" -- dlamda_prev: \",dlamda_previous_at_beginning_of_pred,\" \\r\",\n \n #Check that we don't oscillate between two values of dlamda:\n if abs(abs(dlamda_previous_at_beginning_of_pred)-abs(dlamda)) < 1e-6:\n #print \"Oscillating!\"\n counter_oscillating += 1\n # Check the signs were opposite!\n if dlamda_previous_at_beginning_of_pred < 0 and dlamda < 0:\n counter_oscillating -= 1\n if dlamda_previous_at_beginning_of_pred > 0 and dlamda > 0:\n counter_oscillating -= 1\n if counter_oscillating > 50:\n #boolean_oscillating = 1\n counter_oscillating = 0\n print \"\"\n print \">>> Oscillations.\"\n return -7\n else:\n #print \" >>> In undef. 
: \",abs(abs(dlamda_previous_at_beginning_of_pred)-abs(dlamda))\n counter_oscillating = 0\n boolean_oscillating = 0\n\n print \"Lambda avant le return: \",lamda \n return 1\n\nclass NormalPlaneArcLengthAlgorithm(ArcLengthAlgorithm): # Normal plane arc-length method\n \n def __init__(self, _truss, _toll, _nItMax, _dlamda, _psi, _Id):\n ArcLengthAlgorithm.__init__(self, _truss, _toll, _nItMax, _dlamda, _psi, _Id)\n \n def run(self):\n lamda = 0.\n lamda0 = 0.\n dlamda = self.dlamda\n step = 0 \n DeltaL = 0.\n DeltaL0 = 0.\n current_iteration = 0\n dLamdaPredict = 0.0\n #################################\n ## FIRST STEP : NEWTON-RAPHSON ##\n #################################\n step+=1\n lamda = lamda0 + dlamda\n Delta_u = np.zeros((2*len(self.truss.nodes),1)) \n \n print '\\n---NormalPlaneArcLengthAlgorithm :: Newton Raphson - Load level, lambda =', lamda, ' ---'\n \n #Apply loads:\n self.truss.applyNodalLoads(lamda)\n #Compute tangent stiffness matrix:\n Kt = self.truss.buildKt(self.computeTangentMethod)\n #Get the out of balance forces:\n g = self.truss.getOOBF() \n #Solve the linear system:\n du = np.linalg.solve(Kt, -g) \n #Get the global loads vector:\n qef = self.truss.get_qef()\n #Update deltaU:\n Delta_u+=du\n #Update positions:\n self.truss.incrementPositions(Delta_u)\n #Compute the error:\n error = self.computeError(g, lamda)\n current_iteration = 0\n #Loop of the Newton-Raphson algorithm:\n while error > self.toll and current_iteration < self.nItMax:\n #Get the tangent stiffness matrix:\n Kt = self.truss.buildKt(self.computeTangentMethod)\n #Get the out of balance forces:\n g = self.truss.getOOBF()\n #Solve the linear system:\n du = np.linalg.solve(Kt, -g) \n #Update du:\n Delta_u+=du\n #Update positions:\n self.truss.incrementPositions(Delta_u)\n #Compute the error:\n error = self.computeError(g, lamda)\n current_iteration +=1\n if current_iteration > self.nItMax:\n print \"NormalPlaneArcLengthAlgorithm :: N.-R. 
didn't converge.\"\n sys.out()\n #Compute deltaL_0 and deltaL:\n DeltaL0 = math.sqrt(np.transpose(Delta_u).dot(Delta_u) + self.psi**2*lamda**2*np.transpose(qef).dot(qef)) # !!!!!!!!!!!!!! A refaire !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n DeltaL = DeltaL0\n lamda0 = lamda\n #Update the truss:\n self.truss.update()\n self.archive(lamda, current_iteration)\n self.display(step, lamda) \n \n #####################\n ## PREDICTOR PHASE ##\n #####################\n while lamda < self.lamdaMax and not self.stopit:\n print \"-- NormalPlaneArcLengthAlgorithm:: The predictor at step \",step,\" \\r\",\n step+=1\n #Apply loads to the truss:\n self.truss.applyNodalLoads(lamda)\n \n current_iteration = 0\n Delta_u = np.zeros((2*len(self.truss.nodes),1))\n #Get the tangent stiffness matrix:\n Kt = self.truss.buildKt(self.computeTangentMethod)\n #Compute the out of balance forces:\n g = self.truss.getOOBF()\n #Compute the global loads vector:\n qef = self.truss.get_qef() \n #Try to invert the tangent stiffness matrix:\n try:\n Kt_inv = np.linalg.inv(Kt)\n except numpy.linalg.LinAlgError:\n print \"Kt is not invertible\"\n print(Kt)\n sys.exit()\n else:\n deltaP = - Kt_inv.dot(g)\n deltaPt = Kt_inv.dot(qef)\n \n Ct1 = np.transpose(deltaP).dot(deltaP)*np.transpose(deltaPt).dot(deltaPt)+ (DeltaL**2-np.transpose(deltaP).dot(deltaP))*(np.transpose(deltaPt).dot(deltaPt)+self.psi**2*np.transpose(qef).dot(qef))\n if Ct1 < 0:\n print \"Ct1 is negative. 
Can't take its square root.\"\n sys.exit()\n else:\n Ct1 = math.sqrt(Ct1)\n #Verify that the matrix is positive definite:\n if np.all(np.linalg.eigvals(Kt) > 0): \n dlamda = (-np.transpose(deltaP).dot(deltaPt) + Ct1) / (np.transpose(deltaPt).dot(deltaPt) + self.psi**2 * np.transpose(qef).dot(qef))\n else:\n dlamda = (-np.transpose(deltaP).dot(deltaPt) - Ct1) / (np.transpose(deltaPt).dot(deltaPt) + self.psi**2 * np.transpose(qef).dot(qef))\n dLamdaPredict = dlamda\n du = deltaP + dlamda*deltaPt\n DuPredict = du\n Delta_u+=du\n lamda = lamda0 + dlamda\n #Update the positions:\n self.truss.incrementPositions(Delta_u)\n #Compute the error:\n error = self.computeError(g, Kt)\n \n #####################\n ## CORRECTOR PHASE ##\n ##################### \n while error > self.toll and current_iteration < self.nItMax:\n #print \"lamda = \", lamda\n #Apply loads:\n self.truss.applyNodalLoads(lamda)\n #Build Kt:\n Kt = self.truss.buildKt(self.computeTangentMethod)\n #Get the out of balance forces:\n g0 = self.truss.getOOBF()\n #Get the global loads vector:\n qef = self.truss.get_qef()\n #Try to invert Kt:\n try:\n Kt_inv = np.linalg.inv(Kt)\n except numpy.linalg.LinAlgError:\n print \"Kt is not invertible\"\n print(Kt)\n sys.exit()\n else:\n deltaP = - Kt_inv.dot(g0) \n deltaPt = Kt_inv.dot(qef) \n #Solve equation (18) by first finding both coefficients:\n a1 = dLamdaPredict*self.psi**2*np.transpose(qef).dot(qef)\n a1 = a1 + np.transpose(deltaPt).dot(DuPredict)\n a2 = np.transpose(DuPredict).dot(deltaP)\n\n lamda1 = -a2/a1\n dlamda += lamda1\n\n du = deltaP + lamda1*deltaPt\n #Update:\n Delta_u += du\n lamda = lamda0 + dlamda \n self.truss.incrementPositions(Delta_u)\n error = self.computeError(g0, lamda)\n current_iteration += 1\n \n \n if current_iteration != 0: \n DeltaL = DeltaL0*math.sqrt(float(self.Id)/current_iteration)\n\n else:\n print \"Error : should not end up here !\"\n self.truss.update()\n self.archive(lamda[0,0], current_iteration)\n self.display(step, lamda)\n 
lamda0 = lamda\n \n \n print \"\"\n return 1" }, { "alpha_fraction": 0.5404762029647827, "alphanum_fraction": 0.5726190209388733, "avg_line_length": 20.83116912841797, "blob_id": "7f6af807f55df945892ec950f4f27692e768efd7", "content_id": "21fce0c72a5f7e67ae3f5d8a5ed7cf2d48fac4c9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1680, "license_type": "no_license", "max_line_length": 118, "num_lines": 77, "path": "/My_trusses/Part_1_in_time/group_C_Newton_Raphson_in_time_problem_1.py", "repo_name": "romintomasetti/ContinuumMechanics2018", "src_encoding": "UTF-8", "text": "import sys\nsys.path.insert(0, '../')\nfrom node import *\nfrom bar import *\nfrom truss import *\nfrom nonLinAlgo import *\n\nfrom optparse import OptionParser\nimport vtk\nimport math\n\ndef main(nogui):\n \n #Geometry\n a = 2.0\n b = 1.0\n #sys.exit()\n l0 = math.sqrt(a**2 + b**2)\n tol = 1e-9\n nItMax = 1000\n\n\n \n #Nodes\n node1 = Node(1, 0.0, 0.0)\n node2 = Node(2, a, b)\n \n #Material properties\n E = 70e9\n A = 0.01\n \n #Critical load\n qcr = (math.sqrt(3)*E*A*(b**3))/(9.0*(l0**3)) #It's the critical load divided by 2 since we consider just one bar!\n #We go a little bit higher than the critical load:\n F_max = -qcr*1.2\n print F_max\n #Max time before stopping:\n t_max = 2.0\n #Time step:\n dt = 0.005\n #Period:\n T = 1\n \n if(dt > T or dt > t_max or T > t_max):\n print \"Please check your parameters\"\n sys.exit()\n \n #Bars\n bar1 = Bar(1, [node1, node2], E, A)\n \n #Truss\n truss = Truss()\n truss.addNode(node1)\n truss.addNode(node2)\n truss.addBar(bar1)\n truss.setNodeRows() # ATTENTION: this line is mandatory at the end of the definition of the truss!\n \n #BCs\n truss.fix(node1,'x')\n truss.fix(node1,'y')\n truss.fix(node2,'x')\n \n #Non-linear algorithm\n algo = NewtonRaphsonAlgorithm_inTime(truss, tol, nItMax,dt, t_max, F_max, T)\n algo.run()\n\nif __name__ == '__main__':\n \n parser=OptionParser()\n 
parser.add_option(\"--nogui\", action=\"store_true\",\n help=\"Specify if we need to use the GUI\", dest=\"nogui\", default=False)\n\n (options, args)=parser.parse_args()\n \n nogui = options.nogui\n \n main(nogui)" }, { "alpha_fraction": 0.5363902449607849, "alphanum_fraction": 0.5449756383895874, "avg_line_length": 37.54135513305664, "blob_id": "6f74c4be7fdb964313093142767580d70a150290", "content_id": "6088cded15a562341ad4fc9a001a52bb178968fe", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5125, "license_type": "no_license", "max_line_length": 215, "num_lines": 133, "path": "/My_trusses/truss.py", "repo_name": "romintomasetti/ContinuumMechanics2018", "src_encoding": "UTF-8", "text": "from node import *\nfrom bar import *\nimport numpy as np\n\nclass Truss():\n\n def __init__(self):\n self.nodes = []\n self.bars = []\n self.fixedNodes = []\n \n def addNode(self, node): # Adds a node to the truss\n self.nodes.append(node)\n \n def addBar(self, bar): # Adds a bar to the truss\n self.bars.append(bar)\n \n def setNodeRows(self): # Set the rows corresponding to the nodes degrees of freedom in the global system of equations -> Used to imposed boundary conditions\n for i in range(len(self.nodes)):\n node = self.nodes[i]\n node.rowX = 2*i\n node.rowY = 2*i+1\n \n def buildKt(self, computeTangentMethod): # Assembles the global tangent stiffness matrix\n Kt = np.zeros((2*len(self.nodes), 2*len(self.nodes)))\n for bar in self.bars:\n Kte = bar.buildKt(computeTangentMethod)\n for i in range(len(bar.nodes)):\n nodi = bar.nodes[i]\n for j in range(len(bar.nodes)):\n nodj = bar.nodes[j]\n \n Kt[nodi.rowX, nodj.rowX] += Kte[2*i, 2*j]\n Kt[nodi.rowX, nodj.rowY] += Kte[2*i, 2*j+1]\n Kt[nodi.rowY, nodj.rowX] += Kte[2*i+1, 2*j]\n Kt[nodi.rowY, nodj.rowY] += Kte[2*i+1, 2*j+1]\n \n self.applyBCs(Kt)\n return Kt\n \n def getFint(self): # Assembles the global internal forces vector\n Fint = np.zeros((2*len(self.nodes), 1))\n for 
bar in self.bars:\n Fint_e = bar.getFint()\n for i in range(len(bar.nodes)):\n node = bar.nodes[i]\n Fint[node.rowX]+=Fint_e[2*i]\n Fint[node.rowY]+=Fint_e[2*i+1]\n return Fint\n \n def getFext(self): # Assembles the global external forces vector\n Fext = np.zeros((2*len(self.nodes), 1))\n for node in self.nodes:\n Fext[node.rowX] = node.Fx\n Fext[node.rowY] = node.Fy\n return Fext\n \n def get_qef(self): # Assembles the global loads vector\n qef = np.zeros((2*len(self.nodes), 1))\n for node in self.nodes:\n qef[node.rowX] = node.qef_x\n qef[node.rowY] = node.qef_y\n return qef\n \n def getOOBF(self): # Computes the Out-Of-Balance-Forces vector\n g = np.zeros((2*len(self.nodes), 1))\n \n Fint = self.getFint()\n Fext = self.getFext()\n g = Fint - Fext\n \n for node in self.fixedNodes:\n if node.isFixedAlongX:\n g[node.rowX] = 0.\n if node.isFixedAlongY:\n g[node.rowY] = 0.\n return g\n \n def fix(self, node, dof): # Imposes boundary conditions (only fixations are considered for the moment)\n self.fixedNodes.append(node)\n if (dof == 'x'):\n node.isFixedAlongX = True\n elif (dof == 'y'):\n node.isFixedAlongY = True\n else:\n raise Exception('Unknown dof!')\n \n def applyBCs(self, Kt): # Applies boundary conditions on the global tangent stiffness matrix\n for node in self.fixedNodes:\n if node.isFixedAlongX:\n Kt[node.rowX, :] = 0.\n Kt[:, node.rowX] = 0.\n Kt[node.rowX, node.rowX] = 1.0\n if node.isFixedAlongY:\n Kt[node.rowY, :] = 0.\n Kt[:, node.rowY] = 0.\n Kt[node.rowY, node.rowY] = 1.0\n \n def applyNodalLoads(self, lamda): # Applies the current loads to the nodes\n for node in self.nodes:\n node.Fx = node.qef_x * lamda\n node.Fy = node.qef_y * lamda\n \n def applyNodalTimeLoads(self, t): # Applies the current time-dependent loads to the nodes --> NB: To use only with NewtonRaphsonAlgorithmInTime !\n for node in self.nodes:\n node.Fx = node.get_qef_x(t)\n node.Fy = node.get_qef_y(t)\n \n def incrementPositions(self, du): # Increments nodes displacements of a 
quantity 'du' (which is a vector containing all the displacement variations) and updates nodes positions according to the new displacements\n for node in self.nodes:\n node.u = node.uOld + du[node.rowX,0]\n node.v = node.vOld + du[node.rowY,0]\n node.x = node.x0 + node.u\n node.y = node.y0 + node.v\n \n def resetPositions(self): # Reset nodes displacements and positions to the ones at the beginning of the step -> Useful when you have to restart a step!\n for node in self.nodes:\n node.u = node.uOld\n node.v = node.vOld\n node.x = node.xOld\n node.y = node.yOld\n \n def resetNodalLoads(self): # Resets current nodes loads to zero\n for node in self.nodes:\n node.Fx = 0.\n node.Fy = 0.\n \n def update(self):\n for node in self.nodes: # Updates nodes quantities -> To be called at the end of a step (i.e. when convergence is reached)\n node.uOld = node.u\n node.vOld = node.v\n node.xOld = node.x\n node.yOld = node.y" }, { "alpha_fraction": 0.5787314772605896, "alphanum_fraction": 0.583149254322052, "avg_line_length": 65.04166412353516, "blob_id": "98f293c1e8e66ea9e9840bf1d08f8eb2e881b406", "content_id": "36ba9af529f978e8b53cb4c8a0147b9950c55d9e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3169, "license_type": "no_license", "max_line_length": 206, "num_lines": 48, "path": "/My_trusses/node.py", "repo_name": "romintomasetti/ContinuumMechanics2018", "src_encoding": "UTF-8", "text": "class Node:\n \n def __init__(self, _nb, _x, _y):\n self.nb = _nb # Node number -> NB: Each node has to have a different number!\n self.x = _x # Node current position along x\n self.y = _y # Node current position along y\n self.xOld = self.x # Node position along x at the beginning of the step\n self.yOld = self.y # Node position along y at the beginning of the step\n self.x0 = self.x # Node position along x at the beginning of the simulation (\"t=0\")\n self.y0 = self.y # Node position along y at the beginning of the simulation 
(\"t=0\")\n self.u = 0. # Node current displacement along x\n self.v = 0. # Node current displacement along y\n self.uOld = self.u # Node displacement along x at the beginning of the step\n self.vOld = self.v # Node displacement along y at the beginning of the step\n self.rowX = -1 # Row corresponding to the node x degree of freedom in the global system of equations -> Used to imposed boundary conditions, set using truss.setNodeRows()\n self.rowY = -1 # Row corresponding to the node y degree of freedom in the global system of equations -> Used to imposed boundary conditions, set using truss.setNodeRows()\n self.Fx = 0. # Load currently applied on the node along x (= qef_x * lamda)\n self.Fy = 0. # Load currently applied on the node along y (= qef_x * lamda)\n self.qef_x = 0. # Total (initial) load applied on the node along x\n self.qef_y = 0. # Total (initial) load applied on the node along y\n self.qef_x_t = None # Time-dependent load applied on the node along x (it's a python function)\n self.qef_y_t = None # Time-dependent load applied on the node along y (it's a python function)\n self.isFixedAlongX = False # Flag set to true if the node is fixed along x\n self.isFixedAlongY = False # Flag set to true if the node is fixed along y\n \n def applyLoad(self, dof, val): # Applies a load of intensity 'val' along the direction identified by 'dof'\n if dof == 'x':\n self.qef_x = val\n if dof == 'y':\n self.qef_y = val\n \n def applyTimeLoad(self, dof, _f): # Initialize the time-dependent load (which is simply a python function) along the direction identified by 'dof' --> NB: To use only with NewtonRaphsonAlgorithmInTime !\n if dof == 'x':\n self.qef_x_t = _f\n if dof == 'y':\n self.qef_y_t = _f\n \n def get_qef_x(self, t): # Evaluates the time-dependent load along x at time 't' --> NB: To use only with NewtonRaphsonAlgorithmInTime !\n if self.qef_x_t:\n return self.qef_x_t(t)\n else:\n return 0.\n \n def get_qef_y(self, t): # Evaluates the time-dependent load along y 
at time 't' --> NB: To use only with NewtonRaphsonAlgorithmInTime !\n if self.qef_y_t:\n return self.qef_y_t(t)\n else:\n return 0." }, { "alpha_fraction": 0.5285835266113281, "alphanum_fraction": 0.564348042011261, "avg_line_length": 25.70676612854004, "blob_id": "60115afd7a034d0700dc2f5c670e8f3ed3efa7d0", "content_id": "638ec8875419f5534b0a8519b1a61a5270a910b5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3551, "license_type": "no_license", "max_line_length": 131, "num_lines": 133, "path": "/My_trusses/tests/test2bar_newton_raphson_modified/test2bar_newton_raphson_modified.py", "repo_name": "romintomasetti/ContinuumMechanics2018", "src_encoding": "UTF-8", "text": "import sys\nsys.path.insert(0, '../../')\nfrom node import *\nfrom bar import *\nfrom truss import *\nfrom nonLinAlgo import *\n\nfrom optparse import OptionParser\nimport vtk\nimport math\n\nimport matplotlib.pyplot as plt\n\ndef main(nogui):\n \n #Geometry\n a = 2.0\n b = 1.0\n l0 = math.sqrt(a**2 + b**2)\n tol = 1e-6\n nItMax = 100\n \n #Nodes\n node1 = Node(1, 0.0, 0.0)\n node2 = Node(2, a, b)\n node3 = Node(3, 2*a, 0.0)\n \n #Material properties\n E = 70e9\n A = 0.01\n \n #Critical load\n qcr = (math.sqrt(3)*E*A*(b**3))/(9.0*(l0**3))*2 \n\n #Bars\n bar1 = Bar(1, [node1, node2], E, A)\n bar2 = Bar(2, [node2, node3], E, A)\n \n #Truss\n truss = Truss()\n truss.addNode(node1)\n truss.addNode(node2)\n truss.addNode(node3)\n truss.addBar(bar1)\n truss.addBar(bar2)\n truss.setNodeRows() # ATTENTION: this line is mandatory at the end of the definition of the truss!\n \n #BCs\n truss.fix(node1,'x')\n truss.fix(node1,'y')\n truss.fix(node3,'x')\n truss.fix(node3,'y')\n \n #Loads\n node2.applyLoad('y', -1.5*qcr)\n \n #Non-linear algorithm\n dlamda0 = 0.01\n algo = QuasiNewtonAlgorithm(truss, tol, nItMax,dlamda0,'')\n \n #GUI --> if you want to run a test without GUI: in a command window type (without the quotation marks) 'python test.py --nogui'\n 
if nogui:\n if vtk.VTK_MAJOR_VERSION <= 5:\n print \"I Have VTK version <=5\"\n import trussViewerVtk5PyQt4 as v\n elif vtk.VTK_MAJOR_VERSION == 6:\n print \"I Have VTK version == 6\"\n import trussViewerVtk6PyQt4 as v\n else:\n print \" I have VTK else\"\n import trussViewerVtk7PyQt5 as v\n print 'Initialize MeshViewer...'\n gui = v.MeshViewer(truss, algo) \n print 'MeshViewer initialized!'\n gui.start()\n else:\n algo.run() # Runs the algorithm. If you are using the GUI, the GUI will take care of that\n \n # Analytical solution:\n counter = 0\n lower_bound = 0\n upper_bound = 2.2\n step = 0.01\n u_b = np.zeros((upper_bound-lower_bound)/step)\n for n in np.arange(lower_bound,upper_bound,step):\n u_b[counter] = n\n counter += 1\n P = (E*A*b*b*b)/(2*l0*l0*l0)* np.multiply(u_b,np.multiply(u_b-1,u_b-2))\n u1 = (1+math.sqrt(3)/3)*b;\n u2 = (1-math.sqrt(3)/3)*b;\n Pcrit_analytical = E*A/(2*l0**3)*(u2**3-3*b*u2**2+2*b**2*u2)\n f = open('Node_2_DISPLACEMENTS.ascii', 'r')\n uy2 = []\n for line in f:\n line = line.strip()\n columns = line.split()\n uy2.append(float(columns[1]))\n \n f.close()\n uy2 = np.array(uy2) \n \n f = open('Lambda.ascii')\n lambda_ = []\n for line in f:\n line = line.strip()\n columns = line.split()\n lambda_.append(float(columns[0]))\n \n f.close()\n lambda_ = np.array(lambda_)\n X = -uy2/b\n Y = 1.5*Pcrit_analytical*lambda_\n \n plot_num = plt.plot(X,Y,'k-.',linewidth=3.0,label='numerical')\n plot_ana = plt.plot(u_b,P,'r-.',linewidth=3.0,label='analytical')\n plt.legend()\n plt.xlabel('displ',fontsize=22)\n plt.ylabel('load P',fontsize=22)\n plt.title('Incremental method',fontsize=26)\n plt.grid(True)\n plt.show()\n\nif __name__ == '__main__':\n \n parser=OptionParser()\n parser.add_option(\"--nogui\", action=\"store_true\",\n help=\"Specify if we need to use the GUI\", dest=\"nogui\", default=False)\n\n (options, args)=parser.parse_args()\n \n nogui = options.nogui\n \n main(nogui)" }, { "alpha_fraction": 0.3461306393146515, "alphanum_fraction": 
0.3801004886627197, "avg_line_length": 32.161075592041016, "blob_id": "19308e88f48bdb14abb22b6ce38b12332828096f", "content_id": "47c80fee4a1adaf5de197798c36767d4475d8025", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4975, "license_type": "no_license", "max_line_length": 130, "num_lines": 149, "path": "/My_trusses/bar.py", "repo_name": "romintomasetti/ContinuumMechanics2018", "src_encoding": "UTF-8", "text": "from node import *\nimport numpy as np\nimport math\n\nclass Bar:\n \n def __init__(self, _nb, nods, _E, _A):\n self.nb = _nb # Bar number\n self.nodes = nods # Bar nodes (it is an array)\n if not len(self.nodes) == 2:\n raise Exception('bar nodes number is different from two!')\n self.l0 = self.getLength() # Bar initial length\n self.E_modulus = _E # Bar Young modulus\n self.Area = _A # Bar section area (always supposed constant in this code)\n self.numberOfKtEvals = 0 \n \n def getLength(self):\n return math.sqrt((self.nodes[1].x - self.nodes[0].x)**2 + (self.nodes[1].y - self.nodes[0].y)**2)\n \n def getE_GL(self):\n E_GL = ( self.getLength()**2 - self.l0**2 ) / ( 2 * self.l0**2 )\n return E_GL\n \n def getPK2Stress(self): # Computes PK2 stress\n E_GL = self.getE_GL()\n PK2 = self.E_modulus * E_GL\n #print \" -------------- Pk2 = \", self.E_modulus*E_GL, \"--------------\"\n return PK2\n \n def getFint(self): # Computes the bar internal forces vector\n \n Fint = np.zeros(4) \n PK2_stress = self.getPK2Stress()\n C = self.get_C()\n constant = PK2_stress * self.Area\n\n Fint[0] = -C[0]\n Fint[1] = -C[1]\n Fint[2] = C[0]\n Fint[3] = C[1]\n \n Fint = constant * Fint\n \n return Fint\n \n def buildKt(self, computeTangentMethod): # Computes the bar tangent stiffness matrix\n\n if computeTangentMethod == 'numeric':\n self.numberOfKtEvals += 1\n \n delta = 1e-5\n \n Kt = np.zeros((4,4))\n \n for j in range(0,4):\n if j >= 2:\n n = 1;\n else:\n n = 0;\n if j%2 == 0:\n self.nodes[n].x += delta\n else:\n 
self.nodes[n].y += delta\n P_plus = self.getFint()\n if j%2 == 0:\n self.nodes[n].x -= 2*delta\n else:\n self.nodes[n].y -= 2*delta\n P_minus = self.getFint()\n \n #print P_plus,P_minus\n \n if j%2 == 0:\n self.nodes[n].x += delta\n else:\n self.nodes[n].y += delta\n for i in range(0,4):\n Kt[i][j] = (P_plus[i]-P_minus[i])/(2*delta)\n \n #print Kt\n \n else:\n self.numberOfKtEvals += 1\n # The tangent stiffness matrix of a bar is 4 by 4:\n Kt = np.zeros((4, 4))\n \n # The tangent stiffness matrix is a combination of two matrices.\n # The material stiffness matrix:\n K_mat = np.zeros((4,4))\n # The geometric stiffness matrix:\n K_geo = np.zeros((4,4))\n \n # Building of K_mat (see https://www.colorado.edu/engineering/cas/courses.d/NFEM.d/NFEM.Ch09.d/NFEM.Ch09.pdf)\n constant_mat = self.Area * self.E_modulus / self.l0\n C = self.get_C()\n \n K_mat[0][0] = C[0]**2\n \n K_mat[0][1] = C[0] * C[1]\n K_mat[1][0] = K_mat[0][1]\n \n K_mat[0][2] = -C[0]**2\n K_mat[2][0] = K_mat[0][2]\n \n K_mat[0][3] = -C[0] * C[1]\n K_mat[3][0] = K_mat[0][3]\n \n K_mat[1][1] = C[1]**2\n \n K_mat[1][2] = K_mat[0][3]\n K_mat[2][1] = K_mat[1][2]\n \n K_mat[1][3] = -C[1]**2\n K_mat[3][1] = K_mat[1][3]\n \n K_mat[2][2] = K_mat[0][0]\n \n K_mat[2][3] = K_mat[0][1]\n K_mat[3][2] = K_mat[2][3]\n \n K_mat[3][3] = K_mat[1][1]\n \n K_mat = constant_mat * K_mat\n \n # Building of K_geo:\n # Get PK2 stress:\n PK2_stress = self.getPK2Stress()\n constant_geo = self.Area * PK2_stress / self.l0\n \n K_geo[0][0] = 1\n K_geo[1][1] = 1\n K_geo[2][2] = 1\n K_geo[3][3] = 1\n K_geo[0][2] = -1\n K_geo[1][3] = -1\n K_geo[2][0] = -1\n K_geo[3][1] = -1\n \n K_geo = constant_geo * K_geo\n \n Kt = K_geo + K_mat \n \n return Kt\n \n def get_C(self):\n C = np.zeros(2)\n C[0] = (self.nodes[1].x-self.nodes[0].x)/self.l0\n C[1] = (self.nodes[1].y-self.nodes[0].y)/self.l0\n return C\n \n \n \n " }, { "alpha_fraction": 0.551354706287384, "alphanum_fraction": 0.562438428401947, "avg_line_length": 30.476743698120117, "blob_id": 
"618feb5d1d0d825e005769fa89bca7e8025445f4", "content_id": "ad6122b7847de31cedaa84effe1cba3ba7d27336", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8120, "license_type": "no_license", "max_line_length": 93, "num_lines": 258, "path": "/My_trusses/trussViewerVtk6PyQt4.py", "repo_name": "romintomasetti/ContinuumMechanics2018", "src_encoding": "UTF-8", "text": "# -*- coding: latin-1; -*-\n# $Id$\n\n# --- GUI for Trusses to use with VTK 6 and PyQt4 --- #\n\nimport sys\nimport vtk\nfrom PyQt4 import QtCore, QtGui\nfrom vtk.qt4.QVTKRenderWindowInteractor import QVTKRenderWindowInteractor\n\nclass MeshViewer(QtGui.QWidget):\n\n app = QtGui.QApplication(sys.argv)\n \n \"\"\"\n Qt GUI for visu. the output\n \"\"\"\n def __init__(self, truss, algo): \n QtGui.QWidget.__init__(self) \n \n self.truss = truss\n self.algo = algo\n self.algo.dhook = self\n \n self.running = 'init'\n \n print \"starting MeshViewer init...\"\n \n self.__setupGUI()\n self.__setupVTK()\n \n self.app.lastWindowClosed.connect(self.app.quit)\n self.show() \n print \"MeshViewer ready.\" \n \n def closeEvent(self, event):\n self.algo.stopit=True \n self.running='running' # sort de \"while self.running=='pause'\" \n print \"GUI killed!\"\n QtGui.QWidget.closeEvent(self,event)\n\n\n def start(self):\n self.app.exec_()\n\n def __setupGUI(self):\n\n self.setWindowTitle(\"MeshViewer\")\n self.resize(800, 600)\n \n # vtk window\n \n self.vtkwidget = QVTKRenderWindowInteractor(self) # \"self\" sinon, rien ne s'affiche\n self.vtkwidget.setSizePolicy(QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, \n QtGui.QSizePolicy.Expanding))\n self.vtkwidget.setMinimumSize(QtCore.QSize(300, 300));\n self.vtkwidget.setSizePolicy(QtGui.QSizePolicy.Expanding,QtGui.QSizePolicy.Expanding)\n \n \n self.vtkwidget.Initialize() \n \n self.renderer = vtk.vtkRenderer()\n self.renderer.SetBackground(1.0, 1.0, 1.0) \n self.vtkwidget.GetRenderWindow().AddRenderer(self.renderer) \n \n 
style = vtk.vtkInteractorStyleTrackballCamera()\n self.vtkwidget.SetInteractorStyle(style) \n\n\n hbox = QtGui.QHBoxLayout()\n self.setLayout(hbox)\n hbox.addWidget(self.vtkwidget)\n \n pan = QtGui.QFrame()\n pan.setMaximumSize(QtCore.QSize(200,999999))\n hbox.addWidget(pan)\n \n vbox = QtGui.QVBoxLayout()\n pan.setLayout(vbox)\n \n self.startBut = QtGui.QPushButton(self.tr(\"start!\"))\n self.connect(self.startBut, QtCore.SIGNAL(\"clicked()\"), self.startSlot)\n vbox.addWidget(self.startBut)\n \n groupBox = QtGui.QGroupBox(\"Infos\")\n self.steplabel = QtGui.QLabel(\"step # 0\")\n self.loadlabel = QtGui.QLabel(\"lambda = %2.8f\" % 0)\n gbox = QtGui.QVBoxLayout()\n groupBox.setLayout(gbox) \n gbox.addWidget(self.steplabel) \n gbox.addWidget(self.loadlabel) \n vbox.addWidget(groupBox)\n \n vbox.addStretch(1)\n \n def startSlot(self):\n if self.running=='init': \n self.startBut.setText('Pause') # on demarre et on affiche \"pause\"\n self.running='running'\n self.algo.run()\n self.startBut.setText(\"Quit\")\n self.running='quit'\n elif self.running=='running': # on stoppe et on affiche 'continue\"\n self.running='pause'\n self.startBut.setText(\"Continue\")\n while self.running=='pause':\n self.app.processEvents(QtCore.QEventLoop.WaitForMoreEvents)\n elif self.running=='pause':\n self.running='running'\n self.startBut.setText(\"Pause\")\n elif self.running=='quit':\n self.app.quit()\n \n def disableStart(self):\n self.startBut.setDisabled(True)\n \n def __setupVTK(self):\n # polydata\n self.__createPolyData()\n self.poly = PolyData(self.polydata)\n self.renderer.AddActor(self.poly.actor)\n self.renderer.AddActor2D(self.poly.pointLabels)\n \n self.resetCamera() \n\n def resetCamera(self):\n self.renderer.ResetCamera()\n cam1 = self.renderer.GetActiveCamera()\n # 3D\n if 0:\n cam1.Elevation(35)\n cam1.SetViewUp(0, 1, 0)\n cam1.Azimuth(30)\n #2D\n else:\n cam1.Elevation(0)\n cam1.SetViewUp(0, 1, 0)\n cam1.Azimuth(0)\n self.renderer.ResetCameraClippingRange() \n \n def 
display(self, step, lamda):\n \n self.steplabel.setText(\"step # %d\" % step)\n self.loadlabel.setText(\"lambda = %2.8f\" % lamda)\n \n self.points.Reset()\n self.vertices.Reset()\n self.lines.Reset()\n self.scalars.Reset()\n \n # points\n nmap={}\n i=0\n for nod in self.truss.nodes:\n nmap[nod.nb]=i\n self.points.InsertPoint(i, nod.x, nod.y, 0.0)\n self.scalars.InsertNextValue(nod.nb)\n # node cells\n vertex = vtk.vtkVertex()\n vertex.GetPointIds().SetId(0, i)\n self.vertices.InsertNextCell(vertex)\n i+=1\n self.points.Modified()\n self.vertices.Modified()\n \n for bar in self.truss.bars:\n line = vtk.vtkLine()\n ids = line.GetPointIds()\n ids.SetNumberOfIds(2)\n for j in range(len(bar.nodes)):\n ids.SetId(j, nmap[bar.nodes[j].nb])\n self.lines.InsertNextCell(line) \n self.lines.Modified()\n \n self.polydata.Modified()\n \n self.render()\n \n def ragequit(self):\n print \"rage quit!\"\n self.algo.stopit=True\n self.app.quit()\n \n def render(self): \n # draw the scene\n self.vtkwidget.Render()\n self.app.processEvents()\n \n def refresh(self):\n self.app.processEvents()\n \n def __createPolyData(self):\n print 'creating vtkPolyData...'\n self.points = vtk.vtkPoints()\n self.polydata = vtk.vtkPolyData()\n self.vertices = vtk.vtkCellArray()\n self.lines = vtk.vtkCellArray()\n \n self.polydata.SetPoints(self.points)\n self.polydata.SetVerts(self.vertices)\n self.polydata.SetLines(self.lines)\n \n # points\n self.scalars = vtk.vtkFloatArray()\n self.scalars.SetNumberOfComponents(1)\n self.polydata.GetPointData().SetScalars(self.scalars)\n \n nmap={}\n i=0\n for nod in self.truss.nodes:\n nmap[nod.nb]=i\n self.points.InsertPoint(i, nod.x, nod.y, 0.0)\n self.scalars.InsertNextValue(nod.nb)\n # node cells\n vertex = vtk.vtkVertex()\n vertex.GetPointIds().SetId(0, i)\n self.vertices.InsertNextCell(vertex)\n i+=1\n self.points.Modified()\n self.vertices.Modified()\n \n for bar in self.truss.bars:\n line = vtk.vtkLine()\n ids = line.GetPointIds()\n ids.SetNumberOfIds(2)\n 
for j in range(len(bar.nodes)):\n ids.SetId(j, nmap[bar.nodes[j].nb])\n self.lines.InsertNextCell(line) \n self.lines.Modified()\n\nclass PolyData:\n def __init__(self, polydata):\n \n self.polydata = polydata\n \n self.mapper = vtk.vtkPolyDataMapper() \n self.mapper.ImmediateModeRenderingOff()\n self.mapper.ScalarVisibilityOff()\n self.mapper.SetInputDataObject(polydata)\n self.actor = vtk.vtkActor()\n \n self.actor.GetProperty().SetPointSize(7.0)\n self.actor.GetProperty().SetColor(0.,0.,0.)\n self.actor.SetMapper(self.mapper)\n \n self.labelmapper = vtk.vtkLabeledDataMapper()\n self.labelmapper.SetLabelFormat(\" %g\")\n self.labelmapper.SetLabelModeToLabelScalars()\n self.labelmapper.SetInputDataObject(polydata)\n prp = self.labelmapper.GetLabelTextProperty()\n prp.SetFontSize(16)\n prp.SetItalic(False)\n prp.SetBold(True)\n prp.SetColor(1.0,0.0,0.0)\n prp.SetLineOffset(-2)\n \n self.pointLabels = vtk.vtkActor2D()\n self.pointLabels.SetMapper(self.labelmapper)" }, { "alpha_fraction": 0.782608687877655, "alphanum_fraction": 0.8405796885490417, "avg_line_length": 33.5, "blob_id": "247a138a3f589939f22e2d81013d50c9807e1e23", "content_id": "447109a41d1b49a92e87c47a5df76380262499c2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 69, "license_type": "no_license", "max_line_length": 43, "num_lines": 2, "path": "/README.md", "repo_name": "romintomasetti/ContinuumMechanics2018", "src_encoding": "UTF-8", "text": "# ContinuumMechanics2018\nProject of the course \"Continuum Mechanics\"\n" } ]
11
andrpics33/devnet_dc
https://github.com/andrpics33/devnet_dc
5a11d2a62d34752f6c8a69965620cd99b780a9b3
32f028744529ac1ded1bf224ebb8fd7488c648b9
87bbe658f9de2a7e614185d3b85306db9e676c19
refs/heads/master
2021-02-16T13:28:42.243477
2020-03-04T21:51:01
2020-03-04T21:51:01
245,010,465
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6440678238868713, "alphanum_fraction": 0.6440678238868713, "avg_line_length": 19, "blob_id": "2bd5015f0c960a896ebbc1f8eecf57a9ea57783e", "content_id": "43ed9b0bec72365edea92950e365f9ed359f8f83", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 59, "license_type": "no_license", "max_line_length": 21, "num_lines": 3, "path": "/primer_programa.py", "repo_name": "andrpics33/devnet_dc", "src_encoding": "UTF-8", "text": "print (\"Hola Mundo\")\nprint (\"Hallo Welt\")\nprint (\"Hello World\")" } ]
1
FilippMuffin/Calculation-methods
https://github.com/FilippMuffin/Calculation-methods
f9a9216fec6745d8ea5dd750d1d62f67cad59bed
95e12a9dc6537a5faafda14380432e82119ee80e
800905f02286a361bc0c0fecb88306bd9fe92909
refs/heads/master
2020-08-02T23:53:46.386085
2019-09-28T19:49:11
2019-09-28T19:49:11
211,553,749
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.3734643757343292, "alphanum_fraction": 0.43488943576812744, "avg_line_length": 15.319999694824219, "blob_id": "dc94b7db64f7d7e9cdba73bc96b997c6670197b6", "content_id": "9079bb95fc8b04eae1a7e0cc3ab410a906afeb56", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 407, "license_type": "no_license", "max_line_length": 41, "num_lines": 25, "path": "/0.py", "repo_name": "FilippMuffin/Calculation-methods", "src_encoding": "UTF-8", "text": "x = []\nv = 23.0\ne = 0.000001\na = int(input(\"Input start: \"))\nh = int(input(\"Input step: \"))\n\nfor i in range(a, (a + (5 * h))):\n\tx.append(i)\n\ti += h\n\nprint(*x)\n\nfor x_i in x:\n\ti = 3.0 \n\ts = 0.0\n\tf = v*x_i\n\tn = 0\n\twhile abs(f) > e:\n\t\tf *= -(x_i * v * x_i * v)/(i * (i - 1))\n\t\ts += f\n\t\ti += 2\n\t\tn += 1\n\t\t# print(\"f({0})={1}\".format(n, f))\n\tprint(\"S({0})={1}\".format(x_i, s))\n\tprint(\"N({0})={1}\".format(x_i, n))" } ]
1
soaringsoul/fastworkApp
https://github.com/soaringsoul/fastworkApp
6d4dff031cc327ac949e222017bd076e5d149a69
29e14e4e04dd0502b59b7065e978da1612f0c544
94f3fd98c7f497c3756ddf6f48a335c00ca897bd
refs/heads/master
2022-11-26T08:25:04.206225
2020-07-31T10:35:48
2020-07-31T10:35:48
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5932203531265259, "alphanum_fraction": 0.5944028496742249, "avg_line_length": 28.84705924987793, "blob_id": "f1375f8ced1ea0c152bdb43e1b9f8fbb4aa392a7", "content_id": "376e6b5ff58d76804c063d488e36aa482fb840a4", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2667, "license_type": "permissive", "max_line_length": 92, "num_lines": 85, "path": "/merge_excel_sheets/main.py", "repo_name": "soaringsoul/fastworkApp", "src_encoding": "UTF-8", "text": "# 导入python 自带库\n\n# 导入自定义模块\n\nfrom Ui_fastwork_merge import Ui_mainWindow\nfrom PyQt5 import QtWidgets\nfrom PyQt5.QtCore import pyqtSlot\nfrom PyQt5.QtWidgets import QFileDialog\nfrom core import Core\n\nimport os\n\n\nclass MergeExcelSheets(QtWidgets.QMainWindow, Ui_mainWindow):\n def __init__(self):\n super(MergeExcelSheets, self).__init__()\n self.setupUi(self)\n self.pushbutton_openfolder.setVisible(False)\n\n def setBrowerPath(self):\n pass\n\n def init_app(self):\n self.excel_filepath = self.lineEdit_filepath.text()\n self.textEdit.clear()\n\n @pyqtSlot()\n def on_open_filepath_clicked(self):\n filename = self.open_file_dialog()\n self.lineEdit_filepath.setText(filename)\n\n def open_file_dialog(self):\n fileName, fileType = QFileDialog.getOpenFileName(self,\n \"请打开一个excel文件\",\n r\"%s\" % os.getcwd(),\n \"文件类型(*.xlsx;*.xls);\") # 设置文件扩展名过滤\n\n fileName = fileName.replace('/', '\\\\') # windows下需要进行文件分隔符转换\n return (fileName)\n\n @pyqtSlot()\n def on_pushbutton_openfolder_clicked(self):\n self.open_folder()\n\n @pyqtSlot()\n def on_pushbutton_start_clicked(self):\n self.init_app()\n if self.excel_filepath.endswith(\".xlsx\") or self.excel_filepath.endswith(\".xls\"):\n self.to_merge()\n else:\n self.textEdit.setText(\"当前输入的excel文件路径有误!请检查后重新输入!\")\n\n def to_merge(self):\n try:\n self.textEdit.clear()\n self.merge = Core(excel_filepah=self.excel_filepath)\n self.merge.progress_signal.connect(self.progress_signal_display)\n 
self.merge.start()\n self.pushbutton_openfolder.setVisible(True)\n except Exception as e:\n print(e)\n\n def progress_signal_display(self, log_info):\n self.textEdit.append(log_info)\n\n def error_message(self, error_info):\n self.label_progress.setText(error_info)\n\n def open_folder(self):\n abs_filepath = os.path.abspath(self.excel_filepath)\n folder_path = os.path.dirname(abs_filepath)\n print(folder_path)\n if not os.path.exists(folder_path):\n os.makedirs(folder_path)\n\n os.system('explorer.exe /n, %s' % folder_path)\n\n\nif __name__ == '__main__':\n import sys\n\n app = QtWidgets.QApplication(sys.argv)\n merge = MergeExcelSheets()\n merge.show()\n sys.exit(app.exec_())\n" }, { "alpha_fraction": 0.5959493517875671, "alphanum_fraction": 0.6000000238418579, "avg_line_length": 42.88888931274414, "blob_id": "a7b92caa21937218eba39faf03849ff8be21e641", "content_id": "0158b77463d113629d71bddf5fc09bdd9e96d9ab", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2179, "license_type": "permissive", "max_line_length": 91, "num_lines": 45, "path": "/merge_excel_sheets/core.py", "repo_name": "soaringsoul/fastworkApp", "src_encoding": "UTF-8", "text": "from fastwork.merge import MergeExcel\nfrom PyQt5 import QtCore\nimport pandas as pd\nimport os\n\n\nclass Core(QtCore.QThread):\n progress_signal = QtCore.pyqtSignal(str)\n\n def __init__(self, excel_filepah, keep_sheetname_lst=None):\n super(Core, self).__init__()\n self.excel_filepath = excel_filepah\n self.keep_sheetname_lst = keep_sheetname_lst\n\n def run(self):\n self.progress_signal.emit(\"开始合并!\")\n merge = MergeExcel(excel_filepath=self.excel_filepath)\n if type(self.keep_sheetname_lst) in [str, list] or self.keep_sheetname_lst is None:\n df_dict = merge.read_excel(excel_filepath=self.excel_filepath,\n sheet_name=self.keep_sheetname_lst)\n self.progress_signal.emit(\"当前共有%s个工作表需要合并!\" % len(df_dict))\n for sheet_name, df in df_dict.items():\n 
self.progress_signal.emit(\"工作表名称【%s】: 共%s行\" % (sheet_name, df.shape[0]))\n df_merge = pd.concat(df_dict)\n raw_col=[x for x in df_merge.columns]\n df_merge.index = [x[0] for x in df_merge.index]\n df_merge.index.name = '工作表名称'\n df_merge['工作表名称'] = df_merge.index\n df_merge = pd.DataFrame(df_merge, columns=['工作表名称'] + raw_col)\n\n else:\n self.progress_signal.emit(\"当前指定的参数有误!,请检查后重新输入!\")\n df_merge = None\n\n if df_merge is not None:\n merge.to_excel(df_merge)\n new_filename = \"%s_处理完成.xlsx\" % os.path.basename(self.excel_filepath)\n abs_filepath = os.path.abspath(self.excel_filepath)\n new_filepath = os.path.join(os.path.dirname(abs_filepath), new_filename)\n self.progress_signal.emit('*' * 30)\n self.progress_signal.emit('*' * 30)\n self.progress_signal.emit(\"【合并完成】,合并后的工作表共计%s行\" % df_merge.shape[0])\n self.progress_signal.emit(\"请到以下目录获取合并后的excel文件:\\n【%s】\" % new_filepath)\n\n # self.progress_signal.emit(\"合并完成\")\n" } ]
2
thomaav/smashladder-python
https://github.com/thomaav/smashladder-python
34fca0fef671baf00a5ed4b16c1e4c8a2e3ebac6
4f74b9a50f7af247c8474740410199bd8a5f102d
71495693232e1c17beb038e27b6bdbb0640ed6cb
refs/heads/master
2021-01-09T06:14:36.638179
2018-02-08T17:43:35
2018-02-08T17:43:35
80,943,408
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.774193525314331, "alphanum_fraction": 0.774193525314331, "avg_line_length": 14.399999618530273, "blob_id": "09c9876bb7c2e922c13afbcf5197c21ff8f37a38", "content_id": "3bc460b1110bafc3f24054f6b0be825502cdadf1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 155, "license_type": "no_license", "max_line_length": 41, "num_lines": 10, "path": "/smashladder/slexceptions.py", "repo_name": "thomaav/smashladder-python", "src_encoding": "UTF-8", "text": "class FailingRequestException(Exception):\n pass\n\n\nclass RequestTimeoutException(Exception):\n pass\n\n\nclass NotLoggedInException(Exception):\n pass\n\n" }, { "alpha_fraction": 0.84375, "alphanum_fraction": 0.875, "avg_line_length": 9.666666984558105, "blob_id": "a1e94e272dfc5bcdc1105ec34007117a173ad87f", "content_id": "c5b71d4b22fd2fdfb8b4a37cc14b6475cb0b173b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 32, "license_type": "no_license", "max_line_length": 16, "num_lines": 3, "path": "/requirements.txt", "repo_name": "thomaav/smashladder-python", "src_encoding": "UTF-8", "text": "websocket-client\nPyQt5\nrequests\n" }, { "alpha_fraction": 0.6405405402183533, "alphanum_fraction": 0.6432432532310486, "avg_line_length": 15.086956977844238, "blob_id": "e3546cb4f973c8aa0860ce97d5b5f65cc8cd3342", "content_id": "f94a880edfc70b664dcc407184c8b288f5486fe0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 370, "license_type": "no_license", "max_line_length": 53, "num_lines": 23, "path": "/slapp.pyw", "repo_name": "thomaav/smashladder-python", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n\nimport sys\nimport os\n\nos.chdir(os.path.dirname(os.path.realpath(__file__)))\n\nimport builtins\nimport smashladder.slqt as slqt\nimport time\n\n\ndef main():\n if 'debug' in sys.argv:\n builtins.debug_smashladder = True\n else:\n 
builtins.debug_smashladder = False\n\n sys.exit(slqt.app.exec_())\n\n\nif __name__ == '__main__':\n main()\n" }, { "alpha_fraction": 0.6458960175514221, "alphanum_fraction": 0.6540610194206238, "avg_line_length": 37.14754104614258, "blob_id": "6d939fdca8132865a0508dbe68a11f7bf27efbf0", "content_id": "fcd5d1e7266a31e20b01a6c320d8bbae216cc389", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2327, "license_type": "no_license", "max_line_length": 107, "num_lines": 61, "path": "/smashladder/slrequests.py", "repo_name": "thomaav/smashladder-python", "src_encoding": "UTF-8", "text": "import smashladder.local as local\nimport requests\nimport smashladder.slexceptions as slexceptions\nfrom getpass import getpass\n\nTIMEOUT = 2\nDEFAULT_HEADERS = { 'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',\n\t\t 'accept-encoding': 'gzip, deflate, sdch',\n\t\t 'accept-language': 'nb-NO,nb;q=0.8,no;q=0.6,nn;q=0.4,en-US;q=0.2,en;q=0.2',\n\t\t 'cache-control': 'max-age=0',\n\t\t 'connection': 'keep-alive',\n\t\t 'content-type': 'application/x-www-form-urlencoded',\n\t\t 'host': 'www.smashladder.com',\n\t\t 'origin': 'https://www.smashladder.com',\n\t\t 'referrer': 'https://smashladder.com/log-in',\n\t\t 'upgrade-insecure-requests' : '1',\n\t\t 'user-agent': 'python: https://github.com/thomaav/smashladder-python',\n};\n\n\ndef http_get_request(url, cookie_jar={}, headers=DEFAULT_HEADERS):\n try:\n return requests.get(url, cookies=cookie_jar, data=headers, timeout=TIMEOUT)\n except (requests.exceptions.ReadTimeout,\n requests.exceptions.ConnectTimeout,\n requests.exceptions.ConnectionError):\n raise slexceptions.RequestTimeoutException('HTTP get request to ' + url + ' timed out')\n\n\ndef http_post_request(url, data, cookie_jar={}, headers=DEFAULT_HEADERS):\n try:\n return requests.post(url, data=data, cookies=cookie_jar, headers=headers, timeout=TIMEOUT)\n except 
(requests.exceptions.ReadTimeout,\n requests.exceptions.ConnectTimeout,\n requests.exceptions.ConnectionError):\n raise slexceptions.RequestTimeoutException('HTTP post request to ' + url + ' timed out')\n\n\ndef get_login_credentials():\n username = input('Enter your username: ').strip()\n password = getpass()\n return username, password\n\n\ndef login_to_smashladder(username='', password=''):\n if not (username and password):\n username, password = get_login_credentials()\n\n login_content = { 'username': username,\n 'password': password,\n 'remember': '1',\n 'json': '1' }\n\n response = http_post_request('https://www.smashladder.com/log-in', login_content)\n\n if (response.json()['success']):\n response.cookies.set('username', username)\n local.save_cookies_to_file(response.cookies, local.COOKIE_FILE)\n return True\n\n return False\n" }, { "alpha_fraction": 0.5428280234336853, "alphanum_fraction": 0.5448674559593201, "avg_line_length": 34.30400085449219, "blob_id": "cf62975e2ef0dd58854a403eba347ecdcf3ea8b8", "content_id": "1e9a666090252fb5c9be9ef0bd771f02da587cb1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8826, "license_type": "no_license", "max_line_length": 98, "num_lines": 250, "path": "/smashladder/slthreads.py", "repo_name": "thomaav/smashladder-python", "src_encoding": "UTF-8", "text": "import builtins\nimport threading\nimport time\nimport websocket\nimport ssl\nimport smashladder.sl as sl\nimport smashladder.slexceptions as slexceptions\nfrom smashladder.local import cookie_jar_to_string, PREFERRED_PLAYERS\nfrom PyQt5.QtCore import QThread, pyqtSignal\n\nclass SlBaseThread(QThread):\n qt_print = pyqtSignal(str)\n\n def __init__(self, cookie_jar=None, parent=None):\n super().__init__(parent)\n if cookie_jar:\n self.cookie_jar = cookie_jar\n self.username = cookie_jar['username']\n else:\n self.cookie_jar = None\n\n\n def set_login(self, cookie_jar):\n self.cookie_jar = cookie_jar\n 
self.username = cookie_jar['username']\n\n\n def logout(self):\n self.cookie_jar = None\n self.username = None\n\n\nclass SlSocketThread(SlBaseThread):\n entered_match = pyqtSignal(str, str, str)\n match_message = pyqtSignal(str)\n private_message = pyqtSignal(str)\n preferred_queued = pyqtSignal()\n match_done = pyqtSignal()\n\n def __init__(self, cookie_jar=None, parent=None):\n super().__init__(cookie_jar, parent)\n self.lock = threading.Lock()\n self.priv_chat_enabled = True\n\n\n def auth_false(self):\n self.qt_print.emit('Authentication false, exiting')\n self.ws.close()\n\n\n def process_private_chat_message(self, raw_message):\n processed_message = sl.process_private_chat_message(raw_message)\n self.qt_print.emit(processed_message['info'])\n self.private_message.emit(processed_message['info'].replace('[private chat] ', ''))\n\n\n def process_match_message(self, raw_message):\n processed_message = sl.process_match_message(raw_message)\n if 'Entered match' in processed_message['info']:\n self.entered_match.emit(processed_message['match_id'],\n processed_message['opponent_username'],\n processed_message['opponent_country'])\n return\n\n if not processed_message['typing']:\n self.qt_print.emit(processed_message['info'])\n self.match_message.emit(processed_message['info'].replace('[match chat] ', ''))\n\n\n def process_open_challenges(self, raw_message):\n try:\n processed_message = sl.process_open_challenges(self.cookie_jar, raw_message)\n except slexceptions.RequestTimeoutException as e:\n self.qt_print.emit(str(e))\n return\n\n if processed_message['match']:\n self.qt_print.emit(processed_message['info'])\n\n if 'Accepted challenge' in processed_message['info']:\n self.entered_match.emit(str(processed_message['match'].match_id),\n processed_message['match'].opponent_username,\n processed_message['match'].opponent_country)\n\n\n def process_new_search(self, raw_message):\n if builtins.in_match:\n return\n\n try:\n player = 
sl.process_new_search(self.cookie_jar, raw_message, self.username)\n except slexceptions.RequestTimeoutException as e:\n self.qt_print.emit(str(e))\n return\n\n if player:\n self.qt_print.emit('Challenging ' + player['username'] + ' from ' + player['country'])\n\n\n def process_new_search_idle(self, raw_message):\n match = sl.get_new_search_info(raw_message)\n if not match:\n return\n\n if not match.relevant():\n return\n\n if match.opponent_username in PREFERRED_PLAYERS:\n self.qt_print.emit(match.username + ' (' + str(match.opponent_id) + ')' +\n ', preferred player, queued up for: ' + match.ladder_name + ' ' +\n match.match_id)\n self.preferred_queued.emit()\n return\n else:\n self.qt_print.emit(match.opponent_username + ' (' + str(match.opponent_id) + ')' +\n ' from ' + match.opponent_country +\n ' queued up for: ' + match.ladder_name + ', ' + match.match_id)\n\n\n def on_message(self, ws, raw_message):\n with self.lock:\n if '\\\"authentication\\\":false' in raw_message:\n self.auth_false()\n return\n\n if builtins.idle:\n if 'searches' in raw_message:\n self.process_new_search_idle(raw_message)\n elif 'current_matches' in raw_message and \\\n '\\\"all_entries\\\":true' not in raw_message:\n try:\n self.process_match_message(raw_message)\n except Exception as e:\n print(raw_message)\n print(e)\n\n if builtins.in_queue:\n if 'current_matches' in raw_message:\n self.process_match_message(raw_message)\n\n elif 'open_challenges' in raw_message:\n self.process_open_challenges(raw_message)\n\n elif 'searches' in raw_message:\n self.process_new_search(raw_message)\n\n # we need to redo current_matches here, as it appears both\n # when entering match, and when receiving a match message\n if builtins.in_match:\n if 'current_matches' in raw_message and \\\n '\\\"all_entries\\\":true' not in raw_message:\n try:\n self.process_match_message(raw_message)\n except Exception as e:\n print(raw_message)\n print(e)\n elif 'disputed_matches' in raw_message:\n 
self.match_done.emit()\n\n if 'private_chat' in raw_message and self.priv_chat_enabled:\n self.process_private_chat_message(raw_message)\n\n\n def on_error(self, ws, error):\n print('[WS ERROR]: ' + str(error))\n print('[DEBUG]: Error in WebSocket, likely tried to close before setup done')\n\n\n def on_close(self, ws):\n self.qt_print.emit(\"Connection to Smashladder lost\")\n\n\n def run(self):\n if not self.cookie_jar:\n print('[DEBUG]: SocketThread: can\\'t run without login')\n return\n\n self.ws = websocket.WebSocketApp('wss://www.smashladder.com/?type=1&version=9.11.4',\n on_message = self.on_message,\n on_error = self.on_error,\n on_close = self.on_close,\n cookie = cookie_jar_to_string(self.cookie_jar))\n self.ws.run_forever(sslopt={ 'cert_reqs': ssl.CERT_NONE })\n\n\nclass MMThread(SlBaseThread):\n secs_queued = 0\n\n def __init__(self, cookie_jar=None, parent=None):\n super().__init__(parent)\n\n\n def run(self):\n if not self.cookie_jar:\n print('[DEBUG]: MMThread: can\\'t run without login')\n return\n\n while True:\n if builtins.debug_smashladder:\n print('[DEBUG]: Would start matchmaking search')\n break\n\n if builtins.in_match or builtins.idle:\n break\n\n if builtins.in_queue:\n self.secs_queued += 1\n if self.secs_queued > 305:\n self.secs_queued = 0\n builtins.in_queue = False\n builtins.search_match_id = None\n time.sleep(1)\n continue\n else:\n try:\n mm_status = sl.begin_matchmaking(self.cookie_jar, 1, 2, 0, '', 0, '')\n except slexceptions.RequestTimeoutException as e:\n self.qt_print.emit(str(e))\n break\n\n if 'Already in queue' in mm_status['info']:\n builtins.in_queue = True\n continue\n\n self.qt_print.emit(mm_status['info'])\n\n if mm_status['match_id']:\n builtins.in_queue = True\n builtins.search_match_id = mm_status['match_id']\n\n time.sleep(1)\n\n\nclass ChallengeThread(SlBaseThread):\n def __init__(self, cookie_jar=None, parent=None):\n super().__init__(parent)\n\n def run(self):\n if not self.cookie_jar:\n print('[DEBUG]: 
ChallengeThread: can\\'t run without login')\n return\n\n try:\n challenged_players = sl.challenge_relevant_friendlies(self.cookie_jar, self.username)\n except slexceptions.RequestTimeoutException as e:\n self.qt_print.emit(str(e))\n return\n\n for player in challenged_players:\n self.qt_print.emit('Challenging ' + player['username'] + ' from ' + player['country'])\n" }, { "alpha_fraction": 0.6252971291542053, "alphanum_fraction": 0.6274023652076721, "avg_line_length": 35.222633361816406, "blob_id": "89b0f76f8aa177bdae19decd15211a75b876ad34", "content_id": "4c0c02a530887bd435a4fbd0f3bf483014fc575a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 29450, "license_type": "no_license", "max_line_length": 103, "num_lines": 813, "path": "/smashladder/slqt.py", "repo_name": "thomaav/smashladder-python", "src_encoding": "UTF-8", "text": "\nimport builtins\nimport sys\nimport smashladder.local as local\nimport smashladder.sl as sl\nimport smashladder.slrequests as slrequests\nimport smashladder.slexceptions as slexceptions\nimport smashladder.slthreads as slthreads\nimport os.path\nimport time\nimport enum\nimport threading\nfrom functools import wraps\nfrom PyQt5.QtWidgets import QApplication, QWidget, QToolTip, QPushButton, \\\n QDesktopWidget, QLineEdit, QFormLayout, QMainWindow, QLabel, QTextEdit, \\\n QAbstractScrollArea\nfrom PyQt5.QtGui import QIcon, QFont, QTextCharFormat, QBrush, QColor, QTextCursor, \\\n QTextFormat, QCursor\nfrom PyQt5.QtMultimedia import QSound\nfrom PyQt5.QtCore import QCoreApplication, QPoint, Qt, QThread, pyqtSignal\nfrom PyQt5 import uic\n\n\nMAINWINDOW_UI_FILE = 'static/mainwindow.ui'\nMAINWINDOW_CSS_FILE = 'static/mainwindow.css'\nQDOCUMENT_CSS_FILE = 'static/qdocument.css'\nMATCH_UI_FILE = 'static/match.ui'\nPRIV_CHAT_UI_FILE = 'static/private_chat.ui'\n\n\ndef loading(func):\n @wraps(func)\n def wrapper(*args):\n self = args[0]\n self.setEnabled(False)\n 
QApplication.setOverrideCursor(Qt.WaitCursor)\n output = func(*args)\n self.setEnabled(True)\n QApplication.restoreOverrideCursor()\n return output\n return wrapper\n\n\nclass MMStatus(enum.Enum):\n IDLE = 1\n IN_QUEUE = 2\n IN_MATCH = 3\n\n\nclass MovableQWidget(QWidget):\n def __init__(self, parent=None):\n super().__init__(parent)\n self.mpressed = False\n self.mousePressEvent = (self.mouse_press)\n self.mouseReleaseEvent = (self.mouse_release)\n self.mouseMoveEvent = (self.mouse_move)\n\n def mouse_press(self, evt):\n if (evt.button() == 2):\n return\n\n cursor = QCursor()\n pos = cursor.pos()\n geometry = self.geometry()\n\n self.mpress_cur_x = pos.x()\n self.mpress_cur_y = pos.y()\n self.mpress_x = geometry.x()\n self.mpress_y = geometry.y()\n self.mpressed = True\n\n\n def mouse_release(self, evt):\n if (evt.button() == 2):\n return\n\n self.mpressed = False\n\n\n def mouse_move(self, evt):\n if self.mpressed:\n cursor = QCursor()\n pos = cursor.pos()\n\n diff_x = pos.x() - self.mpress_cur_x\n diff_y = pos.y() - self.mpress_cur_y\n\n self.move(self.mpress_x + diff_x, self.mpress_y + diff_y)\n\n\nclass PrivateChatWindow(MovableQWidget):\n async_print = pyqtSignal(str)\n\n def __init__(self, main_window, parent=None):\n super().__init__(parent)\n self.username = None\n self.main_window = main_window\n self.async_print.connect(self.print)\n self.initUI()\n\n\n def initUI(self):\n uic.loadUi(PRIV_CHAT_UI_FILE, self)\n\n self.setWindowTitle('Private chat')\n self.setFixedSize(self.width(), self.height())\n self.setObjectName('PrivateChatWidget')\n self.setWindowFlags(Qt.FramelessWindowHint)\n\n with open(MAINWINDOW_CSS_FILE) as f:\n self.setStyleSheet(f.read())\n\n\n def print(self, text):\n self.priv_chat_info.append('| ' + text)\n\n\n def clear(self):\n self.priv_chat_info.clear()\n\n\n def change_user(self, username):\n self.clear()\n self.username = username\n self.username_label.setText(username)\n\n def async_fetch_messages():\n latest_messages = 
sl.fetch_private_messages(main_window.cookie_jar, self.username)\n for message in latest_messages:\n self.async_print.emit(message['username'] + ': ' + message['message'])\n thr = threading.Thread(target=async_fetch_messages, args=(), kwargs={})\n thr.start()\n\n\n def send_message(self):\n message = self.priv_chat_input.text()\n if message:\n if message[0] == '/':\n if 'change_user' in message:\n smsg = message.strip().split()\n try:\n username = smsg[1]\n self.change_user(username)\n except:\n print('[DEBUG]: Error changing privmsg user, no username found')\n else:\n def async_message():\n sl.send_private_chat_message(main_window.cookie_jar, self.username, message)\n thr = threading.Thread(target=async_message, args=(), kwargs={})\n thr.start()\n self.priv_chat_input.setText('')\n\n\nclass MatchWindow(QWidget):\n def __init__(self, main_window, parent=None):\n super().__init__(parent)\n self.main_window = main_window\n self.preferred_toggled = False\n self.opponent = None\n self.initUI()\n\n\n def initUI(self):\n uic.loadUi(MATCH_UI_FILE, self)\n\n self.setWindowTitle('Match chat')\n self.setFixedSize(self.width(), self.height())\n self.setObjectName('MatchWidget')\n self.setWindowFlags(Qt.FramelessWindowHint)\n\n with open(MAINWINDOW_CSS_FILE) as f:\n self.setStyleSheet(f.read())\n\n self.refresh_match_chat_button.setIcon(QIcon('static/refresh.png'))\n self.refresh_match_chat_button.clicked.connect(self.refresh_match_chat)\n\n self.toggle_preferred_button.setIcon(QIcon('static/plus.png'))\n self.toggle_preferred_button.clicked.connect(self.toggle_preferred_click)\n\n self.hideEvent = self.hide_event\n self.showEvent = self.show_event\n\n\n def print(self, text):\n self.match_info.append('| ' + text)\n\n\n def print_match_message(self, text):\n QSound.play('static/matchmessage.wav')\n self.print(text)\n\n\n def clear(self):\n self.match_info.clear()\n\n\n def toggle_preferred_player(self, player):\n if self.preferred_toggled:\n 
local.remove_preferred_player(player)\n self.print(player + ' removed from preferred players')\n else:\n local.prefer_player(player)\n self.print(player + ' added to preferred players')\n\n\n def toggle_preferred_click(self):\n if not self.opponent:\n self.print('No opponent found for this match, try restarting the application')\n return\n\n if self.preferred_toggled:\n self.toggle_preferred_player(self.opponent)\n self.toggle_preferred_button.setIcon(QIcon('static/plus.png'))\n self.preferred_toggled = False\n else:\n self.toggle_preferred_player(self.opponent)\n self.toggle_preferred_button.setIcon(QIcon('static/minus.png'))\n self.preferred_toggled = True\n\n\n def send_message(self):\n message = self.match_input.text()\n if message and builtins.in_match:\n def async_message():\n sl.send_match_chat_message(main_window.cookie_jar, builtins.current_match_id, message)\n thr = threading.Thread(target=async_message, args=(), kwargs={})\n thr.start()\n self.match_input.setText('')\n\n\n def hide_event(self, evt):\n self.clear()\n main_window.quit_matchmaking()\n\n\n def show_event(self, evt):\n if self.opponent in local.PREFERRED_PLAYERS:\n self.preferred_toggled = True\n self.toggle_preferred_button.setIcon(QIcon('static/minus.png'))\n else:\n self.preferred_toggled = False\n self.toggle_preferred_button.setIcon(QIcon('static/plus.png'))\n\n\n @loading\n def refresh_match_chat(self, _=None):\n self.clear()\n self.print('Refreshing match messages..')\n QApplication.processEvents()\n self.repaint()\n\n match_messages = sl.fetch_match_messages(main_window.cookie_jar)\n for message in match_messages:\n self.print(message['username'] + ': ' + message['message'])\n\n\nclass LoginWindow(QWidget):\n def __init__(self, main_window, parent=None):\n super().__init__(parent)\n self.main_window = main_window\n self.initUI()\n\n\n def initUI(self):\n self.setWindowTitle(\"Login\")\n form_layout = QFormLayout()\n self.form_layout = form_layout\n self.setLayout(form_layout)\n 
self.setObjectName('LoginWidget')\n self.setWindowFlags(Qt.FramelessWindowHint | Qt.WindowStaysOnTopHint)\n\n with open(MAINWINDOW_CSS_FILE) as f:\n self.setStyleSheet(f.read())\n\n # center the widget on screen\n self.setMinimumSize(200, 100)\n self.setMaximumSize(350, 110)\n self.resize(300, 100)\n qr = self.frameGeometry()\n cp = QDesktopWidget().availableGeometry().center()\n qr.moveCenter(cp)\n self.move(qr.topLeft())\n\n self.username_input = QLineEdit(self)\n self.password_input = QLineEdit(self)\n self.password_input.setEchoMode(QLineEdit.Password)\n\n self.username_input.returnPressed.connect(self.login)\n self.password_input.returnPressed.connect(self.login)\n\n self.login_button = QPushButton('Log in')\n self.login_button.clicked.connect(self.login)\n\n self.login_status = QLabel(\"Logging in...\")\n self.login_status.setAlignment(Qt.AlignCenter)\n self.login_status.hide()\n\n form_layout.addRow('Username:', self.username_input)\n form_layout.addRow('Password:', self.password_input)\n form_layout.addRow(self.login_button)\n form_layout.addRow(self.login_status)\n\n self.showEvent = self.show_event\n self.closeEvent = self.close_event\n\n\n def show_event(self, evt):\n self.main_window.setEnabled(False)\n\n\n def close_event(self, evt):\n self.main_window.setEnabled(True)\n\n\n @loading\n def login(self, _=None):\n self.login_status.show()\n\n username = self.username_input.text()\n password = self.password_input.text()\n\n if not (username and password):\n self.login_status.setText('Enter username and password')\n return\n\n self.login_status.setText('Logging in...')\n QApplication.processEvents()\n self.repaint()\n\n try:\n if slrequests.login_to_smashladder(username, password):\n self.main_window.login()\n self.main_window.username = username\n self.login_status.hide()\n self.close()\n else:\n self.login_status.setText('Wrong username and/or password')\n except slexceptions.RequestTimeoutException as e:\n seflf.main_window.print(str(e))\n 
self.login_status.setText('Login to server timed out, try again later')\n\n\nclass MainWindow(MovableQWidget):\n def __init__(self, parent=None):\n super().__init__(parent)\n self.init_threads()\n self.initUI()\n self.closeEvent = self.close_event\n\n\n def initUI(self):\n uic.loadUi(MAINWINDOW_UI_FILE, self)\n\n self.center()\n self.setFixedSize(self.width(), self.height())\n self.setWindowFlags(Qt.FramelessWindowHint)\n self.setWindowTitle('smashladder-python')\n\n with open(MAINWINDOW_CSS_FILE) as f:\n self.setStyleSheet(f.read())\n\n self.minimize_button.clicked.connect(lambda: self.showMinimized())\n self.exit_button.clicked.connect(lambda: self.close())\n\n self.mm_button.clicked.connect(self.start_matchmaking)\n self.quit_mm_button.clicked.connect(self.quit_matchmaking)\n self.fetch_active_matches_button.setIcon(QIcon('static/down-arrow.png'))\n self.fetch_active_matches_button.clicked.connect(self.fetch_active_matches)\n\n self.whitelist_country_button.clicked.connect(self.whitelist_country_wrapper)\n self.whitelist_country.returnPressed.connect(self.whitelist_country_wrapper)\n self.blacklist_player_button.clicked.connect(self.blacklist_player_wrapper)\n self.blacklist_player_username.returnPressed.connect(self.blacklist_player_wrapper)\n\n self.list_blacklisted_players_button.setIcon(QIcon('static/list.ico'))\n self.list_whitelisted_countries_button.setIcon(QIcon('static/list.ico'))\n self.list_preferred_players_button.setIcon(QIcon('static/friend.png'))\n self.list_blacklisted_players_button.clicked.connect(self.list_blacklisted_players)\n self.list_whitelisted_countries_button.clicked.connect(self.list_whitelisted_countries)\n self.list_preferred_players_button.clicked.connect(self.list_preferred_players)\n self.clear_config_info_button.clicked.connect(lambda: self.config_info.clear())\n\n whitelist_country_tooltip = \\\n \"\"\"\n Used to whitelist specific countries that you want the script\n to allow matches with. 
Especially useful in Europe where\n distance is less important than the country your opponent is\n residing in.\n \"\"\"\n blacklist_player_tooltip = \\\n \"\"\"\n Used to blacklist players that you have a bad connection\n to. Blacklisted players will not be challenged. Can be used cleverly\n to avoid noobs, jerks and salts without ignoring them forever.\n \"\"\"\n self.whitelist_country_tooltip.setAlignment(Qt.AlignCenter)\n self.blacklist_player_tooltip.setAlignment(Qt.AlignCenter)\n self.whitelist_country_tooltip.setToolTip(whitelist_country_tooltip)\n self.blacklist_player_tooltip.setToolTip(blacklist_player_tooltip)\n\n self.config_info.mouseMoveEvent = (self.highlight_config_line)\n self.config_info.mousePressEvent = (self.update_config)\n self.config_info.setLineWrapMode(QTextEdit.NoWrap)\n self.config_info.setContextMenuPolicy(Qt.NoContextMenu)\n\n self.matchmaking_info.mousePressEvent = (self.click_username)\n with open(QDOCUMENT_CSS_FILE) as f:\n self.matchmaking_info.document().setDefaultStyleSheet(f.read())\n\n self.friendlies_checkbox.setChecked(True)\n self.friendlies_checkbox.toggled.connect(self.change_checkbox_config)\n sl.friendlies_enabled = self.friendlies_checkbox.isChecked()\n self.ranked_checkbox.setChecked(False)\n self.ranked_checkbox.toggled.connect(self.change_checkbox_config)\n sl.ranked_enabled = self.ranked_checkbox.isChecked()\n self.doubles_checkbox.setChecked(False)\n self.doubles_checkbox.toggled.connect(self.change_checkbox_config)\n sl.doubles_enabled = self.doubles_checkbox.isChecked()\n self.priv_chat_checkbox.setChecked(True)\n self.priv_chat_checkbox.toggled.connect(self.change_checkbox_config)\n\n self.show()\n\n # we want the creation of the main window to be _done_ before\n # we create the login window and match window\n self.login_window = LoginWindow(self)\n self.relog_button.clicked.connect(lambda: self.login_window.show())\n self.logout_button.clicked.connect(self.logout)\n self.login()\n\n self.match_window = 
MatchWindow(self, self)\n self.match_window.match_input.returnPressed.connect(self.match_window.send_message)\n self.socket_thread.match_message.connect(self.match_window.print_match_message)\n self.socket_thread.match_done.connect(lambda: self.match_window.print(\n '<span style=\"color: red\">Opponent has quit the match</span>'))\n self.match_window.quit_match_button.clicked.connect(self.quit_match)\n self.match_window.move(self.width() / 2 - self.match_window.width() / 2,\n self.height() / 2 - self.match_window.height() / 2)\n\n self.priv_chat_window = PrivateChatWindow(self)\n self.priv_chat_window.priv_chat_input.returnPressed.connect(self.priv_chat_window.send_message)\n self.socket_thread.private_message.connect(self.priv_chat_window.print)\n self.priv_chat_window.close_button.clicked.connect(lambda: self.priv_chat_window.hide())\n self.priv_chat_label.mousePressEvent = (lambda _: self.priv_chat_window.show())\n\n self.matchmaking_info.setVerticalScrollBarPolicy(Qt.ScrollBarAlwaysOn)\n self.config_info.setVerticalScrollBarPolicy(Qt.ScrollBarAlwaysOn)\n self.match_window.match_info.setVerticalScrollBarPolicy(Qt.ScrollBarAlwaysOn)\n self.priv_chat_window.priv_chat_info.setVerticalScrollBarPolicy(Qt.ScrollBarAlwaysOn)\n\n\n def init_threads(self):\n self.matchmaking_thread = slthreads.MMThread()\n self.socket_thread = slthreads.SlSocketThread()\n self.challenge_thread = slthreads.ChallengeThread()\n\n self.matchmaking_thread.qt_print.connect(self.print)\n self.socket_thread.qt_print.connect(self.print)\n self.socket_thread.entered_match.connect(self.entered_match)\n self.socket_thread.preferred_queued.connect(lambda: QSound.play('static/tutturuu.wav'))\n self.challenge_thread.qt_print.connect(self.print)\n\n\n def close_event(self, evt):\n local.remove_tmp_blacklisted()\n app.quit()\n\n\n def print(self, text):\n self.matchmaking_info.append('| ' + text)\n QApplication.processEvents()\n self.repaint()\n\n\n def center(self):\n qr = self.frameGeometry()\n cp = 
QDesktopWidget().availableGeometry().center()\n qr.moveCenter(cp)\n self.move(qr.topLeft())\n\n\n def change_status(self, status):\n if status == MMStatus.IDLE:\n self.mm_status.setText('Idle')\n elif status == MMStatus.IN_QUEUE:\n self.mm_status.setText('In queue')\n elif status == MMStatus.IN_MATCH:\n self.mm_status.setText('In match')\n\n\n def login(self):\n if os.path.isfile(local.COOKIE_FILE):\n local.cookie_jar = local.load_cookies_from_file(local.COOKIE_FILE)\n self.cookie_jar = local.load_cookies_from_file(local.COOKIE_FILE)\n self.username = self.cookie_jar['username']\n\n sl.active_config.set_login(self.username)\n self.socket_thread.set_login(self.cookie_jar)\n self.matchmaking_thread.set_login(self.cookie_jar)\n self.challenge_thread.set_login(self.cookie_jar)\n self.socket_thread.start()\n\n self.quit_existing_match()\n\n self.relog_button.hide()\n self.logged_in_label.show()\n self.logout_button.show()\n else:\n self.logged_in_label.hide()\n self.logout_button.hide()\n self.relog_button.click()\n\n\n def logout(self):\n try:\n local.cookie_jar = None\n self.cookie_jar = None\n os.remove(local.COOKIE_FILE)\n\n self.socket_thread.logout()\n self.matchmaking_thread.logout()\n self.challenge_thread.logout()\n\n self.relog_button.show()\n self.logged_in_label.hide()\n self.logout_button.hide()\n except Exception as e:\n self.print('Could not delete cookie file: {}'.format(e))\n\n\n def start_matchmaking(self):\n if not hasattr(self, 'cookie_jar') or not self.cookie_jar:\n self.print('Log in to matchmake')\n return\n\n if not builtins.idle:\n self.print('Already matchmaking, can\\'t start matchmaking')\n return\n\n builtins.idle = False\n self.matchmaking_thread.start()\n self.challenge_thread.start()\n if not self.socket_thread.isRunning():\n self.socket_thread.start()\n\n self.change_status(MMStatus.IN_QUEUE)\n self.print('Successfully started matchmaking')\n\n\n @loading\n def quit_matchmaking(self, _=None):\n if builtins.idle and not 
builtins.in_match:\n self.print('Already idle, can\\'t quit matcmaking')\n return\n\n self.print('Quitting matchmaking..')\n\n builtins.idle = True\n self.matchmaking_thread.wait()\n if self.challenge_thread.isRunning():\n self.challenge_thread.terminate()\n\n if builtins.search_match_id:\n quit_queue = sl.quit_matchmaking(self.cookie_jar, builtins.search_match_id)\n if quit_queue:\n self.print('Successfully unqueued match with id: ' + builtins.search_match_id)\n elif builtins.in_match:\n sl.report_friendly_done(self.cookie_jar, builtins.current_match_id)\n sl.finished_chatting_with_match(self.cookie_jar, builtins.current_match_id)\n\n builtins.in_queue = False\n builtins.search_match_id = None\n builtins.current_match_id = None\n builtins.in_match = False\n builtins.idle = True\n self.change_status(MMStatus.IDLE)\n self.print('Successfully quit matchmaking')\n\n @loading\n def fetch_active_matches(self, _=None):\n try:\n active_searches = sl.retrieve_relevant_searches(self.cookie_jar)\n except slexceptions.RequestTimeoutException as e:\n self.print('Timed out while fetching active searches')\n return\n\n if active_searches:\n self.print('<span style=\"color: green\">--Active relevant searches--</span>')\n match_searches = []\n for match in active_searches:\n print_str = match.opponent_username + ' from ' + match.opponent_country\n if match.is_ranked:\n if sl.active_config.ranked:\n print_str = print_str + ' ' + '(ranked)'\n match_searches.append(print_str)\n else:\n match_searches.append(print_str)\n\n # simple way to sort the strings on usernames\n for match_str in sorted(match_searches):\n self.print(match_str)\n else:\n self.print('<span style=\"color: red\">No active relevant searches active--</span>')\n\n\n def entered_match(self, match_id, opponent_username, opponent_country):\n if builtins.in_match:\n return\n\n builtins.current_match_id = match_id\n builtins.in_match = True\n builtins.in_queue = False\n builtins.search_match_id = None\n\n # quit threads 
that look for matches\n builtins.idle = True\n self.matchmaking_thread.wait()\n if self.challenge_thread.isRunning():\n self.challenge_thread.terminate()\n builtins.idle = False\n\n # send host code immediately if it is set\n host_code = self.host_code.text()\n if host_code:\n def async_greeting():\n time.sleep(1.5)\n sl.send_match_chat_message(self.cookie_jar, match_id, 'heja')\n sl.send_match_chat_message(self.cookie_jar, match_id, host_code)\n thr = threading.Thread(target=async_greeting, args=(), kwargs={})\n thr.start()\n\n QSound.play('static/challenger.wav')\n self.print('Entered match: ' + match_id)\n self.change_status(MMStatus.IN_MATCH)\n self.centralWidget.hide()\n self.match_window.opponent = opponent_username\n self.match_window.show()\n self.match_window.print('Match with ' + opponent_username + ' from ' + opponent_country)\n self.match_window.setFocus()\n\n\n @loading\n def quit_match(self, _=None):\n self.match_window.hide()\n self.centralWidget.show()\n\n\n def change_checkbox_config(self):\n sl.active_config.set_friendlies(self.friendlies_checkbox.isChecked())\n sl.active_config.set_ranked(self.ranked_checkbox.isChecked())\n sl.active_config.set_doubles(self.doubles_checkbox.isChecked())\n self.socket_thread.priv_chat_enabled = self.priv_chat_checkbox.isChecked()\n\n\n def whitelist_country_wrapper(self):\n country = self.whitelist_country.text()\n\n if country:\n self.config_info.clear()\n if country not in local.WHITELISTED_COUNTRIES:\n local.whitelist_country(country)\n self.config_info.append(country + ' added to whitelist')\n else:\n self.config_info.append(country + ' already whitelisted')\n self.whitelist_country.setText('')\n\n\n def blacklist_player_wrapper(self):\n username = self.blacklist_player_username.text()\n\n if username:\n self.config_info.clear()\n if username not in local.BLACKLISTED_PLAYERS:\n local.blacklist_player(username)\n self.config_info.append(username + ' added to blacklisted players')\n else:\n 
self.config_info.append(username + ' already blacklisted')\n self.blacklist_player_username.setText('')\n\n\n def reset_config_info_highlighting(self):\n reset_cursor = self.config_info.textCursor()\n format = QTextCharFormat()\n format.setBackground(QBrush(QColor(18, 20, 28)))\n reset_cursor.setPosition(0)\n reset_cursor.movePosition(QTextCursor.End, 1)\n reset_cursor.mergeCharFormat(format)\n\n\n def highlight_config_line(self, evt):\n self.reset_config_info_highlighting()\n cur = self.config_info.cursorForPosition(evt.pos())\n cur_line_no = cur.blockNumber()\n\n if cur_line_no <= 1:\n return\n\n cur.select(QTextCursor.LineUnderCursor)\n format = QTextCharFormat()\n format.setBackground(QBrush(QColor('red')))\n cur.mergeCharFormat(format)\n\n\n def update_config(self, evt):\n self.reset_config_info_highlighting()\n cur = self.config_info.cursorForPosition(evt.pos())\n cur.select(QTextCursor.LineUnderCursor)\n selected_text = cur.selectedText()\n\n if (evt.button() == 1):\n config_info_title = self.config_info.toPlainText()[:9]\n username = selected_text.split(' ')[0]\n if config_info_title == 'Blacklist':\n local.remove_blacklisted_player(username)\n self.list_blacklisted_players_button.click()\n elif config_info_title == 'Whitelist':\n local.remove_whitelisted_country(username)\n self.list_whitelisted_countries_button.click()\n elif config_info_title == 'Preferred':\n local.remove_preferred_player(username)\n self.list_preferred_players_button.click()\n elif (evt.button() == 2):\n config_info_title = self.config_info.toPlainText()[:9]\n username = selected_text.split(' ')[0]\n\n if 'Blacklisted' in username or '--' in username:\n return\n\n if config_info_title == 'Blacklist' and \\\n username not in local.TMP_BLACKLISTED_PLAYERS:\n local.tmp_blacklist_player(username)\n cur.insertHtml(username + ' (tmp)')\n elif '(tmp)' in selected_text:\n local.TMP_BLACKLISTED_PLAYERS.remove(username)\n self.list_blacklisted_players_button.click()\n elif 'added to blacklist' 
in selected_text and \\\n username not in local.TMP_BLACKLISTED_PLAYERS:\n local.tmp_blacklist_player(username)\n cur.insertHtml(username + ' temporarily blacklisted')\n\n\n def click_username(self, evt):\n cur = self.matchmaking_info.cursorForPosition(evt.pos())\n cur.select(QTextCursor.BlockUnderCursor)\n selected_line = cur.selectedText()\n\n if '| [private chat]' in selected_line:\n username = selected_line.strip().split(' ')[3].replace(':', '')\n self.priv_chat_window.change_user(username)\n self.priv_chat_window.show()\n elif 'queued up' in selected_line:\n processed_line = (selected_line.strip()[2:]).split(' ')\n username = processed_line[0].replace(',', '')\n user_id = processed_line[1][1:-1]\n match_id = processed_line[-1]\n\n def async_challenge():\n sl.challenge_opponent(self.cookie_jar, user_id, match_id)\n thr = threading.Thread(target=async_challenge, args=(), kwargs={})\n thr.start()\n self.print('Challenging player ' + username)\n\n\n def list_blacklisted_players(self):\n self.config_info.clear()\n self.config_info.append('Blacklisted players')\n self.config_info.append('------------------------')\n\n for player in sorted(local.BLACKLISTED_PLAYERS):\n player_text = player\n if player in local.TMP_BLACKLISTED_PLAYERS:\n player_text += ' (tmp)'\n self.config_info.append(player_text)\n\n self.config_info.verticalScrollBar().setValue(0)\n\n\n def list_whitelisted_countries(self):\n self.config_info.clear()\n self.config_info.append('Whitelisted countries')\n self.config_info.append('---------------------------')\n\n for country in sorted(local.WHITELISTED_COUNTRIES):\n self.config_info.append(country)\n\n self.config_info.verticalScrollBar().setValue(0)\n\n\n def list_preferred_players(self):\n self.config_info.clear()\n self.config_info.append('Preferred players')\n self.config_info.append('----------------------')\n\n for player in sorted(local.PREFERRED_PLAYERS):\n self.config_info.append(player)\n\n 
self.config_info.verticalScrollBar().setValue(0)\n\n\n @loading\n def quit_existing_match(self, _=None):\n try:\n match_id = sl.fetch_existing_match(self.cookie_jar)\n if match_id:\n sl.report_friendly_done(self.cookie_jar, match_id)\n sl.finished_chatting_with_match(self.cookie_jar, match_id)\n except slexceptions.RequestTimeoutException as e:\n self.print(str(e))\n\n\napp = QApplication(sys.argv)\napp.setWindowIcon(QIcon('static/smashladder.png'))\nmain_window = MainWindow()\nmain_window.show()\n" }, { "alpha_fraction": 0.692307710647583, "alphanum_fraction": 0.7094017267227173, "avg_line_length": 9.636363983154297, "blob_id": "c99396295b7f81ef063b8502234fca4d9154c36a", "content_id": "37f7bc53beb813209d1ec34b1c94d722f870dc2c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Makefile", "length_bytes": 117, "license_type": "no_license", "max_line_length": 24, "num_lines": 11, "path": "/Makefile", "repo_name": "thomaav/smashladder-python", "src_encoding": "UTF-8", "text": ".PHONY: clean\nclean:\n\t-pyclean .\n\n.PHONY: run\nrun:\n\tpython3 slapp.pyw\n\n.PHONY: debug\ndebug:\n\tpython3 slapp.pyw debug\n" }, { "alpha_fraction": 0.6933205127716064, "alphanum_fraction": 0.6936416029930115, "avg_line_length": 23.519685745239258, "blob_id": "81953ba2a6f5cf8810f7fee4ae78f9b83c75b7c4", "content_id": "0c6cfc6f891c66dfbe15028c2697889e5b6ce4a8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3114, "license_type": "no_license", "max_line_length": 81, "num_lines": 127, "path": "/smashladder/local.py", "repo_name": "thomaav/smashladder-python", "src_encoding": "UTF-8", "text": "import pickle\nimport os.path\n\n\nCOOKIE_FILE = 'conf/cookies.dat'\nWHITELISTED_COUNTRIES_FILE = 'conf/whitelisted_countries'\nBLACKLISTED_PLAYERS_FILE = 'conf/blacklisted_players'\nPREFERRED_PLAYERS_FILE = 'conf/preferred_players'\n\n\ndef save_cookies_to_file(cookie_jar, filename):\n with open(filename, 'wb') as 
f:\n pickle.dump(cookie_jar, f)\n\n\ndef load_cookies_from_file(filename):\n with open(filename, 'rb') as f:\n return pickle.load(f)\n\n\ndef cookie_jar_to_string(cookie_jar):\n cookie = 'timezone=Europe/Berlin; '\n cookie += 'lad_sock_user_id=' + cookie_jar['lad_sock_user_id'] + '; '\n cookie += 'lad_sock_hash=' + cookie_jar['lad_sock_hash'] + '; '\n cookie += 'lad_sock_remember_me=' + cookie_jar['lad_sock_remember_me'] + '; '\n return cookie\n\n\nif os.path.isfile(WHITELISTED_COUNTRIES_FILE):\n with open(WHITELISTED_COUNTRIES_FILE, 'rb') as f:\n WHITELISTED_COUNTRIES = pickle.load(f)\nelse:\n WHITELISTED_COUNTRIES = []\n\nif os.path.isfile(BLACKLISTED_PLAYERS_FILE):\n with open(BLACKLISTED_PLAYERS_FILE, 'rb') as f:\n BLACKLISTED_PLAYERS = pickle.load(f)\nelse:\n BLACKLISTED_PLAYERS = []\n\nif os.path.isfile(PREFERRED_PLAYERS_FILE):\n with open(PREFERRED_PLAYERS_FILE, 'rb') as f:\n PREFERRED_PLAYERS = pickle.load(f)\nelse:\n PREFERRED_PLAYERS = []\n\nTMP_BLACKLISTED_PLAYERS = []\n\n\ndef dump_whitelisted_countries():\n with open(WHITELISTED_COUNTRIES_FILE, 'wb') as f:\n pickle.dump(WHITELISTED_COUNTRIES, f)\n\n\ndef dump_blacklisted_players():\n with open(BLACKLISTED_PLAYERS_FILE, 'wb') as f:\n pickle.dump(BLACKLISTED_PLAYERS, f)\n\n\ndef dump_preferred_players():\n with open(PREFERRED_PLAYERS_FILE, 'wb') as f:\n pickle.dump(PREFERRED_PLAYERS, f)\n\n\ndef whitelist_country(country):\n WHITELISTED_COUNTRIES.append(country)\n dump_whitelisted_countries()\n\n\ndef blacklist_player(player):\n BLACKLISTED_PLAYERS.append(player)\n dump_blacklisted_players()\n\n\ndef prefer_player(player):\n PREFERRED_PLAYERS.append(player)\n dump_preferred_players()\n\n\ndef tmp_blacklist_player(player):\n if player not in BLACKLISTED_PLAYERS or \\\n player in TMP_BLACKLISTED_PLAYERS:\n return\n\n TMP_BLACKLISTED_PLAYERS.append(player)\n\n\ndef remove_whitelisted_country(country):\n if country not in WHITELISTED_COUNTRIES:\n return\n\n WHITELISTED_COUNTRIES.remove(country)\n 
dump_whitelisted_countries()\n\n\ndef remove_blacklisted_player(player):\n if player not in BLACKLISTED_PLAYERS:\n return\n\n BLACKLISTED_PLAYERS.remove(player)\n dump_blacklisted_players()\n\n\ndef remove_preferred_player(player):\n if player not in PREFERRED_PLAYERS:\n return\n\n PREFERRED_PLAYERS.remove(player)\n dump_preferred_players()\n\n\ndef remove_tmp_blacklisted_player(player):\n if player not in TMP_BLACKLISTED_PLAYERS:\n return\n\n TMP_BLACKLISTED_PLAYERS.remove(player)\n\n\ndef remove_tmp_blacklisted():\n for player in TMP_BLACKLISTED_PLAYERS:\n if player in BLACKLISTED_PLAYERS:\n BLACKLISTED_PLAYERS.remove(player)\n\n dump_blacklisted_players()\n\n\nWHITELISTED_GAMES = { 'Melee': '2', }\n" }, { "alpha_fraction": 0.7846012711524963, "alphanum_fraction": 0.7846012711524963, "avg_line_length": 34.78688430786133, "blob_id": "0b255ce94bcdf825763f8ca96996c2857e89c9b3", "content_id": "60ff17926ee8b79358280ec450843d048f82d66c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2188, "license_type": "no_license", "max_line_length": 83, "num_lines": 61, "path": "/README.md", "repo_name": "thomaav/smashladder-python", "src_encoding": "UTF-8", "text": "# smashladder-python\n\nSmall project to matchmake automatically on Anther's (https://www.smashladder.com).\n\n![App](https://github.com/thomaav/smashladder-python/raw/master/static/app.png)\n\n## Functionality\n\n### Automatic matchmaking\n\nPress «Start» to have the application run automatic matchmaking for\nyou. You will automatically be searching, challenging any players\nqueuing up that fit your configuration, as well as accepting any\nreceived challenges.\n\n### Choose who to play against\n\nThere are several ways to decide who you want to have the script challenge\nand accept:\n\n#### Blacklisting\n\nIt is possible to add players that you do not want to have the script\nmatchmake to a blacklist. 
This can be because of high ping etc.\n\nIt is also possible to only temporarily blacklist someone until your\nnext session of playing by right clicking their name in the list. They\nwill then be deleted off the blacklist the next time you start the\napplication. Useful for when you just want to play someone else for a\nbit.\n\n#### Whitelisting\n\nTo have the script work at all, you have to whitelist countries that\nyou usually receive good ping towards, to have the script recognize\nwhich players you want to play against.\n\n#### Preferring players\n\nIf you play someone you enjoy playing a lot, you can hit the + icon in\nthe match to add them to your preferred players. If someone you prefer\nto play starts queueing when you are idling, a sound will be played,\nand you will be able to click their name in the chat to challenge them\nimmediately.\n\n### Idling\n\nThe application will always show when someone queues up that you would\notherwise be challenging if you were running the script, by printing\ninformation to the main window. This way you can start playing if you\nsee someone you like.\n\n\n### Private chat\n\nIf you click the «Private chat» letters beside the checkbox, a private\nchat window will be opened. You can change the user you want to talk\nto by issuing «/change_user $username», and the script will then fetch\nyour chat history as well. This is only meant for small messaging if\nyou just want to hit someone up, as the application is mostly meant\njust for finding matches -- not chatting." 
}, { "alpha_fraction": 0.5860049724578857, "alphanum_fraction": 0.5886718034744263, "avg_line_length": 34.41889190673828, "blob_id": "823f8904a5c57cfd3f6e43cf2f4fa527f42bd453", "content_id": "1f578fb3330f0263a912d805c9a3d0267945ff78", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 17249, "license_type": "no_license", "max_line_length": 103, "num_lines": 487, "path": "/smashladder/sl.py", "repo_name": "thomaav/smashladder-python", "src_encoding": "UTF-8", "text": "import builtins\nimport json\nimport re\nimport sys\nimport time\nfrom smashladder.local import *\nfrom smashladder.slrequests import *\n\n\nbuiltins.current_match_id = None\nbuiltins.in_match = False\nbuiltins.in_queue = False\nbuiltins.search_match_id = None\nbuiltins.idle = True\n\n\nclass SlConfig(object):\n def __init__(self,\n friendlies = True,\n ranked = False,\n doubles = False,\n enabled_games = { 'Melee': '2' },\n enabled_builds = ['Faster Melee 5.8.7'],\n username = None\n ):\n self.friendlies = friendlies\n self.ranked = ranked\n self.doubles = doubles\n self.enabled_games = enabled_games\n self.enabled_builds = enabled_builds\n self.username = username\n\n\n def set_friendlies(self, val):\n if type(val) is bool:\n self.friendlies = val\n\n\n def set_ranked(self, val):\n if type(val) is bool:\n self.ranked = val\n\n\n def set_doubles(self, val):\n if type(val) is bool:\n self.doubles = val\n\n\n def set_login(self, username):\n self.username = username\n\n\nactive_config = SlConfig()\n\n\nclass Match(object):\n def __init__(self, match):\n if 'is_removed' in match:\n self.removed = True\n return\n\n self.removed = False\n self.is_ranked = match['is_ranked']\n self.ladder_name = match['ladder_name']\n self.match_id = match['id']\n self.team_size = match['team_size']\n self.preferred_builds = match['player1']['preferred_builds']\n\n if match['player1']['username'] == active_config.username:\n try:\n self.opponent_username = 
match['player2']['username']\n self.opponent_id = match['player2']['id']\n self.opponent_country = match['player2']['location']['country']['name']\n except KeyError:\n self.opponent_username = match['player1']['username']\n self.opponent_id = match['player1']['id']\n self.opponent_country = match['player1']['location']['country']['name']\n else:\n self.opponent_username = match['player1']['username']\n self.opponent_id = match['player1']['id']\n self.opponent_country = match['player1']['location']['country']['name']\n\n\n def relevant(self):\n # first weed out definite irrelevant matches\n if self.removed:\n return False\n\n if self.ladder_name not in active_config.enabled_games.keys():\n return False\n\n if self.opponent_country.lower() not in [c.lower() for c in WHITELISTED_COUNTRIES] or\\\n self.opponent_username.lower() in [p.lower() for p in BLACKLISTED_PLAYERS]:\n return False\n\n # walk through all preferred builds of opponent to see if we\n # have _any_ matches across any enabled game\n builds_match = False\n for game in self.preferred_builds:\n if game in active_config.enabled_games.values():\n for build in self.preferred_builds[game]:\n if build['name'] in active_config.enabled_builds and build['active'] == True:\n builds_match = True\n if not builds_match:\n return False\n\n # handle doubles specifically to avoid confusion in friendlies\n # vs. 
ranked\n if self.team_size == 2 and not active_config.doubles:\n return False\n\n # now if we match certain config, we're relevant\n if (not self.is_ranked and active_config.friendlies) or \\\n (self.is_ranked and active_config.ranked):\n return True\n\n return False\n\n\ndef begin_matchmaking(cookie_jar, team_size, game_id, match_count,\n title, ranked, host_code):\n \"\"\"\n cookie_jar is required, as you need to be logged in\n team_size is 1, not doubles\n game_id is game, 2 for melee\n match_count is 0, probably for ranked\n title i dont fucking know, use ''\n ranked, 0 or 1 accordingly\n host_code is can be added if wanted, '' if none\n \"\"\"\n\n match_content = { 'team_size': team_size,\n 'game_id': game_id,\n 'match_count': match_count,\n 'title': title,\n 'ranked': ranked,\n 'host_code': host_code, }\n\n response = http_post_request('https://www.smashladder.com/matchmaking/begin_matchmaking',\n match_content,\n cookie_jar)\n response_body = json.loads(response.text)\n\n # go through returned current searches, the first is your own\n try:\n for match_id in response_body['searches']:\n if (re.match('[0-9]{7,9}', match_id)):\n own_match_id = match_id\n except KeyError:\n error_msg = response_body['error']\n\n if error_msg == 'You do not have access to this page':\n return { 'match_id': None, 'info': 'Log in to matchmake' }\n elif error_msg == 'A search like this is already active!':\n return { 'match_id': None, 'info': 'Already in queue, not starting matchmaking' }\n else:\n return { 'match_id': None, 'info': 'Unspecified failure. Matchmaking aborted' }\n\n if (own_match_id):\n return { 'match_id': own_match_id, 'info': 'Success! Queued for matchmaking: ' + own_match_id }\n else:\n return { 'match_id': None, 'info': 'Unspecified failure. 
Queueing aborted' }\n\n\ndef quit_matchmaking(cookie_jar, match_id):\n content = { 'match_id': match_id }\n\n response = http_post_request('https://www.smashladder.com/matchmaking/end_matchmaking',\n content, cookie_jar)\n response_body = json.loads(response.text)\n\n if 'success' in response_body:\n return True\n return False\n\n\ndef retrieve_active_searches(cookie_jar):\n response = http_get_request('https://www.smashladder.com/matchmaking/retrieve_match_searches',\n cookie_jar)\n response_body = json.loads(response.text)\n\n active_searches = []\n matches = iter(response_body['searches'].values())\n for match in matches:\n if type(match) is dict:\n match_obj = Match(match)\n active_searches.append(match_obj)\n\n return active_searches\n\n\ndef retrieve_relevant_searches(cookie_jar):\n active_searches = retrieve_active_searches(cookie_jar)\n relevant_searches = list()\n for match in active_searches:\n if match.relevant():\n relevant_searches.append(match)\n\n return relevant_searches\n\n\ndef retrieve_users_awaiting_reply(cookie_jar):\n response = http_get_request('https://www.smashladder.com/matchmaking/retrieve_match_searches',\n cookie_jar)\n response_body = json.loads(response.text)\n\n users = []\n for match_id in response_body['awaiting_replies']:\n if (re.match('[0-9]{7,9}', match_id)):\n match = response_body['awaiting_replies'][match_id]\n username = match['player1']['username']\n users.append(username)\n\n return users\n\n\ndef accept_match_challenge(cookie_jar, match_id):\n content = { 'accept': '1',\n 'match_id': match_id,\n 'host_code': '' }\n http_post_request('https://www.smashladder.com/matchmaking/reply_to_match',\n content, cookie_jar)\n\n\ndef decline_match_challenge(cookie_jar, match_id):\n content = { 'accept': '0',\n 'match_id': match_id,\n 'host_code': '' }\n http_post_request('https://www.smashladder.com/matchmaking/reply_to_match',\n content, cookie_jar)\n\n\ndef challenge_opponent(cookie_jar, opponent_id, match_id):\n content = { 
'challenge_player_id': opponent_id,\n 'match_id': match_id }\n\n return http_post_request('https://www.smashladder.com/matchmaking/challenge_search',\n content, cookie_jar)\n\n\ndef challenge_relevant_friendlies(cookie_jar, own_username):\n relevant_searches = retrieve_relevant_searches(cookie_jar)\n\n users_awaiting_reply = retrieve_users_awaiting_reply(cookie_jar)\n ignored_users = retrieve_ignored_users(cookie_jar)\n\n challenged_players = []\n for match in relevant_searches:\n # special case rules not in Match.relevant()\n if match.opponent_username == own_username or \\\n match.opponent_username in users_awaiting_reply or \\\n match.opponent_username in ignored_users:\n continue\n\n if not match.relevant():\n continue\n\n response = challenge_opponent(cookie_jar, match.opponent_id, match.match_id)\n challenged_players.append({ 'username': decorate_username(match.opponent_username),\n 'country': match.opponent_country })\n\n return challenged_players\n\n\ndef retrieve_ignored_users(cookie_jar):\n response = http_post_request('https://www.smashladder.com/matchmaking/ignore_list',\n {}, cookie_jar)\n response_body = json.loads(response.text)\n\n ignored_users = []\n for user in response_body['ignores']:\n ignored_users.append(user['username'])\n\n return ignored_users\n\n\ndef process_private_chat_message(message):\n message = json.loads(message)\n chat_data = message['private_chat']\n\n for user_id in chat_data:\n if 'username' in chat_data[user_id]:\n username = chat_data[user_id]['username']\n else:\n username = 'You'\n chat_messages = chat_data[user_id]['chat_messages']\n for message_key in chat_messages:\n chat_message = chat_messages[message_key]['message']\n\n if username:\n return { 'info': '[private chat] ' + decorate_username(username) + ': ' + chat_message }\n else:\n return { 'info': 'Unspecified failure in handling private chat message' }\n\n\ndef process_match_message(message):\n message = json.loads(message)\n\n # if received message is about 
getting an answer to a challenge,\n # print a message and set globals\n for match_id in message['current_matches']:\n if 'start_time' in message['current_matches'][match_id]:\n opponent = message['current_matches'][match_id]['player1']\n return { 'match_id': match_id,\n 'typing': False,\n 'info': 'Entered match: ' + match_id,\n 'opponent_username': opponent['username'],\n 'opponent_country': opponent['location']['country']['name'] }\n\n for match_id in message['current_matches']:\n match_chat_data = message['current_matches'][match_id]['chat']['chat_messages']\n # you also receive data for someone starts/stops typing, which\n # should be ignored\n if type(match_chat_data) is dict:\n for chat_message_id in match_chat_data:\n if re.match('[0-9]{7,9}', chat_message_id):\n chat_message = match_chat_data[chat_message_id]['message']\n username = match_chat_data[chat_message_id]['player']['username']\n\n if 'chat_message' in locals():\n return { 'match_id': None,\n 'typing': False,\n 'info': '[match chat] ' + username + ': ' + chat_message }\n else:\n return { 'match_id': None,\n 'typing': True,\n 'info': 'Message for participant typing - safe to ignore' }\n\n\ndef process_open_challenges(cookie_jar, message):\n message = json.loads(message)\n\n matches = []\n matches_raw = iter(message['open_challenges'].values())\n for match in matches_raw:\n if type(match) is dict:\n match_obj = Match(match)\n matches.append(match_obj)\n\n for match in matches:\n if match.relevant():\n accept_match_challenge(cookie_jar, match.match_id)\n return { 'match': match,\n 'info': 'Accepted challenge from ' + decorate_username(match.opponent_username) \\\n + ' from ' + match.opponent_country }\n else:\n decline_match_challenge(cookie_jar, match.match_id)\n return { 'match': match,\n 'info': 'Declined challenge from ' + decorate_username(match.opponent_username) \\\n + ' from ' + match.opponent_country }\n\n return { 'match': None,\n 'info': 'No awaiting challenges' }\n\n\ndef 
get_new_search_info(message):\n message = json.loads(message)\n new_match = next(iter(message['searches'].values()))\n match = Match(new_match)\n if match.removed:\n return None\n else:\n return match\n\n\ndef process_new_search(cookie_jar, message, own_username):\n message = json.loads(message)\n new_match = next(iter(message['searches'].values()))\n match = Match(new_match)\n\n if not match.relevant():\n return\n\n if match.opponent_username != own_username:\n response = challenge_opponent(cookie_jar, match.opponent_id, match.match_id)\n return { 'username': decorate_username(match.opponent_username),\n 'country': match.opponent_country }\n\n\ndef report_friendly_done(cookie_jar, match_id):\n content = { 'won': 4,\n 'message': '',\n 'match_id': match_id }\n http_post_request('https://www.smashladder.com/matchmaking/report_match',\n content, cookie_jar)\n\n\ndef finished_chatting_with_match(cookie_jar, match_id):\n content = { 'match_id': match_id }\n http_post_request('https://www.smashladder.com/matchmaking/finished_chatting_with_match',\n content, cookie_jar)\n builtins.current_match_id = None\n builtins.in_match = False\n\n\ndef send_match_chat_message(cookie_jar, match_id, message):\n content = { 'match_id': match_id,\n 'message': message,\n 'send_id': 20 }\n response = http_post_request('https://www.smashladder.com/matchmaking/send_chat',\n content, cookie_jar)\n\n\ndef send_private_chat_message(cookie_jar, username, message):\n try:\n user_id = fetch_user_id(cookie_jar, username)\n message_content = { 'to_user_id': user_id,\n 'message': message,\n 'send_id': 1 }\n response = http_post_request('https://www.smashladder.com/matchmaking/send_chat',\n message_content, cookie_jar)\n except Exception as e:\n print('[DEBUG]: Error in sending private chat: ' + str(e))\n\n\ndef fetch_private_messages(cookie_jar, username):\n try:\n user_id = fetch_user_id(cookie_jar, username)\n content = { 'id': user_id,\n 'username': username }\n response = 
http_post_request('https://www.smashladder.com/matchmaking/private_chat',\n content, cookie_jar)\n messages = (response.json())['private_chat_user']['private_chat']['chat_messages']\n messages_ids = sorted(list(messages.keys()))\n latest_messages_ids = messages_ids[max(-10, -1 * len(messages)):]\n latest_messages = []\n\n for message_id in sorted(latest_messages_ids):\n message_username = messages[message_id]['player']['username']\n message_message = messages[message_id]['message']\n latest_messages.append({ 'username': message_username, 'message': message_message})\n return latest_messages\n except Exception as e:\n print('[DEBUG]: Error in fetching private messages: ' + str(e))\n return []\n\n\ndef decorate_username(username):\n return '<span class=\"username\">' + username + '</span>'\n\n\ndef fetch_user_id(cookie_jar, username):\n fetch_user_content = { 'username': username }\n response = http_post_request('https://www.smashladder.com/matchmaking/user',\n fetch_user_content, cookie_jar)\n try:\n user_id = (response.json())['user']['id']\n return user_id\n except Exception as e:\n print('[DEBUG]: Error in fetching user id: ' + str(e))\n return None\n\n\ndef fetch_match_messages(cookie_jar):\n content = { 'is_in_ladder': True,\n 'match_only_mode': True }\n response = http_post_request('https://www.smashladder.com/matchmaking/get_user_going',\n content, cookie_jar)\n\n if not response.json()['current_matches']:\n return []\n\n try:\n chat = list(response.json()['current_matches'].values())[0]['chat']['chat_messages']\n except TypeError:\n return []\n\n messages = []\n for message_id in chat:\n username = chat[message_id]['player']['username']\n message = chat[message_id]['message']\n messages.append({ 'username': username, 'message': message })\n\n return messages\n\n\ndef fetch_existing_match(cookie_jar):\n content = { 'is_in_ladder': True,\n 'match_only_mode': True }\n response = http_post_request('https://www.smashladder.com/matchmaking/get_user_going',\n 
content, cookie_jar)\n\n if not response.json()['current_matches']:\n return None\n\n match_id = list(response.json()['current_matches'].keys())[0]\n return match_id\n" } ]
10
Petrowykh/parser_price
https://github.com/Petrowykh/parser_price
f7a78853043d15f39ff116813d0258d6d7bf3d01
0a39182b49d7af93c4f9e8a6feca090d15878649
20573f5cab1532e98fee20fc099fd9e7f21fa284
refs/heads/main
2023-03-30T19:04:08.070842
2021-04-06T11:26:35
2021-04-06T11:26:35
329,423,771
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.497854083776474, "alphanum_fraction": 0.7081544995307922, "avg_line_length": 16.923076629638672, "blob_id": "a8e3a2e9df628d099004a6663da350979e988d5b", "content_id": "73bbbf139fa3f50c91675ea6f0543d0e9f0fbe68", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 466, "license_type": "no_license", "max_line_length": 32, "num_lines": 26, "path": "/requirements.txt", "repo_name": "Petrowykh/parser_price", "src_encoding": "UTF-8", "text": "beautifulsoup4==4.9.3\nbs4==0.0.1\ncachetools==4.2.0\ncertifi==2020.12.5\nchardet==4.0.0\ngoogle-api-core==1.25.0\ngoogle-api-python-client==1.12.8\ngoogle-auth==1.24.0\ngoogle-auth-httplib2==0.0.4\ngoogleapis-common-protos==1.52.0\ngsheets==0.5.1\nhttplib2==0.18.1\nidna==2.10\nlxml==4.6.2\noauth2client==4.1.3\nprogress==1.5\nprotobuf==3.14.0\npyasn1==0.4.8\npyasn1-modules==0.2.8\npytz==2020.5\nrequests==2.25.1\nrsa==4.7\nsix==1.15.0\nsoupsieve==2.1\nuritemplate==3.0.1\nurllib3==1.26.2\n" }, { "alpha_fraction": 0.5307385921478271, "alphanum_fraction": 0.5486781597137451, "avg_line_length": 33.40794372558594, "blob_id": "a99ad49c72ed15ed65202d785c631b1e26d3facd", "content_id": "65cabb286d0b50822327994d3d78cfeca9f552ea", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9580, "license_type": "no_license", "max_line_length": 114, "num_lines": 277, "path": "/main.py", "repo_name": "Petrowykh/parser_price", "src_encoding": "UTF-8", "text": "\n# version 1.2\n\nimport bs4\nimport requests\nimport csv\nimport logging\nimport urllib3\nfrom progress.bar import IncrementalBar\n\nurllib3.disable_warnings()\n\nlogging.basicConfig(filename=\"parse.log\", level=logging.INFO, filemode=\"w\")\n\nurl_21vek = 'https://www.21vek.by/info/brands/belbohemia.html'\nurl_vdom_search = 'https://vdom.by/?post_type=product&s=' # for search\nurl_oz_main = 'https://oz.by/producer/more120300.html'\nurl_oki = 
\"https://oki.by/search?q=%D0%B1%D0%B5%D0%BB%D0%B1%D0%BE%D0%B3%D0%B5%D0%BC%D0%B8%D1%8F\"\n\nmy_list = []\n\n\nclass Parser:\n\n def __init__(self):\n # init parser\n self.session = requests.Session()\n self.session.headers = {'Mozilla/5.0 (X11; U; Linux i686; en-US) AppleWebKit/532.9 (KHTML, like Gecko) \\\n Chrome/5.0.307.11 Safari/532.9'}\n\n def get_page(self, page_url):\n # text of page\n try:\n r = self.session.get(page_url, verify=False)\n r.encoding = 'utf-8'\n html_page = r.text\n except Exception as E:\n html_page = \"\"\n logging.exception(E)\n return html_page\n\n\nclass ParserOz(Parser):\n\n def get_final_page(self):\n soup = bs4.BeautifulSoup(self.get_page(url_oz_main), 'lxml')\n r = soup.find(\"li\", class_=\"g-pagination__list__li pg-link pg-last\").find(\"a\").text\n final_page = int(r)\n return final_page\n\n @staticmethod\n def get_links(html):\n links = []\n soup = bs4.BeautifulSoup(html, 'lxml')\n container = soup.find_all(\"div\", class_=\"item-type-card__content\")\n for cont in container:\n links.append(cont.find(\"a\").get(\"href\"))\n return links\n\n def parse_product(self, link):\n cod_link = self.get_page(\"https://oz.by\" + link)\n soup = bs4.BeautifulSoup(cod_link, 'lxml')\n name = soup.find(\"div\", class_=\"b-product-title__heading\").find(\"h1\").text\n price = soup.find(\"div\", class_=\"b-product-control__row\").find(\"span\").text.strip().split(\"\\xa0\")[0]\n articles = soup.find(\"div\", class_=\"b-description__container-col\").find_all(\"td\")\n i = 0\n sa = ''\n for article in articles:\n i = i+1\n if article.text == 'Артикул':\n sa = articles[i].text\n break\n return name, price, sa\n\n\nclass ParserVdom(Parser):\n\n def price_vdom(self, article):\n vdom_text = url_vdom_search+article\n soup = bs4.BeautifulSoup(self.get_page(vdom_text), 'lxml')\n try:\n r = soup.find(\"p\", class_=\"price\").find(\"span\").text\n if article == soup.find(\"table\", class_=\"shop_attributes\").find(\"td\").text:\n price_vdom = 
str(int(r.split('.')[0])+0.01*int(r.split('.')[1][0:2])).replace('.', ',')\n else:\n price_vdom = ''\n except Exception as E:\n logging.exception(E)\n price_vdom = ''\n return price_vdom\n\n\nclass ParserOki(Parser):\n\n def get_final_page(self):\n soup = bs4.BeautifulSoup(self.get_page(url_oki), 'lxml')\n r = soup.find(\"ul\", class_=\"pagination\").find_all(\"li\")\n r = r[-1].find(\"a\").get(\"href\").split(\"=\")[-1]\n return int(r)\n\n @staticmethod\n def get_links(html):\n links = []\n soup = bs4.BeautifulSoup(html, 'lxml')\n container = soup.find_all(\"div\", class_=\"col-sm-6 col-md-4 item\")\n for cont in container:\n links.append(cont.find(\"div\", class_='prod-img').find(\"a\").get(\"href\"))\n return links\n\n def parse_product(self, link):\n cod_link = self.get_page(\"https://oki.by\" + link)\n soup = bs4.BeautifulSoup(cod_link, 'lxml')\n name = soup.find(\"div\", class_=\"col-md-12 title-name\").find(\"h1\").find(\"span\").text\n price = soup.find(\"div\", class_=\"price\").find(\"p\").text.strip().split(\" \")[0]\n articles = soup.find(\"table\", class_=\"table table-condensed\").find_all(\"td\")\n sa = articles[1].text\n return name, price.replace(\".\", \",\"), sa\n\n\nclass Parser21Vek(Parser):\n\n def get_links(self, html):\n list_urls_21vek = []\n soup = bs4.BeautifulSoup(self.get_page(html), 'lxml')\n try:\n links = soup.find(\"ul\", class_=\"b-categories-full brand-categories__list\")\\\n .find_all(\"li\", class_=\"brand-subcategories__item\")\n for url in links:\n list_url_21vek = url.find(\"a\", class_='brand-subcategories__link').get(\"href\")\n list_urls_21vek.append(list_url_21vek)\n links = list_urls_21vek\n except Exception as E:\n logging.exception('Страниц нет', E)\n links = []\n return links\n\n def get_final_page(self, page_url):\n # definition number of final page\n soup = bs4.BeautifulSoup(self.get_page(page_url), 'lxml')\n try:\n final_page_soup = soup.find(\"span\", class_=\"cr-curent cr-paging_link\").text\n final_page = 
int(final_page_soup.strip())\n except Exception as E:\n logging.exception(E)\n return 1 # page=1\n return final_page\n\n @staticmethod\n def get_blocks(html):\n # blocks with products\n soup = bs4.BeautifulSoup(html, 'lxml')\n container = soup.select('li.result__item')\n return container\n\n @staticmethod\n def get_article(name):\n # article for vdom.by\n str_article = name.split(' ')\n result = ''\n for i in str_article:\n if len(i) == 5 and i.isnumeric():\n result = i\n return result\n\n def parse_block(self, item):\n # definition name and price 21vek.by\n try:\n # name is found\n name_product = item.find(\"span\", class_=\"result__name\").text\n except Exception as E:\n logging.exception(E)\n name_product = ''\n try:\n # price is found\n price_product = item.find('span', class_=\"g-price__unit result__priceunit\").find_previous(\"span\").text\n except Exception as E:\n logging.exception(E)\n price_product = ''\n try:\n article_product = self.get_article(name_product)\n except Exception as E:\n logging.exception(E)\n article_product = ''\n return name_product, price_product, article_product\n\n\ndef parse_oki():\n vdom = ParserVdom()\n print(\"Парсим Oki.by\")\n oki = ParserOki()\n fp = oki.get_final_page() # define pages\n for page in range(0, fp):\n url_count = url_oki + '&sort=1&page=' + str(page + 1) # format url\n links = oki.get_links(oki.get_page(url_count))\n bar = IncrementalBar(' Links#' + str(page), max=len(links))\n for i in links:\n bar.next()\n parse_product_temp = oki.parse_product(i)\n if parse_product_temp[1] != '' and parse_product_temp[2] != '':\n short = [(parse_product_temp[2], parse_product_temp[0],\n parse_product_temp[1], vdom.price_vdom(parse_product_temp[2]))]\n my_list.append(short)\n bar.finish()\n return my_list\n\n\ndef parse_21vek():\n vdom = ParserVdom()\n print(\"Парсим 21 век\")\n p21 = Parser21Vek()\n list_url_21vek = p21.get_links(url_21vek)\n for url in list_url_21vek:\n fp = p21.get_final_page(url) # define pages\n for page 
in range(0, fp):\n url_count = url + 'page:' + str(page + 1) # format url\n cont = p21.get_blocks(p21.get_page(url_count))\n bar = IncrementalBar('Page #' + str(url), max=len(cont))\n for i in cont:\n bar.next()\n if p21.parse_block(i)[1] != '':\n parse_block_temp = p21.parse_block(i)\n short = [(parse_block_temp[2], parse_block_temp[0],\n parse_block_temp[1], vdom.price_vdom(p21.parse_block(i)[2]))]\n my_list.append(short)\n bar.finish()\n return my_list\n\n\ndef parse_oz():\n vdom = ParserVdom()\n print(\"Парсим oz.by\")\n oz = ParserOz()\n fp = oz.get_final_page() # define pages\n for page in range(0, fp):\n url_count = url_oz_main + 'page%3A2=&page=3?page=' + str(page+1) # format url\n links = oz.get_links(oz.get_page(url_count))\n bar = IncrementalBar(' Links#' + str(page), max=len(links))\n for i in links:\n bar.next()\n parse_product_temp = oz.parse_product(i)\n if parse_product_temp[1] != '' and parse_product_temp[2] != '':\n short = [(parse_product_temp[2], parse_product_temp[0],\n parse_product_temp[1], vdom.price_vdom(parse_product_temp[2]))]\n my_list.append(short)\n bar.finish()\n return my_list\n\n\ndef main():\n name = ''\n print(\"1 - 21 Век, 2 - Oki.by, 3 - oz.by\")\n parsing_type = input()\n if parsing_type == '1':\n parse_21vek()\n name = '21vek.csv'\n elif parsing_type == '2':\n parse_oki()\n name = \"oki.csv\"\n elif parsing_type == '3':\n parse_oz()\n name = \"oz.csv\"\n else:\n print(\"Выходим\")\n return my_list, name\n\n\nif __name__ == '__main__':\n\n list_csv, name_file = main()\n first_row = ['Art', 'Name', name_file, 'vdom.by']\n if list_csv:\n with open(name_file, \"w\", newline='') as csv_file:\n writer = csv.writer(csv_file, delimiter=';')\n writer.writerow(first_row)\n for line in list_csv:\n writer.writerow(line[0])\n else:\n print(\"Thanks\")\n" } ]
2
swishderzy/tensorflow_0
https://github.com/swishderzy/tensorflow_0
47bcbdf58edb5c7ba094b2a32fc87111d9bfb420
730e26abfd334491bb70cbff7c6a9f86bec188e3
eda45573a8c912cdf1c251299fa1568d6f36760e
refs/heads/master
2020-04-19T23:28:43.703079
2019-02-06T09:55:56
2019-02-06T09:55:56
168,496,944
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.6404459476470947, "alphanum_fraction": 0.6624341607093811, "avg_line_length": 30.359222412109375, "blob_id": "f2602661f4a197a7bdbf5e2b46d4cc1edd79330b", "content_id": "e7df33cdd9086567d8e2f964c5a1765e736d20fe", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3229, "license_type": "no_license", "max_line_length": 220, "num_lines": 103, "path": "/sentdex0.py", "repo_name": "swishderzy/tensorflow_0", "src_encoding": "UTF-8", "text": "import tensorflow as tf\n\n# constants:\nx1 = tf.constant(5)\nx2 = tf.constant(6)\n\nresult = tf.multiply(x1, x2)\n\n\n# two methods to open session:\n#1:\nsess = tf.Session()\nprint(sess.run(result))\nsess.close()\n\n#2:\nwith tf.Session() as sess:\n print(sess.run(result))\n\n\n# now let's do a NN:\n\"\"\"\n\ninput > weights > hidden layer 1 (activation function) > weights >hidden l 2 ( activation func) > weights >output layer\n\ncompare output to intended output > cost function (cross entropy)\n\nOptimization function (optimizer) > minimize cost (Adamoptimizer, SGD, AdaGrad)\n\nback propagation\n\nfeed forward + backprop = epoch\n\n\"\"\"\n\n\n\nfrom tensorflow.examples.tutorials.mnist import input_data\n\nmnist = input_data.read_data_sets(\"/tmp/data/\", one_hot = True)\n\nn_nodes_hl1 = 500\nn_nodes_hl2 = 500\nn_nodes_hl3 = 500\n\n\nn_classes = 10\nbatch_size = 100\n\"\"\" when batch is 100 for example: it's going through batch of 100 of features and feed them through out network at a time and manipulates the weights and then do another batch and manipulate the weights a little bit \"\"\"\n\n\nx = tf.placeholder(\"float\")\ny = tf.placeholder(\"float\")\n\ndef neural_network_model(data):\n hidden_1_layer = {'weights': tf.Variable(tf.random_normal([784, n_nodes_hl1])), 'biases': tf.Variable(tf.random_normal([n_nodes_hl1]))}\n hidden_2_layer = {'weights': tf.Variable(tf.random_normal([n_nodes_hl1, n_nodes_hl2])), 'biases': 
tf.Variable(tf.random_normal([n_nodes_hl2]))}\n hidden_3_layer = {'weights': tf.Variable(tf.random_normal([n_nodes_hl2, n_nodes_hl3])), 'biases': tf.Variable(tf.random_normal([n_nodes_hl3]))}\n output_layer = {'weights': tf.Variable(tf.random_normal([n_nodes_hl3, n_classes])), 'biases': tf.Variable(tf.random_normal([n_classes]))}\n\n # (input_data * weights) + bias\n\n l1 = tf.add(tf.matmul(data, hidden_1_layer['weights']), hidden_1_layer['biases'])\n l1 = tf.nn.relu(l1)\n\n l2 = tf.add(tf.matmul(l1, hidden_2_layer['weights']), hidden_2_layer['biases'])\n l2 = tf.nn.relu(l2)\n\n l3 = tf.add(tf.matmul(l2, hidden_3_layer['weights']), hidden_3_layer['biases'])\n l3 = tf.nn.relu(l3)\n\n output = tf.matmul(l3, output_layer['weights']) + output_layer['biases']\n\n return output\n\n\ndef train_neural_network(x):\n prediction = neural_network_model(x)\n cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits = prediction, labels = y))\n\n optimizer = tf.train.AdamOptimizer().minimize(cost) # gradient descent\n\n # cycles feed forward + backprop\n hm_epochs = 10\n\n with tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n\n for epoch in range(hm_epochs):\n epoch_loss = 0\n for _ in range(int(mnist.train.num_examples/batch_size)):\n epoch_x, epoch_y = mnist.train.next_batch(batch_size)\n _, c = sess.run([optimizer, cost], feed_dict={x: epoch_x, y: epoch_y})\n epoch_loss += c\n print('Epoch', epoch, 'completed out of', hm_epochs, 'loss:', epoch_loss)\n\n correct = tf.equal(tf.argmax(prediction, 1), tf.argmax(y, 1))\n\n accuracy = tf.reduce_mean(tf.cast(correct, 'float'))\n print('Accuracy:', accuracy.eval({x:mnist.test.images, y:mnist.test.labels}))\n\n\ntrain_neural_network(x)" }, { "alpha_fraction": 0.7962962985038757, "alphanum_fraction": 0.8055555820465088, "avg_line_length": 20.600000381469727, "blob_id": "c1851d92ca0920ae9d0571624901cafbe17f7502", "content_id": "dff382528caea1765bbca9cfdc779c5bd0995dcb", "detected_licenses": [], 
"is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 108, "license_type": "no_license", "max_line_length": 45, "num_lines": 5, "path": "/README.md", "repo_name": "swishderzy/tensorflow_0", "src_encoding": "UTF-8", "text": "# tensorflow_0\nFirst steps into tensorflow and deep learning\n\n\nIncludes example codes of basics and models.\n" }, { "alpha_fraction": 0.6476942300796509, "alphanum_fraction": 0.6622351408004761, "avg_line_length": 23.571428298950195, "blob_id": "02436f0e12a2226ec258cc38042c27e095d24fb4", "content_id": "a8376ecf539b3066a74bca87ef26e65c64521100", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2407, "license_type": "no_license", "max_line_length": 109, "num_lines": 98, "path": "/tf1.py", "repo_name": "swishderzy/tensorflow_0", "src_encoding": "UTF-8", "text": "import tensorflow as tf\n\n# ----------print hello world:\n\nhello = tf.constant(\"Hello world!\")\n\nsess = tf.Session()\nprint(sess.run(hello))\n\n\nwith tf.Session() as sess:\n print(f\"Saying hello: {sess.run(hello)}\")\n\n# ---------more basic operations:\na = tf.placeholder(tf.int16)\nb = tf.placeholder(tf.int16)\n\n# Define some operations\nadd = tf.add(a, b)\nmul = tf.multiply(a, b)\n\n# Launch the default graph.\nwith tf.Session() as sess:\n # Run every operation with variable input\n print(\"Addition with variables: %i\" % sess.run(add, feed_dict={a: 2, b: 3}))\n print(\"Multiplication with variables: %i\" % sess.run(mul, feed_dict={a: 2, b: 3}))\n\n\n# Matrix Multiplication:\nmatrix1 = tf.constant([[3., 3.]])\n\n# Create another Constant that produces a 2x1 matrix.\nmatrix2 = tf.constant([[2.],[2.]])\n\nproduct = tf.matmul(matrix1, matrix2)\n\n\nwith tf.Session() as sess:\n result = sess.run(product)\n print(result)\n\n\n\n# --------- using eager API(To make this work restart kernel and only execute the following eager api block):\n\"\"\"\neager api is to be able to interact with the object 
of tensorflow.\nfor example declaring a as a constant:\n without eager api: when print it it will return Tensor(\"Const:0\", shape=(), dtype=string)\n but with eager it will return the value in it.\n\"\"\"\n\nfrom __future__ import absolute_import, division, print_function\n\nimport numpy as np\nimport tensorflow as tf\nimport tensorflow.contrib.eager as tfe\n\n# Set Eager API\nprint(\"Setting Eager mode...\")\ntfe.enable_eager_execution()\n\n# Define constant tensors\nprint(\"Define constant tensors\")\na = tf.constant(2)\nprint(\"a = %i\" % a)\nb = tf.constant(3)\nprint(\"b = %i\" % b)\n\n# Run the operation without the need for tf.Session\nprint(\"Running operations, without tf.Session\")\nc = a + b\nprint(\"a + b = %i\" % c)\nd = a * b\nprint(\"a * b = %i\" % d)\n\n\n# Full compatibility with Numpy\nprint(\"Mixing operations with Tensors and Numpy Arrays\")\n\n# Define constant tensors\na = tf.constant([[2., 1.],[1., 0.]], dtype=tf.float32)\nprint(\"Tensor:\\n a = %s\" % a)\nb = np.array([[3., 0.],[5., 1.]], dtype=np.float32)\nprint(\"NumpyArray:\\n b = %s\" % b)\n\n# Run the operation without the need for tf.Session\nprint(\"Running operations, without tf.Session\")\n\nc = a + b\nprint(\"a + b = %s\" % c)\n\nd = tf.matmul(a, b)\nprint(\"a * b = %s\" % d)\n\nprint(\"Iterate through Tensor 'a':\")\nfor i in range(a.shape[0]):\n for j in range(a.shape[1]):\n print(a[i][j])" } ]
3
netgodz/GeoCAT-examples
https://github.com/netgodz/GeoCAT-examples
56ead3eb52d1f7790bad0ef5f3c53e643c91aedf
5ed9a1d68b69a921d0f1fee1160e109853926ed9
b5cfbb2d77f1349750f16b8ff51250acdb8d9ffe
refs/heads/main
2023-08-01T21:47:12.716723
2021-09-13T14:08:26
2021-09-13T14:08:26
409,575,223
1
0
Apache-2.0
2021-09-23T12:07:19
2021-09-16T09:31:45
2021-09-22T17:09:47
null
[ { "alpha_fraction": 0.5734989643096924, "alphanum_fraction": 0.5942028760910034, "avg_line_length": 34.12727355957031, "blob_id": "c9a2b9800c612bae7ba7181c866ae51c4b2706f4", "content_id": "10840ef255266f52a519853229fb029974abfc3f", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3864, "license_type": "permissive", "max_line_length": 108, "num_lines": 110, "path": "/Plots/Contours/NCL_color_1.py", "repo_name": "netgodz/GeoCAT-examples", "src_encoding": "UTF-8", "text": "\"\"\"\nNCL_color_1.py\n===============\nThis script illustrates the following concepts:\n - Drawing a horizonal color bar\n - Adjusting a colorbar position relative to plot axes\n - Recreating a default NCL colormap\n\nSee following URLs to see the reproduced NCL plot & script:\n - Original NCL script: https://www.ncl.ucar.edu/Applications/Scripts/color_1.ncl\n - Original NCL plot: https://www.ncl.ucar.edu/Applications/Images/color_1_lg.png\n\nNote:\n This may not be the best colormap to interpret the information, but was included here in order to\n demonstrate how to recreate the original NCL colormap. 
For more information on colormap choices, see the\n Colors examples in the GeoCAT-examples documentation.\n\"\"\"\n\n###############################################################################\n# Import packages:\n\nimport cartopy.crs as ccrs\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport xarray as xr\n\nimport geocat.viz.util as gvutil\nimport geocat.datafiles as gdf\nfrom geocat.viz import cmaps as gvcmaps\n\n###############################################################################\n# Read in data:\n\n# Open a netCDF data file using xarray default engine and load the data into xarray\nds = xr.open_dataset(gdf.get(\"netcdf_files/uv300.nc\")).isel(time=1)\n\n###############################################################################\n# Plot:\n\n# Generate figure and set its size in (width, height)\nfig = plt.figure(figsize=(10, 8))\n\n# Generate axes using Cartopy to draw coastlines\nax = plt.axes(projection=ccrs.PlateCarree())\nax.coastlines(linewidth=0.5, alpha=0.6)\n\n# Use geocat.viz.util convenience function to set axes limits & tick values\ngvutil.set_axes_limits_and_ticks(ax,\n xlim=(-180, 180),\n ylim=(-90, 90),\n xticks=np.linspace(-180, 180, 13),\n yticks=np.linspace(-90, 90, 7))\n\n# Use geocat.viz.util convenience function to add minor and major tick lines\ngvutil.add_major_minor_ticks(ax, labelsize=10)\n\n# Use geocat.viz.util convenience function to make latitude, longitude tick labels\ngvutil.add_lat_lon_ticklabels(ax)\n\n# Import the default color map\nnewcmp = gvcmaps.ncl_default\n\n# Define contour levels\nlevels = np.arange(-16, 48, 4)\n\n# Define dictionary for kwargs\nkwargs = dict(\n levels=levels,\n xticks=np.arange(-180, 181, 30), # nice x ticks\n yticks=np.arange(-90, 91, 30), # nice y ticks\n add_colorbar=False, # allow for colorbar specification later\n transform=ccrs.PlateCarree(), # ds projection\n)\n\n# Contouf-plot U data (for filled contours)\nfillplot = ds.U.plot.contourf(ax=ax, cmap=newcmp, 
**kwargs)\n\n# Create horizonal color bar\n# By changing the kwarg `pad`, the colorbar can be moved closer to or farther away from\n# the axis parallel to it.\n# `pad` defaults to 0.15 for horizontal colorbars\nfig.colorbar(fillplot,\n orientation=\"horizontal\",\n ticks=np.arange(-12, 44, 4),\n label='',\n shrink=0.75,\n pad=0.11)\n\n# Plot line contours\nds.U.plot.contour(ax=ax,\n colors='black',\n alpha=0.8,\n linewidths=0.4,\n linestyles='solid',\n add_labels=False,\n levels=levels,\n transform=ccrs.PlateCarree())\n\n# Use geocat.viz.util convenience function to add titles to left and right of the plot axis.\ngvutil.set_titles_and_labels(ax,\n maintitle=\"Default Color\",\n lefttitle=ds.U.long_name,\n lefttitlefontsize=16,\n righttitle=ds.U.units,\n righttitlefontsize=16,\n xlabel=\"\",\n ylabel=\"\")\n\n# Show the plot\nplt.show()\n" }, { "alpha_fraction": 0.5813190340995789, "alphanum_fraction": 0.6001193523406982, "avg_line_length": 35.032257080078125, "blob_id": "40c030f60d2faa4faf1aaaa9afb16f6c6b445fb7", "content_id": "e5825739f59da5365acd85fa1271c15b67ef60bf", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3351, "license_type": "permissive", "max_line_length": 100, "num_lines": 93, "path": "/Plots/Contours/NCL_conOncon_1.py", "repo_name": "netgodz/GeoCAT-examples", "src_encoding": "UTF-8", "text": "\"\"\"\nNCL_conOncon_1.py\n=================\nThis script illustrates the following concepts:\n - Drawing pressure/height contours on top of another set of contours\n - Drawing negative contour lines as dashed lines\n - Drawing the zero contour line thicker\n - Changing the color of a contour line\n - Overlaying dashed contours on solid line contours\n\nSee following URLs to see the reproduced NCL plot & script:\n - Original NCL script: https://www.ncl.ucar.edu/Applications/Scripts/conOncon_1.ncl\n - Original NCL plot: 
https://www.ncl.ucar.edu/Applications/Images/conOncon_1_lg.png\n\"\"\"\n\n################################################################################\n# Import packages:\n\nimport numpy as np\nimport xarray as xr\nimport matplotlib.pyplot as plt\nfrom matplotlib.ticker import ScalarFormatter\n\nimport geocat.datafiles as gdf\nfrom geocat.viz import util as gvutil\n\n###############################################################################\n# Read in data:\n\n# Open a netCDF data file using xarray default engine and load the data into xarrays\nds = xr.open_dataset(gdf.get(\"netcdf_files/mxclim.nc\"))\n# Extract variables\nU = ds.U[0, :, :]\nV = ds.V[0, :, :]\n\n################################################################################\n# Plot:\n\n# Generate figure (set its size (width, height) in inches) and axes\nplt.figure(figsize=(12, 12))\nax = plt.gca()\n\n# Set y-axis to have log-scale\nplt.yscale('log')\n\n# Contour-plot U-data\np = U.plot.contour(ax=ax, levels=16, colors='red', extend='neither')\nax.clabel(p, fmt='%d', inline=1, fontsize=14)\n\n# Contour-plot V-data\np = V.plot.contour(ax=ax, levels=16, colors='blue', extend='neither')\nax.clabel(p, fmt='%d', inline=1, fontsize=14)\n\n# Use geocat.viz.util convenience function to set axes tick values\n# Set y-lim inorder for y-axis to have descending values\ngvutil.set_axes_limits_and_ticks(ax,\n xticks=np.linspace(-60, 60, 5),\n xticklabels=['60S', '30S', '0', '30N', '60N'],\n ylim=ax.get_ylim()[::-1],\n yticks=U[\"lev\"])\n\n# Change formatter or else we tick values formatted in exponential form\nax.yaxis.set_major_formatter(ScalarFormatter())\n\n# Tweak label sizes, etc.\nax.yaxis.label.set_size(20)\nax.tick_params('both', length=20, width=2, which='major', labelsize=18)\nax.minorticks_off()\n\n# Use geocat.viz.util convenience function to add titles to left and right of the plot axis.\ngvutil.set_titles_and_labels(ax,\n maintitle=\"Ensemble Average 1987-89\",\n maintitlefontsize=20,\n 
lefttitle=U.long_name,\n lefttitlefontsize=18,\n righttitle=U.units,\n righttitlefontsize=18,\n xlabel=\"\")\n\n# Create second y-axis to show geo-potential height.\n# Currently we're using bogus values for height, cause we haven't figured out how to make this work.\naxRHS = ax.twinx()\ndummy = 10\nmn, mx = ax.get_ylim()\naxRHS.set_ylim(mn * dummy, mx * dummy)\naxRHS.set_ylim(axRHS.get_ylim()[::-1])\naxRHS.set_ylabel('Height (km)')\naxRHS.yaxis.label.set_size(20)\naxRHS.tick_params('both', length=20, width=2, which='major', labelsize=18)\n\n# Show the plot\nplt.tight_layout()\nplt.show()\n" }, { "alpha_fraction": 0.5540880560874939, "alphanum_fraction": 0.5682389736175537, "avg_line_length": 30.485149383544922, "blob_id": "75eb6c9e74b5e81811444080cca5c344b351b266", "content_id": "f5d73bd0f6bd0600029e617a53e19ee919f8aaf1", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3180, "license_type": "permissive", "max_line_length": 82, "num_lines": 101, "path": "/Plots/Boxplots/NCL_box_2.py", "repo_name": "netgodz/GeoCAT-examples", "src_encoding": "UTF-8", "text": "\"\"\"\nNCL_box_2.py\n============\n\nThis script illustrates the following concepts:\n - Drawing box plots\n - Manipulating boxplot visualizations\n - Manipulating plot axes\n\nSee following URLs to see the reproduced NCL plot & script:\n - Original NCL script: https://www.ncl.ucar.edu/Applications/Scripts/box_2.ncl\n - Original NCL plot: https://www.ncl.ucar.edu/Applications/Images/box_2_lg.png\n\"\"\"\n\n###############################################################################\n# Import packages:\n\nimport matplotlib.pyplot as plt\nfrom matplotlib.ticker import FormatStrFormatter\nimport numpy as np\n\nfrom geocat.viz import util as gvutil\n\n###############################################################################\n# Generate fake data:\n\nseed = 200\nnp.random.seed(seed)\n\ndata = np.random.lognormal(size=(40, 3), mean=1, 
sigma=.7)\nfor a in range(len(data)):\n data[a] = [x - 4 for x in data[a]]\n\n###############################################################################\n# Helper function to set edge color of boxes\n\n\ndef setBoxColor(boxplot, colors):\n\n # Set edge color of the outside and median lines of the boxes\n for element in ['boxes', 'medians']:\n for box, color in zip(boxplot[element], colors):\n plt.setp(box, color=color)\n\n # Set the color of the whiskers and caps of the boxes\n for element in ['whiskers', 'caps']:\n for box, color in zip(\n zip(boxplot[element][::2], boxplot[element][1::2]), colors):\n plt.setp(box, color=color)\n\n\n###############################################################################\n# Plot:\n\n# Create figure and axis\nw = 0.1\nfig, ax = plt.subplots(figsize=(6, 6))\nboxplots = ax.boxplot(data,\n labels=['Control', '-2Xna', '2Xna'],\n widths=[w, w, w],\n showfliers=False)\n\n# Set whiskers style to dashed\nplt.setp(boxplots['whiskers'], linestyle='--')\n\n# Set boxplot edge colors\nsetBoxColor(boxplots, ['blue', 'red', '#66FF00'])\n\n# Remove axis lines on top and right sides\nax.spines['right'].set_visible(False)\nax.spines['top'].set_visible(False)\n\n# Use geocat.viz.util convenience function to set axes tick values\ngvutil.set_axes_limits_and_ticks(ax,\n ylim=(-6.0, 8.5),\n yticks=[-3.0, 0.0, 3.0, 6.0])\n\n# Set y_axis format\nax.yaxis.set_major_formatter(FormatStrFormatter('%.1f'))\n\n# Use geocat.viz.util convenience function to add minor and major tick lines\ngvutil.add_major_minor_ticks(ax,\n y_minor_per_major=3,\n x_minor_per_major=1,\n labelsize=16)\n\n# Use geocat.viz.util convenience function to add title to the plot axis.\ngvutil.set_titles_and_labels(ax,\n maintitle='Tailored Box Plot',\n maintitlefontsize=22)\n\n# Make both major and minor ticks point inwards towards the plot\nax.tick_params(direction=\"in\", which='both', pad=9)\n\n# Set ticks only at left and bottom sides of 
plot\nax.yaxis.set_ticks_position('left')\nax.xaxis.set_ticks_position('bottom')\n\n# Display Plot\nplt.tight_layout()\nplt.show()\n" }, { "alpha_fraction": 0.7272727489471436, "alphanum_fraction": 0.7357293963432312, "avg_line_length": 32, "blob_id": "175e1df71ccbee676f4333d72a30b7ed1e048219", "content_id": "007f9f6b6115c4973ea5db86a610165fab222138", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 1419, "license_type": "permissive", "max_line_length": 83, "num_lines": 43, "path": "/docs/index.rst", "repo_name": "netgodz/GeoCAT-examples", "src_encoding": "UTF-8", "text": ".. GeoCAT-examples documentation master file, created by\n sphinx-quickstart on Wed Dec 4 11:06:58 2019.\n You can adapt this file completely to your liking, but it should at least\n contain the root `toctree` directive.\n\nGeoCAT-examples\n===============\n\nThis gallery contains visualization examples from many plotting categories\nof geosciences data (under the\n`Gallery <https://geocat-examples.readthedocs.io/en/latest/gallery/index.html>`_\ntab) and usage examples for the functions of the GeoCAT computational component,\n`GeoCAT-comp <https://geocat-comp.readthedocs.io>`_ (under the\n`GeoCAT-comp Examples\n<https://geocat-examples.readthedocs.io/en/latest/gallery-geocat-comp/index.html>`_\ntab).\n\nFor visualization, mainly `matplotlib` and `cartopy` are used. In addition,\n`geocat-datafiles <https://github.com/NCAR/geocat-datafiles>`_ is used as a\ndataset storage and `geocat-viz <https://github.com/NCAR/geocat-viz>`_ is used for\na higher level implementation for low level `matplotlib` functionalitie.\n`Xarray` and `numpy` are used for data processing.\n\nClick on any image to see the full image and source code as well as to\ndownload Python script and/or Jupyter notebook.\n\n.. 
toctree::\n :maxdepth: 2\n :caption: Contents:\n\n ./gallery/index\n ./gallery-geocat-comp/index\n ./install\n ./citation\n ./support\n\n\nIndices and tables\n==================\n\n* :ref:`genindex`\n* :ref:`modindex`\n* :ref:`search`\n" }, { "alpha_fraction": 0.7575757503509521, "alphanum_fraction": 0.7581475377082825, "avg_line_length": 46.27027130126953, "blob_id": "ac786b971a1ec78cc9328d37e47aa4eecb7e5168", "content_id": "bf6dd0a83f658ec420be911c24f508e9cba0b47e", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 1749, "license_type": "permissive", "max_line_length": 122, "num_lines": 37, "path": "/docs/install.rst", "repo_name": "netgodz/GeoCAT-examples", "src_encoding": "UTF-8", "text": "Installation\n============\n\nThis installation guide includes only the GeoCAT-examples installation instructions.\nPlease refer to `GeoCAT Contributor's Guide <https://geocat.ucar.edu/pages/contributing.html>`_ for installation of\nthe whole GeoCAT project.\n\nCreate a GeoCAT-examples Conda environment\n------------------------------------------\nGeoCAT-examples is not distributed as a conda package; thus, there is no conda installation for it.\n\nThe easiest way to access GeoCAT-examples is by cloning the repo and then using a `Conda <http://conda.pydata.org/docs/>`_\nenvironment and then building file of which is provided in this repo as follows:\n\nFrom the root directory of the cloned geocat-examples repository, run the following commands:\n\n.. 
code-block:: bash\n\n $ conda env create -f conda_environment.yml -n geocat-examples\n $ conda activate geocat-examples\n\nNote that the Conda package manager automatically installs all the required\ndependencies of GeoCAT-examples listed under ``conda_environment.yml`` file (such as ``geocat-comp``,\n``geocat-datafiles``, ``cartopy``, ``matplotlib``, ``netcdf4``, etc.); therefore, there is no need to\nexplicitly install those packages.\n\nIf you need to make use of other software packages with GeoCAT-examples, you may wish\nto install them into your ``geocat-examples`` environment at anytime with a command as in the\nfollowing example (assuming your ``geocat-examples`` environment is already activated):\n\n.. code-block:: bash\n\n $ conda install -c bokeh bokeh\n\nIf you are interested in learning more about how Conda environments work, please visit\nthe `managing environments <https://docs.conda.io/projects/conda/en/latest/user-guide/tasks/manage-environments.html>`_\npage of the Conda documentation.\n" }, { "alpha_fraction": 0.5599619150161743, "alphanum_fraction": 0.5907360315322876, "avg_line_length": 31.494844436645508, "blob_id": "dcc6f4f2e14772eefaff58b4c0a5cf183c7c3e0a", "content_id": "b44641e673c984025b31c6fa88cc84667cf756d6", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3152, "license_type": "permissive", "max_line_length": 90, "num_lines": 97, "path": "/Plots/MapProjections/NCL_native_2.py", "repo_name": "netgodz/GeoCAT-examples", "src_encoding": "UTF-8", "text": "\"\"\"\nNCL_native_2.py\n================\n\nThis script illustrates the following concepts:\n - Drawing filled contours over a mercator map\n - Overlaying contours on a map without having latitude and longitude coordinates\n - Turning on map tickmark labels with degree symbols\n - Selecting a different color map\n - Zooming in on a particular area on a mercator map\n - Using best practices when choosing plot color 
scheme to accomodate visual impairments\n\nSee following URLs to see the reproduced NCL plot & script:\n - Original NCL script: https://www.ncl.ucar.edu/Applications/Scripts/native_2.ncl\n - Original NCL plot: https://www.ncl.ucar.edu/Applications/Images/native_2_lg.png\n\"\"\"\n\n###############################################################################\n\n# Import packages:\n\nimport numpy as np\nimport xarray as xr\nimport cartopy.crs as ccrs\nimport cartopy.feature as cfeature\nimport matplotlib.pyplot as plt\nimport matplotlib.ticker as mticker\n\nimport geocat.datafiles as gdf\n\n###############################################################################\n# Read in data:\n\n# Open a netCDF data file using xarray default engine and\n# load the data into xarrays\nds = xr.open_dataset(gdf.get(\"netcdf_files/1994_256_FSD.nc\"),\n decode_times=False)\nt = ds.FSD.isel(time=0)\n\n###############################################################################\n# Plot:\n\n# Generate figure (set its size (width, height) in inches)\nfig = plt.figure(figsize=(10, 10))\n\n# Generate axes using Cartopy and draw coastlines\nax = plt.axes(projection=ccrs.Mercator())\nax.coastlines(linewidths=0.5)\nax.add_feature(cfeature.LAND, facecolor=\"lightgray\")\n\n# Set extent to include latitudes from 34 to 52 and longitudes from 128\n# to 144\nax.set_extent([128, 144, 34, 52], ccrs.PlateCarree())\n\n# Plot data and create colorbar\npt = t.plot.contourf(ax=ax,\n transform=ccrs.PlateCarree(),\n vmin=0,\n vmax=70,\n levels=15,\n cmap=\"inferno\",\n add_colorbar=False)\n\ncbar_ticks = np.arange(0, 71, 5)\ncbar = plt.colorbar(pt,\n orientation='vertical',\n extendrect=True,\n ticks=cbar_ticks)\n\n# Draw gridlines\ngl = ax.gridlines(crs=ccrs.PlateCarree(),\n draw_labels=True,\n dms=False,\n x_inline=False,\n y_inline=False,\n linewidth=1,\n color=\"black\",\n alpha=0.25)\n\n# Manipulate latitude and longitude gridline numbers and spacing\ngl.top_labels = False\ngl.right_labels = 
False\ngl.xlocator = mticker.FixedLocator([130, 134, 138, 142])\ngl.ylocator = mticker.FixedLocator([36, 38, 40, 42, 44, 46, 48, 50])\ngl.xlabel_style = {\"rotation\": 0, \"size\": 15}\ngl.ylabel_style = {\"rotation\": 0, \"size\": 15}\n\nplt.title(\"Native Mercator Projection\",\n loc=\"center\",\n y=1.05,\n size=15,\n fontweight=\"bold\")\nplt.title(t.units, loc=\"right\", y=1.0, size=14)\nplt.title(\"free surface deviation\", loc=\"left\", y=1.0, size=14)\n\n# Show the plot\nplt.show()\n" }, { "alpha_fraction": 0.5919309258460999, "alphanum_fraction": 0.6065207719802856, "avg_line_length": 34.91978454589844, "blob_id": "3de71eb81c4fcca88dfe27897c341791ed16e5dc", "content_id": "72a713555dd55947f61d29d5b029b3ae26613e8d", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6717, "license_type": "permissive", "max_line_length": 121, "num_lines": 187, "path": "/Plots/Panels/NCL_panel_4.py", "repo_name": "netgodz/GeoCAT-examples", "src_encoding": "UTF-8", "text": "\"\"\"\nNCL_panel_4.py\n==============\nThis script illustrates the following concepts:\n - Paneling three plots vertically on a page\n - Adding a common title to paneled plots\n - Adding a common labelbar to paneled plots\n - Adding additional text at the bottom of a series of paneled plots\n - Subsetting a color map\n\nSee following URLs to see the reproduced NCL plot & script:\n - Original NCL script: https://www.ncl.ucar.edu/Applications/Scripts/panel_4.ncl\n - Original NCL plot: https://www.ncl.ucar.edu/Applications/Images/panel_4_lg.png\n\"\"\"\n\n###############################################################################\n# Import packages:\n\nimport cartopy.crs as ccrs\nfrom cartopy.mpl.gridliner import LongitudeFormatter, LatitudeFormatter\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.axes_grid1.inset_locator import inset_axes\nimport numpy as np\nimport xarray as xr\n\nimport geocat.datafiles as gdf\nfrom geocat.viz 
import cmaps as gvcmaps\nimport geocat.viz.util as gvutil\n\n###############################################################################\n# Read in data:\n\n# Open a netCDF data file using xarray default engine and load the data into\n# xarrays, choosing the 2nd timestamp\nds = xr.open_dataset(gdf.get(\"netcdf_files/uv300.nc\")).isel(time=1)\n\n###############################################################################\n# Utility Function: Labelled Filled Contour Plot:\n\n# Define a utility plotting function in order not to repeat many lines of codes\n# since we need to make the same figure with two different variables.\n\n\ndef plot_labelled_filled_contours(data, ax=None):\n \"\"\"A utility function for plotting labelled, filled contours with black\n contour outlines marking each level.\n\n Parameters\n ----------\n\n data : :class:`xarray.DataArray`:\n A two-dimensional array with longitude and latitude as dimensions.\n\n ax : :class:`cartopy.mpl.geoaxes.GeoAxesSubplot`:\n An axes object from Matplotlib package with projection from Cartopy package.\n\n Returns\n -------\n\n handles : :class:`dict`:\n A dictionary containing three objects corresponding to the filled contours, the black\n contour outlines, and the contour labels.\n\n Description\n -----------\n\n Produce labeled and filled contour on the world map with tickmarks and\n tick labels.\n \"\"\"\n\n # Import an NCL colormap, truncating it by using geocat.viz.util convenience function\n newcmp = gvutil.truncate_colormap(gvcmaps.gui_default,\n minval=0.03,\n maxval=0.9)\n\n handles = dict()\n handles[\"filled\"] = data.plot.contourf(\n ax=ax, # this is the axes we want to plot to\n cmap=newcmp, # our special colormap\n levels=levels, # contour levels specified outside this function\n transform=projection, # data projection\n add_colorbar=False, # don't add individual colorbars for each plot call\n add_labels=False, # turn off xarray's automatic Lat, lon labels\n )\n\n # matplotlib's \"contourf\" 
doesn't let you specify \"edgecolors\",\n # instead we use matplotlib's \"contour\" to plot contour lines on top of the filled contours\n handles[\"contour\"] = data.plot.contour(\n ax=ax,\n levels=levels,\n colors=\"black\", # note plurals in this and following kwargs\n linestyles=\"-\",\n linewidths=0.5,\n add_labels=False, # again turn off automatic labels\n )\n\n # Label the contours\n ax.clabel(\n handles[\"contour\"],\n levels=np.arange(-10, 50, 10),\n fontsize=8,\n fmt=\"%.0f\", # Turn off decimal points\n )\n\n # Add coastlines and make them semitransparent for plot legibility\n ax.coastlines(linewidth=0.5, alpha=0.75)\n\n # Use geocat.viz.util convenience function to set axes tick values\n gvutil.set_axes_limits_and_ticks(ax,\n xticks=np.arange(-180, 181, 30),\n yticks=np.arange(-90, 91, 30))\n\n # Use geocat.viz.util convenience function to add minor and major tick lines\n gvutil.add_major_minor_ticks(ax, labelsize=8)\n\n # Use geocat.viz.util convenience function to make plots look like NCL plots by using latitude, longitude tick labels\n gvutil.add_lat_lon_ticklabels(ax)\n # Remove degree symbol from tick labels\n ax.yaxis.set_major_formatter(LatitudeFormatter(degree_symbol=''))\n ax.xaxis.set_major_formatter(LongitudeFormatter(degree_symbol=''))\n\n # Use geocat.viz.util convenience function to add main title as well as titles to left and right of the plot axes.\n gvutil.set_titles_and_labels(ax,\n lefttitle=data.attrs['long_name'],\n lefttitlefontsize=10,\n righttitle=data.attrs['units'],\n righttitlefontsize=10)\n\n return handles\n\n\n###############################################################################\n# Plot:\n\n# Make three panels (i.e. 
subplots in matplotlib) specifying white space\n# between them using gridspec_kw and hspace\n# Generate figure and axes using Cartopy projection\nprojection = ccrs.PlateCarree()\nfig, ax = plt.subplots(3,\n 1,\n figsize=(6, 10),\n gridspec_kw=dict(hspace=0.3),\n subplot_kw={\"projection\": projection})\n# Define the contour levels\nlevels = np.linspace(-10, 50, 13)\n\n# Contour-plot U data, save \"handles\" to add a colorbar later\nhandles = plot_labelled_filled_contours(ds.U, ax=ax[0])\n\n# Set a common title\nplt.suptitle(\"A common title\", fontsize=16, y=0.94)\n\n# Contour-plot V data\nplot_labelled_filled_contours(ds.V, ax=ax[1])\n\n# Contour-plot U data again but in the bottom axes\nplot_labelled_filled_contours(ds.U, ax=ax[2])\n\n# Create inset axes for colorbar\ncax = inset_axes(ax[2],\n width='100%',\n height='10%',\n loc='lower left',\n bbox_to_anchor=(0, -0.25, 1, 1),\n bbox_transform=ax[2].transAxes,\n borderpad=0)\n# Add horizontal colorbar\ncbar = plt.colorbar(handles[\"filled\"],\n cax=cax,\n orientation=\"horizontal\",\n ticks=levels[:-1],\n drawedges=True,\n aspect=30,\n extendrect=True,\n extendfrac='auto',\n shrink=1)\ncbar.ax.tick_params(labelsize=10)\n\n# Add figure label underneath subplots\nfig.text(0.5,\n 0.015,\n \"Figure 1: A nifty panel plot\",\n horizontalalignment='center',\n fontsize=14)\n\n# Show the plot\nplt.show()\n" }, { "alpha_fraction": 0.5709298849105835, "alphanum_fraction": 0.5966359972953796, "avg_line_length": 34.806819915771484, "blob_id": "f9812691e2e9b35061ffbbe354bd8fff6a92bacb", "content_id": "efeffcf95db8213a19eaf3734aa4a90908a1f804", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3151, "license_type": "permissive", "max_line_length": 120, "num_lines": 88, "path": "/Plots/Contours/NCL_lb_2.py", "repo_name": "netgodz/GeoCAT-examples", "src_encoding": "UTF-8", "text": "\"\"\"\nNCL_lb_2.py\n===============\nThis script illustrates the following 
concepts:\n - Making a vertical colorbar\n - Changing the colorbar labels\n - Setting color maps using the new standard\n\nSee following URLs to see the reproduced NCL plot & script:\n - Original NCL script: https://www.ncl.ucar.edu/Applications/Scripts/lb_2.ncl\n - Original NCL plot: https://www.ncl.ucar.edu/Applications/Images/lb_2_lg.png\n\"\"\"\n\n###############################################################################\n# Import packages:\n\nimport numpy as np\nimport xarray as xr\nimport cartopy.crs as ccrs\nimport matplotlib.pyplot as plt\n\nimport geocat.datafiles as gdf\nfrom geocat.viz import cmaps as gvcmaps\nfrom geocat.viz import util as gvutil\n\n###############################################################################\n# Read in data:\n\n# Open a netCDF data file using xarray default engine and load the data into xarrays\nds = xr.open_dataset(gdf.get(\"netcdf_files/atmos.nc\"), decode_times=False)\n# Extract variable\nv = ds.V.isel(time=0, lev=3)\n\n# Fix the artifact of not-shown-data around 0 and 360-degree longitudes\nwrap_v = gvutil.xr_add_cyclic_longitudes(v, \"lon\")\n\n###############################################################################\n# Plot:\n\n# Generate figure (set its size (width, height) in inches)\nfig = plt.figure(figsize=(10, 10))\n\n# Generate axes using Cartopy and draw coastlines\nax = plt.axes(projection=ccrs.PlateCarree())\nax.coastlines(linewidths=0.5)\n\n# Import an NCL colormap\nnewcmp = gvcmaps.wgne15\n\n# Contourf-plot data (for filled contours)\na = wrap_v.plot.contourf(levels=14,\n cmap=newcmp,\n add_colorbar=False,\n add_labels=False)\n# Contour-plot data (for borderlines)\nwrap_v.plot.contour(levels=14, linewidths=0.5, cmap='black', add_labels=False)\n\n# Add vertical colorbar\nclabels = [\n \"-70\", \"-50\", \"-30\", \"-10\", \"10\", \"30\", \"50\", \"70\", \"90\", \"110\", \"130\",\n \"150\"\n]\ncbar = fig.colorbar(a, label='', ticks=np.linspace(-24, 24, 12), 
shrink=0.4)\ncbar.ax.set_yticklabels(clabels)\n\n# Use geocat.viz.util convenience function to set axes limits & tick values without calling several matplotlib functions\ngvutil.set_axes_limits_and_ticks(ax,\n ylim=(-90, 90),\n xticks=np.linspace(-180, 180, 13),\n yticks=np.linspace(-90, 90, 7))\n\n# Use geocat.viz.util convenience function to add minor and major tick lines\ngvutil.add_major_minor_ticks(ax, labelsize=10)\n\n# Use geocat.viz.util convenience function to make plots look like NCL plots by using latitude, longitude tick labels\ngvutil.add_lat_lon_ticklabels(ax)\n\n# Use geocat.viz.util convenience function to add titles to left and right of the plot axis.\ngvutil.set_titles_and_labels(ax,\n lefttitle=\"meridional wind component\",\n lefttitlefontsize=14,\n righttitle=\"m/s\",\n righttitlefontsize=14,\n xlabel=\"\",\n ylabel=\"\")\n\n# Show the plot\nplt.show()\n" }, { "alpha_fraction": 0.511660635471344, "alphanum_fraction": 0.5400080680847168, "avg_line_length": 31.940397262573242, "blob_id": "5a6c02cfcb0737e711a60c7a6e1ca98d20413d23", "content_id": "dd61df6e600ca7990953635cb5508ca37119c796", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4974, "license_type": "permissive", "max_line_length": 117, "num_lines": 151, "path": "/Plots/Vectors/NCL_vector_1.py", "repo_name": "netgodz/GeoCAT-examples", "src_encoding": "UTF-8", "text": "\"\"\"\nNCL_vector_1.py\n===============\nPlot U & V vector over SST\n\nThis script illustrates the following concepts:\n - Overlaying vectors and filled contours on a map\n - Changing the scale of the vectors on the plot\n - Moving the vector reference annotation to the top right of the plot\n - Setting the color for vectors\n - Increasing the thickness of vectors\n\nSee following URLs to see the reproduced NCL plot & script:\n - Original NCL script: https://www.ncl.ucar.edu/Applications/Scripts/vector_1.ncl\n - Original NCL plot: 
https://www.ncl.ucar.edu/Applications/Images/vector_1_lg.png\n\"\"\"\n\n###############################################################################\n# Import packages\n\nimport xarray as xr\nimport numpy as np\nfrom matplotlib import pyplot as plt\nimport cartopy\nimport cartopy.crs as ccrs\n\nimport geocat.datafiles as gdf\nfrom geocat.viz import cmaps as gvcmaps\nfrom geocat.viz import util as gvutil\n\n###############################################################################\n# Read in data:\n\n# Open a netCDF data file using xarray default engine and load the data into xarrays\nsst_in = xr.open_dataset(gdf.get(\"netcdf_files/sst8292.nc\"))\nuv_in = xr.open_dataset(gdf.get(\"netcdf_files/uvt.nc\"))\n\n# Use date as the dimension rather than time\nsst_in = sst_in.set_coords(\"date\").swap_dims({\"time\": \"date\"}).drop('time')\nuv_in = uv_in.set_coords(\"date\").swap_dims({\"time\": \"date\"}).drop('time')\n\n# Extract required variables\n# Read SST and U, V for Jan 1988 (at 1000 mb for U, V)\n# Note that we could use .isel() if we know the indices of date and lev\nsst = sst_in['SST'].sel(date=198801)\nu = uv_in['U'].sel(date=198801, lev=1000)\nv = uv_in['V'].sel(date=198801, lev=1000)\n\n# Read in grid information\nlat_sst = sst['lat']\nlon_sst = sst['lon']\nlat_uv = u['lat']\nlon_uv = u['lon']\n\n###############################################################################\n# Plot:\n\n# Generate figure (set its size (width, height) in inches)\nplt.subplots(figsize=(10, 7))\n\n# Generate axes using Cartopy projection\nax = plt.axes(projection=ccrs.PlateCarree())\n\n# Draw vector plot\nQ = plt.quiver(lon_uv,\n lat_uv,\n u,\n v,\n color='white',\n pivot='middle',\n width=.0025,\n scale=75,\n zorder=2)\n\n# Turn on continent shading\nax.add_feature(cartopy.feature.LAND,\n edgecolor='lightgray',\n facecolor='lightgray',\n zorder=1)\n\n# Define levels for contour map (24, 24.1, ..., 28.8, 28.9)\nlevels = np.linspace(24, 28.9, 50)\n\n# Import an NCL 
colormap, truncating it by using geocat.viz.util convenience function\ngvutil.truncate_colormap(gvcmaps.BlAqGrYeOrReVi200,\n minval=0.08,\n maxval=0.96,\n n=len(levels),\n name='BlAqGrYeOrReVi200')\n\n# Contourf-plot the SST data\ncf = sst.plot.contourf('lon',\n 'lat',\n extend='both',\n levels=levels,\n cmap='BlAqGrYeOrReVi200',\n zorder=0,\n add_labels=False,\n add_colorbar=False)\n\n# Add color bar\ncbar_ticks = np.arange(24, 29.1, .3)\ncbar = plt.colorbar(cf,\n orientation='vertical',\n drawedges=True,\n shrink=0.75,\n pad=0.05,\n ticks=cbar_ticks)\n\n# Draw the key for the quiver plot as a rectangle patch\nrect = plt.Rectangle((92.9, 22.6),\n 2,\n 2,\n facecolor='white',\n edgecolor=None,\n zorder=2)\nax.add_patch(rect)\nax.quiverkey(Q,\n 0.9675,\n 0.9,\n 3,\n '4',\n labelpos='N',\n color='black',\n coordinates='axes',\n fontproperties={'size': 14},\n labelsep=0.1)\n\n# Use geocat.viz.util convenience function to set axes tick values\ngvutil.set_axes_limits_and_ticks(ax,\n xlim=(65, 95),\n ylim=(5, 25),\n xticks=range(70, 95, 10),\n yticks=range(5, 27, 5))\n\n# Use geocat.viz.util convenience function to add minor and major tick lines\ngvutil.add_major_minor_ticks(ax,\n x_minor_per_major=5,\n y_minor_per_major=5,\n labelsize=14)\n\n# Use geocat.viz.util convenience function to make plots look like NCL plots by using latitude, longitude tick labels\ngvutil.add_lat_lon_ticklabels(ax)\n\n# Use geocat.viz.util convenience function to add titles to left and right of the plot axis.\ngvutil.set_titles_and_labels(ax,\n lefttitle='Sea Surface Temperature',\n righttitle='C')\n\n# Show the plot\nplt.show()\n" }, { "alpha_fraction": 0.7913198471069336, "alphanum_fraction": 0.7919144034385681, "avg_line_length": 45.72222137451172, "blob_id": "c27bf84b9562e03a96d7fd25e551be47bb78ef90", "content_id": "646865283db82b4686170f8948703b6c16a47b27", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1682, 
"license_type": "permissive", "max_line_length": 117, "num_lines": 36, "path": "/INSTALLATION.md", "repo_name": "netgodz/GeoCAT-examples", "src_encoding": "UTF-8", "text": "# Installation\n\nThis installation guide includes only the GeoCAT-examples installation instructions.\nPlease refer to [GeoCAT Contributor's Guide](https://geocat.ucar.edu/pages/contributing.html) for installation of\nthe whole GeoCAT project.\n\n\n## Installing GeoCAT-examples\n\nGeoCAT-examples is not distributed as a conda package; thus, there is no conda installation for it.\n\nThe easiest way to access GeoCAT-examples is cloning the repo and then using a\n[Conda](http://conda.pydata.org/docs/) environment, building file of which is provided in this repo, as follows:\n\n### How to create a GeoCAT-examples Conda environment\n\nFrom the root directory of the cloned geocat-examples repository, run the following commands:\n```\n conda env create -f conda_environment.yml -n geocat-examples\n conda activate geocat-examples\n```\n\nNote that the Conda package manager automatically installs all the `required`\ndependencies of GeoCAT-examples listed under `conda_environment.yml` file (such as `geocat-comp`,\n`geocat-datafiles`, `cartopy`, `matplotlib`, `netcdf4`, etc.); therefore, there is no need for\nexplicitly installing those packages.\n\nIf you somewhat need to make use of other software packages with GeoCAT-examples, you may wish\nto install them into your `geocat-examples` environment at anytime with a command as in the\nfollowing example (assuming your `geocat-examples` environment is already activated):\n\n conda install -c bokeh bokeh\n\nIf you are interested in learning more about how Conda environments work, please visit\nthe [managing environments](https://docs.conda.io/projects/conda/en/latest/user-guide/tasks/manage-environments.html)\npage of the Conda documentation.\n" }, { "alpha_fraction": 0.7074176073074341, "alphanum_fraction": 0.7431318759918213, "avg_line_length": 
35.400001525878906, "blob_id": "304893fc52f8aef8d316abad978ece3abae9a4cf", "content_id": "86ff40c54cece28c3a62c3f46c1b572fa7533378", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 728, "license_type": "permissive", "max_line_length": 93, "num_lines": 20, "path": "/docs/citation.rst", "repo_name": "netgodz/GeoCAT-examples", "src_encoding": "UTF-8", "text": "Citation\n========\n\nHow to cite GeoCAT-examples\n---------------------------\n\nCite GeoCAT-examples using the following text:\n\n**Visualization & Analysis Systems Technologies. (Year).\nGeoscience Community Analysis Toolkit (GeoCAT-examples) [Software].\nBoulder, CO: UCAR/NCAR - Computational and Informational System Lab. doi:10.5065/A8PP-4358.**\n\nUpdate the year as appropriate. For example:\n\n**Visualization & Analysis Systems Technologies. (2021).\nGeoscience Community Analysis Toolkit (GeoCAT-examples) [Software].\nBoulder, CO: UCAR/NCAR - Computational and Informational System Lab. doi:10.5065/A8PP-4358.**\n\nFor further information, please refer to\n`GeoCAT homepage - Citation <https://geocat.ucar.edu/pages/citation.html>`_.\n" } ]
11
yungshansu/line-bot-python-heroku
https://github.com/yungshansu/line-bot-python-heroku
6afd9ec428ba7e8abebb71a175de209cfff211d1
624663003596be6d029881e864fff877b962eb24
467e68e740e7464c0608e5e17b514170aba38ad7
refs/heads/master
2021-06-14T19:41:56.623963
2017-04-02T00:30:40
2017-04-02T00:30:40
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5792136788368225, "alphanum_fraction": 0.6030880808830261, "avg_line_length": 27.43911361694336, "blob_id": "265c78fb80b87873bdbaf85b4238439d0d0c9a61", "content_id": "1de2fddea996579391684fa3d48db6ea8d9ab699", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8479, "license_type": "no_license", "max_line_length": 172, "num_lines": 271, "path": "/app.py", "repo_name": "yungshansu/line-bot-python-heroku", "src_encoding": "UTF-8", "text": "# encoding: utf-8\nfrom flask import Flask, request, abort\n\nfrom linebot import (\n LineBotApi, WebhookHandler\n)\nfrom linebot.exceptions import (\n InvalidSignatureError\n)\nfrom linebot.models import (\n MessageEvent, TextMessage, TextSendMessage,\n)\nimport codecs\nimport random\n\n#ml\nimport operator\nimport io\nfrom gensim.models.word2vec import Word2Vec\nfrom gensim.models.keyedvectors import KeyedVectors\nfrom sklearn.preprocessing import scale\nimport numpy as np\n#from sklearn.manifold import TSNE\n#import matplotlib.pyplot as plt\nimport jieba\nfrom sklearn.cross_validation import train_test_split\nfrom sklearn.linear_model import SGDClassifier\n\n\napp = Flask(__name__)\n\nline_bot_api = LineBotApi('+YrlgJ1c5YOs8NGIOUOPN2Z/Ya6zmtW2mlrynoKWm64OuqFFYIJ6Gy90AwyTZmg9bTPWUAa8bIA+tJfOgw1ekKR3/RUukTJw+9ppv08wBIF83Hx2FRqaKdcyZcUx2viZe8DXDc6l5ftaAyUNSt7cQQdB04t89/1O/w1cDnyilFU=') #Your Channel Access Token\n\n\nhandler = WebhookHandler('a29dfcc21df2547b09392b7f0a9cabbc') #Your Channel Secret\n\nmyquesfile=codecs.open(\"src/ques1.txt\", \"r\", \"utf-8\")\nmyansfile=codecs.open(\"src/ans1.txt\", \"r\", \"utf-8\")\nques1=myquesfile.read()\nans1=myansfile.read()\nmyquesfile.close()\nmyansfile.close()\n\nques2=u'我累了,我沒辦法熬過這一次'\nans2=u'相信我,你可以的!我建議你可以先看看【'+'TED'+u'】國際勵志大師安東尼.羅賓:'+'Why we do what we 
do'+u'。'+'http://www.knowledger.info/2014/07/29/tony-robbins-in-a-tedtalk-why-we-do-what-we-do/'+u'。'\nques3=u'或許一了百了比較輕鬆'\nans3=u'你先可以撥打衛生福利部'+'24'+u'小時安心專線:'+'0800-788-995'+u'、國際生命線協會'+'24'+u'小時電話協談:'+'1995'+u'聊聊看!'\nans4=u'多聊聊你的故事'\n\nhappyreply1=u'太棒了!我真是為你開心!'\nhappyreply2=u'好事可以多跟人分享,快樂會加倍喔!'\nhappyreply3=u'世界上果然有很多美好的事。'\nhappyreply4=u'你開心,我窩心。'\nhappyreply5=u'用心感受每天的快樂。'\nhappyreply6=u'願你每天笑口常開。'\nhappyreply7=u'守住你頭頂那片快樂的藍天。'\nhappyreply8=u'熱愛生命,快樂無所不在!'\nhappyreply9=u'讓我們一起來唱「快樂崇拜」!'\nhappyreply10=u'推薦你跟我一起聽首「一百件快樂的事」。'\n\nsadreply1=u'你有的不愉快,讓我來分擔。'\nsadreply2=u'每個人都有屬於自己快樂的公式,只是等待你發掘。'\nsadreply3=u'盡力就好!'\nsadreply4=u'睡個覺起來,明天又是新的一天!'\nsadreply5=u'讓我們先一起唱首「煎熬」發洩一下!'\nsadreply6=u'一秒前的事,就當它過去了吧。'\nsadreply7=u'盡量跟我發洩吧!說完心情會好很多。'\nsadreply8=u'多跟我說說,我是你永遠的垃圾桶。'\nsadreply9=u'不要排斥不快樂,是它讓你了解快樂的珍貴。'\nsadreply10=u'現在出門去運動吧!會讓你心情變好。'\n\n# Do some very minor text preprocessing\ndef cleanText(corpus):\n corpus = [z.lower().replace('\\n','').split() for z in corpus]\n return corpus\n\n# build word vector for training set by using the average value of all word vectors in the tweet, then scale\ndef buildWordVector(imdb_w2v,text, size):\n vec = np.zeros(size).reshape((1, size))\n count = 0.\n for word in text:\n try:\n vec += imdb_w2v[word].reshape((1, size))\n count += 1.\n except KeyError:\n continue \n if count != 0:\n vec /= count\n return vec \n\n##ML\nwith open('pos_chinese.txt', 'r') as infile:\n pos_tweets = infile.readlines()\n\nwith open('neg_chinese.txt', 'r') as infile:\n neg_tweets = infile.readlines()\n\n# use 1 for positive sentiment, 0 for negative\ny = np.concatenate((np.ones(len(pos_tweets)), np.zeros(len(neg_tweets))))\n\nx_train, x_test, y_train, y_test = train_test_split(np.concatenate((pos_tweets, neg_tweets)), y, test_size=0.1)\n\n\nx_train = cleanText(x_train)\nx_test = cleanText(x_test)\n\nn_dim = 300\n#Initialize model and build vocab\nimdb_w2v = Word2Vec(size=n_dim, min_count=10)\nimdb_w2v.build_vocab(x_train)\n\n#Train the model over 
train_reviews (this may take several minutes)\nimdb_w2v.train(x_train)\nimdb_w2v.save('chsen.model.bin')\n\n#model = KeyedVectors.load('chsen.model.bin')\ntrain_vecs = np.concatenate([buildWordVector(imdb_w2v,z, n_dim) for z in x_train])\ntrain_vecs = scale(train_vecs)\n\n#Train word2vec on test tweets\nimdb_w2v.train(x_test)\n\n#Build test tweet vectors then scale\ntest_vecs = np.concatenate([buildWordVector(imdb_w2v,z, n_dim) for z in x_test])\ntest_vecs = scale(test_vecs)\n\n#Use classification algorithm (i.e., Stochastic Logistic Regression) on training set, then assess model performance on test set\n\n\nlr = SGDClassifier(loss='log', penalty='l2')\n#print(train_vecs)\n#print(y_train)\nlr.fit(train_vecs, y_train)\n#print(lr) \n\n\ndef test_sentance(imdb_w2v,lr,input_sentence):\n\n # jieba custom setting.\n jieba.set_dictionary('dict.txt')\n\n # load stopwords set\n stopwordset = set()\n with io.open('stopwords.txt','r',encoding='utf-8') as sw:\n for line in sw:\n stopwordset.add(line.strip('\\n'))\n word_list = jieba.cut(input_sentence, cut_all=False)\n pos_result = 0\n neg_result = 0\n for word in word_list:\n if word not in stopwordset:\n if word in imdb_w2v.wv.vocab:\n vector = imdb_w2v.wv[word]\n #print(vector)\n ans = lr.predict([vector])\n anslist = ans.tolist()\n #print(ans)\n if(anslist[0] == 1):\n print(word)\n print(anslist[0])\n print(\"pos_result += 1\")\n pos_result = pos_result + 1\n elif(anslist[0] == 0):\n print(word)\n print(anslist[0])\n print(\"neg_result += 1\")\n neg_result = neg_result + 1;\n else:\n print(\"do nothing\")\n continue\n return {'pos':pos_result,'neg':neg_result}\n\n\n\n\n\n@app.route(\"/callback\", methods=['POST'])\ndef callback():\n # get X-Line-Signature header value\n signature = request.headers['X-Line-Signature']\n\n # get request body as text\n body = request.get_data(as_text=True)\n app.logger.info(\"Request body: \" + body)\n\n # handle webhook body\n try:\n handler.handle(body, signature)\n except 
InvalidSignatureError:\n abort(400)\n\n return 'OK'\n\n@handler.add(MessageEvent, message=TextMessage)\ndef handle_text_message(event):\n text = event.message.text #message from user\n condition = -1\n if text == ques1:\n text='(Sad Emotion)'+ans1\n elif text ==ques2:\n text='(Sad Emotion)'+ans2\n elif text ==ques3:\n text='(Sad Emotion)'+ans3\n else:\n \n \n query = text\n\n result = {}\n result = test_sentance(imdb_w2v,lr,query)\n if(result['pos'] > result['neg']):\n condition=1\n elif(result['neg'] > result['pos']):\n condition=0\n else:\n text=ans4\n condition=2\n text='(No special emotion)'+text\n num=random.randint(1,10)\n if condition==1 : #happyreply\n if num==1:\n text=happyreply1\n elif num==2:\n text=happyreply2\n elif num==3:\n text=happyreply3\n elif num==4:\n text=happyreply4\n elif num==5:\n text=happyreply5\n elif num==6:\n text=happyreply6\n elif num==7:\n text=happyreply7\n elif num==8:\n text=happyreply8\n elif num==9:\n text=happyreply9\n else:\n text=happyreply10\n text= '(Happy Emotion)'+text\n elif condition==0 : #sadreply\n if num==1:\n text=sadreply1\n elif num==2:\n text=sadreply2\n elif num==3:\n text=sadreply3\n elif num==4:\n text=sadreply4\n elif num==5:\n text=sadreply5\n elif num==6:\n text=sadreply6\n elif num==7:\n text=sadreply7\n elif num==8:\n text=sadreply8\n elif num==9:\n text=sadreply9\n else:\n text=sadreply10\n text= '(Sad Emotion)'+text\n line_bot_api.reply_message(\n event.reply_token,\n TextSendMessage(text )) #reply the same message from user\n \n\nimport os\nif __name__ == \"__main__\":\n app.run(host='0.0.0.0',port=os.environ['PORT'])\n" }, { "alpha_fraction": 0.8135592937469482, "alphanum_fraction": 0.8135592937469482, "avg_line_length": 7.142857074737549, "blob_id": "5f275b4f4f734560ffd862f72894491cd3cd942d", "content_id": "909ab18ce4652ca1c9e7afee12935729dfab9cbc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 59, "license_type": "no_license", 
"max_line_length": 12, "num_lines": 7, "path": "/requirements.txt", "repo_name": "yungshansu/line-bot-python-heroku", "src_encoding": "UTF-8", "text": "line-bot-sdk\nflask\ngensim\nsklearn\nnumpy\nmatplotlib\njieba\n\n\n" } ]
2
Gh0stSancti/venom
https://github.com/Gh0stSancti/venom
928d052b90c5f2776d77c8c73c7b356d3ef22534
2a4a4b05175008ca664b8c3e071434c128174472
13814a71e5486de88461e55efec203b56e1f49d2
refs/heads/master
2021-01-10T23:03:30.327172
2016-10-01T00:33:28
2016-10-01T00:33:28
69,710,973
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5806087851524353, "alphanum_fraction": 0.5851183533668518, "avg_line_length": 18.674419403076172, "blob_id": "9a1501f5584477c89689aa645b18bc1a20682d28", "content_id": "bd1c5e97a5c8b8471e8a3956616c87368811cc02", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 887, "license_type": "no_license", "max_line_length": 51, "num_lines": 43, "path": "/venom.py", "repo_name": "Gh0stSancti/venom", "src_encoding": "UTF-8", "text": "import os\r\nimport sys\r\nimport time\r\n\r\ndef main():\r\n\tprint(\"\")\r\n\tprint(\"venom - ARP Cache Poisoner\")\r\n\tprint(\"\")\r\n\tprint(\"Created by: Gh0st\")\r\n\tprint(\"Type help for assistance\")\r\n\taa = input(\"venom> \")\r\n\tif \"help\" in aa:\r\n\t\tprint(\"\")\r\n\t\tprint(\"arpcp - Cache Poison\")\r\n\t\tprint(\"poison - Poisons chosen internet address\")\r\n\t\ttime.sleep(2)\r\n\t\tmain()\r\n\telif \"arpcp\" in aa:\r\n\t\tprint(\"\")\r\n\t\tprint(\"Generating list...\")\r\n\t\ttime.sleep(5)\r\n\t\tprint(\"\")\r\n\t\tos.system(\"arp -a\")\r\n\t\tmain()\r\n\telif \"poison\" in aa:\r\n\t\tprint(\"\")\r\n\t\tbb = input(\"venom/poison/internet-address> \")\r\n\t\tprint(\"\")\r\n\t\tprint(\"Poisoning \" + bb + \"...\")\r\n\t\ttime.sleep(4)\r\n\t\tprint(bb + \" has been poisoned.\")\r\n\t\tprint(\"Press enter to continue\")\r\n\t\tos.system(\"pause >nul\")\r\n\t\tos.system(\"cls\")\r\n\t\tmain()\r\n\telse:\r\n\t\tprint(\"\")\r\n\t\tprint(\"Not a command\")\r\n\t\tprint(\"Press enter to continue\")\r\n\t\tos.system(\"pause >nul\")\r\n\t\tmain()\r\n\t\t\r\nmain()" }, { "alpha_fraction": 0.7878788113594055, "alphanum_fraction": 0.7878788113594055, "avg_line_length": 15.5, "blob_id": "7832b0e1efdf2bc43cf2c10866d6642b2cc9caef", "content_id": "48959b5d252f8dbbaf1f7983b0e85bf4d7592484", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 33, "license_type": "no_license", "max_line_length": 24, "num_lines": 
2, "path": "/README.md", "repo_name": "Gh0stSancti/venom", "src_encoding": "UTF-8", "text": "# venom\nvenom-arp cache posioner\n" } ]
2
venkataprabhanjankumar/DjangoRestFrameoworkWithJWTtokens
https://github.com/venkataprabhanjankumar/DjangoRestFrameoworkWithJWTtokens
59c1a62dcb4a50b2b1708e5ca5f8ed1295b76379
93fcbe7630f4ed282eef330187cae5763b6db0f8
0f255fd49c853c440430e1ca71ca2e3768be52a9
refs/heads/main
2023-05-15T03:23:47.112985
2021-06-08T05:42:28
2021-06-08T05:42:28
374,892,460
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7073906660079956, "alphanum_fraction": 0.7073906660079956, "avg_line_length": 38, "blob_id": "ac993ff5b071016193079e305d881d4c100860d7", "content_id": "8837e629d7bcdd9bd96d6ba20519061944bdaa94", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 663, "license_type": "no_license", "max_line_length": 64, "num_lines": 17, "path": "/restapp/urls.py", "repo_name": "venkataprabhanjankumar/DjangoRestFrameoworkWithJWTtokens", "src_encoding": "UTF-8", "text": "from django.urls import path\nfrom rest_framework.authtoken.views import obtain_auth_token\nfrom . import views\n\nurlpatterns = [\n path('signup', views.signup),\n path('signin', views.signin),\n path('getproducts', views.getProducts),\n path('getproduct/<int:product_id>', views.getProduct),\n path('getusers', views.getUsers),\n path('getuser/<int:user_id>', views.getUser),\n path('deleteproduct/<int:product_id>', views.deleteProduct),\n path('deleteuser/<int:user_id>', views.deleteUser),\n path('createproduct', views.createProduct),\n path('updateproduct/<int:product_id>', views.updateProduct),\n path('createuser',views.createUser),\n]\n" }, { "alpha_fraction": 0.5053533315658569, "alphanum_fraction": 0.5438972115516663, "avg_line_length": 19.30434799194336, "blob_id": "16ce7ce52ab96f02dee3fcb40c91eec2e5ccfa50", "content_id": "42047f2e27c2ccaf56295710aaa5bc95cf5f1bd7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 467, "license_type": "no_license", "max_line_length": 45, "num_lines": 23, "path": "/restapp/migrations/0002_auto_20210607_1237.py", "repo_name": "venkataprabhanjankumar/DjangoRestFrameoworkWithJWTtokens", "src_encoding": "UTF-8", "text": "# Generated by Django 3.0 on 2021-06-07 07:07\n\nfrom django.db import migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('restapp', '0001_initial'),\n ]\n\n operations = [\n migrations.RemoveField(\n 
model_name='userroles',\n name='userole',\n ),\n migrations.DeleteModel(\n name='UserDetails',\n ),\n migrations.DeleteModel(\n name='UserRoles',\n ),\n ]\n" }, { "alpha_fraction": 0.6205661296844482, "alphanum_fraction": 0.620653510093689, "avg_line_length": 38.74305725097656, "blob_id": "f6019c5de3e7b94e52e69987aac70cd81666ea43", "content_id": "84ec046e560ccad537de1fd4a966b50c972ef28e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 11446, "license_type": "no_license", "max_line_length": 111, "num_lines": 288, "path": "/restapp/views.py", "repo_name": "venkataprabhanjankumar/DjangoRestFrameoworkWithJWTtokens", "src_encoding": "UTF-8", "text": "from django.shortcuts import render\nimport json\nfrom django.views.decorators.csrf import csrf_protect\nfrom rest_framework.decorators import api_view, authentication_classes, permission_classes, renderer_classes, \\\n parser_classes\nfrom rest_framework.authentication import BasicAuthentication\nfrom rest_framework.permissions import IsAuthenticated, IsAdminUser\nfrom rest_framework.authtoken.models import Token\nfrom rest_framework.authentication import TokenAuthentication\nfrom rest_framework.response import Response\nfrom django.contrib.auth.models import User\nfrom django.contrib.auth.hashers import check_password\nfrom django.conf import settings\nfrom .serializers import UserSerializer, ProductsSerializer\nfrom .models import Products\nfrom rest_framework.parsers import JSONParser\n\n\n@csrf_protect\n@api_view(['GET', 'POST'])\ndef signin(request):\n if request.method == 'POST':\n username = request.POST.get('UserName')\n password = request.POST.get('Password')\n print(password)\n roles = []\n try:\n userDetails = User.objects.get(username=username)\n print(userDetails.password)\n if check_password(password, userDetails.password):\n print(userDetails.id)\n token = Token.objects.get(user_id=userDetails.id)\n authorizationKey = token.key\n 
print(authorizationKey)\n if userDetails.is_staff:\n roles.append('staff')\n if userDetails.is_superuser:\n roles.append('superuser')\n roles.append('put Products')\n roles.append('Post Products')\n roles.append('delete Products')\n roles.append('get Products')\n\n userSerializer = UserSerializer(userDetails)\n userData = {}\n userData.update(userSerializer.data)\n userData.update({'Authentication Key': authorizationKey})\n userData.update({'Roles': roles})\n return Response(json.dumps(userData, indent=2))\n\n else:\n return render(request, 'login.html', {'err_msg': 'Invalid Password'})\n except Exception as e:\n return render(request, 'login.html', {'err_msg': e})\n else:\n return render(request, 'login.html', {})\n\n\n@csrf_protect\ndef signup(request):\n if request.method == 'POST':\n username = request.POST.get('UserName')\n print(username)\n password = request.POST.get('Password')\n firstName = request.POST.get('firstName')\n print(firstName)\n lastName = request.POST.get('lastName')\n email = request.POST.get('Email')\n userType = request.POST.get('userType')\n if userType == 'normalUser':\n details = User.objects.create_user(username=username, password=password, email=email)\n details.is_staff = False\n details.is_superuser = False\n details.first_name = firstName\n details.last_name = lastName\n details.save()\n det = User.objects.get(username=username)\n print(det.id)\n token_gen = Token.objects.create(user_id=det.id)\n token_gen.save()\n token = Token.objects.get(user_id=det.id)\n authorizationKey = token.key\n print(authorizationKey)\n else:\n details = User.objects.create_superuser(username=username, password=password, email=email)\n details.is_staff = True\n details.is_superuser = True\n details.first_name = firstName\n details.last_name = lastName\n details.save()\n det = User.objects.get(username=username)\n print(det.id)\n token_gen = Token.objects.create(user_id=det.id)\n token_gen.save()\n token = Token.objects.get(user_id=det.id)\n 
authorizationKey = token.key\n print(authorizationKey)\n return render(request,'register.html',{'sucess_msg':\"User Created Sucessful\"})\n else:\n return render(request, 'register.html', {})\n\n\n@api_view(['GET'])\n@authentication_classes([BasicAuthentication, TokenAuthentication])\n@permission_classes([IsAuthenticated])\ndef getProducts(request):\n if request.user.is_authenticated:\n try:\n productDetails = Products.objects.all()\n produtSerilizer = ProductsSerializer(productDetails, many=True)\n return Response(produtSerilizer.data)\n except Exception as e:\n return Response(json.dumps({'Error ': 'Does not exist'}))\n else:\n return Response(json.dumps({'Error Message': 'Unauthrized User'}))\n\n\n@api_view(['GET'])\n@authentication_classes([BasicAuthentication, TokenAuthentication])\n@permission_classes([IsAuthenticated])\ndef getProduct(request, product_id):\n if request.user.is_authenticated:\n try:\n productDetails = Products.objects.get(pk=product_id)\n produtSerilizer = ProductsSerializer(productDetails)\n return Response(produtSerilizer.data)\n except Exception as e:\n return Response(json.dumps({'Error Msg': e}))\n else:\n return Response(json.dumps({'Error Message': 'Unauthrized User'}))\n\n\n@api_view(['GET'])\n@authentication_classes([BasicAuthentication, TokenAuthentication])\n@permission_classes([IsAdminUser])\ndef getUsers(request):\n if request.user.is_authenticated:\n try:\n userDetails = User.objects.all()\n print(userDetails)\n userSerilizer = UserSerializer(userDetails, many=True)\n print(userSerilizer.data)\n return Response(userSerilizer.data)\n except Exception as e:\n return Response(json.dumps({'Error Msg': e}))\n else:\n return Response(json.dumps({'Error Message': 'Unauthrized User'}))\n\n\n@api_view(['GET'])\n@authentication_classes([BasicAuthentication, TokenAuthentication])\n@permission_classes([IsAdminUser])\ndef getUser(request, user_id):\n if request.user.is_authenticated:\n try:\n userDetails = User.objects.get(pk=user_id)\n 
print(userDetails)\n userSerilizer = UserSerializer(userDetails)\n print(userSerilizer.data)\n return Response(userSerilizer.data)\n except Exception as e:\n return Response(json.dumps({'Error Msg': e}))\n else:\n return Response(json.dumps({'Error Message': 'Unauthrized User'}))\n\n\n@api_view(['DELETE'])\n@authentication_classes([BasicAuthentication, TokenAuthentication])\n@permission_classes([IsAuthenticated])\ndef deleteProduct(request, product_id):\n if request.user.is_authenticated:\n try:\n productDetails = Products.objects.get(pk=product_id)\n productDetails.delete()\n\n return Response(json.dumps({\"Msg\": \"Product Deleted\"}))\n except Exception as e:\n return Response(json.dumps({'Error Msg': e}))\n else:\n return Response(json.dumps({'Error Message': 'Unauthrized User'}))\n\n\n@api_view(['DELETE'])\n@authentication_classes([BasicAuthentication, TokenAuthentication])\n@permission_classes([IsAdminUser])\ndef deleteUser(request, user_id):\n if request.user.is_authenticated:\n try:\n userDetails = User.objects.get(pk=user_id)\n userDetails.delete()\n return Response(json.dumps({\"Msg\": \"User Deleted\"}))\n except Exception as e:\n return Response(json.dumps({'Error Msg': e}))\n else:\n return Response(json.dumps({'Error Message': 'Unauthrized User'}))\n\n\n@api_view(['POST'])\n@authentication_classes([BasicAuthentication, TokenAuthentication])\n@permission_classes([IsAuthenticated])\ndef createProduct(request):\n if request.user.is_authenticated:\n try:\n productName = request.POST.get('productName')\n productCategory = request.POST.get('productCategory')\n productCount = request.POST.get('productCount')\n productDetails = Products(productName=productName, productCategory=productCategory,\n productCount=productCount)\n productDetails.save()\n\n return Response(json.dumps({\"Msg\": \"Product Created\"}))\n except Exception as e:\n return Response(json.dumps({'Error Msg': e}))\n else:\n return Response(json.dumps({'Error Message': 'Unauthrized 
User'}))\n\n\n@api_view(['PUT'])\n@parser_classes([JSONParser])\n@authentication_classes([BasicAuthentication, TokenAuthentication])\n@permission_classes([IsAuthenticated])\ndef updateProduct(request, product_id):\n if request.user.is_authenticated:\n try:\n product = Products.objects.get(pk=product_id)\n data = JSONParser().parse(request)\n print(data)\n print(type(data))\n serilizer = ProductsSerializer(product, data=data)\n print(serilizer.is_valid())\n #print(serilizer.error_messages)\n if serilizer.is_valid():\n serilizer.save()\n return Response(json.dumps({\"Msg\": \"Product Updated\"}))\n else:\n return Response(json.dumps({'Msg': 'Enter Valid data'}))\n\n except Exception as e:\n return Response(json.dumps({'Error Msg': e}))\n else:\n return Response(json.dumps({'Error Message': 'Unauthrized User'}))\n\n@api_view(['POST'])\n@authentication_classes([BasicAuthentication, TokenAuthentication])\n@permission_classes([IsAdminUser])\ndef createUser(request):\n if request.user.is_authenticated:\n try:\n data = JSONParser().parse(request)\n username = data['UserName']\n password = data['Password']\n email = data['Email']\n firstName = data['firstName']\n lastName = data['lastName']\n userType = data['userType']\n if userType == 'normalUser':\n details = User.objects.create_user(username=username, password=password, email=email)\n details.is_staff = False\n details.is_superuser = False\n details.first_name = firstName\n details.last_name = lastName\n details.save()\n det = User.objects.get(username=username)\n print(det.id)\n token_gen = Token.objects.create(user_id=det.id)\n token_gen.save()\n token = Token.objects.get(user_id=det.id)\n authorizationKey = token.key\n print(authorizationKey)\n else:\n details = User.objects.create_superuser(username=username, password=password, email=email)\n details.is_staff = True\n details.is_superuser = True\n details.first_name = firstName\n details.last_name = lastName\n details.save()\n det = 
User.objects.get(username=username)\n print(det.id)\n token_gen = Token.objects.create(user_id=det.id)\n token_gen.save()\n token = Token.objects.get(user_id=det.id)\n authorizationKey = token.key\n print(authorizationKey)\n return Response(json.dumps({\"Msg\":\"User Created \"}))\n except Exception as e:\n return Response(json.dumps({'Error Msg': e}))\n else:\n return Response(json.dumps({'Error Message': 'Unauthrized User'}))\n" }, { "alpha_fraction": 0.7309562563896179, "alphanum_fraction": 0.7309562563896179, "avg_line_length": 31.473684310913086, "blob_id": "806405f98c05d73ffbfc24e7ba7c078d8e4b039d", "content_id": "49b98616851e1e3b792c118043577ad5ccd1aa6d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 617, "license_type": "no_license", "max_line_length": 71, "num_lines": 19, "path": "/restapp/serializers.py", "repo_name": "venkataprabhanjankumar/DjangoRestFrameoworkWithJWTtokens", "src_encoding": "UTF-8", "text": "from rest_framework import serializers\nfrom .models import Products\nfrom django.contrib.auth.models import User\n\n\nclass UserSerializer(serializers.ModelSerializer):\n class Meta:\n model = User\n fields = (\"id\", \"username\", \"first_name\", \"last_name\", \"email\")\n\n\nclass ProductsSerializer(serializers.ModelSerializer):\n productName = serializers.CharField(required=False)\n productCategory = serializers.CharField(required=False)\n productCount = serializers.IntegerField(required=False)\n\n class Meta:\n model = Products\n fields = (\"productName\", \"productCategory\", \"productCount\")\n" }, { "alpha_fraction": 0.5913978219032288, "alphanum_fraction": 0.5956989526748657, "avg_line_length": 24.88888931274414, "blob_id": "ac0679a5b837d8eca513059bc7d14de5c0906c6f", "content_id": "9bd6b3b6ed43c05b08f0839857b07463efc6ac23", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 465, "license_type": "no_license", 
"max_line_length": 91, "num_lines": 18, "path": "/static/js/signupform.js", "repo_name": "venkataprabhanjankumar/DjangoRestFrameoworkWithJWTtokens", "src_encoding": "UTF-8", "text": "function check(elem) {\n var password = document.getElementById('password');\n var flag = 1;\n if (elem.value.length > 0) {\n if (elem.value != password.value) {\n document.getElementById('alert').innerText = \"confirm password does not match\";\n\n } else {\n document.getElementById('alert').innerText = \"\";\n\n }\n } else {\n document.getElementById('alert').innerText = \"please enter confirm password\";\n\n }\n\n\n}" }, { "alpha_fraction": 0.517188310623169, "alphanum_fraction": 0.5366854667663574, "avg_line_length": 35.092594146728516, "blob_id": "045ad0085a0c0825ef99801891c9c48ba5ca4036", "content_id": "8a7b764ce53b691fae7260ae160c0eba5cf961fe", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1949, "license_type": "no_license", "max_line_length": 118, "num_lines": 54, "path": "/restapp/migrations/0001_initial.py", "repo_name": "venkataprabhanjankumar/DjangoRestFrameoworkWithJWTtokens", "src_encoding": "UTF-8", "text": "# Generated by Django 3.0 on 2021-06-06 09:03\n\nfrom django.db import migrations, models\nimport django.db.models.deletion\nimport mirage.fields\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='Products',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('productName', models.CharField(max_length=150)),\n ('productCategory', models.CharField(max_length=100)),\n ('productCount', models.IntegerField()),\n ],\n options={\n 'db_table': 'Products',\n },\n ),\n migrations.CreateModel(\n name='UserDetails',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('firstName', 
models.CharField(max_length=100)),\n ('lastName', models.CharField(max_length=100)),\n ('username', models.CharField(max_length=100)),\n ('password', mirage.fields.EncryptedCharField(max_length=100)),\n ('email', models.EmailField(max_length=150)),\n ('accountStatus', models.BooleanField(default=False)),\n ],\n options={\n 'db_table': 'UserDetails',\n },\n ),\n migrations.CreateModel(\n name='UserRoles',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('roleName', models.CharField(max_length=150)),\n ('userole', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='restapp.UserDetails')),\n ],\n options={\n 'db_table': 'UserRoles',\n },\n ),\n ]\n" }, { "alpha_fraction": 0.6655948758125305, "alphanum_fraction": 0.6848874688148499, "avg_line_length": 22.923076629638672, "blob_id": "f11afd0e3cebb56da9e52c2c1b1f1cdcfd56cbba", "content_id": "6de206ca0a89cd458aa8fba43d930c996639bdc4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 311, "license_type": "no_license", "max_line_length": 54, "num_lines": 13, "path": "/restapp/models.py", "repo_name": "venkataprabhanjankumar/DjangoRestFrameoworkWithJWTtokens", "src_encoding": "UTF-8", "text": "from django.db import models\n\n\nclass Products(models.Model):\n productName = models.CharField(max_length=150)\n productCategory = models.CharField(max_length=100)\n productCount = models.IntegerField()\n\n def __str__(self):\n return self.productName\n\n class Meta:\n db_table = \"Products\"\n" } ]
7
RobertCurry/VarroaPopLinux
https://github.com/RobertCurry/VarroaPopLinux
271f4336f8bc4616a8a0db15191a7302aab3bcaf
b462af5e8037654d68feec2c2d9eb31ecbae1c1e
588b063d6379d9ee7a74e402db8f8861f9fb1917
refs/heads/master
2023-01-14T21:35:54.698156
2020-11-23T20:28:41
2020-11-23T20:28:41
315,420,447
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.6304532289505005, "alphanum_fraction": 0.6354543566703796, "avg_line_length": 48.65631866455078, "blob_id": "2cd941e403814e55693afa70e5ccb92a4f87eb37", "content_id": "2ab117876dafc0e8aa2d3d1f0a2883671954c2f5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 22395, "license_type": "no_license", "max_line_length": 304, "num_lines": 451, "path": "/Linux/vpanalysis.py", "repo_name": "RobertCurry/VarroaPopLinux", "src_encoding": "UTF-8", "text": "# make global imports\nimport copy\nfrom enum import Flag, Enum, auto\nimport math\nimport os\nimport pandas as pd\nimport matplotlib \nimport matplotlib.pyplot as plt\nimport matplotlib.dates as mdates\nimport matplotlib.ticker as ticker\nimport matplotlib.cbook as cbook\n\n# Column names matching data from the VarroaPop output \ncolumn_names = [\"Date\", \"Colony Size\", \"Adult Drones\", \"Adult Workers\", \"Foragers\", \"Active Foragers\",\n \"Capped Drone Brood\", \"Capped Worker Brood\",\n \"Drone Larvae\", \"Worker Larvae\", \"Drone Eggs\", \"Worker Eggs\", \"Total Eggs\", \"DD\", \"L\", \"N\", \"P\", \"dd\",\n \"l\", \"n\", \"Free Mites\", \"Drone Brood Mites\",\n \"Worker Brood Mites\", \"Mites/Drone Cell\", \"Mites/Worker Cell\", \"Mites Dying\", \"Proportion Mites Dying\",\n \"Colony Pollen (g)\", \"Pollen Pesticide Concentration\", \"Colony Nectar\",\n \"Nectar Pesticide Concentration\",\n \"Dead Drone Larvae\", \"Dead Worker Larvae\", \"Dead Drone Adults\", \"Dead Worker Adults\", \"Dead Foragers\",\n \"Queen Strength\", \"Average Temperature (celsius)\", \"Rain\", \"Min Temp\", \"Max Temp\", \"Daylight hours\",\n \"Forage Inc\", \"Forage Day\"]\n\n# Columns to be printed on the X axis of graphs when needed\nbees = {\n 'columns':['Colony Size', 'Adult Workers', 'Adult Drones', 'Foragers'],\n 'palette':['steelblue', 'darkorange', 'green', 'red']\n}\nbees_extended = {\n 'columns':['Colony Size', 'Adult Workers', 'Adult Drones', 
'Foragers', 'Capped Drone Brood', 'Drone Larvae', 'Capped Worker Brood', 'Worker Larvae'],\n 'palette':['steelblue', 'darkorange', 'green', 'red', 'limegreen', 'palegreen', 'moccasin', 'oldlace']\n}\nbees_active_inactive_foragers = {\n 'columns':['Colony Size', 'Adult Workers', 'Adult Drones', 'Foragers', 'Inactive Foragers'],\n 'palette':['steelblue', 'darkorange', 'green', 'red', 'salmon']\n}\n\nyears = mdates.YearLocator() # every year\nmonths = mdates.MonthLocator() # every month\nyears_fmt = mdates.DateFormatter('%y')\n\nfont = {'family' : 'Arial',\n 'weight' : 'normal',\n 'size' : 20}\n\nmatplotlib.rc('font', **font)\n\n# Replace the Date column dates by timeseries panda compatible dates\ndef enhance_data(output):\n # drop the line number column\n output = output.drop(output.index[0])\n # build a datetime column to be able to restraint to a specific time period\n output[\"Date\"] = pd.to_datetime(output[\"Date\"], format='%m/%d/%Y')\n return output\n\n# Read data from file and process it to be ready for exploitation\ndef load_data(simulation_output):\n data = pd.read_table(simulation_output, delim_whitespace=True, header=None, names=column_names, skiprows=6)\n data = enhance_data(data)\n return data\n\ndef add_monthly_ticks(plot):\n global years, years_fmt, months\n plot.xaxis.set_major_formatter(years_fmt)\n plot.xaxis.set_minor_locator(months)\n\n\n\nclass BeesPlots:\n current_data = None\n updated_data = None\n current_plot = None\n updated_plot = None\n plot = None\n plots = None\n figure = None\n secondary_primary_data = None\n \n class Select(Flag):\n CURRENT = auto()\n UPDATED = auto()\n BOTH = CURRENT | UPDATED\n \n class Type(Flag):\n SINGLE = auto()\n LEFT = auto()\n TOP = auto()\n \n class Secondary(Enum):\n TEMPERATURES = auto(),\n FORAGE_INC = auto(),\n FORAGE_DAY = auto(),\n EGGS = auto()\n \n def __init__(self, p_type = Type.SINGLE, p_figsize = None):\n if (p_type & self.Type.SINGLE):\n if not p_figsize:\n p_figsize = (15, 7.5)\n self.figure, 
self.plot = plt.subplots(nrows=1, ncols=1, figsize=p_figsize)\n elif (p_type & self.Type.LEFT):\n if not p_figsize:\n p_figsize = (30, 7.5)\n self.figure, self.plots = plt.subplots(nrows=1, ncols=2, figsize=p_figsize)\n else:\n if not p_figsize:\n p_figsize = (15, 15)\n self.figure, self.plots = plt.subplots(nrows=2, ncols=1, figsize=p_figsize)\n self.twin_count = {}\n self.lines = {}\n self.labels = {}\n self.additional_lines = {}\n self.additional_labels = {}\n \n def __make_plots(self, current_title, current, updated_title, updated, columns):\n self.current_data = current\n self.updated_data = updated\n self.current_plot = self.current_data.plot(x='Date', y=columns['columns'], color=columns['palette'], legend=False, ax=self.plots[0])\n self.plots[0].set_title(current_title, fontdict={'fontsize': 14, 'fontweight': 'normal'})\n self.updated_plot = self.updated_data.plot(x='Date', y=columns['columns'], color=columns['palette'], legend=False, ax=self.plots[1])\n self.plots[1].set_title(updated_title, fontdict={'fontsize': 14, 'fontweight': 'normal'})\n self.twin_count[self.current_plot] = 0\n self.twin_count[self.updated_plot] = 0\n self.additional_lines[self.current_plot] = []\n self.additional_lines[self.updated_plot] = []\n self.additional_labels[self.current_plot] = []\n self.additional_labels[self.updated_plot] = []\n self.lines[self.current_plot], self.labels[self.current_plot] = self.current_plot.get_legend_handles_labels()\n self.lines[self.updated_plot], self.labels[self.updated_plot] = self.updated_plot.get_legend_handles_labels()\n \n def __make_plot(self, title, current, columns):\n self.current_data = current\n self.current_plot = self.current_data.plot(x='Date', y=columns['columns'], color=columns['palette'], legend=False, ax=self.plot)\n self.plot.set_title(title, fontdict={'fontsize': 14, 'fontweight': 'normal'})\n self.twin_count[self.current_plot] = 0\n self.additional_lines[self.current_plot] = []\n self.additional_labels[self.current_plot] = []\n 
self.lines[self.current_plot], self.labels[self.current_plot] = self.current_plot.get_legend_handles_labels()\n \n def add_primary_data(self, data, columns): \n self.secondary_primary_data = data\n self.secondary_primary_data.plot(x='Date', y=columns['columns'], color=columns['palette'], legend=False, linestyle='--', ax=self.current_plot) \n \n def limit(self, start_date, end_date):\n if self.current_plot:\n self.current_plot.set_xlim(pd.Timestamp(start_date), pd.Timestamp(end_date))\n if self.updated_plot:\n self.updated_plot.set_xlim(pd.Timestamp(start_date), pd.Timestamp(end_date))\n \n def limit_left(self, start, end):\n if self.plot:\n pass\n # self.plot.right_axis.set_xlim(start, end)\n elif len(self.plots)==2:\n self.plots[0].secondary_yaxis.set_xlim(start, end)\n self.plots[1].right_axis.set_xlim(start, end)\n \n def limit_y(self, bottom, top):\n if self.current_plot:\n self.current_plot.set_ylim(bottom, top)\n if self.updated_plot:\n self.updated_plot.set_ylim(bottom, top)\n \n def limit_and_relim(self, start_date, end_date):\n if self.current_plot:\n self.current_plot.set_xlim(pd.Timestamp(start_date), pd.Timestamp(end_date))\n self.current_plot.relim()\n if self.updated_plot:\n self.updated_plot.set_xlim(pd.Timestamp(start_date), pd.Timestamp(end_date))\n self.updated_plot.relim()\n \n def beautifuling(self):\n add_grid(self.plots[0])\n \n def __get_spacing(self, in_plot):\n spacing = .1\n return 1.0 + spacing * self.twin_count[in_plot]\n \n def __get_axis(self, in_plot):\n axis = in_plot.twinx()\n axis.spines['right'].set_position(('axes', self.__get_spacing(in_plot)))\n self.twin_count[in_plot]+=1\n return axis\n \n def __add_legend_data(self, in_plot, axis):\n # Proper legend position\n line, label = axis.get_legend_handles_labels()\n self.additional_lines[in_plot] += line\n self.additional_labels[in_plot] += label\n \n def add_secondary_data(self, kind, options):\n secondary_plot_selector = {\n self.Secondary.TEMPERATURES: self.__add_temperatures,\n 
self.Secondary.FORAGE_INC: self.__add_forage_inc,\n self.Secondary.FORAGE_DAY: self.__add_forage_day,\n self.Secondary.EGGS: self.__add_eggs\n }\n if kind in secondary_plot_selector.keys():\n self.__call_on_plot(secondary_plot_selector[kind], options)\n else:\n raise Exception(\"Unsupported kind: \" + kind)\n \n def __call_on_plot(self, func, options):\n which = self.Select.BOTH\n if hasattr(options, 'which'):\n which = option['options']\n if (self.current_plot and (which & self.Select.CURRENT)):\n func(self.current_data, self.current_plot, options)\n if (self.updated_plot and (which & self.Select.UPDATED)):\n func(self.updated_data, self.updated_plot, options)\n \n def __add_temperatures(self, in_data, in_plot, options):\n axis = self.__get_axis(in_plot)\n min_temp = in_data.plot(x='Date', y='Min Temp', color='lightgrey', legend=False, ax=axis)\n max_temp = in_data.plot(x='Date', y='Max Temp', color='darkgrey', legend=False, ax=axis)\n # Use weighted averages to reduce noise of min and max temperature data\n # output['Min Temp'] = output['Min Temp'].ewm(span=7, adjust=True).mean()\n # output['Max Temp'] = output['Max Temp'].ewm(span=7, adjust=True).mean()\n max_temp.axhline(12, color=\"lightgrey\", linestyle=\"--\")\n max_temp.axhline(43.3, color=\"lightgrey\", linestyle=\"--\")\n # Name axis\n axis.set_ylabel('Temperatures')\n self.__add_legend_data(in_plot, axis)\n \n def __add_rainfall(self, in_data, in_plot, options):\n axis = self.__get_axis(in_plot)\n rain = in_data.plot(x='Date', y='Rain', color='darkblue', legend=False, ax=axis)\n # Rain threshold 0.197 * 25.4 to mm\n rain.axhline(0.197*25.4, color=\"lightgrey\", linestyle=\"--\")\n # Name axis\n axis.set_ylabel('Rainfall')\n self.__add_legend_data(in_plot, axis)\n \n def __add_forage_day(self, in_data, in_plot, options):\n axis = self.__get_axis(in_plot)\n forage_day_plot = in_data.plot(x='Date', y='Forage Day', color='lightgray', legend=False, ax=axis)\n forage_day_plot.set_ylim(0.0, 1.1)\n # Name axis\n 
axis.set_ylabel('Forage Day')\n self.__add_legend_data(in_plot, axis)\n \n def __add_forage_inc(self, in_data, in_plot, options):\n axis = self.__get_axis(in_plot)\n forage_inc_plot = in_data.plot(x='Date', y='Forage Inc', color='darkgray', legend=False, ax=axis)\n forage_inc_plot.set_ylim(0.0, 1.1)\n # Name axis\n axis.set_ylabel('Forage Inc')\n # forage_inc_plot.set_ylim(0.0, 0.01)\n self.__add_legend_data(in_plot, axis)\n \n def __add_eggs(self, in_data, in_plot, options):\n axis = self.__get_axis(in_plot)\n eggs_plot = in_data.plot(x='Date', y='Total Eggs', color='purple', legend=False, ax=axis)\n if hasattr(options, 'limit'):\n eggs_plot.set_ylim(options.limit[0], options.limit[1])\n if hasattr(options, 'grid'):\n eggs_plot.grid(True, color='purple', linestyle='--')\n self.__add_legend_data(in_plot, axis)\n if self.secondary_primary_data is not None:\n self.secondary_primary_data.plot(x='Date', y='Total Eggs', color='purple', linestyle='--', legend=False, ax=eggs_plot)\n # Name axis\n axis.set_ylabel('Eggs')\n \n def __display_monthly_ticks(self, in_plot):\n plt.gca().xaxis.set_major_locator(mdates.AutoDateLocator())\n #in_plot.xaxis.set_major_locator(years)\n #in_plot.xaxis.set_major_formatter(years_fmt)\n in_plot.xaxis.set_minor_locator(months)\n in_plot.xaxis.set_minor_formatter(mdates.DateFormatter('%m\\n%Y'))\n \n def display_monthly_ticks(self, which=Select.BOTH):\n if (self.current_plot and (which & self.Select.CURRENT)):\n self.__display_monthly_ticks(self.current_plot)\n if (self.updated_plot and (which & self.Select.UPDATED)):\n self.__display_monthly_ticks(self.updated_plot)\n \n def __display_yearly_ticks(self, in_plot):\n in_plot.xaxis.set_major_locator(years)\n in_plot.xaxis.set_major_formatter(mdates.DateFormatter('%Y'))\n \n def display_yearly_ticks(self, which=Select.BOTH):\n self.__call_on_plot(self.__display_yearly_ticks, which)\n \n def __display_daily_ticks(self, in_plot):\n in_plot.xaxis.set_minor_locator(mdates.DayLocator())\n 
in_plot.xaxis.set_major_locator(mdates.MonthLocator())\n in_plot.xaxis.set_major_formatter(mdates.DateFormatter('%b'))\n \n def display_daily_ticks(self, which=Select.BOTH):\n self.__call_on_plot(self.__display_daily_ticks, which)\n \n def __display_y_1000_ticks(self, in_plot):\n in_plot.yaxis.set_minor_locator(LinearLocator(10))\n #in_plot.yaxis.set_major_locator(LinearLocator(10))\n \n def display_y_1000_ticks(self, which=Select.BOTH):\n self.__call_on_plot(self.__display_y_1000_limiticks, which)\n \n def add_grid(self):\n if self.plot:\n self.plot.grid(True)\n elif len(self.plots)==2:\n self.plots[0].grid(True)\n self.plots[1].grid(True)\n \n def __make_legend(self, in_data, in_plot, options): \n lines = copy.copy(self.lines[in_plot])\n labels = copy.copy(self.labels[in_plot])\n lines += self.additional_lines[in_plot]\n labels += self.additional_labels[in_plot]\n in_plot.legend(lines, labels, loc='best')\n \n def make_legend(self, which=Select.BOTH):\n options = {'which': which}\n self.__call_on_plot(self.__make_legend, options)\n \n def __move_legend(self, in_data, in_plot, options): \n lines = copy.copy(self.lines[in_plot])\n labels = copy.copy(self.labels[in_plot])\n lines += self.additional_lines[in_plot]\n labels += self.additional_labels[in_plot]\n in_plot.legend(lines, labels, loc='center right', bbox_to_anchor=[self.__get_spacing(in_plot)+0.4, 0.5])\n \n def move_legend(self, which=Select.BOTH):\n options = {'which': which}\n self.__call_on_plot(self.__move_legend, options)\n \n def make_plot(options):\n plot = None\n orientation = None\n # early exit for not providing data or columns\n if not options.data:\n raise ('Need to provide data')\n if not options.columns:\n raise ('Need to provide columns')\n # first instantiate plot object\n figsize = None\n if hasattr(options, 'figsize') and options.figsize.activated:\n figsize = options.figsize.params\n if isinstance(options.data, PlotsData):\n orientation = BeesPlots.Type.TOP\n if hasattr(options, 'layout') 
and options.layout.activated:\n orientation = options.layout.params\n plot = BeesPlots(orientation, figsize)\n options.data.first.load_if_needed()\n options.data.second.load_if_needed()\n plot.__make_plots(options.data.first.title, options.data.first.data, options.data.second.title, options.data.second.data, options.columns)\n elif isinstance(options.data, Data):\n orientation = BeesPlots.Type.SINGLE\n plot = BeesPlots(orientation, figsize)\n options.data.load_if_needed()\n plot.__make_plot(options.data.title, options.data.data, options.columns)\n else:\n raise('data option type is unsupported')\n # add another primary data on the same plot\n if hasattr(options, 'additional_primary_data') and options.additional_primary_data.activated:\n if orientation == BeesPlots.Type.SINGLE:\n options.additional_primary_data.params['data'].load_if_needed()\n plot.add_primary_data(options.additional_primary_data.params['data'].data, options.additional_primary_data.params['columns'])\n else:\n raise ('additional_primary_data can only be specified for single graph layout')\n # add secondary plots if needed\n if hasattr(options, 'eggs') and options.eggs.activated:\n plot.add_secondary_data(BeesPlots.Secondary.EGGS, options.eggs.params)\n if hasattr(options, 'forage_day') and options.forage_day.activated:\n plot.add_secondary_data(BeesPlots.Secondary.FORAGE_DAY, options.forage_day.params)\n if hasattr(options, 'forage_inc') and options.forage_inc.activated:\n plot.add_secondary_data(BeesPlots.Secondary.FORAGE_INC, options.forage_inc.params)\n if hasattr(options, 'temperatures') and options.temperatures.activated:\n plot.add_secondary_data(BeesPlots.Secondary.TEMPERATURES, options.temperatures.params)\n # process other options\n if hasattr(options, 'x_limit') and options.x_limit.activated:\n plot.limit(options.x_limit.params[0], options.x_limit.params[1])\n if hasattr(options, 'x_limit_left') and options.x_limit_left.activated:\n plot.limit_left(options.x_limit_left.params[0], 
options.x_limit_left.params[1])\n if hasattr(options, 'y_limit') and options.y_limit.activated:\n plot.limit_y(options.y_limit.params[0], options.y_limit.params[1])\n plot.make_legend()\n return plot\n \n# options to easy plots input data specifications\n\nclass Option:\n def __init__(self, activated: bool, params):\n self.activated = activated\n self.params = params\n \nclass Options:\n data = None\n pass\n\n# data class to load and prepare data for ploting\n\nclass Data:\n data = None\n \n def __init__(self, directory, filename, title):\n self.directory = directory\n self.filename = filename\n self.title = title \n \n def load_if_needed(self): \n if self.data is None:\n path_to_data = os.path.join(self.directory, self.filename)\n if not os.path.exists(path_to_data):\n raise Exception('No data at ' + path_to_data)\n self.data = load_data(path_to_data)\n \n\nclass PlotsData:\n def __init__(self, first, second):\n self.first = first\n self.second = second\n\n\nclass Custom:\n def __init__(self, **kwargs):\n for key, value in kwargs.items():\n self.__setattr__(key, value)\n \n \n# different files layout from output simulations\n\ndef get_step1_data(output_dir):\n data = {\n 'current' : Data(output_dir, 'Current.txt', 'Adults stop aging when larvae/brood is 0'),\n 'adults_aging_when_egg_laid' : Data(output_dir, 'AdultAgingWhenEggLaid.txt', 'Adults stop aging when queen stop laying eggs'),\n 'adults_aging_when_egg_laid_fixed_larvae_and_brood' : Data(output_dir, 'AdultAgingWhenEggLaidFixedLarvaeAndBrood.txt', 'Adults stop aging when queen stop laying eggs'),\n 'adults_aging_when_egg_laid_fixed_larvae_and_brood_fixed_adults' : Data(output_dir, 'AdultAgingWhenEggLaidFixedLarvaeAndBroodFixedAdults.txt', 'Adults stop aging when the queen stop laying eggs'),\n 'hourly_temp' : Data(output_dir, 'HourlyTemp.txt', 'Forage Inc computed using hourly temperatures'),\n 'hourly_temp_forage_day' : Data(output_dir, 'HourlyTempForageDayWindAndRain.txt', 'Forage Day is function of Rain 
and Wind only'),\n 'foragers_aging_with_forage_inc' : Data(output_dir, 'ForagersFirst.txt', 'Foragers age using Hourly Temperatures based Forage Inc'),\n 'foragers_and_adults_aging_with_forage_inc' : Data(output_dir, 'AdultAgingAsForagers.txt', 'Adults and Foragers age using Hourly Temperatures based Forage Inc'),\n 'foragers_aging_with_forage_inc_adults_aging_when_egg_laid': Data(output_dir, 'ForagersAgingAdultAgingWhenEggLaid.txt', 'Adults stop aging when the queen stop laying eggs & Foragers age using Hourly Temperatures based Forage Inc'),\n 'foragers_aging_with_forage_inc_adults_aging_when_egg_laid_fixed_larvae_and_brood': Data(output_dir, 'ForagersAgingAdultAgingWhenEggLaidFixedLarvaeAndBrood.txt', 'Adults stop aging when the queen stop laying eggs & Foragers age using Hourly Temperatures based Forage Inc'),\n 'foragers_aging_with_forage_inc_adults_aging_when_egg_laid_fixed_larvae_and_brood_fixed_adults': Data(output_dir, 'ForagersAgingAdultAgingWhenEggLaidFixedLarvaeAndBroodFixedAdults.txt', 'Adults stop aging when the queen stop laying eggs & Foragers age using Hourly Temperatures based Forage Inc')\n }\n return data\n\n\ndef get_data(output_dir):\n data = {\n 'current' : Data(output_dir, 'Current.txt', 'Adults stop aging when larvae/brood is 0'),\n 'foragers_aging_with_forage_inc' : Data(output_dir, 'ForagersAging.txt', 'Adults stop aging when larvae/brood is 0 & Foragers age using Hourly Temperatures based Forage Inc'),\n 'foragers_and_adults_aging_with_forage_inc' : Data(output_dir, 'AdultAgingAsForagers.txt', 'Adults and Foragers age using Hourly Temperatures based Forage Inc'),\n 'foragers_and_adults_aging_with_forage_inc_progressive_foragers' : Data(output_dir, 'AdultAgingAsForagersProgressiveForagers.txt', 'Adults and Foragers age using Hourly Temperatures based Forage Inc'),\n 'foragers_aging_with_forage_inc_adults_aging_when_egg_laid': Data(output_dir, 'ForagersAgingAdultAgingWhenEggLaid.txt', 'Adults stop aging when the queen stop laying eggs & 
Foragers age using Hourly Temperatures based Forage Inc'),\n 'foragers_aging_with_forage_inc_adults_aging_when_egg_laid_bug_larvae_and_brood': Data(output_dir, 'ForagersAgingAdultAgingWhenEggLaidLarvaeAndBroodIssue.txt', 'Adults stop aging when the queen stop laying eggs & Foragers age using Hourly Temperatures based Forage Inc'),\n 'foragers_aging_with_forage_inc_adults_aging_when_egg_laid_progressive_foragers': Data(output_dir, 'ForagersAgingAdultAgingWhenEggLaidProgressiveForagers.txt', 'Adults stop aging when the queen stop laying eggs & Foragers age using Hourly Temperatures based Forage Inc')\n }\n return data\n" }, { "alpha_fraction": 0.75, "alphanum_fraction": 0.75, "avg_line_length": 23, "blob_id": "db25d7841a0c6399ef944aff156f3d3a490e6f20", "content_id": "97c5f50cb096a1ae1e162b55f463acca098673c7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 24, "license_type": "no_license", "max_line_length": 23, "num_lines": 1, "path": "/Linux/portcode/ccmdtarget.cpp", "repo_name": "RobertCurry/VarroaPopLinux", "src_encoding": "UTF-8", "text": "#include \"ccmdtarget.h\"\n" }, { "alpha_fraction": 0.7272727489471436, "alphanum_fraction": 0.7272727489471436, "avg_line_length": 21, "blob_id": "1753a32579d143fde9f72e60b183f2070dcedf3e", "content_id": "ae80e3c50f36f59f72596c1179f557fcf0946672", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 22, "license_type": "no_license", "max_line_length": 21, "num_lines": 1, "path": "/Linux/tests/helpers/myobject.cpp", "repo_name": "RobertCurry/VarroaPopLinux", "src_encoding": "UTF-8", "text": "#include \"myobject.h\"\n" }, { "alpha_fraction": 0.6997206807136536, "alphanum_fraction": 0.7011173367500305, "avg_line_length": 15.651163101196289, "blob_id": "26365d4f64c45b0926cc014b6e40754cdcea5305", "content_id": "676f4ef6908ec3e1c16f812336607c1e89ef0f53", "detected_licenses": [], "is_generated": false, "is_vendor": false, 
"language": "C++", "length_bytes": 716, "license_type": "no_license", "max_line_length": 75, "num_lines": 43, "path": "/Linux/portcode/carray.h", "repo_name": "RobertCurry/VarroaPopLinux", "src_encoding": "UTF-8", "text": "#pragma once\n#ifndef CARRAY_CUSTOM_H\n#define CARRAY_CUSTOM_H\n\n#include \"cobject.h\"\n\n#include <vector>\n\n/**\n * Only supports the necessary interface for the good behavior of VarroaPop\n */\ntemplate<class TYPE, class ARG_TYPE = const TYPE&>\nclass CArray : public CObject\n{\npublic:\n\tBOOL IsEmpty() const;\n\n\tINT_PTR GetCount() const;\n\n\tvoid RemoveAll();\n\n\tvoid Copy(const CArray& array);\n\n\tvoid Add(const TYPE& element);\n\n\tconst TYPE& operator[](INT_PTR index) const;\n\t\n\tTYPE& operator[](INT_PTR index);\n\n\tvoid RemoveAt(INT_PTR index);\n\n\tvoid SetSize(INT_PTR size, INT_PTR growBy = -1);\n\n\tvoid Serialize(CArchive& ar);\n\t\nprotected:\n\n std::vector<TYPE> m_data;\n};\n\n#include \"carray.inline.h\"\n\n#endif // CARRAY_CUSTOM_H\n" }, { "alpha_fraction": 0.6466349363327026, "alphanum_fraction": 0.6491999626159668, "avg_line_length": 55.45517349243164, "blob_id": "6b23ad8f4b029b1135573bec15596dd6c75a66cd", "content_id": "0ddb7ceae17adffa1c683c1285a7f88775d33df6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8187, "license_type": "no_license", "max_line_length": 160, "num_lines": 145, "path": "/Linux/scripts/plots.py", "repo_name": "RobertCurry/VarroaPopLinux", "src_encoding": "UTF-8", "text": "import argparse\nimport os\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom string import Template\n\n\ncolumn_names = [\"Date\", \"Colony Size\",\"Adult Drones\",\"Adult Workers\",\"Foragers\", \"Active Foragers\", \"Capped Drone Brood\", \"Capped Worker Brood\",\n \"Drone Larvae\", \"Worker Larvae\", \"Drone Eggs\", \"Worker Eggs\", \"Total Eggs\", \"DD\", \"L\", \"N\", \"P\", \"dd\", \"l\", \"n\", \"Free Mites\", \"Drone Brood Mites\",\n \"Worker Brood 
Mites\", \"Mites/Drone Cell\", \"Mites/Worker Cell\", \"Mites Dying\", \"Proportion Mites Dying\",\n \"Colony Pollen (g)\", \"Pollen Pesticide Concentration\", \"Colony Nectar\", \"Nectar Pesticide Concentration\",\n \"Dead Drone Larvae\", \"Dead Worker Larvae\", \"Dead Drone Adults\", \"Dead Worker Adults\", \"Dead Foragers\",\n \"Queen Strength\", \"Average Temperature (celsius)\", \"Rain\", \"Min Temp\", \"Max Temp\", \"Daylight hours\", \"Forage Inc\"]\n\n# original verion of varroapop\n\ncolumn_names_original = [\"Date\", \"Colony Size\",\"Adult Drones\",\"Adult Workers\", \"Foragers\", \"Capped Drone Brood\", \"Capped Worker Brood\",\n \"Drone Larvae\", \"Worker Larvae\", \"Drone Eggs\", \"Worker Eggs\", \"Total Eggs\", \"DD\", \"L\", \"N\", \"P\", \"dd\", \"l\", \"n\", \"Free Mites\", \"Drone Brood Mites\",\n \"Worker Brood Mites\", \"Mites/Drone Cell\", \"Mites/Worker Cell\", \"Mites Dying\", \"Proportion Mites Dying\",\n \"Colony Pollen (g)\", \"Pollen Pesticide Concentration\", \"Colony Nectar\", \"Nectar Pesticide Concentration\",\n \"Dead Drone Larvae\", \"Dead Worker Larvae\", \"Dead Drone Adults\", \"Dead Worker Adults\", \"Dead Foragers\",\n \"Queen Strength\", \"Average Temperature (celsius)\", \"Rain\"]\n\nclass Plotter:\n\n display_temperature_data = False\n display_activity_ratio = False\n start_date = None\n end_date = None\n\n def do_plot(self, output_directory, output_filename):\n global column_names\n global column_names_original\n # override column_names if working with previous version of the VarroaPop application\n #column_names = column_names_original\n\n # read output file skipping the first 6 lines\n output = pd.read_table(output_filename, delim_whitespace=True, header=None, names=column_names, skiprows=6)\n # drop the line number column\n output = output.drop(output.index[0])\n # build a datetime column to be able to restraint to a specific time period\n date_time_series = pd.to_datetime(output[\"Date\"], format='%m/%d/%Y')\n 
output[\"Datetime\"] = date_time_series\n # remove the prefix from output file name to get the plot title\n # plot_title = os.path.splitext(output_file)[0][len(prefix)+1:]\n # if len(plot_title) == 0:\n # plot_title = 'current'\n plot_title = os.path.splitext(os.path.basename(output_filename))[0]\n # add Inactive Foragers column\n output['Inactive Foragers'] = output['Foragers'] - output['Active Foragers']\n #colunms = ['Colony Size', 'Adult Workers', 'Adult Drones', 'Foragers', 'Worker Larvae', 'Worker Eggs', 'Capped Worker Brood']\n colunms = ['Colony Size', 'Adult Workers', 'Adult Drones', 'Foragers']\n\n # original data lines\n # output['Inactive Foragers'] = output['Colony Size'] - output['Adult Drones'] - output['Adult Workers'] - output['Foragers']\n # colunms = ['Colony Size', 'Adult Workers', 'Adult Drones', 'Foragers', 'Inactive Foragers']\n\n plt.figure()\n\n bees_plot_pd = output.plot(x='Datetime', y=colunms, legend=True, figsize=(20, 10), title=plot_title)\n bees_plot_pd.set_ylabel('Bees')\n\n # Display Temperature Data\n\n if self.display_temperature_data and 'Min Temp' in output.keys():\n # Use weighted averages to reduce noise of min and max temperature data\n #output['Min Temp'] = output['Min Temp'].ewm(span=7, adjust=True).mean()\n min_temp_plot_pd = output.plot(x='Datetime', y='Min Temp', secondary_y=True, color='lightgrey', ax=bees_plot_pd)\n #output['Max Temp'] = output['Max Temp'].ewm(span=7, adjust=True).mean()\n max_temp_plot_pd = output.plot(x='Datetime', y='Max Temp', secondary_y=True, color='darkgrey', ax=bees_plot_pd)\n min_temp_plot_pd.axhline(12, color=\"lightgrey\", linestyle=\"--\")\n max_temp_plot_pd.axhline(43.3, color=\"lightgrey\", linestyle=\"--\")\n\n # Display Active Ration\n if self.display_activity_ratio:\n # output['Activity Ratio'] = output['Activity Ratio'].ewm(span=7, adjust=True).mean()\n ratio_plot = output.plot(x='Datetime', y='Forage Inc', secondary_y=True, color='darkgrey', ax=bees_plot_pd)\n 
ratio_plot.set_ylabel('Forage Inc')\n\n # The following line allow us to get a specific window from the data\n if self.start_date is not None and self.end_date is not None:\n bees_plot_pd.set_xlim(pd.Timestamp(self.start_date), pd.Timestamp(self.end_date))\n\n plt.savefig(os.path.join(output_directory, plot_title + '.svg'), format='svg')\n plt.close()\n\n def do_plots(self, directory, prefix):\n global column_names\n\n # gather valid output files\n output_files = []\n for entry in os.scandir(directory):\n if entry.name.startswith(prefix) and os.path.isfile(os.path.join(directory, entry.name)):\n output_files.append(entry.name)\n\n # make sure plots directory exists\n plots_directory = os.path.join(directory, prefix)\n if not os.path.exists(plots_directory):\n os.makedirs(plots_directory)\n\n for output_file in output_files:\n output_file_path = os.path.join(directory, output_file)\n self.do_plot(plots_directory, output_file_path)\n\n # Aggregate plots in an html file\n html_template = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'plots.html')\n with open(html_template) as html_template_file:\n html_template_content = Template(html_template_file.read())\n img_tag = \"<img src='\" + prefix + \"/{0}.svg'/>\"\n html_template_content = html_template_content.substitute(title=prefix, images='\\n'.join([\n img_tag.format(os.path.splitext(output_file)[0]) for output_file in output_files\n ]))\n html_target = os.path.join(directory, 'plots.html')\n with open(html_target, 'w') as html_target_file:\n html_target_file.write(html_template_content)\n\n\nif __name__ == '__main__':\n plotter = Plotter()\n\n parser = argparse.ArgumentParser(description='Generate plots for previously ran simulation.')\n parser.add_argument('--directory', type=str, help='directory where output files are located', required=True)\n group = parser.add_mutually_exclusive_group(required=True)\n group.add_argument('--prefix', type=str, help='prefix of the output files in the same simulation')\n 
group.add_argument('--simulation_output', type=str, help='if this option is specified a single plot is generated')\n optional_group = parser.add_mutually_exclusive_group()\n optional_group.add_argument('--display_temperature_data', action='store_true', default=False,\n help='Display Min / Max temperature data in output graph')\n optional_group.add_argument('--display_activity_ratio', action='store_true', default=False,\n help='Display Activity Ratio')\n date_group = parser.add_argument_group()\n date_group.add_argument('--start_date', default=None, help='Start date for output graphs using YYYY-MM-DD format')\n date_group.add_argument('--end_date', default=None, help='End date for output graphs using YYYY-MM-DD format')\n arguments = parser.parse_args()\n\n # move options values to Plotter class\n plotter.display_temperature_data = arguments.display_temperature_data\n plotter.display_activity_ratio = arguments.display_activity_ratio\n plotter.start_date = arguments.start_date\n plotter.end_date = arguments.end_date\n\n if not arguments.simulation_output:\n plotter.do_plots(arguments.directory, arguments.prefix)\n else:\n plotter.do_plot(arguments.directory, arguments.simulation_output)\n\n" }, { "alpha_fraction": 0.7337826490402222, "alphanum_fraction": 0.7375737428665161, "avg_line_length": 26.287355422973633, "blob_id": "97484097fde1aa54b7654edcf10399cda4c8ac74", "content_id": "7b59487124d8b08963075e0c0c22165dd8846c80", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 2374, "license_type": "no_license", "max_line_length": 91, "num_lines": 87, "path": "/ColdStorageSimulator.h", "repo_name": "RobertCurry/VarroaPopLinux", "src_encoding": "UTF-8", "text": "#pragma once\n\n#include <afx.h>\n\n#include <string>\n\nclass CEvent;\nclass CColony;\n\n// There are 2 ways to activate cold storage:\n// - based on an exterior criteria then call Activate / DeActivate to force cold storage\n// - specify start and end date so 
that cold storage is activated during this period\nclass CColdStorageSimulator : public CObject\n{\npublic:\n\tstatic double GetDefaultColdStorageTemperature()\n\t{\n\t\tconst double temperature = ((40.0 - 32.0) / 1.8);\n\t\treturn temperature;\n\t}\n\nprivate:\n\t// Enables cold storage \n\tbool m_Enabled = false;\n\t// start and end date for automatic cold storage\n\tCOleDateTime m_StartDate;\n\tCOleDateTime m_EndDate;\n\t// Cold storage is activated based on m_On state only\n\tbool m_On = false;\n\t// Temperature in cold storage\n\tdouble m_Temperature = GetDefaultColdStorageTemperature();\n\n\t// Optimization\n\tstd::string m_StartDateStr; // used for faster comparison between current and start date\n\tstd::string m_EndDateStr; // used for faster comparison between current and end date\n\n\t// State attributes\n\tbool m_IsActive = false;\n\tbool m_IsStarting = false;\n\tbool m_IsEnding = false;\n\npublic:\n\n\tstatic CColdStorageSimulator& Get()\n\t{\n\t\tstatic CColdStorageSimulator sInstance;\n\t\treturn sInstance;\n\t}\n\n\tvoid SetEnabled(bool enabled) { m_Enabled = enabled; }\n\tbool IsEnabled() const { return m_Enabled; }\n\n\tvoid Activate() { m_On = true; }\n\tvoid DeActivate() { m_On = false; }\n\tbool IsOn() const { return m_On; }\n\n\t// Returns true if the start and end date are not specified\n\tbool IsAutomatic() const { return m_StartDateStr.empty() && m_EndDateStr.empty(); }\n\n\tdouble GetTemp(CEvent& p_Event) const;\n\tdouble GetMaxTemp(CEvent& p_Event) const;\n\tdouble GetMinTemp(CEvent& p_Event) const;\n\tdouble GetForageInc(CEvent& p_Event) const;\n\tbool IsForageDay(CEvent& p_Event) const;\n\n\tvoid SetStartDate(const COleDateTime& startDate);\n\tvoid SetEndDate(const COleDateTime& endDate);\n\n\t// Updates the state of the cold storage, call once a simulation day\n\tvoid Update(CEvent& p_Event, CColony& queen);\n\n\t// Reset default values\n\tvoid Reset();\n\n\t// Returns true if cold storage is currently activated\n\tbool IsActive() 
const;\n\n\t// Returns true if cold storage is currently activated\n\tbool IsStarting() const;\n\n\t// Returns true if cold storage is currently activated\n\tbool IsEnding() const;\n\nprotected:\n\n\tbool IsColdStoragePeriod(CEvent& p_Event) const;\n};\n" }, { "alpha_fraction": 0.6777777671813965, "alphanum_fraction": 0.6971631050109863, "avg_line_length": 26.828947067260742, "blob_id": "9d5d7e7ee61a51fea520dd924b5cf0835c0a6abb", "content_id": "902a321989c21eba9f270a779595b9abcf4cc0ad", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 4230, "license_type": "no_license", "max_line_length": 149, "num_lines": 152, "path": "/WeatherGridData.h", "repo_name": "RobertCurry/VarroaPopLinux", "src_encoding": "UTF-8", "text": "#pragma once\n\n#include <array>\n#include <cstdint>\n#include <string>\n#include <variant>\n#include <vector>\n#include <limits>\n\nclass CEvent;\n\nstruct ObservedHistoricalItem\n{\n std::uint16_t PPT;\n std::int16_t TMAX;\n std::int16_t TMIN;\n std::int16_t WIND;\n std::int16_t SPH;\n std::int16_t SRAD;\n std::int16_t RMAX;\n std::int16_t RMIN;\n};\n\nstruct ModeledHistoricalItem\n{\n std::uint16_t PPT;\n std::int16_t TMAX;\n std::int16_t TMIN;\n std::int16_t WIND;\n};\n\nstruct Rcp85\n{\n std::uint16_t PPT;\n std::int16_t TMAX;\n std::int16_t TMIN;\n std::int16_t WIND;\n};\n\n// Rcp85 and Rcp45 format have the same binary layout\ntypedef Rcp85 Rcp45;\n\n// Accessor to use to create proprietory data structure\ntemplate<typename GridDataType>\nstruct DataItemAccessor\n{\n const GridDataType& m_dataItem;\n\n DataItemAccessor(const GridDataType& dataItem) : m_dataItem(dataItem) {}\n\n // Precipitations in mm\n double PPT()\n {\n const static double pptMultiplier = 1.0 / 40.0;\n return m_dataItem.PPT * pptMultiplier;\n }\n // Max temperature in Deg C\n double TMAX()\n {\n const static double tMultiplier = 1.0 / 100.0;\n return m_dataItem.TMAX * tMultiplier;\n }\n // Min temperature in Deg C\n double 
TMIN()\n {\n const static double tMultiplier = 1.0 / 100.0;\n return m_dataItem.TMIN * tMultiplier;\n }\n // Wind in m/s\n double WIND()\n {\n const static double wMultiplier = 1.0 / 100.0;\n return m_dataItem.WIND * wMultiplier;\n }\n};\n\ntemplate<typename GridDataType>\nclass WeatherGridData\n{\npublic:\n typedef std::vector<GridDataType> Data;\n\n WeatherGridData();\n\n void load(const std::string& filename);\n\n const Data& data() const;\n\n COleDateTime getStartTime() const;\n COleDateTime getEndTime() const;\n\nprotected:\n\n\tData m_data;\n};\n\nnamespace WeatherGridDataNs\n{\n\ttemplate<typename GridDataType>\n\tWeatherGridData<GridDataType> LoadGridData(const std::string & filename);\n\n\tstruct DayLengthResult\n\t{\n\t\tfloat sunrise = -99.0;\n\t\tfloat sunset = -99.0;\n\t\tfloat daylength = 0.0;\n\t};\n\n\t// Computes the sunrise, sunset and daylength values for the given latitude and JDay of the year\n\tDayLengthResult DayLength(float latitude, int jDay);\n\n // Computes the day light hours\n double DayLightHours(float latitude, int JDay);\n\n\t// Computes the JDay given the date\n\tint ComputeJDay(const COleDateTime& date);\n\n\t// Computes the daylight hours (daylength) \n\t// @param filename: needs to be under the form \"prefix_latitude_suffix\" where prefix and suffix can be anything but should not contain character '_'\n\t// @param date: used to compute the JDay\n\tdouble ComputeDaylightHours(const std::string& filename, const COleDateTime& date);\n\n\t// This method generates an std::exception is no latitude can be found in the filename\n\t// @param filename: needs to be under the form \"prefix_latitude_suffix\" where prefix and suffix can be anything but should not contain character '_'\n\tdouble GetLatitudeFromFilename(const std::string& filename);\n\n\t// Set all parameters to compute the estimated hourly temperatures\n\t// You can ommit:\n\t// - prev_* properties if computing first day from the temperature file\n\t// - next_* properties if 
computing last day from the temperature file\n\tstruct HourlyTempraturesEstimator\n\t{\n\t\tdouble tmin = (std::numeric_limits<double>::max)();\n\t\tdouble tmax = (std::numeric_limits<double>::max)();\n\t\tdouble sunrise = (std::numeric_limits<double>::max)();\n\t\tdouble sunset = (std::numeric_limits<double>::max)();\n\t\tdouble daylength = (std::numeric_limits<double>::max)();\n\t\tdouble prev_tmin = (std::numeric_limits<double>::max)();\n\t\tdouble prev_tmax = (std::numeric_limits<double>::max)();\n\t\tdouble prev_sunset = (std::numeric_limits<double>::max)();\n\t\tdouble next_tmin = (std::numeric_limits<double>::max)();\n\t\tdouble next_sunrise = (std::numeric_limits<double>::max)();\n\n\t\tstd::array<float, 24> hourly_temperatures;\n\n\t\t// compute hourly temperature estimations \n\t\tvoid compute();\n\n\t\t// count daylight hours when temperatures are between temperatureMinThreshold and temperatureMaxThreshold\n\t\tint count_dayligth(double temperatureMinThreshold = 12.0, double temperatureMaxThreshold = 43.33);\n\t};\n}\n" }, { "alpha_fraction": 0.660139262676239, "alphanum_fraction": 0.6962575912475586, "avg_line_length": 32.30434799194336, "blob_id": "5e268038c4fe9b7f70cea3cdc508745cd8ee0c8f", "content_id": "1b771c3a2351c52095bbd9a92e09b18dedc92b94", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 2298, "license_type": "no_license", "max_line_length": 99, "num_lines": 69, "path": "/Queen.h", "repo_name": "RobertCurry/VarroaPopLinux", "src_encoding": "UTF-8", "text": "// Queen.h: interface for the CQueen class.\n//\n//////////////////////////////////////////////////////////////////////\n\n#if !defined(AFX_QUEEN_H__8C6C41B8_7899_11D2_8D9A_0020AF233A70__INCLUDED_)\n#define AFX_QUEEN_H__8C6C41B8_7899_11D2_8D9A_0020AF233A70__INCLUDED_\n\n#if _MSC_VER >= 1000\n#pragma once\n#endif // _MSC_VER >= 1000\n\n#include \"EGG.H\"\n\nclass CQueen : public CBee {\n private:\n\tdouble m_CurrentSperm;\n\tdouble 
m_InitialSperm;\n\tdouble m_Strength;\n\tint m_Weggs;\n\tint m_Deggs;\n\tint m_Teggs; // Maintains total number of eggs laid on a day\n\tdouble m_DD, m_L, m_N, m_P, m_dd, m_l, m_n; // Variables needed to calculate 'E' \n\tdouble m_MaxEggs;\n\tint m_CurQueenDay_1; // The simulation day # on which the current queen started\n\tint m_EggLayingDelay;\n\tint CumWeggs; // Test Only\n\n\tint m_Vars[5][2]; // Declares the array of MaxEggs and InitialSperm based on 1..5 queen strengths\n\n\n\n public:\n\tdouble GetPropDroneEggs();\n\tCQueen();\n\tCQueen(CQueen* oldQueen);\n\tvoid Serialize(CArchive &ar);\n\tvoid LayEggs(int LayDays, double DegreeDays, double DaylightHours, int NumForagers, \n\t\tdouble LarvPerBee);\n\tCEgg* GetWeggs();\n\tCEgg* GetDeggs();\n\tint GetTeggs() const { return m_Teggs; }\n\tdouble GetDD() { return m_DD; }\n\tdouble GetL() { return m_L; }\n\tdouble GetN() { return m_N; }\n\tdouble GetP() { return m_P; }\n\tdouble Getdd() { return m_dd; }\n\tdouble Getl() { return m_l; }\n\tdouble Getn() { return m_n; }\n\tvoid SetInitialSperm(double sperm) {m_InitialSperm = sperm;}\n\tvoid SetCurrentSperm(double sperm) {m_CurrentSperm = sperm;}\n\tdouble GetInitialSperm() {return m_InitialSperm;}\n\tdouble GetCurrentSperm() {return m_CurrentSperm;}\n\tvoid SetStrength(double Strengh);\n\tdouble GetQueenStrength() {return m_Strength;}\n\tvoid SetMaxEggs(double max) {m_MaxEggs = max;}\n\tdouble GetMaxEggs() {return m_MaxEggs;}\n\tvoid SetEggLayingDelay(int Delay) {m_EggLayingDelay = Delay;}\n\tvoid SetDayOne(int DayNum) {m_CurQueenDay_1 = DayNum;}\n\tvoid ReQueen(int EggLayingDelay, double QueenStrength, int SimDayNum);\n\tvirtual ~CQueen();\n\n\tCQueen& operator=(const CQueen& theQueen);\n\n\t// Return the value of L which is the DaylightHours based component\n\t// to the computation of laid eggs.\n\tdouble ComputeL(const double& DaylightHours) const;\n};\n\n#endif // !defined(AFX_QUEEN_H__8C6C41B8_7899_11D2_8D9A_0020AF233A70__INCLUDED_)\n" }, { "alpha_fraction": 
0.704781711101532, "alphanum_fraction": 0.7089397311210632, "avg_line_length": 19.913043975830078, "blob_id": "7ecf0fc5a89be573fb529caea1c81368c1064ad5", "content_id": "8b650166fac231cdab07fda00edb78cca3e6adb2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1443, "license_type": "no_license", "max_line_length": 75, "num_lines": 69, "path": "/Linux/portcode/carchive.h", "repo_name": "RobertCurry/VarroaPopLinux", "src_encoding": "UTF-8", "text": "#pragma once\n#ifndef CARCHIVE_CUSTOM_H\n#define CARCHIVE_CUSTOM_H\n\n#include <cstdint>\n#include <istream>\n\nclass CFile;\nclass COleDateTime;\nclass CString;\nclass CTime;\n\n/**\n * Only supports the necessary interface for the good behavior of VarroaPop\n */\nclass CArchive\n{\npublic:\n\n\tenum Mode { store = 0, load = 1, bNoFlushOnDelete = 2, bNoByteSwap = 4 };\n\n\tCArchive(CFile* pFile, UINT nMode);\n\t~CArchive();\n\n\tbool IsLoading() const;\n\tbool IsStoring() const;\n\n // Storing\n\n\ttemplate<typename Type>\n\tvoid operator<<(const Type& value)\n\t{\n\t\tmStream.write((char*)&value, sizeof(value));\n\t}\n\n // Loading\n\n\ttemplate<typename Type>\n\tvoid operator>>(Type& value)\n\t{\n\t\tmStream.read((char*)&value, sizeof(value));\n\t}\n\t\n\t// special functions for reading and writing (16-bit compatible) counts\n\tDWORD_PTR ReadCount();\n\tvoid WriteCount(DWORD_PTR dwCount);\n\t\n\t// raw methods to read / write blob of data\n\tUINT Read(void* lpBuf, UINT nMax);\n\tvoid Write(const void* lpBuf, UINT nMax);\n\nprotected:\n\n\tUINT mModeMask = load;\n\tstd::iostream& mStream;\n};\n\n// Specializations\n\ntemplate <> void CArchive::operator<<(const CString &value);\ntemplate <> void CArchive::operator>>(CString &value);\n\ntemplate <> void CArchive::operator<<(const COleDateTime &value);\ntemplate <> void CArchive::operator>>(COleDateTime &value);\n\ntemplate <> void CArchive::operator<<(const CTime &value);\ntemplate <> void CArchive::operator>>(CTime 
&value);\n\n#endif // CARCHIVE_CUSTOM_H\n" }, { "alpha_fraction": 0.6116759181022644, "alphanum_fraction": 0.6311707496643066, "avg_line_length": 26.419540405273438, "blob_id": "a08873b9375a48535759f25259b240e873c678d7", "content_id": "0930cfffc31a5dc98c73bfd795f392997e9db6f4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 9541, "license_type": "no_license", "max_line_length": 127, "num_lines": 348, "path": "/Linux/portcode/coledatetime.cpp", "repo_name": "RobertCurry/VarroaPopLinux", "src_encoding": "UTF-8", "text": "#include \"coledatetime.h\"\n\n#include <ctime>\n#include <iomanip>\n#include <sstream>\n\n/**\n * COleDateTime\n */\n\n/*static*/ COleDateTime COleDateTime::GetTickCount()\n{\n COleDateTime time (std::chrono::system_clock::now());\n return time;\n}\n\nCOleDateTime::COleDateTime(DATE dateSrc)\n{\n double daysShift;\n double dayShift = std::modf(dateSrc, &daysShift);\n std::chrono::duration<double, std::ratio<24*3600>> spanInDays (daysShift);\n std::chrono::duration<double, std::ratio<24*3600>> spanInDay (std::abs(dayShift));\n COleDateTimeSpan delta (std::chrono::duration_cast<std::chrono::seconds>(spanInDays+spanInDay));\n COleDateTime origin(1899, 12, 30, 0, 0, 0);\n COleDateTime date = origin + delta;\n if (date.GetStatus() == valid)\n {\n *this = date;\n }\n else\n {\n m_status = error;\n }\n}\n\nCOleDateTime::COleDateTime()\n: COleDateTime(1899, 12, 30, 0, 0, 0)\n{\n}\n\nCOleDateTime::COleDateTime(int32_t nYear,\n int32_t nMonth,\n int32_t nDay,\n int32_t nHour,\n int32_t nMin,\n int32_t nSec)\n{\n const char* dateTimeFormat = \"%Y-%m-%d %H:%M:%S\";\n std::string dateTime = fmt::format(\"{:0>4}-{:0>2}-{:0>2} {:0>2}:{:0>2}:{:0>2}\", nYear, nMonth, nDay, nHour, nMin, nSec);\n std::istringstream stream (dateTime);\n\tstd::tm dt = {0};\n dt.tm_isdst = -1; // needs to be set to unspecified otherwise random value is set\n stream >> std::get_time(&dt, dateTimeFormat);\n if (!stream.fail())\n {\n 
m_time_point = std::chrono::system_clock::from_time_t(std::mktime(&dt));\n m_status = valid;\n }\n else\n {\n m_status = error;\n }\n}\n\nCOleDateTime::COleDateTime(const std::chrono::system_clock::time_point& timePoint)\n: m_time_point(timePoint)\n, m_status(valid)\n{\n}\n\nint32_t COleDateTime::GetYear() const\n{\n SYSTEMTIME st = {0};\n return GetAsSystemTime(st)? st.wYear : error;\n}\n\nint32_t COleDateTime::GetMonth() const\n{\n SYSTEMTIME st = {0};\n return GetAsSystemTime(st)? st.wMonth : error;\n}\n\nint32_t COleDateTime::GetDay() const\n{\n SYSTEMTIME st = {0};\n return GetAsSystemTime(st)? st.wDay : error;\n}\n\nint32_t COleDateTime::GetHour() const\n{\n SYSTEMTIME st = {0};\n return GetAsSystemTime(st)? st.wHour : error;\n}\n\nint32_t COleDateTime::GetMinute() const\n{\n SYSTEMTIME st = {0};\n return GetAsSystemTime(st)? st.wMinute : error;\n}\n\nint32_t COleDateTime::GetDayOfYear() const\n{\n UDATE date = {0};\n return GetAsUDATE(date)? date.wDayOfYear : error;\n}\n\nCOleDateTime::DateTimeStatus COleDateTime::GetStatus() const\n{\n return m_status;\n}\n\nbool COleDateTime::operator < (const COleDateTime& other) const\n{\n return m_time_point < other.m_time_point;\n}\n\nbool COleDateTime::operator > (const COleDateTime& other) const\n{\n return m_time_point > other.m_time_point;\n}\n\nbool COleDateTime::operator >= (const COleDateTime& other) const\n{\n return m_time_point >= other.m_time_point;\n}\n\nbool COleDateTime::operator <= (const COleDateTime& other) const\n{\n return m_time_point <= other.m_time_point;\n}\n\nCString COleDateTime::Format(const char* format) const\n{\n CString string;\n std::time_t l_time = std::chrono::system_clock::to_time_t(m_time_point);\n auto tm = std::localtime(&l_time);\n if (tm != nullptr)\n {\n std::stringstream ss;\n ss << std::put_time(tm, format);\n string = ss.str();\n }\n return string;\n}\n\nbool COleDateTime::ParseDateTime(const CString& dateTimeStr, DWORD dwFlags)\n{\n std::istringstream 
stream(dateTimeStr.ToString());\n std::tm dt = {0};\n\n dt.tm_isdst = -1; // needs to be set to unspecified otherwise random value is set\n\n // Handles the 3 supported formats if the dwFlags is not specified\n const std::map<size_t, std::string> supportedDateTimeFormatsLengths = {\n {strlen(\"00/00/0000 00:00:00\"), \"%m/%d/%Y %H:%M:%S\"},\n {strlen(\"00/00/0000\"), \"%m/%d/%Y\"},\n {strlen(\"00:00:00\"), \"%H:%M:%S\"}\n };\n\n // Status will be set to error if stream isn't valid after get_time or the dateTimeStr is not in the right format\n m_status = valid;\n \n // Paser \n if (dwFlags == VAR_DATEVALUEONLY)\n {\n // let's try to parse only a date\n const char* dateFormat = \"%m/%d/%Y\";\n stream >> std::get_time(&dt, dateFormat);\n }\n else if (dwFlags == VAR_TIMEVALUEONLY)\n {\n // let's try to parse only a time\n const char* timeFormat = \"%H:%M:%S\";\n stream >> std::get_time(&dt, timeFormat);\n }\n else \n {\n auto dtIndex = supportedDateTimeFormatsLengths.find(dateTimeStr.GetLength());\n if (dtIndex != supportedDateTimeFormatsLengths.end())\n {\n // let's try to parse using the expected length of the given input\n auto& dateTimeFormat = dtIndex->second;\n stream >> std::get_time(&dt, dateTimeFormat.c_str());\n }\n else\n {\n m_status = error;\n }\n }\n\n // Convert the given date to a time_point \n if (!stream.fail())\n {\n m_time_point = std::chrono::system_clock::from_time_t(std::mktime(&dt));\n }\n else\n {\n m_status = error;\n }\n return m_status == valid;\n}\n\nint COleDateTime::SetDate(int32_t year, int32_t month, int32_t day)\n{\n const char* dateFormat = \"%Y-%m-%d\";\n std::string dateStr = fmt::format(\"{:0>4}-{:0>2}-{:0>2}\", year, month, day);\n std::istringstream stream (dateStr);\n std::tm dt = {0};\n dt.tm_isdst = -1; // needs to be set to unspecified otherwise random value is set\n stream >> std::get_time(&dt, dateFormat);\n if (!stream.fail())\n {\n m_time_point = std::chrono::system_clock::from_time_t(std::mktime(&dt));\n m_status = 
valid;\n }\n else\n {\n m_status = error;\n }\n return !(m_status == valid);\n}\n\nbool COleDateTime::GetAsSystemTime(SYSTEMTIME& time) const\n{\n std::time_t l_time = std::chrono::system_clock::to_time_t(m_time_point);\n auto tm = std::localtime(&l_time);\n bool success = tm != nullptr;\n if(success)\n {\n time.wYear = tm->tm_year + 1900;\n time.wMonth = tm->tm_mon + 1;\n time.wDayOfWeek = tm->tm_wday;\n time.wDay = tm->tm_mday;\n time.wHour = tm->tm_hour;\n time.wMinute = tm->tm_min;\n time.wSecond = tm->tm_sec;\n auto tp_no_milliseconds = std::chrono::system_clock::from_time_t(l_time);\n time.wMilliseconds = std::chrono::duration_cast<std::chrono::milliseconds>(m_time_point-tp_no_milliseconds).count();\n }\n return success;\n}\n\nbool COleDateTime::GetAsUDATE(UDATE& date) const\n{\n std::time_t l_time = std::chrono::system_clock::to_time_t(m_time_point);\n auto tm = std::localtime(&l_time);\n bool success = tm != nullptr;\n if(success)\n {\n date.st.wYear = tm->tm_year + 1900;\n date.st.wMonth = tm->tm_mon + 1;\n date.st.wDayOfWeek = tm->tm_wday;\n date.wDayOfYear = tm->tm_yday + 1;\n date.st.wDay = tm->tm_mday;\n date.st.wHour = tm->tm_hour;\n date.st.wMinute = tm->tm_min;\n date.st.wSecond = tm->tm_sec;\n auto tp_no_milliseconds = std::chrono::system_clock::from_time_t(l_time);\n date.st.wMilliseconds = std::chrono::duration_cast<std::chrono::milliseconds>(m_time_point-tp_no_milliseconds).count();\n }\n return success;\n}\n\nbool COleDateTime::GetAsDATE(DATE& date) const\n{\n COleDateTime origin(1899, 12, 30, 0, 0, 0);\n COleDateTimeSpan timeSpan = *this - origin;\n date = std::chrono::duration_cast<std::chrono::duration<double, std::ratio<24*3600>>>(timeSpan.m_span).count();\n if (date < 0.0)\n {\n double daysShift;\n double dayShift = std::modf(date, &daysShift);\n date = (daysShift<0.0)? 
daysShift:-1.0 - (1.0 + dayShift);\n }\n return true;\n}\n\nCOleDateTime COleDateTime::operator+(const COleDateTimeSpan& span) const\n{\n auto time_point = m_time_point + span.m_span;\n return COleDateTime(time_point);\n}\n\nCOleDateTime COleDateTime::operator-(const COleDateTimeSpan& span) const\n{\n auto time_point = m_time_point - span.m_span;\n return COleDateTime(time_point);\n}\n\nCOleDateTime& COleDateTime::operator+=(const COleDateTimeSpan& span)\n{\n m_time_point += span.m_span;\n return *this;\n}\n\nCOleDateTime& COleDateTime::operator-=(const COleDateTimeSpan& span)\n{\n m_time_point -= span.m_span;\n return *this;\n}\n\nCOleDateTimeSpan COleDateTime::operator-(const COleDateTime& date) const\n{\n auto time_diff = m_time_point - date.m_time_point; \n return COleDateTimeSpan(std::chrono::duration_cast<std::chrono::seconds>(time_diff));\n}\n\n/**\n * COleDateTimeSpan\n */\n\nCOleDateTimeSpan::COleDateTimeSpan()\n: m_span(0)\n{\n}\n\nCOleDateTimeSpan::COleDateTimeSpan(double dblSpanSrc)\n{\n std::chrono::duration<double, std::ratio<24*3600>> spanInDays (dblSpanSrc);\n m_span = std::chrono::duration_cast<std::chrono::seconds>(spanInDays);\n}\n\nCOleDateTimeSpan::COleDateTimeSpan(size_t lDays,\n int32_t nHours,\n int32_t nMins,\n int32_t nSecs)\n{\n std::chrono::duration<int, std::ratio<24*3600>> spanInDays (lDays);\n std::chrono::hours spanInHours (nHours);\n std::chrono::minutes spanInMin (nMins);\n std::chrono::seconds spanInSecs (lDays);\n m_span = spanInDays+spanInHours+spanInMin+spanInSecs;\n}\n\nCOleDateTimeSpan::COleDateTimeSpan(const std::chrono::seconds& span)\n: m_span(span)\n{\n}\n\nint32_t COleDateTimeSpan::GetDays()\n{\n return std::chrono::duration_cast<std::chrono::duration<int, std::ratio<24*3600>>>(m_span).count();\n}\n\nbool COleDateTimeSpan::operator!=(const COleDateTimeSpan& other) const\n{\n return m_span != other.m_span;\n}" }, { "alpha_fraction": 0.7244541645050049, "alphanum_fraction": 0.7244541645050049, "avg_line_length": 
23.105262756347656, "blob_id": "f03978d1b81709170f190f25cb7f116800359057", "content_id": "4fd725d290c226bfceaf1c873b51c626fc0ce6d7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 2290, "license_type": "no_license", "max_line_length": 84, "num_lines": 95, "path": "/Linux/portcode/coblist.h", "repo_name": "RobertCurry/VarroaPopLinux", "src_encoding": "UTF-8", "text": "#pragma once\n#ifndef COBLIST_CUSTOM_H\n#define COBLIST_CUSTOM_H\n\n#include \"cobject.h\"\n#include \"cstring.h\"\n\n#include \"stdafx.h\"\n\n#include <cstddef>\n#include <list>\n\n/**\n * TODO: Improve the way to replace the behavior the of MFC POSITION structure since\n * this implementation is not efficient (lots of allocations)\n */\nnamespace CObListNs { struct InnerPosition : public __POSITION {\n\tInnerPosition(const std::list<CObject*>::const_iterator& it) : m_it(it) {}\n\tInnerPosition* copy() { return new InnerPosition(m_it); }\n std::list<CObject*>::const_iterator m_it;\n}; }\n\n/**\n * Only supports the necessary interface for the good behavior of VarroaPop\n */\nclass CObList : public CObject\n{\npublic:\n\tCObList();\n\tvirtual ~CObList();\n\n\tINT_PTR GetCount() const;\n\tBOOL IsEmpty() const;\n\n\t// In the MFC framework we use the index to go through the linked list and return\n\t// the actual node wrapped in the POSITION structure\n\tPOSITION FindIndex(INT_PTR index) const;\n\n\tCObject* GetAt(POSITION position) const;\n\tCObject* GetHead() const;\n\tCObject* GetTail() const;\n\tCObject* GetPrev(POSITION& position) const;\n\tCObject* GetNext(POSITION& position) const;\n\n\tPOSITION GetHeadPosition() const;\n\tPOSITION GetTailPosition() const;\n\n\tPOSITION AddHead (CObject* object);\n\tPOSITION AddTail (CObject* object);\n\tvoid RemoveAt(POSITION position);\n\tCObject* RemoveHead();\n\tCObject* RemoveTail();\n\tvoid RemoveAll();\n\nprotected:\n\n std::list<CObject*> m_data;\n};\n\n/**\n * TODO: Improve the way to replace the 
behavior the of MFC POSITION structure since\n * this implementation is not efficient (lots of allocations)\n */\nnamespace CStringListNs { struct InnerPosition : public __POSITION {\n\tInnerPosition(const std::list<CString>::const_iterator& it) : m_it(it) {}\n\tInnerPosition* copy() { return new InnerPosition(m_it); }\n std::list<CString>::const_iterator m_it;\n}; }\n\n/**\n * Only supports the necessary interface for the good behavior of VarroaPop\n */\nclass CStringList : public CObject\n{\npublic:\n\tCStringList();\n\t~CStringList();\n\n\tINT_PTR GetCount() const;\n\tBOOL IsEmpty() const;\n\n\tconst CString& GetNext(POSITION& position) const;\n\n\tPOSITION GetHeadPosition() const;\n\t\n\tvoid AddTail(const CString& string);\n\t\n\tvoid RemoveAll();\n\nprotected:\n\n std::list<CString> m_data;\n};\n\n#endif // COBLIST_CUSTOM_H\n" }, { "alpha_fraction": 0.561430037021637, "alphanum_fraction": 0.5798086524009705, "avg_line_length": 18.47549057006836, "blob_id": "f43bc3ba08443d1f13cf4508b640a8c5c8db8a52", "content_id": "59edc789389d77cf95e090995348f5e98c955dbc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 3972, "license_type": "no_license", "max_line_length": 79, "num_lines": 204, "path": "/Linux/portcode/carchive.cpp", "repo_name": "RobertCurry/VarroaPopLinux", "src_encoding": "UTF-8", "text": "#include \"carchive.h\"\n\n#include \"stdafx.h\"\n\nCArchive::CArchive(CFile* pFile, UINT nMode)\n: mStream(pFile->GetStream())\n, mModeMask(nMode)\n{\n \n}\n\nCArchive::~CArchive()\n{\n\n}\n \nbool CArchive::IsLoading() const\n{\n return (mModeMask & load) != 0;\n}\n\nbool CArchive::IsStoring() const\n{\n return (mModeMask & store) != 0;;\n}\n\ntemplate<>\nvoid CArchive::operator<<(const COleDateTime& date)\n{\n // CArchive& AFXAPI operator<<(CArchive& ar, COleDateTime dateSrc)\n // {\n // \tar << (long)dateSrc.m_status;\n // \treturn ar << dateSrc.m_dt;\n // }\n int32_t status = 
static_cast<int32_t>(date.GetStatus());\n DATE dt;\n date.GetAsDATE(dt);\n\n mStream.write((const char*)&status, sizeof(int32_t));\n mStream.write((const char*)&dt, sizeof(DATE));\n}\n\n// The following implementation is coming from arccore.cpp\nvoid AfxWriteStringLength(CArchive& ar, UINT_PTR nLength)\n{\n if (nLength < 255)\n {\n ar<<(BYTE)nLength;\n }\n else if (nLength < 0xfffe)\n {\n ar<<(BYTE)0xff;\n ar<<(WORD)nLength;\n }\n else if (nLength < 0xffffffff)\n {\n ar<<(BYTE)0xff;\n ar<<(WORD)0xffff;\n ar<<(DWORD)nLength;\n }\n else\n {\n ar<<(BYTE)0xff;\n ar<<(WORD)0xffff;\n ar<<(DWORD)0xffffffff;\n ar<<(ULONGLONG)nLength;\n }\n}\n\n// The following implementation is coming from arccore.cpp\nUINT_PTR AfxReadStringLength(CArchive& ar)\n{\n UINT_PTR nLength;\n BYTE b;\n ar >> b;\n if (b == 0xff)\n {\n WORD w;\n ar >> w;\n if (w == 0xffff)\n {\n DWORD dw;\n ar >> dw;\n if (dw == 0xffffffff)\n {\n ULONGLONG ulong;\n ar >> ulong;\n nLength = ulong;\n }\n else\n {\n nLength = dw;\n }\n }\n else\n {\n nLength = w;\n }\n }\n else\n {\n nLength = b;\n }\n return nLength;\n}\n\ntemplate<>\nvoid CArchive::operator<<(const CString& str)\n{\n int strLength = str.GetLength();\n AfxWriteStringLength(*this, strLength);\n mStream.write((const char*)str.ToString().c_str(), sizeof(char)*strLength);\n}\n\ntemplate<>\nvoid CArchive::operator<<(const CTime& time)\n{\n std::int64_t timeInt64 = time.GetAsTimeT();\n mStream.write((const char*)&timeInt64, sizeof(std::int64_t));\n}\n\ntemplate<>\nvoid CArchive::operator>>(COleDateTime& date)\n{\n int32_t status = 0;\n mStream.read((char*)&status, sizeof(int32_t));\n \n double dt = 0.0;\n mStream.read((char*)&dt, sizeof(double));\n date = COleDateTime(dt);\n}\n\ntemplate<>\nvoid CArchive::operator>>(CString& str)\n{\n int strLength = AfxReadStringLength(*this);\n\n char* buffer = new char[strLength+1];\n buffer[strLength] = '\\0';\n mStream.read(buffer, strLength);\n str = buffer;\n delete[] buffer;\n}\n\ntemplate<>\nvoid 
CArchive::operator>>(CTime& time)\n{\n std::int64_t timeInt64;\n mStream.read((char*)&timeInt64, sizeof(std::int64_t));\n time.FromTimeT(timeInt64);\n}\n\nDWORD_PTR CArchive::ReadCount()\n{\n\tWORD wCount;\n\t*this >> wCount;\n\tif (wCount != 0xFFFF)\n\t\treturn wCount;\n\n\tDWORD dwCount;\n\t*this >> dwCount;\n#ifndef _WIN64\n\treturn dwCount;\n#else // _WIN64\n\tif (dwCount != 0xFFFFFFFF)\n\t\treturn dwCount;\n\n\tDWORD_PTR qwCount;\n\t*this >> qwCount;\n\treturn qwCount;\n#endif // _WIN64\n}\n\nvoid CArchive::WriteCount(DWORD_PTR dwCount)\n{\n\tif (dwCount < 0xFFFF)\n\t\t*this << (WORD)dwCount; // 16-bit count\n\telse\n\t{\n\t\t*this << (WORD)0xFFFF;\n#ifndef _WIN64\n\t\t*this << (DWORD)dwCount; // 32-bit count\n#else // _WIN64\n\t\tif (dwCount < 0xFFFFFFFF)\n\t\t\t*this << (DWORD)dwCount; // 32-bit count\n\t\telse\n\t\t{\n\t\t\t*this << (DWORD)0xFFFFFFFF;\n\t\t\t*this << dwCount;\n\t\t}\n#endif // _WIN64\n\t}\n}\n\nUINT CArchive::Read(void *lpBuf, UINT nMax)\n{\n size_t read = mStream.readsome((char *)&lpBuf, nMax);\n return static_cast<UINT>(read);\n}\n\nvoid CArchive::Write(const void *lpBuf, UINT nMax)\n{\n mStream.write((const char *)&lpBuf, nMax);\n}" }, { "alpha_fraction": 0.554973840713501, "alphanum_fraction": 0.5951134562492371, "avg_line_length": 26.285715103149414, "blob_id": "80d118eaea87593fed074d1189eee494c8adb241", "content_id": "f514fd581ee7f4f49ccbc87a12805d3e6c1c9084", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 573, "license_type": "no_license", "max_line_length": 75, "num_lines": 21, "path": "/Linux/portcode/varroapop.h", "repo_name": "RobertCurry/VarroaPopLinux", "src_encoding": "UTF-8", "text": "// VarroaPop.h: implementation of the CWasp class.\n//\n//////////////////////////////////////////////////////////////////////\n\n#pragma once\n#ifndef VARROAPOP_CUSTOM_H\n#define VARROAPOP_CUSTOM_H\n\n#include \"stdafx.h\"\n\nextern bool gl_RunGUI;\n\n#define MB_OK 0x00000000L\n#define 
MB_YESNO 0x00000004L\n\n//Free Functions\nint MyMessageBox( LPCTSTR lpszText, UINT nType = MB_OK, UINT nIDHelp = 0 );\nenum PELEMENT {DRV = 1, DIR = 2, FNAME = 3, EXT =4};\nCString SplitPath(CString PathString, PELEMENT PathElement);\n\n#endif // VARROAPOP_CUSTOM_H\n" }, { "alpha_fraction": 0.6483253836631775, "alphanum_fraction": 0.6483253836631775, "avg_line_length": 13.928571701049805, "blob_id": "721f9de18c7edd81be97749079f8ab9a15520794", "content_id": "a0f4fd03a30baefd1ec3568c85e952ee6064d3a7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 418, "license_type": "no_license", "max_line_length": 43, "num_lines": 28, "path": "/Linux/portcode/cuintarray.cpp", "repo_name": "RobertCurry/VarroaPopLinux", "src_encoding": "UTF-8", "text": "#include \"cuintarray.h\"\n\nINT_PTR CUIntArray::GetSize() const\n{\n return m_data.size();\n}\n\nUINT CUIntArray::GetAt(INT_PTR index) const\n{\n return m_data.at(index);\n}\n\nvoid CUIntArray::Add(UINT eventId)\n{\n m_data.push_back(eventId);\n}\n\nvoid CUIntArray::RemoveAt(UINT index)\n{\n auto it = m_data.begin();\n std::advance(it, index);\n m_data.erase(it);\n}\n\nvoid CUIntArray::RemoveAll()\n{\n m_data.clear();\n}\n" }, { "alpha_fraction": 0.6814814805984497, "alphanum_fraction": 0.6814814805984497, "avg_line_length": 21.5, "blob_id": "b18f3b6638efebe1f524857127911eef0cf8ba3c", "content_id": "b18ebd4cce350701df2e95614199fa9f798f0c6d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 135, "license_type": "no_license", "max_line_length": 69, "num_lines": 6, "path": "/Linux/portcode/stdafx.cpp", "repo_name": "RobertCurry/VarroaPopLinux", "src_encoding": "UTF-8", "text": "#include \"stdafx.h\"\n\nstd::ostream &operator<<(std::ostream &stream, const CString& string)\n{\n return stream << string.ToString();\n}\n" }, { "alpha_fraction": 0.5509679913520813, "alphanum_fraction": 0.5570920705795288, "avg_line_length": 
29.130952835083008, "blob_id": "9a8610740b7e1458e959415563f50ca7af56a5cb", "content_id": "e47b06ceb07a10353be25b029a9503ab464c18ce", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 5062, "license_type": "no_license", "max_line_length": 80, "num_lines": 168, "path": "/Linux/tests/test_cptrlist.cpp", "repo_name": "RobertCurry/VarroaPopLinux", "src_encoding": "UTF-8", "text": "#include \"catch2/catch.hpp\"\n\n//! include explicitely since we are not using the pre-compiled header for tests\n//! may change this in the future\n#include \"stdafx.h\" \n\n#include \"cptrlist.h\"\n\n#include \"tests/helpers/myobject.h\"\n\n#include <sstream>\n\nTEST_CASE(\"CPtrList operations\", \"[port]\") {\n \n CTypedPtrList<CPtrList, MyObject*> list;\n\n SECTION(\"Empty list checks\") {\n \n CHECK(list.GetCount() == 0);\n CHECK(list.IsEmpty());\n CHECK(list.GetTailPosition() == nullptr);\n CHECK(list.GetHeadPosition() == nullptr);\n }\n\n SECTION(\"Insert and Find - 1 element\") {\n\n std::unique_ptr<MyObject> object (new MyObject()); \n object->UpdateData(\"tail\");\n POSITION tail = list.AddTail(object.get());\n\n CHECK(list.GetCount() == 1);\n CHECK_FALSE(list.IsEmpty());\n\n CObject* cObj = list.GetAt(tail);\n CHECK(cObj == object.get());\n\n cObj = list.GetNext(tail);\n CHECK(cObj == object.get());\n CHECK(tail == nullptr);\n }\n\n SECTION(\"Insert and Find - 2 elements\") {\n\n std::unique_ptr<MyObject> headObject (new MyObject()); \n headObject->UpdateData(\"head\");\n list.AddTail(headObject.get());\n\n std::unique_ptr<MyObject> tailObject (new MyObject()); \n tailObject->UpdateData(\"tail\");\n list.AddTail(tailObject.get());\n\n CHECK(list.GetCount() == 2);\n CHECK_FALSE(list.IsEmpty());\n\n POSITION it = list.GetHeadPosition();\n CObject* cObj = list.GetAt(it);\n CHECK(cObj == headObject.get());\n\n cObj = list.GetNext(it);\n CHECK(cObj == headObject.get());\n CHECK_FALSE(it == nullptr);\n\n cObj = list.GetNext(it);\n 
CHECK(cObj == tailObject.get());\n CHECK(it == nullptr);\n\n it = list.GetTailPosition();\n cObj = list.GetAt(it);\n CHECK(cObj == tailObject.get());\n }\n\n SECTION(\"Insert and Find - Multiple elements\") {\n\n std::vector<std::unique_ptr<MyObject>> myObjects;\n myObjects.resize(10);\n\n for (size_t i=0; i<10; i++) {\n\n std::stringstream elementName;\n elementName << \"element \" << (i+1);\n\n myObjects[i].reset(new MyObject()); \n myObjects[i]->UpdateData(elementName.str());\n list.AddTail(myObjects[i].get());\n }\n\n CHECK(list.GetCount() == 10);\n CHECK_FALSE(list.IsEmpty());\n\n POSITION it = list.GetHeadPosition();\n REQUIRE(it != nullptr);\n auto cObj = dynamic_cast<MyObject*>(list.GetAt(it));\n CHECK(cObj->GetData() == \"element 1\");\n\n it = list.GetTailPosition();\n REQUIRE(it != nullptr);\n cObj = dynamic_cast<MyObject*>(list.GetAt(it));\n CHECK(cObj->GetData() == \"element 10\");\n\n list.RemoveHead();\n\n it = list.GetHeadPosition();\n REQUIRE(it != nullptr);\n cObj = dynamic_cast<MyObject*>(list.GetAt(it));\n CHECK(cObj->GetData() == \"element 2\");\n\n it = list.GetTailPosition();\n REQUIRE(it != nullptr);\n cObj = dynamic_cast<MyObject*>(list.GetAt(it));\n CHECK(cObj->GetData() == \"element 10\");\n\n list.RemoveAt(list.GetTailPosition());\n\n it = list.GetHeadPosition();\n REQUIRE(it != nullptr);\n cObj = dynamic_cast<MyObject*>(list.GetAt(it));\n CHECK(cObj->GetData() == \"element 2\");\n\n it = list.GetTailPosition();\n REQUIRE(it != nullptr);\n cObj = dynamic_cast<MyObject*>(list.GetAt(it));\n CHECK(cObj->GetData() == \"element 9\");\n\n it = list.GetHeadPosition();\n REQUIRE(it != nullptr);\n\n cObj = dynamic_cast<MyObject*>(list.GetNext(it));\n REQUIRE(it != nullptr);\n CHECK(cObj->GetData() == \"element 2\");\n \n cObj = dynamic_cast<MyObject*>(list.GetNext(it));\n REQUIRE(it != nullptr);\n CHECK(cObj->GetData() == \"element 3\");\n\n cObj = dynamic_cast<MyObject*>(list.GetNext(it));\n REQUIRE(it != nullptr);\n CHECK(cObj->GetData() == 
\"element 4\");\n \n cObj = dynamic_cast<MyObject*>(list.GetNext(it));\n REQUIRE(it != nullptr);\n CHECK(cObj->GetData() == \"element 5\");\n \n cObj = dynamic_cast<MyObject*>(list.GetNext(it));\n REQUIRE(it != nullptr);\n CHECK(cObj->GetData() == \"element 6\");\n \n cObj = dynamic_cast<MyObject*>(list.GetNext(it));\n REQUIRE(it != nullptr);\n CHECK(cObj->GetData() == \"element 7\");\n \n cObj = dynamic_cast<MyObject*>(list.GetNext(it));\n REQUIRE(it != nullptr);\n CHECK(cObj->GetData() == \"element 8\");\n\n cObj = dynamic_cast<MyObject*>(list.GetNext(it));\n REQUIRE(it == nullptr);\n CHECK(cObj->GetData() == \"element 9\");\n\n it = list.GetHeadPosition();\n list.GetNext(it);\n list.GetNext(it);\n // save valid Posision\n POSITION valid = it; list.GetNext(valid);\n list.RemoveAt(it);\n cObj = dynamic_cast<MyObject*>(list.GetAt(valid));\n CHECK(cObj->GetData() == \"element 5\");\n }\n}\n" }, { "alpha_fraction": 0.6135265827178955, "alphanum_fraction": 0.6159420013427734, "avg_line_length": 25.74193572998047, "blob_id": "c5cc6c0287b9d1d76465a7fa9650297a4d7c9aae", "content_id": "568ed4b17c2b54e50a62551fe941b9c998051a7f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 828, "license_type": "no_license", "max_line_length": 86, "num_lines": 31, "path": "/Linux/portcode/compat.h", "repo_name": "RobertCurry/VarroaPopLinux", "src_encoding": "UTF-8", "text": "#pragma once\n#ifndef COMPAT_H\n#define COMPAT_H\n\n/// In this file we define standard methods / features that may not be available \n/// on all compilers\n\n#include <algorithm>\n#include <cassert>\n#if !defined(__cpp_lib_clamp)\n#pragma message \"Defining std::clamp since not supported by this compiler c++ library\"\n// C__14 and previous versions code here\nnamespace std\n{\n template<class T>\n constexpr const T& clamp( const T& v, const T& lo, const T& hi )\n {\n assert( !(hi < lo) );\n return (v < lo) ? lo : (hi < v) ? 
hi : v;\n }\n\n template<class T, class Compare>\n constexpr const T& clamp( const T& v, const T& lo, const T& hi, Compare comp )\n {\n assert( !comp(hi, lo) );\n return comp(v, lo) ? lo : comp(hi, v) ? hi : v;\n }\n}\n#endif // !defined(__cpp_lib_clamp)\n\n#endif // COMPAT_H" }, { "alpha_fraction": 0.6493598818778992, "alphanum_fraction": 0.6500711441040039, "avg_line_length": 22.433332443237305, "blob_id": "ce9fa798f13f1bd4ce463658dabb56bb8c29d7e1", "content_id": "62521a969806a436028227ae64ff963c7d1abcbe", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1406, "license_type": "no_license", "max_line_length": 94, "num_lines": 60, "path": "/Linux/portcode/cmapstringtoob.cpp", "repo_name": "RobertCurry/VarroaPopLinux", "src_encoding": "UTF-8", "text": "#include \"cmapstringtoob.h\"\n\nBOOL CMapStringToOb::IsEmpty() const\n{\n return m_map.empty();\n}\n\nPOSITION CMapStringToOb::GetStartPosition() const\n{\n // initialize iterator \n auto it = m_map.begin();\n POSITION position = nullptr;\n if (it != m_map.end())\n {\n position = std::make_unique<CMapStringToObNs::InnerPosition>(it);\n }\n return position;\n}\n\nvoid CMapStringToOb::SetAt(LPCTSTR string, CObject* value)\n{\n m_map[string] = value;\n}\n\nvoid CMapStringToOb::GetNextAssoc(POSITION& position, CString& string, CObject*& value) const\n{\n auto iterator = ext::dynamic_unique_cast<CMapStringToObNs::InnerPosition>(position.get());\n string = iterator->m_it->first;\n value = iterator->m_it->second;\n iterator->m_it++;\n if (iterator->m_it == m_map.end())\n {\n position = nullptr;\n }\n}\n\nBOOL CMapStringToOb::Lookup(LPCTSTR string, CObject*& value) const\n{\n BOOL found = false;\n auto it = m_map.find(string);\n if (it != m_map.end())\n {\n value = it->second;\n found = true;\n }\n return found;\n}\n\nBOOL CMapStringToOb::RemoveKey(LPCTSTR string)\n{\n // here for now I don't update the current InnerPosition since\n // we should have incremented the 
iterator before erasing the key\n // hence the iterator on next will still be valid\n return m_map.erase(string) > 0;\n}\n\nvoid CMapStringToOb::RemoveAll()\n{\n m_map.clear();\n}\n" }, { "alpha_fraction": 0.6178467273712158, "alphanum_fraction": 0.6304558515548706, "avg_line_length": 24.774999618530273, "blob_id": "decf7dc614bc5493379bcd1a6bac48c5e1a154c5", "content_id": "da4a62c2786429236cc66ed0092e1516306109a6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1031, "license_type": "no_license", "max_line_length": 160, "num_lines": 40, "path": "/Linux/portcode/ctime.cpp", "repo_name": "RobertCurry/VarroaPopLinux", "src_encoding": "UTF-8", "text": "#include \"ctime.h\"\n\n#include <iomanip>\n#include <sstream>\n\nCTime::CTime()\n: m_time_point(std::chrono::system_clock::now())\n{\n}\n\nCTime::CTime(const SYSTEMTIME& time)\n{\n const char* dateTimeFormat = \"%Y-%m-%d %H:%M:%S\";\n std::string dateTime = fmt::format(\"{:0>4}-{:0>2}-{:0>2} {:0>2}:{:0>2}:{:0>2}\", time.wYear, time.wMonth, time.wDay, time.wHour, time.wMinute, time.wSecond);\n std::istringstream stream (dateTime);\n std::tm dt;\n dt.tm_isdst = -1; // needs to be set to unspecified otherwise random value is set\n stream >> std::get_time(&dt, dateTimeFormat);\n if (!stream.fail())\n {\n m_time_point = std::chrono::system_clock::from_time_t(std::mktime(&dt));\n }\n}\n\nCOleDateTime CTime::GetTime() const\n{\n COleDateTime time (m_time_point);\n return time;\n}\n\nvoid CTime::FromTimeT(const time_t& time)\n{\n m_time_point = std::chrono::system_clock::from_time_t(time); \n}\n\ntime_t CTime::GetAsTimeT() const\n{\n time_t time = std::chrono::system_clock::to_time_t(m_time_point);\n return time;\n}\n" }, { "alpha_fraction": 0.6869639158248901, "alphanum_fraction": 0.68764328956604, "avg_line_length": 23.278350830078125, "blob_id": "432c8f3726029346286507aaef6a7126219894f5", "content_id": "df3df0b87bd356dcbced41f4fc0abe52baed0402", "detected_licenses": [], 
"is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 11775, "license_type": "no_license", "max_line_length": 101, "num_lines": 485, "path": "/VarroaPopDoc.cpp", "repo_name": "RobertCurry/VarroaPopLinux", "src_encoding": "UTF-8", "text": "// VarroaPopDoc.cpp : implementation of the CVarroaPopDoc class\n//\n\n#include \"stdafx.h\"\n#include \"VarroaPop.h\"\n\n#include \"VarroaPopDoc.h\"\n#include \"VarroaPopView.h\"\n#include \"MainFrm.h\"\n#include \"SelGraph.h\"\n#include \"CombRemDlg.h\"\n#include \"Options.h\"\n#include \"WeatherEditor.h\"\n#include \"afxcoll.h\"\n#include <math.h>\n\n#ifdef _DEBUG\n#define new DEBUG_NEW\n#undef THIS_FILE\nstatic char THIS_FILE[] = __FILE__;\n#endif\n\n/////////////////////////////////////////////////////////////////////////////\n// CVarroaPopDoc\n\nIMPLEMENT_DYNCREATE(CVarroaPopDoc, CDocument)\n\nBEGIN_MESSAGE_MAP(CVarroaPopDoc, CDocument)\n\t//{{AFX_MSG_MAP(CVarroaPopDoc)\n\tON_COMMAND(ID_FILE_NEW, OnFileNew)\n\tON_COMMAND(ID_SELECT_GRAPH, OnSelectGraph)\n\tON_COMMAND(ID_VIEW_PLOTDATA, OnViewPlotdata)\n\tON_COMMAND(ID_FILE_SAVE_RESULTS, OnFileSaveResults)\n\tON_COMMAND(ID_FILE_SAVE_SESSION, OnFileSaveSession)\n\tON_COMMAND(ID_VIEW_COMBREMOVALDATE, OnViewCombremovaldate)\n\tON_UPDATE_COMMAND_UI(ID_VIEW_DATA_FREQ, OnUpdateToggleDataFreq)\n\tON_COMMAND(ID_VIEW_DATA_FREQ, OnToggleDataFreq)\n\tON_COMMAND(ID_VIEW_OPTIONS, OnViewOptions)\n\tON_COMMAND(ID_WEATHER_CREATENEWWEATHERFILE, OnWeatherCreatenewweatherfile)\n\tON_COMMAND(ID_WEATHER_EDITCURRENTWEATHERFILE, OnWeatherEditcurrentweatherfile)\n\tON_COMMAND(ID_WEATHER_EDITWEATHERFILEFROMDISK, OnWeatherEditweatherfilefromdisk)\n\t//}}AFX_MSG_MAP\n\tON_UPDATE_COMMAND_UI(ID_VIEW_SHOWWARNINGS, &CVarroaPopDoc::OnUpdateViewShowwarnings)\n\tON_COMMAND(ID_VIEW_SHOWWARNINGS, &CVarroaPopDoc::OnToggleShowwarnings)\nEND_MESSAGE_MAP()\n\n/////////////////////////////////////////////////////////////////////////////\n// CVarroaPopDoc 
construction/destruction\n\nCVarroaPopDoc::CVarroaPopDoc()\n{\n\tCVarroaPopApp* theApp = (CVarroaPopApp*)AfxGetApp();\n\n\tSetBridge(this);\n\n\t// Set Default Path name = App path\n\tCString modulename = CString(((CVarroaPopApp*)AfxGetApp())->GetModuleFileName());\n\tSetDefaultPathName(SplitPath(modulename,DRV) + SplitPath(modulename,DIR));\n\n\t// Find all colony description files and build the list\n\tCFileFind finder;\n\tCString name;\n\tint ok = finder.FindFile(\"*.col\"); // colony files\n\tint i;\n\tbool nullify;\n\twhile(ok) {\n\t\tok = finder.FindNextFile();\n\t\tname = finder.GetFileName();\n\t\t// strip the extension and add the name to the list\n\t\tnullify = false;\n\t\tfor(i=0; i<name.GetLength(); i++) {\n\t\t\tif((nullify) || (name.GetAt(i) == '.')) {\n\t\t\t\tname.SetAt(i, ' ');\n\t\t\t\tnullify = true;\n\t\t\t}\n\t\t\tname.TrimRight();\n\t\t}\n\t\tgetAvailableFilesList()->AddTail(name);\n\t}\n}\n\nCVarroaPopDoc::~CVarroaPopDoc()\n{\n}\n\n\n\nBOOL CVarroaPopDoc::OnNewDocument()\n{\n\tif (!CDocument::OnNewDocument())\n\t\treturn FALSE;\n\n\t// TODO: add reinitialization code here\n\t// (SDI documents will reuse this document)\n\tSetModifiedFlag(FALSE);\n\tSetTitle(\"Untitled\");\n\tGetColony()->Clear();\n\treturn TRUE;\n}\n\n\n\n/////////////////////////////////////////////////////////////////////////////\n// CVarroaPopDoc serialization\n\nvoid CVarroaPopDoc::Serialize(CArchive& ar)\n{\n\tCVarroaPopSession::Serialize(ar);\n}\n\n/////////////////////////////////////////////////////////////////////////////\n// CVarroaPopDoc diagnostics\n\n#ifdef _DEBUG\nvoid CVarroaPopDoc::AssertValid() const\n{\n\tCDocument::AssertValid();\n}\n\nvoid CVarroaPopDoc::Dump(CDumpContext& dc) const\n{\n\tCDocument::Dump(dc);\n}\n#endif //_DEBUG\n\n/////////////////////////////////////////////////////////////////////////////\n// CVarroaPopDoc commands\n\nvoid CVarroaPopDoc::SetTitle(LPCTSTR lpszTitle) \n{\n\t\n\tCDocument::SetTitle(lpszTitle);\n}\n\n\nvoid 
CVarroaPopDoc::OnFileNew() \n{\n\tPOSITION pos = GetFirstViewPosition();\n\tCVarroaPopView* pFirstView = (CVarroaPopView*)GetNextView( pos );\n\n\tif (((GetWeather()->IsInitialized())||(GetColony()->IsInitialized()))&&IsModified())\n\t{\n\t\t// Give opportunity to save old file\n\n\t\tif (MyMessageBox(\"Save Changes to \"+GetPathName()+\"?\\n\",MB_YESNO) == IDYES)\n\t\t{\n\t\t\tCFileDialog MyFileDialog(\tFALSE,\n\t\t\t\t\t\t\t\t\t\".vrp\",\n\t\t\t\t\t\t\t\t\tGetPathName(),\n\t\t\t\t\t\t\t\t\tOFN_HIDEREADONLY | OFN_OVERWRITEPROMPT,\n\t\t\t\t\t\t\t\t\t_T(\"VarroaPop Files (*.vrp)|*.vrp|All Files|*.*||\"));\n\n\t\t\tif (MyFileDialog.DoModal() == IDOK) OnFileSaveSession();\n\t\t}\n\t}\n\tOnNewDocument();\n\n\tpFirstView->ShowPropsheet();\n\t\n}\n\n\nvoid CVarroaPopDoc::OnSelectGraph() \n{\n\t// We get here from the toolbar selection\n\tCSelGraph SelectGraph(this);\n\tSelectGraph.DoModal();\n\tint i = 0;\n\n\t\n}\n\nvoid CVarroaPopDoc::OnViewPlotdata() \n{\n\t// We get here from the menu selection\n\tOnSelectGraph();\n\t\n}\n\n\nvoid CVarroaPopDoc::OnFileSaveResults() \n{\n\t\n\tCFileDialog MyFileDialog(\tFALSE,\n\t\t\t\t\t\t\t\t\".res\",\n\t\t\t\t\t\t\t\tNULL,\n\t\t\t\t\t\t\t\tOFN_HIDEREADONLY | OFN_OVERWRITEPROMPT,\n\t\t\t\t\t\t\t\t_T(\"Simulation Results Files (*.res)|*.res|All Files|*.*||\"));\n\n\tif (MyFileDialog.DoModal() == IDOK)\n\t{\n\t\tStoreResultsFile(MyFileDialog.GetPathName());\n\t}\n}\n\n\n\nvoid CVarroaPopDoc::OnFileSaveSession() \n{\n\tOnSaveDocument(GetPathName());\n}\n\nvoid CVarroaPopDoc::OnViewCombremovaldate() \n{\n\tCCombRemDlg MyDialog;\n\tMyDialog.m_RemovalDate = m_CombRemoveDate;\n\tMyDialog.m_IsEnabled = m_CombRemoveEnable;\n\tMyDialog.m_Percent = m_CombRemovePct;\n\n\tif (MyDialog.DoModal() == IDOK)\n\t{\n\t\tm_CombRemoveDate = MyDialog.m_RemovalDate;\n\t\tm_CombRemoveEnable = MyDialog.m_IsEnabled;\n\t\tm_CombRemovePct = MyDialog.m_Percent;\n\t}\n\n\t\n}\n\n\nvoid CVarroaPopDoc::OnUpdateToggleDataFreq(CCmdUI* pCmdUI) 
\n{\n\tpCmdUI->Enable();\n\t//if (!m_DispWeeklyData) pCmdUI->SetText(\"Weekly Data\");\n\t//else pCmdUI->SetText(\"Daily Data\");\n\tif (m_DispWeeklyData) pCmdUI->SetCheck(1);\n\telse pCmdUI->SetCheck(0);\n}\n\nvoid CVarroaPopDoc::OnToggleDataFreq() \n{\n\tm_DispWeeklyData = !m_DispWeeklyData;\n}\n\n\n\nvoid CVarroaPopDoc::OnViewOptions() \n{\n\tCOptions OptDlg(this);\n\n\tOptDlg.DoModal();\n\n\t\n}\n\nvoid CVarroaPopDoc::OnWeatherCreatenewweatherfile() \n{\n\t// Creates a new CWeatherEvents and sends it to the weather editor\n\t// Upon completion of editing, if user closed the editor with OK and \n\t// has not yet saved the weather file, prompt for saving and prompt to make\n\t// this the current weather for the session\n\n\tCWeatherEvents* pWeather = new CWeatherEvents;\n\tCWeatherEditor WeatherEditor(pWeather);\n\tif (WeatherEditor.DoModal() == IDOK)\n\t{\n\t\tif(WeatherEditor.IsModified())\n\t\t{\n\t\t\t// Prompt to set this as the active weather file and prompt to save\n\t\t\tif(AfxMessageBox(\"Save Weather File ?\",MB_YESNO) == IDYES)\n\t\t\t{\n\t\t\t\t// Save weather\n\t\t\t\t// TODO - Add file save dialog\n\t\t\t\tif (AfxMessageBox(\"Replace current weather with this one ?\",MB_YESNO) == IDYES)\n\t\t\t\t{\n\t\t\t\t\tif (AfxIsValidAddress(m_pWeather,sizeof(CWeatherEvents))) delete m_pWeather;\n\t\t\t\t\tm_pWeather = pWeather;\n\t\t\t\t}\n\t\t\t\telse delete pWeather;\n\t\t\t}\n\t\t}\n\t}\n\t\n}\n\nvoid CVarroaPopDoc::OnWeatherEditcurrentweatherfile() \n{\n\tCWeatherEditor WeatherEditor(m_pWeather);\n\tif (WeatherEditor.DoModal() == IDOK)\n\t{\n\t\tif(WeatherEditor.IsModified())\n\t\t{\n\t\t\t// Prompt to save\n\t\t\t// TODO - Add file save dialog\n\t\t}\n\t}\n\t\n}\n\nvoid CVarroaPopDoc::OnWeatherEditweatherfilefromdisk() \n{\n\t// Loads a weather file from disk and edits. 
After editing, prompts to\n\t// save and to make current\n\tCFileDialog fd(\tTRUE,\n\t\t\t\t\t\".wth\",\n\t\t\t\t\tNULL,\n\t\t\t\t\tOFN_HIDEREADONLY | OFN_OVERWRITEPROMPT,\n\t\t\t\t\t_T(\"Weather Files (*.wth)|*.wth|All Files|*.*||\"));\n\n\tif (fd.DoModal() == IDOK)\n\t{\n\t\tCString FileName = fd.GetPathName();\n\t\tCWeatherEvents* pWeather = new CWeatherEvents;\n\t\tif (pWeather->LoadWeatherFile(FileName))\n\t\t{\n\t\t\tCWeatherEditor WeatherEditor(pWeather);\n\t\t\tif (WeatherEditor.DoModal() == IDOK)\n\t\t\t{\n\t\t\t\tif (WeatherEditor.IsModified())\n\t\t\t\t{\n\t\t\t\t\t// Prompt to save file\n\t\t\t\t\t// TODO - Add File Save dialog\n\t\t\t\t\tif (AfxMessageBox(\"Replace current weather with this one ?\",MB_YESNO) == IDYES)\n\t\t\t\t\t{\n\t\t\t\t\t\tif (AfxIsValidAddress(m_pWeather,sizeof(CWeatherEvents))) delete m_pWeather;\n\t\t\t\t\t\tm_pWeather = pWeather;\n\t\t\t\t\t}\n\t\t\t\t\telse delete pWeather;\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}\n\n\n\nBOOL CVarroaPopDoc::OnOpenDocument(LPCTSTR lpszPathName)\n{\n\tif (!CDocument::OnOpenDocument(lpszPathName))\n\t\treturn FALSE;\n\n\t// TODO: Add your specialized creation code here\n\n\treturn TRUE;\n}\n\n\n\nvoid CVarroaPopDoc::OnUpdateViewShowwarnings(CCmdUI *pCmdUI)\n{\n\tpCmdUI->Enable();\n\tif (IsShowWarnings()) pCmdUI->SetCheck(1);\n\telse pCmdUI->SetCheck(0);\n}\n\n\nvoid CVarroaPopDoc::OnToggleShowwarnings()\n{\n\tSetShowWarnings(!IsShowWarnings()); // Toggle\n}\n\n/////////////////////////////////////////////////////////////////////////////\n// CVarroaPopSessionBridge Implementation\n\nvoid CVarroaPopDoc::SimulationStartUpdated()\n{\n\tif (gl_RunGUI)\n\t{\n\t\tif (IsWeatherLoaded()) ((CMainFrame*)(AfxGetApp()->m_pMainWnd))->InitializeDateCtrls();\n\t}\n}\n\nvoid CVarroaPopDoc::SimulationEndUpdated() \n{\n\tif (gl_RunGUI)\n\t{\n\t\tif (IsWeatherLoaded()) ((CMainFrame*)(AfxGetApp()->m_pMainWnd))->InitializeDateCtrls();\n\t}\n}\n\n\nvoid CVarroaPopDoc::StartSimulation(CVarroaPopSession& 
session)\n{\n\tBeginWaitCursor();\n}\n\nvoid CVarroaPopDoc::EndSimulation(CVarroaPopSession& session)\n{\n\tif (gl_RunGUI) UpdateAllViews(NULL);\n\telse\n\t{\n\t\t// This code generates an execption on Julien's computer we need to figure out what it does and why\n\t\tPOSITION pos = GetFirstViewPosition();\n\t\tCVarroaPopView* pView = (CVarroaPopView*)GetNextView(pos);\n\t\tpView->ChartData(this, gl_RunGUI);\n\t}\n\n\t// If command line switch has /or then save results file\n\tCString ResultsFileName = ((CVarroaPopApp*)AfxGetApp())->m_OutputResultsFileName;\n\tif (ResultsFileName.GetLength() > 0) StoreResultsFile(ResultsFileName);\n\n\tEndWaitCursor();\n}\n\nvoid CVarroaPopDoc::ImmigrationEnabled(bool enabled)\n{\n\tif ((enabled) && (gl_RunGUI))\n\t\t((CMainFrame*)(AfxGetApp()->m_pMainWnd))->SetImmigration(true);\n\telse if (gl_RunGUI)\n\t\t((CMainFrame*)(AfxGetApp()->m_pMainWnd))->SetImmigration(false);\n}\n\nvoid CVarroaPopDoc::WeatherFileMissing()\n{\n\tif (gl_RunGUI)\n\t{\n\t\tCString msg = \" This Session has no associated Weather File\\n\";\n\t\tmsg += \"You will have to specify one before you run a simulation\";\n\t\tMyMessageBox(msg);\n\t\t((CMainFrame*)(AfxGetApp()->m_pMainWnd))->m_WeatherFileName = \"\";\n\t}\n\telse\n\t{\n\t\t// Weather file might be specified in the input file\n\t}\n}\n\nvoid CVarroaPopDoc::WeatherFileLoaded(bool loaded, const CString& filename)\n{\n\tif (loaded)\n\t{\n\t\tif (gl_RunGUI)\n\t\t\t((CMainFrame*)(AfxGetApp()->m_pMainWnd))->InitializeDateCtrls();\n\t}\n\telse\n\t{\n\t\tCString msg = \" Reading Session File: The Specified Weather file \";\n\t\tmsg += filename + \" was not found\\n\";\n\t\tmsg += \"You will have to specify one before you run a simulation\";\n\t\tMyMessageBox(msg);\n\t}\n}\n\nvoid CVarroaPopDoc::SessionFileLoaded(CArchive& ar)\n{\n\t// After reading .vrp file, update any variables which were changed\n\t// by the command line input file\n\tCString InputFileName = 
((CVarroaPopApp*)AfxGetApp())->m_InputFileName;\n\tif ((!ar.IsStoring()) & (InputFileName.GetLength() > 0))\n\t{\n\t\tProcessInputFile(InputFileName);\n\t}\n\n\tif (gl_RunGUI)\n\t\t((CMainFrame*)(AfxGetApp()->m_pMainWnd))->EnableDialogBar(ReadyToSimulate());\n}\n\nCString CVarroaPopDoc::GetDefaultPathName(CArchive& ar)\n{\n\t// Set Default Path name = SessionFile Path\n\tCString pathname = ar.GetFile()->GetFilePath();\n\treturn (SplitPath(pathname, DRV) + SplitPath(pathname, DIR));\n}\n\nvoid CVarroaPopDoc::InputFileUnknownVariable(const CString& name)\n{\n\tMyMessageBox(\"Input File Variable Name \" + name + \" not Recognized\");\n}\n\nvoid CVarroaPopDoc::InputFileException(const CString& message)\n{\n\tMyMessageBox(CString(\"Error Pocessing Input File: \") + CString(message));\n}\n\nvoid CVarroaPopDoc::OutputFileException(const CString& message)\n{\n\tMyMessageBox(CString(\"Error Storing Results: \") + CString(message));\n}\n\nCString CVarroaPopDoc::GetVersion()\n{\n\tCString verstg;\n\tverstg.LoadString(IDS_VPVERSION);\n\treturn verstg;\n}\n\nBOOL CVarroaPopDoc::CheckDateConsistencyFailed(const CString& warning)\n{\n\tCString DispString =\n\t\t\"Warning - The Following Events are Outside the Simulation Period:\\n\\n\";\n\tDispString += warning;\n\tDispString += \"\\nPress OK to Continue the Simulation\\nPress Cancel to Stop the Simulation\";\n\n\treturn (MyMessageBox(DispString, MB_OKCANCEL) == IDOK);\n}\n" }, { "alpha_fraction": 0.7124999761581421, "alphanum_fraction": 0.7124999761581421, "avg_line_length": 14.800000190734863, "blob_id": "fb255c3909fb241b38a2c28ec669a6223406db50", "content_id": "72150ed9b5bd0042461c7fc83941f7d3d7f00fc8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 80, "license_type": "no_license", "max_line_length": 22, "num_lines": 5, "path": "/Linux/portcode/afx.h", "repo_name": "RobertCurry/VarroaPopLinux", "src_encoding": "UTF-8", "text": "\n#pragma once\n#ifndef AFX_CUSTOM_H\n#define 
AFX_CUSTOM_H\n\n#endif // AFX_CUSTOM_H\n" }, { "alpha_fraction": 0.6229080557823181, "alphanum_fraction": 0.6327345371246338, "avg_line_length": 36.64739990234375, "blob_id": "29bcd4d048f61b413fa0e34f030aa6750faa3dde", "content_id": "f4dc16a7f3fb0e78ba0b510ac2e4f483839cb4cf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6513, "license_type": "no_license", "max_line_length": 120, "num_lines": 173, "path": "/Linux/scripts/cold-storage-simulations.py", "repo_name": "RobertCurry/VarroaPopLinux", "src_encoding": "UTF-8", "text": "import argparse\nimport datetime\nimport multiprocessing as mp\nimport os\nimport utilities\n\n\nclass ExecConfiguration:\n def __init__(self):\n self.config = None\n\n\nclass DateConfig:\n def __init__(self, date_str):\n self.date_with_slash = date_str\n self.date_with_hyphen = date_str.replace('/', '-')\n\n def slash(self):\n return self.date_with_slash\n\n def hyphen(self):\n return self.date_with_hyphen\n\n\ndef run_simulation(command_str):\n # Measure simulation duration\n start_simulation = datetime.datetime.now()\n # Call simulation\n process = os.popen(command_str)\n process.close()\n # Compute elapsed time and update total simulation time\n end_simulation = datetime.datetime.now()\n elapsed_time = end_simulation - start_simulation\n utilities.safe_print('\\tCommand:' + command_str + ' \\n\\t\\tElapsed:' + '%.2f seconds' % elapsed_time.total_seconds())\n return elapsed_time.total_seconds()\n\n\n# total simulation time for the record\ntotal_simulations_time = 0\ntotal_simulations_run = 0\n\n\ndef sum_simulation_time(result):\n global total_simulations_time\n global total_simulations_run\n total_simulations_time += int(result)\n total_simulations_run += 1\n\n\ndef simulation_error(error):\n utilities.safe_print(error)\n\n\ndef to_normalize_path(path):\n return r'\"%s\"' % path\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='Cold storage 
simulation.')\n parser.add_argument('--exe', type=str, help='Path of the VarroaPop command line application', required=True)\n parser.add_argument('--vrp', type=str, help='Path of the vrp file to use for simulations', required=True)\n parser.add_argument('--output_directory', type=str,\n help='Output files will be written in an autogenerated folder within OUT_DIR',\n metavar='OUT_DIR', required=True)\n parser.add_argument('--input_directory', type=str, help='Input directory expecting IN_DIR/SCENARIO.txt',\n metavar='IN_DIR', required=True)\n parser.add_argument('--weather_directory', type=str, help='Get weather files from WEATHER_DIRECTORY',\n metavar='WEATHER_DIRECTORY', required=True)\n arguments = parser.parse_args()\n\n print('Working directory: ' + os.getcwd())\n\n if not os.path.isfile(arguments.exe):\n print('Cannot find VarroaPop executable at: ' + arguments.exe)\n exit(-1)\n\n if not os.path.isfile(arguments.vrp):\n print('Cannot find VRP file at: ' + arguments.vrp)\n exit(-1)\n\n if os.path.isfile(arguments.output_directory):\n print(arguments.output_directory + ' is not a directory')\n exit(-1)\n\n if not os.path.isdir(arguments.input_directory):\n print('Cannot find input directory at: ' + arguments.input_directory)\n exit(-1)\n\n if not os.path.isdir(arguments.weather_directory):\n print('Cannot find weather directory at: ' + arguments.weather_directory)\n exit(-1)\n\n start_dates = [\n DateConfig('09/15'),\n DateConfig('09/22'),\n DateConfig('09/29'),\n DateConfig('10/06'),\n DateConfig('10/13'),\n DateConfig('10/20')]\n\n end_dates = [\n DateConfig('02/15'),\n DateConfig('02/22'),\n DateConfig('02/29'),\n DateConfig('03/01'),\n DateConfig('03/08'),\n DateConfig('03/15')]\n\n exec_configurations = []\n\n default_command = arguments.exe + ' -f -v ' + to_normalize_path(arguments.vrp) + \\\n ' --forageDayNoTemp --hourlyTemperaturesEstimation --foragersAlwaysAgeBasedOnForageInc' + \\\n ' --adultAgingBasedOnLaidEggs --inOutEvents'\n\n input_files_exists = 
{}\n\n # gather configurations for simulations\n weather_files = os.listdir(arguments.weather_directory)\n for weather_file in weather_files:\n info = utilities.parse_weather_filename(weather_file)\n output_directory = os.path.join(arguments.output_directory, os.path.join(info.location, info.scenario))\n\n # get input filename and check if it exists\n input_file = os.path.join(arguments.input_directory, info.scenario + '.txt')\n if not input_file in input_files_exists:\n input_files_exists[input_file] = os.path.exists(input_file)\n if not input_files_exists[input_file]:\n print('Missing input file ' + input_file)\n exit(-1)\n\n command = default_command + ' -i ' + to_normalize_path(input_file)\n command += ' -w ' + to_normalize_path(os.path.join(arguments.weather_directory, weather_file))\n command += ' --binaryWeatherFileFormat ' + utilities.get_valid_binary_format_identifier(info.scenario)\n\n # add configuration without cold storage\n output_filename = info.model + '_default'\n output_file = os.path.join(output_directory, output_filename + '.txt')\n exec_command = command + ' -o ' + to_normalize_path(output_file)\n exec_configurations.append(exec_command)\n\n # add configurations for cold storage\n for start_date in start_dates:\n for end_date in end_dates:\n output_filename = info.model + '_cold_storage_' + start_date.hyphen() + '_' + end_date.hyphen()\n output_file = os.path.join(output_directory, output_filename + '.txt')\n exec_command = command + ' -o ' + to_normalize_path(output_file)\n exec_command += ' --coldStorage --coldStorageStartDate %s --coldStorageEndDate %s' \\\n % (start_date.slash(), end_date.slash())\n exec_configurations.append(exec_command)\n\n # run simulations\n print('Executing Cold Storage Simulations: ')\n simulation_time = datetime.datetime.now()\n\n # Step 1: Init multiprocessing.Pool()\n pool = mp.Pool(mp.cpu_count())\n\n # Step 2: Use loop to parallelize\n for configuration in exec_configurations:\n 
pool.apply_async(run_simulation,\n args=(configuration,),\n callback=sum_simulation_time,\n error_callback=simulation_error)\n\n # Step 3: Don't forget to close\n pool.close()\n\n # Step 4: Wait for processes to complete\n pool.join()\n\n print('Total duration (s):' + '%.2f' % (datetime.datetime.now() - simulation_time).total_seconds())\n print('Total duration accumulated (s):' + '%.2f' % total_simulations_time)\n print('Total simulations executed :' + '%d' % total_simulations_run)\n" }, { "alpha_fraction": 0.5683012008666992, "alphanum_fraction": 0.5899007320404053, "avg_line_length": 25.976377487182617, "blob_id": "95894ea79d7ffe71ee96dbe27a4aa651e643105c", "content_id": "b5fd8e7eff85cce4a2aa9beec5f828fcf2e146f0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 3426, "license_type": "no_license", "max_line_length": 90, "num_lines": 127, "path": "/Linux/tests/test_carray.cpp", "repo_name": "RobertCurry/VarroaPopLinux", "src_encoding": "UTF-8", "text": "#include \"catch2/catch.hpp\"\n\n//! include explicitely since we are not using the pre-compiled header for tests\n//! 
may change this in the future\n#include \"stdafx.h\" \n\n#include \"carray.h\"\n\n#include \"tests/helpers/myobject.h\"\n\nTEST_CASE(\"CArray operations\", \"[port]\") {\n \n CArray<int32_t, int32_t> intArray;\n CArray<MyObject*, MyObject*> moArray;\n\n SECTION(\"IsEmpty\") {\n\n CHECK(intArray.IsEmpty());\n CHECK(moArray.IsEmpty());\n }\n\n intArray.Add(12);\n intArray.Add(21);\n intArray.Add(54);\n\n auto obj1 = std::unique_ptr<MyObject>(new MyObject);\n auto obj2 = std::unique_ptr<MyObject>(new MyObject);\n auto obj3 = std::unique_ptr<MyObject>(new MyObject);\n auto obj4 = std::unique_ptr<MyObject>(new MyObject);\n auto obj5 = std::unique_ptr<MyObject>(new MyObject);\n\n moArray.Add(obj1.get());\n moArray.Add(obj2.get());\n moArray.Add(obj3.get());\n moArray.Add(obj4.get());\n moArray.Add(obj5.get());\n\n SECTION(\"Size\") {\n\n CHECK_FALSE(intArray.IsEmpty());\n CHECK_FALSE(moArray.IsEmpty());\n\n CHECK(intArray.GetCount() == 3);\n CHECK(moArray.GetCount() == 5);\n }\n\n SECTION(\"RemoveAll\") {\n\n intArray.RemoveAll();\n moArray.RemoveAll();\n\n CHECK(intArray.IsEmpty());\n CHECK(moArray.IsEmpty());\n\n CHECK(intArray.GetCount() == 0);\n CHECK(moArray.GetCount() == 0);\n }\n\n SECTION(\"Copy\") {\n\n CArray<int32_t, int32_t> cpIntArray;\n cpIntArray.Copy(intArray);\n\n CArray<MyObject*, MyObject*> cpMoArray;\n cpMoArray.Copy(moArray);\n\n CHECK_FALSE(cpIntArray.IsEmpty());\n CHECK_FALSE(cpMoArray.IsEmpty());\n\n CHECK(cpIntArray.GetCount() == 3);\n CHECK(cpMoArray.GetCount() == 5);\n\n // However Copy does not make a deep-copy so pointer in the array are copied as-is\n\n obj3->UpdateData(\"obj3 updated\");\n obj4->SetInitialized();\n\n CHECK(cpMoArray[2]->GetData() == \"obj3 updated\");\n CHECK_FALSE(cpMoArray[2]->IsInitialized());\n CHECK(cpMoArray[3]->GetData() == \"nodata\");\n CHECK(cpMoArray[3]->IsInitialized());\n\n CHECK(moArray[2]->GetData() == \"obj3 updated\");\n CHECK_FALSE(moArray[2]->IsInitialized());\n CHECK(moArray[3]->GetData() == 
\"nodata\");\n CHECK(moArray[3]->IsInitialized());\n }\n\n SECTION(\"[] operator non-const\"){\n\n moArray[0]->UpdateData(\"altered through operator []\");\n CHECK(moArray[0]->GetData() == \"altered through operator []\");\n CHECK_FALSE(moArray[0]->IsInitialized());\n\n moArray[4]->SetInitialized();\n CHECK(moArray[4]->GetData() == \"nodata\");\n CHECK(moArray[4]->IsInitialized());\n }\n\n SECTION(\"SetSize\") {\n\n intArray.SetSize(2);\n moArray.SetSize(10);\n \n CHECK(intArray.GetCount() == 2);\n CHECK(moArray.GetCount() == 10);\n\n CHECK(intArray[0] == 12);\n CHECK(intArray[1] == 21);\n CHECK_THROWS_AS(intArray[3] = 10, std::out_of_range);\n\n // However in the case of the CArray of pointers objects were not created \n }\n\n SECTION(\"RemoveAt\") {\n\n intArray.SetSize(2);\n moArray.SetSize(10);\n \n CHECK(intArray.GetCount() == 2);\n CHECK(moArray.GetCount() == 10);\n\n CHECK_THROWS_AS(intArray[4] = 10, std::out_of_range);\n\n // However in the case of the CArray of pointers objects were not created \n }\n}\n" }, { "alpha_fraction": 0.6860465407371521, "alphanum_fraction": 0.6860465407371521, "avg_line_length": 13.5, "blob_id": "ec7a44ddd541654ae82295c2843724a4633337f3", "content_id": "ef97007a0abca19b14906863c669dde543245668", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 86, "license_type": "no_license", "max_line_length": 37, "num_lines": 6, "path": "/Linux/portcode/cobject.cpp", "repo_name": "RobertCurry/VarroaPopLinux", "src_encoding": "UTF-8", "text": "#include \"cobject.h\"\n\nvoid CObject::Serialize(CArchive& ar)\n{\n NOT_IMPLEMENTED();\n}" }, { "alpha_fraction": 0.7366930842399597, "alphanum_fraction": 0.7366930842399597, "avg_line_length": 18.406593322753906, "blob_id": "96faa5d66ad5c5e534a4a8d5811ac5ca04f32820", "content_id": "25d9b628e12814c5b9775442c4217bd9e223956d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1766, "license_type": 
"no_license", "max_line_length": 89, "num_lines": 91, "path": "/Linux/varroapopcmdbridge.cpp", "repo_name": "RobertCurry/VarroaPopLinux", "src_encoding": "UTF-8", "text": "#include \"varroapopcmdbridge.h\"\n\n#include <iostream>\n\nVarroaPopCmdBridge::VarroaPopCmdBridge(CVarroaPopSession& session)\n: m_Session(session)\n{\n}\n\nVarroaPopCmdBridge::~VarroaPopCmdBridge()\n{\n}\n\t\nvoid VarroaPopCmdBridge::SimulationStartUpdated()\n{\n\t// do nothing\n}\n\nvoid VarroaPopCmdBridge::SimulationEndUpdated()\n{\n\tm_Session.StoreResultsFile(m_ResultsFileName);\n}\n\nvoid VarroaPopCmdBridge::StartSimulation(CVarroaPopSession& session)\n{\n\t// TODO: start timer\n}\n\nvoid VarroaPopCmdBridge::EndSimulation(CVarroaPopSession& session)\n{\n\t// TODO: end timer\n\tm_Session.StoreResultsFile(m_ResultsFileName);\n}\n\nvoid VarroaPopCmdBridge::ImmigrationEnabled(bool enabled)\n{\n\t// do nothing\n}\n\nvoid VarroaPopCmdBridge::WeatherFileMissing()\n{\n\tstd::cerr << \"Weather file missing\" << std::endl;\n}\n\nvoid VarroaPopCmdBridge::WeatherFileLoaded(bool loaded, const CString& filename)\n{\n\tif (loaded)\n\t{\n\t\tstd::cout << \"Weather file: \" << filename.ToString() << \" loaded\" << std::endl;\n\t}\n\telse \n\t{\n\t\tstd::cerr << \"Weather file: [ERROR] when loading \" << filename.ToString() << std::endl;\n\t}\n}\n\nvoid VarroaPopCmdBridge::SessionFileLoaded(CArchive& ar)\n{\n\n}\n\nCString VarroaPopCmdBridge::GetDefaultPathName(CArchive& ar)\n{\n\treturn \"./\";\n}\n\nvoid VarroaPopCmdBridge::InputFileUnknownVariable(const CString& name)\n{\n\tstd::cerr << name.ToString() << std::endl;\n}\n\nvoid VarroaPopCmdBridge::InputFileException(const CString& name)\n{\n\tstd::cerr << name.ToString() << std::endl;\n}\n\nvoid VarroaPopCmdBridge::OutputFileException(const CString& name)\n{\n\tstd::cerr << name.ToString() << std::endl;\n}\n\nCString VarroaPopCmdBridge::GetVersion()\n{\n\treturn \"CMD\";\n}\n\nBOOL VarroaPopCmdBridge::CheckDateConsistencyFailed(const CString& 
warning)\n{\n\tstd::cerr << warning.ToString() << std::endl;\n\treturn true;\n}\n" }, { "alpha_fraction": 0.5366395115852356, "alphanum_fraction": 0.5647668242454529, "avg_line_length": 20.80645179748535, "blob_id": "6b5e59a0f02cfb66edecff83e9cbff9b24c6af73", "content_id": "d3fc7ba2b9bd59015a18f70857b55fd7e807c38d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1351, "license_type": "no_license", "max_line_length": 80, "num_lines": 62, "path": "/Linux/tests/test_cstringarray.cpp", "repo_name": "RobertCurry/VarroaPopLinux", "src_encoding": "UTF-8", "text": "#include \"catch2/catch.hpp\"\n\n//! include explicitely since we are not using the pre-compiled header for tests\n//! may change this in the future\n#include \"stdafx.h\" \n\n#include \"cstringarray.h\"\n\nTEST_CASE(\"CStringArray operations\", \"[port]\") {\n \n CStringArray array;\n\n CHECK(array.GetSize() == 0);\n CHECK(array.GetCount() == 0);\n\n array.SetSize(10, 10);\n\n CHECK(array.GetSize() == 10);\n CHECK(array.GetCount() == 10);\n\n array.Add(\"foo\");\n \n CHECK(array.GetSize() == 11);\n CHECK(array.GetCount() == 11);\n\n CHECK(array[10] == \"foo\");\n \n CHECK(array.GetUpperBound() == 10);\n \n array.Add(\"bar\");\n array.Add(\"hello\");\n array.Add(\"world\");\n \n CHECK(array.GetUpperBound() == 13);\n\n CHECK(array.GetAt(10) == \"foo\");\n CHECK(array.GetAt(11) == \"bar\");\n CHECK(array.GetAt(12) == \"hello\");\n CHECK(array.GetAt(13) == \"world\");\n \n array.RemoveAll();\n\n array.Add(\"foo\");\n \n CHECK(array.GetSize() == 1);\n CHECK(array.GetCount() == 1);\n\n CHECK(array[0] == \"foo\");\n \n CHECK(array.GetUpperBound() == 0);\n \n array.Add(\"bar\");\n array.Add(\"hello\");\n array.Add(\"world\");\n \n CHECK(array.GetUpperBound() == 3);\n\n CHECK(array.GetAt(0) == \"foo\");\n CHECK(array.GetAt(1) == \"bar\");\n CHECK(array.GetAt(2) == \"hello\");\n CHECK(array.GetAt(3) == \"world\");\n}" }, { "alpha_fraction": 0.7027027010917664, 
"alphanum_fraction": 0.7027027010917664, "avg_line_length": 15.586206436157227, "blob_id": "2e58f752d0db923f1e348b0f814188a9f6533dab", "content_id": "4f1c64438f22925d04ac43ebb42d32b6637d00e8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 481, "license_type": "no_license", "max_line_length": 75, "num_lines": 29, "path": "/Linux/portcode/cuintarray.h", "repo_name": "RobertCurry/VarroaPopLinux", "src_encoding": "UTF-8", "text": "#pragma once\n#ifndef CUINTARRAY_CUSTOM_H\n#define CUINTARRAY_CUSTOM_H\n\n#include \"cobject.h\"\n\n#include <cstdint>\n#include <vector>\n\n/**\n * Only supports the necessary interface for the good behavior of VarroaPop\n */\nclass CUIntArray : public CObject\n{\npublic:\n INT_PTR GetSize() const;\n UINT GetAt(INT_PTR index) const;\n\n void Add(UINT eventId);\n void RemoveAt(UINT index);\n\n\tvoid RemoveAll();\n\nprotected:\n\n std::vector<UINT> m_data;\n};\n\n#endif // CUINTARRAY_CUSTOM_H\n" }, { "alpha_fraction": 0.7181302905082703, "alphanum_fraction": 0.7195467352867126, "avg_line_length": 17.578947067260742, "blob_id": "a3fca9d015d75db5a14e1a9d3118f723afe16a36", "content_id": "3635f45f5f33638a41e136b17e098f84a7e77231", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 706, "license_type": "no_license", "max_line_length": 75, "num_lines": 38, "path": "/Linux/portcode/cstringarray.h", "repo_name": "RobertCurry/VarroaPopLinux", "src_encoding": "UTF-8", "text": "#pragma once\n#ifndef CSTRINGARRAY_CUSTOM_H\n#define CSTRINGARRAY_CUSTOM_H\n\n#include \"cobject.h\"\n#include \"cstring.h\"\n\n#include <cstdint>\n#include <vector>\n\n/**\n * Only supports the necessary interface for the good behavior of VarroaPop\n */\nclass CStringArray : public CObject\n{\npublic:\n\n\tINT_PTR GetSize() const;\n\tINT_PTR GetCount() const;\n\tBOOL IsEmpty() const;\n\tINT_PTR GetUpperBound() const;\n\tvoid SetSize(INT_PTR nNewSize, INT_PTR nGrowBy = 
-1);\n\n\tCString GetAt(INT_PTR position);\n\tvoid Add(const CString& str);\n\n\tconst CString& operator[](INT_PTR index) const;\n\tCString& operator[](INT_PTR index);\n\n\tvoid RemoveAll();\n\nprotected:\n\n std::vector<CString> m_data;\n}; \n\n\n#endif // CSTRINGARRAY_CUSTOM_H\n" }, { "alpha_fraction": 0.701985776424408, "alphanum_fraction": 0.70643150806427, "avg_line_length": 55.20833206176758, "blob_id": "52529b1ad0f845a5e89f02621b7815aa9cbb7af6", "content_id": "672eec8f3d3bae74b3a505947eb84df0ef4aa816", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6748, "license_type": "no_license", "max_line_length": 329, "num_lines": 120, "path": "/Linux/scripts/simulations.py", "repo_name": "RobertCurry/VarroaPopLinux", "src_encoding": "UTF-8", "text": "import argparse\nimport datetime\nimport multiprocessing as mp\nimport os\nimport re\nimport utilities\nimport subprocess\nfrom threading import Lock\n\n\ndef run_simulation(output_directory, command, configuration):\n if not configuration['Name'] and not configuration['Options']:\n raise Exception('Incorrect configuration object passed')\n output_file = configuration['Name'] + '.txt'\n output_filename = os.path.join(output_directory, output_file)\n sub_command = command + ' -o ' + output_filename + ' '\n if len(configuration['Options']) > 0:\n sub_command += ' '.join(['--{0} '.format(option) for option in configuration['Options']])\n # Measure simulation duration\n start_simulation = datetime.datetime.now()\n # Call simulation\n process = os.popen(sub_command)\n process.close()\n # Compute elapsed time and update total simulation time\n end_simulation = datetime.datetime.now()\n elapsed_time = end_simulation - start_simulation\n utilities.safe_print('\\tCommand :' + sub_command +\n '\\n\\tDuration (s):' + '%.2f' % elapsed_time.total_seconds())\n return elapsed_time.total_seconds()\n\n\ntotal_simulations_time = 0\n\n\ndef sum_simulation_time(result):\n global 
total_simulations_time\n total_simulations_time += result\n\n\nif __name__ == '__main__':\n\n parser = argparse.ArgumentParser(description='Generate plots for previously ran simulation.')\n parser.add_argument('--exe', type=str, help='Path of the VarroaPop command line application', required=True)\n parser.add_argument('--vrp', type=str, help='Path of the vrp file to use for simulations', required=True)\n parser.add_argument('--output_directory', type=str,\n help='Output files will be written in an autogenerated folder within OUT_DIR',\n metavar='OUT_DIR', required=True)\n parser.add_argument('--input_file', type=str, help='Run the simulation for the given input FILE', metavar='FILE',\n required=True)\n parser.add_argument('--weather_file', type=str, help='Use WEATHER_FILE to generate data', metavar='WEATHER_FILE')\n arguments = parser.parse_args()\n\n command = arguments.exe + ' -f -v ' + arguments.vrp + ' -i ' + arguments.input_file + ' --binaryWeatherFileFormat '\n command += utilities.parse_binary_format(arguments.input_file)\n if arguments.weather_file:\n command += ' -w ' + arguments.weather_file\n\n '''\n configurations = [\n {'Name': 'Current', 'Options': []},\n {'Name': 'AdultAgingWhenEggLaid', 'Options': ['adultAgingBasedOnLaidEggs']},\n {'Name': 'AdultAgingWhenEggLaidFixedLarvaeAndBrood', 'Options': ['adultAgingBasedOnLaidEggs', 'larvaeAndBroodBecomeBeesAfterAdultsStopedAging']},\n {'Name': 'AdultAgingWhenEggLaidFixedLarvaeAndBroodFixedAdults', 'Options': ['adultAgingBasedOnLaidEggs', 'larvaeAndBroodBecomeBeesAfterAdultsStopedAging', 'forageIncThresholdForAdultsAgingWhenLaidEggs 0.5']},\n {'Name': 'HourlyTemp', 'Options': ['hourlyTemperaturesEstimation']},\n {'Name': 'HourlyTempForageDayWindAndRain', 'Options': ['forageDayNoTemp', 'hourlyTemperaturesEstimation']},\n {'Name': 'ForagersFirst', 'Options': ['forageDayNoTemp', 'hourlyTemperaturesEstimation', 'pendingForagerFirst']},\n {'Name': 'AdultAgingAsForagers', 'Options': ['forageDayNoTemp', 
'hourlyTemperaturesEstimation', 'pendingForagerFirst', 'forageDayAdultBeesAging']},\n {'Name': 'AdultAgingAsForagersProgressiveForagers', 'Options': ['forageDayNoTemp', 'hourlyTemperaturesEstimation', 'pendingForagerFirst', 'forageDayAdultBeesAging', 'foragersFinerAging']},\n {'Name': 'ForagersAgingAdultAgingWhenEggLaid', 'Options': ['forageDayNoTemp', 'hourlyTemperaturesEstimation', 'pendingForagerFirst', 'forageDayAdultBeesAging', 'adultAgingBasedOnLaidEggs', 'larvaeAndBroodBecomeBeesAfterAdultsStopedAging']},\n {'Name': 'ForagersAgingAdultAgingWhenEggLaidProgressiveForagers', 'Options': ['forageDayNoTemp', 'hourlyTemperaturesEstimation', 'pendingForagerFirst', 'forageDayAdultBeesAging', 'adultAgingBasedOnLaidEggs', 'larvaeAndBroodBecomeBeesAfterAdultsStopedAging', 'foragersFinerAging']},\n {'Name': 'ForagersAgingAdultAgingWhenEggLaidFixedLarvaeAndBroodFixedAdults', 'Options': ['forageDayNoTemp', 'hourlyTemperaturesEstimation', 'pendingForagerFirst', 'forageDayAdultBeesAging', 'adultAgingBasedOnLaidEggs', 'larvaeAndBroodBecomeBeesAfterAdultsStopedAging', 'forageIncThresholdForAdultsAgingWhenLaidEggs 0.5']}\n ]\n '''\n\n # let's simulate for the different options we have\n configurations = [\n {'Name': 'Current', 'Options': []},\n {'Name': 'ForagersAging', 'Options': ['forageDayNoTemp', 'hourlyTemperaturesEstimation', 'foragersAlwaysAgeBasedOnForageInc']},\n {'Name': 'ForagersAgingAdultAgingWhenEggLaid', 'Options': ['forageDayNoTemp', 'hourlyTemperaturesEstimation', 'foragersAlwaysAgeBasedOnForageInc', 'adultAgingBasedOnLaidEggs']},\n {'Name': 'ForagersAgingAdultAgingWhenEggLaid_InOut', 'Options': ['forageDayNoTemp', 'hourlyTemperaturesEstimation', 'foragersAlwaysAgeBasedOnForageInc', 'adultAgingBasedOnLaidEggs', 'inOutEvents']}\n ]\n\n # set prefix for output data\n prefix = os.path.splitext(os.path.basename(arguments.input_file))[0]\n # add location information to prefix if not present\n if not re.search(r'\\d+.\\d+_-?\\d+.\\d+', prefix):\n if 
arguments.weather_file:\n prefix += '-' + os.path.basename(arguments.weather_file)\n else:\n raise Exception('Location information need to be present either in input_file name or weather_file ex:data_46.03125_-118.34375')\n\n output_directory = os.path.join(arguments.output_directory, prefix)\n if not os.path.exists(output_directory):\n os.makedirs(output_directory)\n\n print('Executing simulations for input file: ' + arguments.input_file);\n user_simulation_time = datetime.datetime.now()\n\n # Step 1: Init multiprocessing.Pool()\n pool = mp.Pool(mp.cpu_count())\n\n # Step 2: Use loop to parallelize\n for configuration in configurations:\n pool.apply_async(run_simulation, args=(output_directory, command, configuration), callback=sum_simulation_time)\n\n # Step 3: Don't forget to close\n pool.close()\n\n # Step 4: Wait for processes to complete\n pool.join()\n\n print('')\n print('Total duration (s):' + '%.2f' % (datetime.datetime.now()-user_simulation_time).total_seconds())\n print('Total duration accumulated (s):' + '%.2f' % total_simulations_time)\n\n #print('Generating plots')\n #plotter = plots.Plotter()\n # plotter.display_temperature_data = True\n #plotter.display_activity_ratio = True\n #plotter.do_plots(output_directory, prefix)\n\n\n\n" }, { "alpha_fraction": 0.6418803334236145, "alphanum_fraction": 0.6418803334236145, "avg_line_length": 18.847457885742188, "blob_id": "672fa61938a97e76ba75d6da11c05ff6910b21d9", "content_id": "72b5f84c41c9afbb031231495edbafd75efacf38", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1170, "license_type": "no_license", "max_line_length": 92, "num_lines": 59, "path": "/Linux/portcode/position.cpp", "repo_name": "RobertCurry/VarroaPopLinux", "src_encoding": "UTF-8", "text": "#include \"position.h\"\n\nPOSITION::POSITION() : m_position(nullptr)\n{\n}\n\nPOSITION::POSITION(const POSITION &other)\n{\n if (other.m_position != nullptr)\n {\n 
m_position.reset(other.m_position->copy());\n }\n}\n\nPOSITION::POSITION(const POSITION_PTR &other) : m_position()\n{\n if (other != nullptr)\n {\n m_position.reset(other->copy());\n }\n}\n\nPOSITION::POSITION(const std::nullptr_t &other)\n : m_position(other)\n{\n}\n\nPOSITION &POSITION::operator=(const POSITION &other)\n{\n if (this != &other)\n {\n POSITION_PTR::pointer value = other.m_position ? other.m_position->copy() : nullptr;\n m_position.reset(value);\n }\n return *this;\n}\n\nPOSITION &POSITION::operator=(const POSITION_PTR &other)\n{\n POSITION_PTR::pointer value = other ? other->copy() : nullptr;\n m_position.reset(value);\n return *this;\n}\n\nPOSITION &POSITION::operator=(const std::nullptr_t &other)\n{\n m_position.reset(other);\n return *this;\n}\n\nbool POSITION::operator==(const POSITION &other) const\n{\n return m_position == other.m_position;\n}\n\nbool POSITION::operator!=(const POSITION &other) const\n{\n return m_position != other.m_position;\n}" }, { "alpha_fraction": 0.7443303465843201, "alphanum_fraction": 0.769576370716095, "avg_line_length": 46.693878173828125, "blob_id": "5f7ae7abe0187d3b0020221ba4aa19e779558e58", "content_id": "5a3396e01c7c02afe3e47c21a5ef7f7018aa536c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 4674, "license_type": "no_license", "max_line_length": 205, "num_lines": 98, "path": "/Linux/README.md", "repo_name": "RobertCurry/VarroaPopLinux", "src_encoding": "UTF-8", "text": "Created by Julien Pireaud \n\nThis file is to guide you through compile VarroaPop simulator on Linux / macOS\n\n## Compilation\n\nRun setup.py to prepare the Linux folder to be ready to build\n\nFor C++ dependency management we use Hunter package manager.\n\nTo compile in release:\n\n1. create a directory FOO\n1. move in FOO directory\n1. execute `python ../setup.py` to generate appropriate CMakeLists.txt file for datamodel\n1. 
prepare cmake folder `cmake -DCMAKE_BUILD_TYPE=Release ..`\n1. cd to parent of FOO directory\n1. call `cmake --build FOO --target VarroaPop -- -j <number of threads>`\n1. call program `./FOO/VarroaPop -h`\n\nTo time an execution you can use:\n\n- On Linux: `time ./<build>/VarroaPop -d VarroaPop/files/ -v exe/default.vrp -o output/vp_output_linux.txt -i input/vp_linux.txt -f`\n- On Windows (PowerShell): `Measure-Command {start-process Release\\VarroaPop.exe -argumentlist \"exe\\default.vrp /b /or output\\vp_output_win.txt /i input\\vp_input.txt\" -wait}` \n\n## Run Simulations\n\nThe options that we keep for now are:\n - `-f` to force overwriting existing files\n - `-v simplified.vrp` minimal VRP file\n - `-i rcp85-input.txt` \n - `--binaryWeatherFileFormat Rcp85` specifies the binary format of the weather file Observed|Modeled|Rcp85|Rcp45 \n - `-w data_46.03125_-118.34375` weather file \n - `-o output.txt` output file \n - `--forageDayNoTemp --hourlyTemperaturesEstimation --foragersAlwaysAgeBasedOnForageInc --adultAgingBasedOnLaidEggs --inOutEvents` options to properly run the simulations with the selected aging model\n\n## Helper scrips \n\n- `run-simulations.py` will run simulations for omak, wenatchee, walla-walla with historical and future weather simulations. Make sure the path to executable in the script is correct.\n- `scripts/plots.py` sample script which aim to automate plots generation, this is out of date but is a good source of inspiration\n- `scripts/simulations.py` sample script to call VarroaPop simulations with different sets of options\n\n## Weather format supported \n\n- ObservedHistorical starting 1/1/1979 \n- ModeledHistorical starting 1/1/1950\n- Rcp85 starting 1/1/2006\n- Rcp45 starting 1/1/2006\n\n## Changes made to the original VarroaPop codebase\n\n### Changes to existing codebase\n\n- Adult\n - Changed current age from being an Integer to a Float number. 
Currently the m_CurrentAge attribute is not used, but it would be useful if we want to use it and age Adults with a portion of a day;\n - Added missing attributes initialisation in constructors.\n- Beed\n - Added missing attributes initialisation in constructor.\n- Colony\n - Added a CAdultList::Add method to be able to add new Adults to the first box card without making other Adults age;\n - Added InOutEvent structure to add addtional output data on what's moved from one aging structure to the other;\n - Changed Foragers aging structure with `--foragerAgingBasedOnHourlyTemperatureEstimate`.\n - Changed Adult aging structure with `--adultAgingBasedOnLaidEggs` option.\n- Queen\n - Extracted L daylight hours based component of the egg laying equation in CQueen::ComputeL method.\n- VarroaPopSession\n - Enhanced output formating by extracting parameter specification and adding delimiter afterwards.\n- WeatherEvents\n - Added ability to load binary data from WSU grid weather data;\n - Added a common utility function UpdateForageAttributeForEvent to set m_ForageDay and m_ForageInc with the same logic for all types of input weather data;\n - Added functionnality to CEvent GetTemp, GetMaxTemp, GetMinTemp, IsForageDay and GetForageInc methods to behave correctly in the case we activated the CColdStorageSimulator;\n - Fixed bad interpretation of Rainfall and Windspeed for WSU grid weather files;\n - Made Windspeed and Rainfall threasholds customizable.\n\n### Addition to existing codebase\n\n- GlobalOptions\n - Singleton object used to set global parameters used to control behavior of simulation.\n- ColdStorageSimulator\n - Singleton object used to simulate cold storage.\n- WeatherGridData\n - Class used to load different WSU weather grid data files.\n\n\n## Locations\n\n- Walla Walla: 46.03125,-118.34375\n- Richland: 46.28125, -119.34375\n- Wenatchee: 47.40625, -120.34375\n- Omak: 48.40625, -119.53125\n\n\n## Roadmap\n\n- Change the POSITION wrapper which makes allocations 
and I think there is a better way to do\n- Change the Implementation of the COleDateTime which is not portable on Windows (not using Windows SDK) by using Boost::DateTime library\n- Enhance Binary serialization feedback to reject invalid files (check if eof is reached in >> operators)\n- Add a logger to have several logging levels INFO/WARNING/ERROR so that it can be easily disabled\n" }, { "alpha_fraction": 0.6528986692428589, "alphanum_fraction": 0.6591394543647766, "avg_line_length": 37.295597076416016, "blob_id": "fd468a1468e8199de5ade91a41d1b2fa46ba3e18", "content_id": "5b7f613f829bbf693f895f5e0b63491eb7b1d432", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 12178, "license_type": "no_license", "max_line_length": 281, "num_lines": 318, "path": "/Linux/main.cpp", "repo_name": "RobertCurry/VarroaPopLinux", "src_encoding": "UTF-8", "text": "#include \"coldstoragesimulator.h\"\n#include \"globaloptions.h\"\n#include \"varroapopcmdbridge.h\"\n\n#include \"stdafx.h\"\n\n#include <boost/filesystem.hpp>\nnamespace bfs = boost::filesystem;\n\n#include <cxxopts.hpp>\n\n#include <set>\n\nvoid LoadVRPFile(CVarroaPopSession& session, const CString& filename);\n\nint main(int argc, char** argv)\n{\n\tcxxopts::Options options(\"VarroaPop\", \"Command Line version of the VarroaPop app\");\n\n\t// Here we omit the log file since it is not used in the VarroaPop app\n\n\toptions.add_options(\"usage\")\n\t\t(\"i,input_file\", \"Input File: Contains initialization data for simulation values\", cxxopts::value<std::string>())\n\t\t(\"o,output_file\", \"Output File: Simulation results will be saved at this location\", cxxopts::value<std::string>())\n\t\t(\"v,vrp_file\", \"[optional] VRP File: If specified the VRP file will be parsed to initialize default simulation values\", cxxopts::value<std::string>())\n\t\t(\"w,weather_file\", \"[optional] Weather File: Simulation will use this weather file instead of the one in the input 
file\", cxxopts::value<std::string>())\n\t\t(\"d,working_directory\", \"[optional] Working Directory: If specified all path provided are relative this path\", cxxopts::value<std::string>())\n\t\t(\"f,force\", \"Force overwrite of output file if it exists\", cxxopts::value<bool>()->default_value(\"false\"))\n\t\t(\"forageDayNoTemp\", \"A forage day is computed only using wind and rain for a given day\", cxxopts::value<bool>()->default_value(\"false\"))\n\t\t(\"hourlyTemperaturesEstimation\", \"Compute hourly temperatures estimation\", cxxopts::value<bool>()->default_value(\"false\"))\n\t\t(\"foragersAlwaysAgeBasedOnForageInc\", \"All new foragers go first in the pending foragers list to improve aging process\", cxxopts::value<bool>()->default_value(\"false\"))\n\t\t(\"adultAgingBasedOnLaidEggs\", \"Adults age only if the Queen is laying eggs\", cxxopts::value<bool>()->default_value(\"false\"))\n\t\t(\"binaryWeatherFileFormat\", \"Specifies the binary format of the weather file (Observed|Modeled|Rcp85|Rcp45)\", cxxopts::value<std::string>())\n\t\t(\"windspeed\", \"Specifies the windspeed threshold after which the current day will not be considered as Forage Day\", cxxopts::value<double>())\n\t\t(\"rainfall\", \"Specifies the rainfall threshold after which the current day will not be considered as Forage Day\", cxxopts::value<double>())\n\t\t(\"daylighthours\", \"Specifies the DaylightHours threshold after which the Queen stop laying eggs, default is 9.5\", cxxopts::value<double>())\n\t\t(\"inOutEvents\", \"Output additional information adding the following data -- NewWEggs NewDEggs WEggsToLarv DEggsToLarv WLarvToBrood DLarvToBrood WBroodToAdult DBroodToAdult WAdultToForagers WinterMortalityForagersLoss DeadForagers\", cxxopts::value<bool>()->default_value(\"false\"))\n\t\t(\"coldStorage\", \"Activate cold storage\", cxxopts::value<bool>()->default_value(\"false\"))\n\t\t(\"coldStorageStartDate\", \"Date at which colony is placed in cold storage with format MM/DD\", 
cxxopts::value<std::string>())\n\t\t(\"coldStorageEndDate\", \"Date at which colony is removed from cold storage with format MM/DD\", cxxopts::value<std::string>())\n\t\t;\n\n\toptions.add_options(\"help\")\n\t(\"h,help\", \"Displays help message\")\n\t(\"u,usage\", \"Displays help message\")\n\t;\n\n\tbfs::path inputFile, outputFile, vrpFile, weatherFile;\n\n\ttry\n\t{\n\t\tauto arguments = options.parse(argc, argv);\n\n\t\tif (arguments.count(\"i\") == 1 && arguments.count(\"o\") == 1)\n\t\t{\n\t\t\t// store input and output file in boost paths\n\t\t\tinputFile = bfs::path(arguments[\"i\"].as<std::string>());\n\t\t\toutputFile = bfs::path(arguments[\"o\"].as<std::string>());\n\n\t\t\t// store vrpFile if present for working directory handling\n\t\t\tif (arguments.count(\"v\") == 1)\n\t\t\t{\n\t\t\t\tvrpFile = bfs::path(arguments[\"v\"].as<std::string>());\n\t\t\t}\n\t\t\t\n\t\t\t// store weatherFile if present for working directory handling\n\t\t\tif (arguments.count(\"w\") == 1)\n\t\t\t{\n\t\t\t\tweatherFile = bfs::path(arguments[\"w\"].as<std::string>());\n\t\t\t}\n\n\t\t\t// handle working directory is specified\n\t\t\tbfs::path workingDirectory;\n\t\t\tif (arguments.count(\"d\") == 1)\n\t\t\t{\n\t\t\t\tworkingDirectory = bfs::path(arguments[\"d\"].as<std::string>());\n\t\t\t}\n\t\t\tif (!workingDirectory.empty())\n\t\t\t{\n\t\t\t\tif (bfs::exists(workingDirectory))\n\t\t\t\t{\n\t\t\t\t\tinputFile = workingDirectory / inputFile;\n\t\t\t\t\toutputFile = workingDirectory / outputFile;\n\t\t\t\t\tif (!vrpFile.empty())\n\t\t\t\t\t\tvrpFile = workingDirectory / vrpFile;\n\t\t\t\t\tif (!weatherFile.empty())\n\t\t\t\t\t\tweatherFile = workingDirectory / weatherFile;\n\t\t\t\t}\n\t\t\t\telse\n\t\t\t\t{\n\t\t\t\t\tstd::cerr << \"working directory \" << workingDirectory.string() << \" does not exists\" << std::endl;\n\t\t\t\t\treturn -1;\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t// check input files existance\n\t\t\tbool error = false;\n\t\t\tif 
(!bfs::exists(inputFile))\n\t\t\t{\n\t\t\t\tstd::cerr << \"input file \" << inputFile.string() << \" does not exists\" << std::endl;\n\t\t\t\terror = true;\n\t\t\t}\n\t\t\tif (!vrpFile.empty() && !bfs::exists(vrpFile))\n\t\t\t{\n\t\t\t\tstd::cerr << \"vrp file \" << vrpFile.string() << \" does not exists\" << std::endl;\n\t\t\t\terror = true;\n\t\t\t}\n\t\t\tif (!weatherFile.empty() && !bfs::exists(weatherFile))\n\t\t\t{\n\t\t\t\tstd::cerr << \"weather file \" << weatherFile.string() << \" does not exists\" << std::endl;\n\t\t\t\terror = true;\n\t\t\t}\n\n\t\t\t// prevent overwriting output file\n\t\t\tauto outputDirectory = outputFile.parent_path();\n\t\t\tif (!bfs::exists(outputFile))\n\t\t\t{\n\t\t\t\tif (!outputDirectory.empty() && !bfs::exists(outputDirectory))\n\t\t\t\t{\n\t\t\t\t\tif (!bfs::create_directories(outputDirectory))\n\t\t\t\t\t{\n\t\t\t\t\t\t// it is possible that the given directory was created by another processus so\n\t\t\t\t\t\t// let's check if the directory is created in which case no need to set the error\n\t\t\t\t\t\tif (!bfs::exists(outputDirectory))\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tstd::cerr << \"cannot create output directory \" << outputDirectory.string() << std::endl;\n\t\t\t\t\t\t\terror = true;\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\telse if (arguments.count(\"f\") == 0)\n\t\t\t{\n\t\t\t\tstd::cerr << \"output file \" << outputFile.string() << \" already exists. Do you want to overwrite it? 
[y/n]: \";\n\t\t\t\tchar answer = 0;\n\t\t\t\tstd::cin >> answer;\n\t\t\t\tconst std::set<char> validAnswer = {'n', 'N', 'y', 'Y', 'q', 'Q'};\n\t\t\t\twhile (validAnswer.find(answer) == validAnswer.end())\n\t\t\t\t{\n\t\t\t\t\tstd::cerr << \"answer only by y or n (q for exit): \";\n\t\t\t\t\tstd::cin >> answer;\n\t\t\t\t}\n\t\t\t\tif (answer == 'q' || answer == 'Q')\n\t\t\t\t{\n\t\t\t\t\treturn 0;\n\t\t\t\t}\n\t\t\t\telse if (answer == 'n' || answer == 'N')\n\t\t\t\t{\n\t\t\t\t\tauto increment = 2;\n\t\t\t\t\tauto outputFilename = outputFile.filename();\n\t\t\t\t\tauto outputExtension = outputFilename.extension();\n\t\t\t\t\tstd::stringstream replaceString;\n\n\t\t\t\t\t// remove extension temporarely\n\t\t\t\t\toutputFilename.replace_extension(\"\");\n\n\t\t\t\t\t// handle increment ouput file (i) index if any\n\t\t\t\t\tdo\n\t\t\t\t\t{\n\t\t\t\t\t\t// try to execute replace regex\n\t\t\t\t\t\tconst std::regex digitRegex(\"(\\\\(\\\\d+\\\\))\");\n\t\t\t\t\t\tconst std::regex digitReplaceRegex(\"\\\\(\\\\d+\\\\)\");\n\t\t\t\t\t\tconst std::string filename = outputFilename.string();\n\t\t\t\t\t\tstd::smatch digitMatch;\n\t\t\t\t\t\tif (std::regex_search(filename, digitMatch, digitRegex))\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tincrement = std::atoi(digitMatch[digitMatch.size()-1].str().c_str()+1);\n\t\t\t\t\t\t\treplaceString << \"(\" << ++increment << \")\";\n\t\t\t\t\t\t\toutputFilename = std::regex_replace(outputFilename.string(), digitReplaceRegex, replaceString.str());\n\t\t\t\t\t\t}\n\t\t\t\t\t\telse\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\treplaceString << \"(\" << increment++ << \")\";\n\t\t\t\t\t\t\toutputFilename = outputFilename.string() + replaceString.str();\n\t\t\t\t\t\t}\n\t\t\t\t\t\treplaceString.str(\"\");\n\t\t\t\t\t\t\n\t\t\t\t\t\t// restore extension temporarely\n\t\t\t\t\t\toutputFile = outputDirectory / outputFilename ;\n\t\t\t\t\t\toutputFile.replace_extension(outputExtension);\t\n\t\t\t\t\t} \n\t\t\t\t\twhile (bfs::exists(outputFile));\n\n\t\t\t\t\tstd::cout << 
\"output file will be \" << outputFile.string() << std::endl;\n\t\t\t\t}\n\t\t\t}\n\t\t\tif (arguments.count(\"forageDayNoTemp\") == 1)\n\t\t\t{\n\t\t\t\tGlobalOptions::Get().ShouldForageDayElectionBasedOnTemperatures.Set(!arguments[\"forageDayNoTemp\"].as<bool>());\n\t\t\t}\n\t\t\tif (arguments.count(\"hourlyTemperaturesEstimation\") == 1)\n\t\t\t{\n\t\t\t\tGlobalOptions::Get().ShouldComputeHourlyTemperatureEstimation.Set(arguments[\"hourlyTemperaturesEstimation\"].as<bool>());\n\t\t\t}\n\t\t\tif (arguments.count(\"foragersAlwaysAgeBasedOnForageInc\") == 1)\n\t\t\t{\n\t\t\t\tGlobalOptions::Get().ShouldForagersAlwaysAgeBasedOnForageInc.Set(arguments[\"foragersAlwaysAgeBasedOnForageInc\"].as<bool>());\n\t\t\t}\n\t\t\tif (arguments.count(\"binaryWeatherFileFormat\") == 1)\n\t\t\t{\n\t\t\t\tGlobalOptions::Get().BinaryWeatherFileFormatIdentifier.Set(arguments[\"binaryWeatherFileFormat\"].as<std::string>());\n\t\t\t}\n\t\t\tif (arguments.count(\"adultAgingBasedOnLaidEggs\") == 1)\n\t\t\t{\n\t\t\t\tGlobalOptions::Get().ShouldAdultsAgeBasedLaidEggs.Set(arguments[\"adultAgingBasedOnLaidEggs\"].as<bool>());\n\t\t\t}\n\t\t\tif (arguments.count(\"windspeed\") == 1)\n\t\t\t{\n\t\t\t\tGlobalOptions::Get().WindspeedThreshold.Set(arguments[\"windspeed\"].as<double>());\n\t\t\t}\n\t\t\tif (arguments.count(\"rainfall\") == 1)\n\t\t\t{\n\t\t\t\tGlobalOptions::Get().RainfallThreshold.Set(arguments[\"rainfall\"].as<double>());\n\t\t\t}\n\t\t\tif (arguments.count(\"daylighthours\") == 1)\n\t\t\t{\n\t\t\t\tGlobalOptions::Get().DaylightHoursThreshold.Set(arguments[\"daylighthours\"].as<double>());\n\t\t\t}\n\t\t\tif (arguments.count(\"inOutEvents\") == 1)\n\t\t\t{\n\t\t\t\tGlobalOptions::Get().ShouldOutputInOutCounts.Set(arguments[\"inOutEvents\"].as<bool>());\n\t\t\t}\n\t\t\tif (arguments.count(\"coldStorage\") == 1)\n\t\t\t{\n\t\t\t\tCColdStorageSimulator::Get().SetEnabled(arguments[\"coldStorage\"].as<bool>());\n\n\t\t\t\tstd::regex 
validateDate(\"(0?[1-9]|1[0-2])/(0?[1-9]|[1-2][0-9]|3[0-1])\");\n\t\t\t\tstd::smatch dateElements;\n\t\t\t\tif (arguments.count(\"coldStorageStartDate\") == 1 && arguments.count(\"coldStorageEndDate\") == 1)\n\t\t\t\t{\n\t\t\t\t\tint startMonth=-1, startDay=-1, endMonth=-1, endDay=-1;\n\t\t\t\t\tstd::string startDate = arguments[\"coldStorageStartDate\"].as<std::string>();\n\t\t\t\t\tif (std::regex_match(startDate, dateElements, validateDate) && dateElements.size() == 3 && startDate == *dateElements.begin())\n\t\t\t\t\t{\n\t\t\t\t\t\tstartMonth = std::stoi(*(dateElements.begin() + 1));\n\t\t\t\t\t\tstartDay = std::stoi(*(dateElements.begin() + 2));\n\t\t\t\t\t}\n\t\t\t\t\telse\n\t\t\t\t\t{\n\t\t\t\t\t\tstd::cerr << \"coldStorageStartDate is not specified correctly \" << startDate << \" expected MM/DD\" << std::endl;\n\t\t\t\t\t\terror = true;\n\t\t\t\t\t}\n\n\t\t\t\t\tstd::string endDate = arguments[\"coldStorageEndDate\"].as<std::string>();\n\t\t\t\t\tif (std::regex_match(endDate, dateElements, validateDate) && dateElements.size() == 3 && endDate == *dateElements.begin())\n\t\t\t\t\t{\n\t\t\t\t\t\tendMonth = std::stoi(*(dateElements.begin() + 1));\n\t\t\t\t\t\tendDay = std::stoi(*(dateElements.begin() + 2));\n\t\t\t\t\t}\n\t\t\t\t\telse\n\t\t\t\t\t{\n\t\t\t\t\t\tstd::cerr << \"coldStorageEndDate is not specified correctly \" << endDate << \" expected MM/DD\" << std::endl;\n\t\t\t\t\t\terror = true;\n\t\t\t\t\t}\n\t\t\t\t\tif (startMonth != -1 && startDay != -1 && endMonth != -1 && endDay != -1)\n\t\t\t\t\t{\n\t\t\t\t\t\t// In the case of the cold storage year is not relevant, let's set it as the default first year\n\t\t\t\t\t\tCColdStorageSimulator::Get().SetStartDate(COleDateTime(1970, startMonth, startDay, 0, 0, 0));\n\t\t\t\t\t\tCColdStorageSimulator::Get().SetEndDate(COleDateTime(1970, endMonth, endDay, 0, 0, 0));\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\telse if (arguments.count(\"coldStorageStartDate\") != 
arguments.count(\"coldStorageEndDate\"))\n\t\t\t\t{\n\t\t\t\t\tstd::cerr << \"coldStorageStartDate and coldStorageEndDate should be specified together in order for the cold storage to work correctly \" << std::endl;\n\t\t\t\t\terror = true;\n\t\t\t\t}\n\t\t\t}\n\t\t\tif (error)\n\t\t\t{\n\t\t\t\treturn -1;\n\t\t\t}\n\t\t}\n\t\telse\n\t\t{\n\t\t\tstd::cout << options.help({\"usage\"}) << std::endl;\n\t\t}\n\t}\n\tcatch (const std::exception &e)\n\t{\n\t\tstd::cerr << e.what() << std::endl;\n\t}\n\n\t// now that we processed input arguments let's proceed with the simulation\n\n CVarroaPopSession session;\n\n\t// setup bridge\n\n\tVarroaPopCmdBridge bridge (session);\n\tbridge.SetResultsFileName(outputFile.string().c_str());\n\n\tsession.SetBridge(&bridge);\n\n\tif (!vrpFile.empty())\n\t{\n\t\tLoadVRPFile(session, vrpFile.string().c_str()); \n\t}\n\n\t// Read input file\n\tsession.ProcessInputFile(inputFile.string().c_str());\n\n\tif (!weatherFile.empty())\n\t{\n\t\tsession.LoadWeatherFile(weatherFile.string().c_str()); \n\t}\n\n\t// Run simulation\n\tsession.Simulate();\n\n\treturn 0;\n}\n\nvoid LoadVRPFile(CVarroaPopSession& session, const CString& filename)\n{\n\tCStdioFile file(filename, CFile::modeRead | CFile::typeBinary);\n\tCArchive archive(&file, CArchive::load);\n\tsession.Serialize(archive); \n}\n" }, { "alpha_fraction": 0.7304964661598206, "alphanum_fraction": 0.7304964661598206, "avg_line_length": 15.588234901428223, "blob_id": "8f35df14cfc99ca568e6bd7cd13f331fb5bdceb1", "content_id": "0bf2c130507a4e4c17d4ce58937fc715e7aeffaf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 282, "license_type": "no_license", "max_line_length": 75, "num_lines": 17, "path": "/Linux/portcode/cobject.h", "repo_name": "RobertCurry/VarroaPopLinux", "src_encoding": "UTF-8", "text": "#pragma once\n#ifndef COBJECT_CUSTOM_H\n#define COBJECT_CUSTOM_H\n\nclass CArchive;\n\n/**\n * Only supports the necessary interface for the 
good behavior of VarroaPop\n */\nclass CObject\n{\npublic:\n\tvirtual ~CObject() {}\n\tvirtual void Serialize(CArchive& ar);\n};\n\n#endif // COBJECT_CUSTOM_H\n" }, { "alpha_fraction": 0.6896969676017761, "alphanum_fraction": 0.6915151476860046, "avg_line_length": 26.5, "blob_id": "dd330bb6167839e5d1fa682d62550fdec526ae1b", "content_id": "902f5bbc11e71d2eda4c855c17083a43f974161a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 3300, "license_type": "no_license", "max_line_length": 128, "num_lines": 120, "path": "/ColdStorageSimulator.cpp", "repo_name": "RobertCurry/VarroaPopLinux", "src_encoding": "UTF-8", "text": "#include \"stdafx.h\"\n\n#include \"ColdStorageSimulator.h\"\n#include \"Colony.h\"\n#include \"Egg.h\"\n#include \"Queen.h\"\n#include \"WeatherEvents.h\"\n\n\n//////////////////////////////////////////////////////////////////////\n// CColdStorageSimulator Member Functions\n//////////////////////////////////////////////////////////////////////\ndouble CColdStorageSimulator::GetTemp(CEvent& p_Event) const\n{\n\tdouble temperature = p_Event.GetTemp();\n\tif (IsActive())\n\t\ttemperature = m_Temperature;\n\treturn temperature;\n}\ndouble CColdStorageSimulator::GetMaxTemp(CEvent& p_Event) const\n{\n\tdouble temperature = p_Event.GetMaxTemp();\n\tif (IsActive())\n\t\ttemperature = m_Temperature;\n\treturn temperature;\n}\ndouble CColdStorageSimulator::GetMinTemp(CEvent& p_Event) const\n{\n\tdouble temperature = p_Event.GetMinTemp();\n\tif (IsActive())\n\t\ttemperature = m_Temperature;\n\treturn temperature;\n}\ndouble CColdStorageSimulator::GetForageInc(CEvent& p_Event) const\n{\n\tdouble forageInc = p_Event.GetForageInc();\n\tif (IsActive())\n\t\tforageInc = 0.0;\n\treturn forageInc;\n}\nbool CColdStorageSimulator::IsForageDay(CEvent& p_Event) const\n{\n\tbool forageDay = p_Event.IsForageDay();\n\tif (IsActive())\n\t\tforageDay = false;\n\treturn forageDay;\n}\nvoid 
CColdStorageSimulator::SetStartDate(const COleDateTime& startDate)\n{\n\tm_StartDate = startDate;\n\tm_StartDateStr = m_StartDate.Format(\"%m%d\");\n}\nvoid CColdStorageSimulator::SetEndDate(const COleDateTime& endDate)\n{\n\tm_EndDate = endDate;\n\tm_EndDateStr = m_EndDate.Format(\"%m%d\");\n}\nvoid CColdStorageSimulator::Update(CEvent& p_Event, CColony& p_Colony)\n{\n\tbool isActive = IsEnabled() && (IsOn() || IsColdStoragePeriod(p_Event));\n\tif (!m_IsActive && isActive)\n\t{\n\t\tm_IsStarting = true;\n\t}\n\t// Starting phase is when the bees are placed in cold storage \n\t// and there are still brood that needs to become Adult\n\tif (m_IsStarting)\n\t{\n\t\tconst bool starting = p_Colony.queen.GetTeggs() > 0 || p_Colony.CapDrn.GetQuantity() > 0 || p_Colony.CapWkr.GetQuantity() > 0;\n\t\tm_IsStarting = starting;\n\t}\n\t// Ending phase is when the queen starts to lay eggs again while in cold storage\n\tif (isActive && !m_IsStarting && p_Colony.queen.GetTeggs() > 0)\n\t{\n\t\tm_IsEnding = true;\n\t}\n\tif (!isActive)\n\t{\n\t\tm_IsStarting = false;\n\t\tm_IsEnding = false;\n\t}\n\tm_IsActive = isActive;\n}\nvoid CColdStorageSimulator::Reset()\n{\n\tm_Enabled = false;\n\tm_StartDate = COleDateTime();\n\tm_EndDate = COleDateTime();\n\tm_On = false;\n\tm_Temperature = GetDefaultColdStorageTemperature();\n\tm_StartDateStr = \"\";\n\tm_EndDateStr = \"\";\n\tm_IsActive = false;\n\tm_IsStarting = false;\n\tm_IsEnding = false;\n}\nbool CColdStorageSimulator::IsActive() const\n{\n\treturn m_IsActive;\n}\nbool CColdStorageSimulator::IsStarting() const\n{\n\treturn IsActive() && m_IsStarting;\n}\nbool CColdStorageSimulator::IsEnding() const\n{\n\treturn IsActive() && m_IsEnding;\n}\nbool CColdStorageSimulator::IsColdStoragePeriod(CEvent& p_Event) const\n{\n\t// check that SetStartDate and SetEndDate were called\n\tif (m_StartDateStr.empty() || m_EndDateStr.empty())\n\t{\n\t\treturn false;\n\t}\n\n\tconst std::string currentDateStr = (const 
char*)p_Event.GetTime().Format(\"%m%d\");\n\treturn (m_StartDateStr >= m_EndDateStr && (currentDateStr >= m_StartDateStr || currentDateStr <= m_EndDateStr))\n\t\t|| (m_StartDateStr <= m_EndDateStr && currentDateStr >= m_StartDateStr && currentDateStr <= m_EndDateStr);\n}\n" }, { "alpha_fraction": 0.5899307131767273, "alphanum_fraction": 0.6313706040382385, "avg_line_length": 32.67916488647461, "blob_id": "0e31a80fe93969a40afacf30fc3fa49613abe74d", "content_id": "36620bd87d0579dabc365abfc6e2664c1989e540", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "R", "length_bytes": 8084, "license_type": "no_license", "max_line_length": 211, "num_lines": 240, "path": "/Simulations/NewForagingModel.R", "repo_name": "RobertCurry/VarroaPopLinux", "src_encoding": "UTF-8", "text": "library(data.table)\nlibrary(dplyr)\nlibrary(tidyverse)\nlibrary(lubridate)\nlibrary(ggplot2)\nlibrary(chillR)\nlibrary(geosphere)\nlibrary(suncalc)\n\noptions(digits=9)\noptions(digit=9)\n\nread_binary <- function(file_path, hist, no_vars){\n ###### The modeled historical is in /data/hydro/jennylabcommon2/metdata/maca_v2_vic_binary/\n ###### modeled historical is equivalent to having 4 variables, and years 1950-2005\n ######\n ###### The observed historical is in \n ###### /data/hydro/jennylabcommon2/metdata/historical/UI_historical/VIC_Binary_CONUS_to_2016\n ###### observed historical is equivalent to having 8 variables, and years 1979-2016\n ######\n if (hist) {\n if (no_vars==4){\n start_year <- 1950\n end_year <- 2005\n } else {\n start_year <- 1979\n end_year <- 2015\n }\n } else{\n start_year <- 2006\n end_year <- 2099\n }\n ymd_file <- create_ymdvalues(start_year, end_year)\n data <- read_binary_addmdy(file_path, ymd_file, no_vars)\n return(data)\n}\n\nread_binary_addmdy <- function(filename, ymd, no_vars){\n if (no_vars==4){\n return(read_binary_addmdy_4var(filename, ymd))\n } else {return(read_binary_addmdy_8var(filename, ymd))}\n}\n\nread_binary_addmdy_8var <- 
function(filename, ymd){\n Nofvariables <- 8 # number of variables or column in the forcing data file\n Nrecords <- nrow(ymd)\n ind <- seq(1, Nrecords * Nofvariables, Nofvariables)\n fileCon <- file(filename, \"rb\")\n temp <- readBin(fileCon, integer(), size = 2, n = Nrecords * Nofvariables,\n endian = \"little\")\n dataM <- matrix(0, Nrecords, 8)\n k <- 1\n dataM[1:Nrecords, 1] <- temp[ind] / 40.00 # precip data\n dataM[1:Nrecords, 2] <- temp[ind + 1] / 100.00 # Max temperature data\n dataM[1:Nrecords, 3] <- temp[ind + 2] / 100.00 # Min temperature data\n dataM[1:Nrecords, 4] <- temp[ind + 3] / 100.00 # Wind speed data\n dataM[1:Nrecords, 5] <- temp[ind + 4] / 10000.00 # SPH\n dataM[1:Nrecords, 6] <- temp[ind + 5] / 40.00 # SRAD\n dataM[1:Nrecords, 7] <- temp[ind + 6] / 100.00 # Rmax\n dataM[1:Nrecords, 8] <- temp[ind + 7] / 100.00 # RMin\n AllData <- cbind(ymd, dataM)\n # calculate daily GDD ...what? There doesn't appear to be any GDD work?\n colnames(AllData) <- c(\"year\", \"month\", \"day\", \"precip\", \"tmax\", \"tmin\",\n \"windspeed\", \"SPH\", \"SRAD\", \"Rmax\", \"Rmin\")\n close(fileCon)\n return(AllData)\n}\n\nread_binary_addmdy_4var <- function(filename, ymd) {\n Nofvariables <- 4 # number of variables or column in the forcing data file\n Nrecords <- nrow(ymd)\n ind <- seq(1, Nrecords * Nofvariables, Nofvariables)\n fileCon <- file(filename, \"rb\")\n temp <- readBin(fileCon, integer(), size = 2, n = Nrecords * Nofvariables,\n endian=\"little\")\n dataM <- matrix(0, Nrecords, 4)\n k <- 1\n dataM[1:Nrecords, 1] <- temp[ind] / 40.00 # precip data\n dataM[1:Nrecords, 2] <- temp[ind + 1] / 100.00 # Max temperature data\n dataM[1:Nrecords, 3] <- temp[ind + 2] / 100.00 # Min temperature data\n dataM[1:Nrecords, 4] <- temp[ind + 3] / 100.00 # Wind speed data\n \n AllData <- cbind(ymd, dataM)\n # calculate daily GDD ...what? 
There doesn't appear to be any GDD work?\n colnames(AllData) <- c(\"year\", \"month\", \"day\", \"precip\", \"tmax\", \"tmin\",\n \"windspeed\")\n close(fileCon)\n return(AllData)\n}\n\ncreate_ymdvalues <- function(data_start_year, data_end_year){\n Years <- seq(data_start_year, data_end_year)\n nYears <- length(Years)\n daycount_in_year <- 0\n moncount_in_year <- 0\n yearrep_in_year <- 0\n \n for (i in 1:nYears){\n ly <- leap_year(Years[i])\n if (ly == TRUE){\n days_in_mon <- c(31, 29, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31)\n }\n else{\n days_in_mon <- c(31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31)\n }\n \n for (j in 1:12){\n daycount_in_year <- c(daycount_in_year, seq(1, days_in_mon[j]))\n moncount_in_year <- c(moncount_in_year, rep(j, days_in_mon[j]))\n yearrep_in_year <- c(yearrep_in_year, rep(Years[i], days_in_mon[j]))\n }\n }\n \n daycount_in_year <- daycount_in_year[-1] #delete the leading 0\n moncount_in_year <- moncount_in_year[-1]\n yearrep_in_year <- yearrep_in_year[-1]\n ymd <- cbind(yearrep_in_year, moncount_in_year, daycount_in_year)\n colnames(ymd) <- c(\"year\", \"month\", \"day\")\n return(ymd)\n}\n\nfilePathPrefix <- ''\nfile <- ''\nlocation <- 'Richland'\nhist <- TRUE\nvars <- 8\n\nif (location == 'Omak')\n file <- 'data_48.40625_-119.53125'\nif (location == 'Wenatchee')\n file <- 'data_47.40625_-120.34375'\nif (location == 'Richland')\n file <- 'data_46.28125_-119.34375'\nif (location == 'WallaWalla')\n file <- 'data_46.03125_-118.34375'\n\nif (hist) {\n filePathPrefix <- \"D:/Coursework/PACCAR/VarroaPOP/Beepop/Simulations/ObservedHistoricalBinary/\"\n} else {\n filePathPrefix <- \"D:/Coursework/PACCAR/VarroaPOP/Beepop/Simulations/Rcp85Binary/\"\n vars <- 4\n}\n \noutputfilePath <- paste0('D:/Coursework/PACCAR/VarroaPOP/Beepop/Simulations/', file, '.csv')\n\nmet_data <- read_binary(file_path = paste0(filePathPrefix, file),\n hist = hist, no_vars=vars)\n\n# I make the assumption that lat always has same number of decimal points\nlat <- 
as.numeric(substr(x = file, start = 6, stop = 13))\nlon <- as.numeric(substr(x = file, start = 15, stop =24))\n\n# data frame required\nmet_data <- as.data.frame(met_data)\n\n# 3b. Clean it up\n# rename needed columns\n\ndata.table::setnames(met_data, old=c(\"year\",\"month\", \"day\", \"tmax\", \"tmin\"), \n new=c(\"Year\", \"Month\", \"Day\", \"Tmax\", \"Tmin\"))\n\nmet_data <- met_data %>%\n select(-c(precip, windspeed)) %>%\n data.frame()\n\n# 3c. Get hourly interpolation\n# generate hourly data\nmet_hourly <- stack_hourly_temps(weather = met_data,\n latitude = lat)\nmet_hourly <- met_hourly$hourtemps\n\nmet_data['HRSFLYtemp'] <- 0\n\nfor (row in 1:nrow(met_data)) {\n \n year <- met_data[row, \"Year\"]\n month <- met_data[row, \"Month\"]\n day <- met_data[row, \"Day\"]\n \n onThatDay <- filter(met_hourly, Year == year & Month == month & Day == day)\n \n sunrise = as.POSIXlt(getSunlightTimes(date = as.Date(strptime(paste(onThatDay$Year[1], onThatDay$Month[1], onThatDay$Day[1], sep = '-'), \"%Y-%m-%d\")), lat = lat, lon = lon, tz = 'America/Los_Angeles')$sunrise)\n sunset = as.POSIXlt(getSunlightTimes(date = as.Date(strptime(paste(onThatDay$Year[1], onThatDay$Month[1], onThatDay$Day[1], sep = '-'), \"%Y-%m-%d\")), lat = lat, lon = lon, tz = 'America/Los_Angeles')$sunset)\n \n onThatDay <- filter(onThatDay, Hour >= sunrise$hour & Hour <= sunset$hour)\n \n count <- sum(onThatDay$Temp > 10)\n rm(onThatDay)\n met_data$HRSFLYtemp[row] = count\n \n}\n\nmet_data['HRSlight'] = daylength(lat, as.character(strptime(paste(met_data$Year, met_data$Month, met_data$Day, sep = '-'), \"%Y-%m-%d\")))\n\nHRSsolstice = max(met_data$HRSlight)\n\n#met_data['PROPHRSFLYtemp'] <- met_data['HRSFLYtemp'] / met_data['HRSlight']\n\n\n#for (row in 1:nrow(met_data)) {\n \n# if(met_data$PROPHRSFLYtemp[row] > 1) {\n# met_data$PROPHRSFLYtemp[row] = 1\n# }\n# \n#}\n\n#met_data['HRSFLY'] = met_data['PROPHRSFLYtemp'] * 24\n\nmet_data['PROPFLIGHTDAY'] <- met_data['HRSFLYtemp'] / 
HRSsolstice\n\nmet_data['SUMFLIGHTDAY'] <- 0\n\nmet_data$SUMFLIGHTDAY[1] = met_data$PROPFLIGHTDAY[1]\n\nfor (row in 2:nrow(met_data)) {\n \n met_data$SUMFLIGHTDAY[row] = ifelse(met_data$PROPFLIGHTDAY[row] + met_data$SUMFLIGHTDAY[row - 1] > 14, 0, met_data$PROPFLIGHTDAY[row] + met_data$SUMFLIGHTDAY[row - 1])\n \n}\n\nmet_data['CURPROPFLIGHTDAY'] <- 0\n\nfor (row in 1:nrow(met_data)) {\n \n met_data$CURPROPFLIGHTDAY[row] = ifelse(met_data$PROPFLIGHTDAY[row] > 0, 1, met_data$CURPROPFLIGHTDAY[row])\n \n}\n\nmet_data['CURSUMFLIGHTDAY'] <- 0\n\nmet_data$CURSUMFLIGHTDAY[1] = met_data$CURPROPFLIGHTDAY[1]\n\nfor (row in 2:nrow(met_data)) {\n \n met_data$CURSUMFLIGHTDAY[row] = ifelse(met_data$CURPROPFLIGHTDAY[row] + met_data$CURSUMFLIGHTDAY[row - 1] > 14, 0, met_data$CURPROPFLIGHTDAY[row] + met_data$CURSUMFLIGHTDAY[row - 1])\n \n}\n\nwrite.csv(met_data,'D:/Coursework/PACCAR/VarroaPOP/Beepop/Simulations/Richland.csv', row.names = FALSE)\n\n" }, { "alpha_fraction": 0.7309504151344299, "alphanum_fraction": 0.7434192299842834, "avg_line_length": 33.04716873168945, "blob_id": "6da81da0fa12ad0b43b8a538364906bf56e50cf1", "content_id": "d0d024fdec73405355866efb5ba49153465ff92d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 3609, "license_type": "no_license", "max_line_length": 115, "num_lines": 106, "path": "/GlobalOptions.h", "repo_name": "RobertCurry/VarroaPopLinux", "src_encoding": "UTF-8", "text": "#pragma once\n\n#include <string>\n#include <stdexcept>\n\n// Singleton class to easily add conditional block statements\n// and select them by either:\n// - changing default value in this class\n// - add a command line option to control and validate its impact on results.\nclass GlobalOptions\n{\npublic:\n\t// Returns the single instance of GlobalOptions class\n\tstatic GlobalOptions& Get();\n\npublic:\n\t// The use of this constructor is restricted for testing\n\tGlobalOptions();\n\npublic:\n\ttemplate<typename 
OptionType>\n\tclass Option\n\t{\n\tpublic:\n\t\tOption() {}\n\t\tOption(const OptionType& value) { Set(value); }\n\t\tvirtual ~Option() {}\n\t\tvirtual const OptionType& operator()() const { return m_value; }\n\t\tvirtual void Set(const OptionType& value) { m_value = value; }\n\n\tprotected:\n\t\tOptionType m_value;\n\t};\n\n\ttemplate<typename OptionType>\n\tclass AggregateOption : Option<OptionType>\n\t{\n\tpublic:\n\t\tAggregateOption(GlobalOptions& options) : Option<OptionType>(), m_options(options) {}\n\t\tvirtual void Set(const OptionType& value);\n\t\tvirtual const OptionType& operator()() const \n\t\t{\n\t\t\tthrow std::runtime_error(\"GlobalOptions::AggregateOption::operator()() should not be called directly\");\n\t\t\treturn Option<OptionType>::m_value;\n\t\t}\n\n\tprivate:\n\t\tGlobalOptions& m_options;\n\t};\n\n\t// Options\npublic:\n\n\t// Egg laying options\n\t\n\t// This option affect Egg laying. When bellow threshold no egg will be laid.\n\tOption<double> DaylightHoursThreshold = 9.5;\n\n\t// Adult aging options\n\n\t// This option changes the way adult are aged. 
Adults will be aging only during the time frame where the\n\t// Queen is laying eggs.\n\tOption<bool> ShouldAdultsAgeBasedLaidEggs = false;\n\n\t// Forager aging options\n\n\t// This option will affect the way we elect a day as a Forage day\n\t// If true:\n\t// 12.0 Deg C < MaxTemp < 43.33 Deg C AND\n\t// Windspeed <= 8.94 m/s AND\n\t// Rainfall <= .197 inches\n\t// else if false:\n\t// Windspeed <= 8.94 m/s AND\n\t// Rainfall <= .197 inches\n\t//\n\t// 5/21/2020: Changed the Windspeed from 21.13 meters/sec to 8.94 meters/sec\n\tOption<bool> ShouldForageDayElectionBasedOnTemperatures = true;\n\t// This option affect the Windspeed threshold to determinate if the current day can be a Forage Day\n\tOption<double> WindspeedThreshold = 8.94;\n\t// This option affect the Rainfall threshold to determinate if the current day can be a Forage Day\n\tOption<double> RainfallThreshold = 0.197;\n\t// This option will affect wth weather file data (Observed, Historical, and RCP)\n\t// Hourly temperature estimation will be used to enhance ForagerInc aging increment.\n\tOption<bool> ShouldComputeHourlyTemperatureEstimation = false;\n\t// This option changes the way forager are processed on non foraging days.\n\t// On non-Foraging days, foragers when added to the Foragers list will not age and they will age\n\t// of ForageInc on the next Foraging day instead of aging 1 full day.\n\tOption<bool> ShouldForagersAlwaysAgeBasedOnForageInc = false;\n\n\t// This options controls ShouldForageDayElectionBasedOnTemperatures, ShouldComputeHourlyTemperatureEstimation and \n\t// ShouldForagersAlwaysAgeBasedOnForageInc when it is set\n\ttypedef bool ForagerAgingBasedHourlyTemperatureEstimate;\n\tAggregateOption<ForagerAgingBasedHourlyTemperatureEstimate> ShouldForagerAgingBasedOnHourlyTemperatureEstimate;\n\t\n\t// Weather file options\n\n\t// If the weather file to be loaded is in bynary format, this specify which format to use\n\t// Valid options are:\n\t// - Observed\n\t// - Modeled\n\t// - 
Rcp85\n\tOption<std::string> BinaryWeatherFileFormatIdentifier;\n\n\t// Additional Output Data\n\tOption<bool> ShouldOutputInOutCounts = false;\n};\n" }, { "alpha_fraction": 0.7127246856689453, "alphanum_fraction": 0.7398660778999329, "avg_line_length": 29.836956024169922, "blob_id": "ad63ea173957b2f529fc45dafa312abf27028fbb", "content_id": "cea3c21d1eea6464e46c77eae01641b2db0d9bf0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 2837, "license_type": "no_license", "max_line_length": 103, "num_lines": 92, "path": "/VarroaPopDoc.h", "repo_name": "RobertCurry/VarroaPopLinux", "src_encoding": "UTF-8", "text": "// VarroaPopDoc.h : interface of the CVarroaPopDoc class\n//\n/////////////////////////////////////////////////////////////////////////////\n\n\n#if !defined(AFX_VARROAPOPDOC_H__8C6C418D_7899_11D2_8D9A_0020AF233A70__INCLUDED_)\n#define AFX_VARROAPOPDOC_H__8C6C418D_7899_11D2_8D9A_0020AF233A70__INCLUDED_\n\n#if _MSC_VER >= 1000\n#pragma once\n#endif // _MSC_VER >= 1000\n\n#include \"Colony.h\"\n#include \"WeatherEvents.h\"\n#include \"MyPropSheet.h\"\n#include \"Mite.h\"\n#include \"MiteTreatments.h\"\n#include \"Matrix.h\"\n#include \"IEDItem.h\"\n#include \"VarroaPopSession.h\"\n\nclass CVarroaPopDoc : public CDocument, public CVarroaPopSession, public CVarroaPopSessionBridge\n{\nprotected: // create from serialization only\n\tCVarroaPopDoc();\n\tDECLARE_DYNCREATE(CVarroaPopDoc)\n\n// Overrides\n\t// ClassWizard generated virtual function overrides\n\t//{{AFX_VIRTUAL(CVarroaPopDoc)\npublic:\n\tvirtual BOOL OnNewDocument();\n\tvirtual void Serialize(CArchive& ar);\n\tvirtual void SetTitle(LPCTSTR lpszTitle);\n\t//}}AFX_VIRTUAL\n\n// Implementation\npublic:\n\tvirtual ~CVarroaPopDoc();\n#ifdef _DEBUG\n\tvirtual void AssertValid() const;\n\tvirtual void Dump(CDumpContext& dc) const;\n#endif\n\nprotected:\n\n// Generated message map functions\nprotected:\n\t//{{AFX_MSG(CVarroaPopDoc)\n\tafx_msg void 
OnFileNew();\n\tafx_msg void OnSelectGraph();\n\tafx_msg void OnViewPlotdata();\n\tafx_msg void OnFileSaveResults();\n\tafx_msg void OnFileSaveSession();\n\tafx_msg void OnViewCombremovaldate();\n\tafx_msg void OnUpdateToggleDataFreq(CCmdUI* pCmdUI);\n\tafx_msg void OnToggleDataFreq();\n\tafx_msg void OnViewOptions();\n\tafx_msg void OnWeatherCreatenewweatherfile();\n\tafx_msg void OnWeatherEditcurrentweatherfile();\n\tafx_msg void OnWeatherEditweatherfilefromdisk();\n\t//}}AFX_MSG\n\tDECLARE_MESSAGE_MAP()\npublic:\n\tvirtual BOOL OnOpenDocument(LPCTSTR lpszPathName);\n\tafx_msg void OnUpdateViewShowwarnings(CCmdUI *pCmdUI);\n\tafx_msg void OnToggleShowwarnings();\n\npublic:\n\t// CVarroaPopSessionBridge Implementation\n\tvoid SimulationStartUpdated();\n\tvoid SimulationEndUpdated();\n\tvoid StartSimulation(CVarroaPopSession& session);\n\tvoid EndSimulation(CVarroaPopSession& session);\n\tvoid ImmigrationEnabled(bool enabled);\n\tvoid WeatherFileMissing();\n\tvoid WeatherFileLoaded(bool loaded, const CString& filename);\n\tvoid SessionFileLoaded(CArchive& ar);\n\tCString GetDefaultPathName(CArchive& ar);\n\tvoid InputFileUnknownVariable(const CString& name);\n\tvoid InputFileException(const CString& name);\n\tvoid OutputFileException(const CString& name);\n\tCString GetVersion();\n\tBOOL CheckDateConsistencyFailed(const CString& warning);\n};\n\n/////////////////////////////////////////////////////////////////////////////\n\n//{{AFX_INSERT_LOCATION}}\n// Microsoft Developer Studio will insert additional declarations immediately before the previous line.\n\n#endif // !defined(AFX_VARROAPOPDOC_H__8C6C418D_7899_11D2_8D9A_0020AF233A70__INCLUDED_)\n" }, { "alpha_fraction": 0.456487238407135, "alphanum_fraction": 0.6083505749702454, "avg_line_length": 42.38473129272461, "blob_id": "ec4d73c51ffc90e835ea2de744808422d9ffa8ad", "content_id": "037884c8d1b4651542ce6f1277a6db77995db8e2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", 
"length_bytes": 28980, "license_type": "no_license", "max_line_length": 560, "num_lines": 668, "path": "/Linux/tests/test_temperaturedata.cpp", "repo_name": "RobertCurry/VarroaPopLinux", "src_encoding": "UTF-8", "text": "#include \"catch2/catch.hpp\"\n\n//! include explicitely since we are not using the pre-compiled header for tests\n//! may change this in the future\n#include \"stdafx.h\" \n\n#include \"helpers/common.h\" \n\n#define _USE_MATH_DEFINES\n#include <cmath>\n\n#include <cstdint>\n#include <optional>\n#include <regex>\n#include <variant>\n#include <vector>\n\n#include <boost/filesystem.hpp>\nnamespace bfs = boost::filesystem;\n\n#include <boost/date_time.hpp>\nnamespace bd = boost::gregorian;\n\n#include <boost/algorithm/string.hpp>\nnamespace ba = boost::algorithm;\n\n#include \"colony.h\"\n#include \"coldstoragesimulator.h\"\n#include \"queen.h\"\n#include \"weatherevents.h\"\n#include \"weathergriddata.h\"\n\nusing namespace WeatherGridDataNs;\n\n// The matcher class\nclass DayLengthResultMatcher : public Catch::MatcherBase<DayLengthResult> {\n DayLengthResult m_result;\n std::float_t m_epsilon = 1.e-5f;\npublic:\n DayLengthResultMatcher(const DayLengthResult& result) : m_result(result){}\n\n // Performs the test for this matcher\n bool match(const DayLengthResult& testing) const override {\n return Catch::Floating::WithinRelMatcher(m_result.sunrise, m_epsilon).match(testing.sunrise)\n && Catch::Floating::WithinRelMatcher(m_result.sunset, m_epsilon).match(testing.sunset)\n && Catch::Floating::WithinRelMatcher(m_result.daylength, m_epsilon).match(testing.daylength);\n }\n\n // Produces a string describing what this matcher does. 
It should\n // include any provided data (the begin/ end in this case) and\n // be written as if it were stating a fact (in the output it will be\n // preceded by the value under test).\n virtual std::string describe() const override {\n std::ostringstream ss;\n ss << \"is matching sunrise:\" << m_result.sunrise << \" sunset:\" << m_result.sunset << \" daylength:\" << m_result.daylength;\n return ss.str();\n }\n};\n\n// The builder function\ninline DayLengthResultMatcher DayLengthResultEq(const DayLengthResult& result) {\n return DayLengthResultMatcher(result);\n}\n\ntemplate<typename Container, typename Compare>\nstruct CompareMatcher : Catch::MatcherBase<Container> {\n\n CompareMatcher(const Container& comparator, const Compare& compare)\n : m_comparator(comparator),\n m_compare(compare) {}\n\n bool match(const Container& v) const override {\n if (m_comparator.size() != v.size()) {\n return false;\n }\n for (size_t i = 0; i < v.size(); ++i) {\n if (!m_compare(m_comparator[i], v[i])) {\n return false;\n }\n }\n return true;\n }\n\n virtual std::string describe() const override {\n return \"Equals: \" + Catch::Detail::stringify(m_comparator);\n }\n\n const Container& m_comparator;\n const Compare& m_compare;\n};\n\ntemplate<typename Container, typename C>\nCompareMatcher<Container, C> Compare(const Container& comparator, const C& compare) {\n return CompareMatcher<Container, C>(comparator, compare);\n}\n\ntemplate<typename Type>\nauto EqualsApprox(const std::array<Type, 24>& comparator) {\n return Compare(comparator, [=](Type actual, Type expected) {\n return actual == Approx(expected);\n });\n}\n\nTEST_CASE(\"Temperature Data\", \"[input]\") {\n \n SECTION(\"Load\") {\n\n bfs::path weatherFile (bfs::path(GetSimulationsDir()) / \"ObservedHistoricalBinary\" / \"data_46.03125_-118.34375\");\n\n CHECK(bfs::exists(weatherFile));\n\n auto data = LoadGridData<ObservedHistoricalItem>(weatherFile.string());\n bd::date startDate(1979, bd::Jan, 1);\n bd::date endDate = 
startDate + bd::days(data.data().size() - 1 /* first day*/);\n CHECK(endDate.year() == 2016);\n CHECK(endDate.month() == 12);\n CHECK(endDate.day() == 31);\n }\n\n SECTION(\"Load Rcp85Binary\") {\n\n bfs::path weatherFile(bfs::path(GetSimulationsDir()) / \"Rcp85Binary\" / \"data_46.03125_-118.34375\");\n\n CHECK(bfs::exists(weatherFile));\n\n auto data = LoadGridData<Rcp85>(weatherFile.string());\n bd::date startDate(2006, bd::Jan, 1);\n bd::date endDate = startDate + bd::days(data.data().size() - 1 /* first day*/);\n CHECK(endDate.year() == 2099);\n CHECK(endDate.month() == 12);\n CHECK(endDate.day() == 31);\n }\n\n SECTION(\"JDay\") {\n\n CHECK(ComputeJDay(COleDateTime(2019, 1, 1, 0, 0, 0)) == 1);\n CHECK(ComputeJDay(COleDateTime(2019, 2, 28, 0, 0, 0)) == 59);\n CHECK(ComputeJDay(COleDateTime(2019, 2, 29, 0, 0, 0)) == 60);\n CHECK(ComputeJDay(COleDateTime(2019, 3, 1, 0, 0, 0)) == 60);\n CHECK(ComputeJDay(COleDateTime(2019, 12, 31, 0, 0, 0)) == 365);\n CHECK(ComputeJDay(COleDateTime(2020, 1, 1, 0, 0, 0)) == 1);\n CHECK(ComputeJDay(COleDateTime(2020, 2, 28, 0, 0, 0)) == 59);\n CHECK(ComputeJDay(COleDateTime(2020, 2, 29, 0, 0, 0)) == 60);\n CHECK(ComputeJDay(COleDateTime(2020, 12, 31, 0, 0, 0)) == 366);\n }\n\n SECTION(\"GetLatitudeFromFileName\"){\n\n const std::map<std::string, double> filenames = {\n {\"Omak_bcc-csm1-1_rcp45_48.40625_-119.53125\", 48.40625},\n {\"Omak_inmcm4_rcp85_48.40625_-119.53125\", 48.40625},\n {\"Omak_inmcm4_rcp85_-48.40625_-119.53125\", -48.40625},\n {\"Richland_GFDL-ESM2G_rcp85_46.28125_-119.34375\", 46.28125},\n {\"Omak_inmcm4_rcp85_-48.40625_-119.53125.wth\", -48.40625},\n {\"Richland_GFDL-ESM2G_rcp85_46.28125_-119.34375.wth\", 46.28125},\n {\"data_46.28125_-119.34375\", 46.28125},\n {\"data_47.40625_-120.34375\", 47.40625},\n {\"data_-47.40625_-120.34375.txt\", -47.40625},\n {\"data_46.28125_-119.34375.txt\", 46.28125},\n {\"data_47.40625_-120.34375.wth\", 47.40625},\n {\"data_-47.40625_-120.34375.wth\", -47.40625}\n };\n for (auto it 
= filenames.begin(); it != filenames.end(); it++)\n {\n std::stringstream info;\n info << it->first;\n info << \": \";\n info << it->second;\n INFO(info.str());\n CHECK(GetLatitudeFromFilename(it->first) == Catch::Detail::Approx(it->second));\n }\n }\n\n SECTION(\"daylength\") {\n\n DayLengthResult northPole1 = { -99.0f, -99.0f, 0.0f };\n DayLengthResult northPole180 = { 99.0f, 99.0f, 24.0f };\n\n CHECK_THAT(DayLength(90.0f, 1), DayLengthResultEq(northPole1));\n CHECK_THAT(DayLength(90.0f, 180), DayLengthResultEq(northPole180));\n\n DayLengthResult pullman1 = { 7.694178f, 16.30582f, 8.611644f };\n DayLengthResult pullman180 = { 4.010171f, 19.98983f, 15.97966f };\n\n CHECK_THAT(DayLength(46.7298f, 1), DayLengthResultEq(pullman1));\n CHECK_THAT(DayLength(46.7298f, 180), DayLengthResultEq(pullman180));\n\n DayLengthResult rio1 = { 6.624304f, 17.3757f, 10.75139f };\n DayLengthResult rio180 = { 5.207381f, 18.79262f, 13.58524f };\n\n CHECK_THAT(DayLength(22.9068f, 1), DayLengthResultEq(rio1));\n CHECK_THAT(DayLength(22.9068f, 180), DayLengthResultEq(rio180));\n }\n\n SECTION(\"hourly temperature\") {\n // JDay 1\n {\n auto current_daylength = DayLength(46.7298f, 1);\n auto next_daylength = DayLength(46.7298f, 2);\n\n HourlyTempraturesEstimator input;\n input.tmin = -2.55f;\n input.tmax = -0.28f;\n input.sunrise = current_daylength.sunrise;\n input.sunset = current_daylength.sunset;\n input.daylength = current_daylength.daylength;\n\n input.next_tmin = -6.71f;\n input.next_sunrise = next_daylength.sunrise;\n\n input.compute();\n\n std::array<std::float_t, 24> expectedResult = { -2.550000f,-2.550000f,-2.550000f,-2.550000f,-2.550000f,-2.550000f,-2.550000f,-2.550000f,-2.377236f,-1.824559f,-1.316665f,-0.8849064f,-0.5559374f,-0.3500657f,-0.28f,-0.3500657f,-0.5559374f,-1.787940f,-2.794280f,-3.479060f,-3.998755f,-4.417698f,-4.768682f, -5.070708f };\n CHECK_THAT(input.hourly_temperatures, EqualsApprox(expectedResult));\n }\n }\n SECTION(\"Cold Storage\")\n {\n CEvent event;\n 
event.SetTemp(25.0);\n\n CColony colony;\n CQueen& queen = colony.queen;\n queen.SetMaxEggs(2000.0);\n\n CColdStorageSimulator simulator;\n\n CHECK(simulator.GetTemp(event) == Approx(25.0));\n\n simulator.SetEnabled(true);\n\n CHECK(simulator.GetTemp(event) == Approx(25.0));\n\n simulator.Activate();\n simulator.Update(event, colony);\n\n CHECK(simulator.IsActive());\n CHECK_FALSE(simulator.IsStarting());\n CHECK_FALSE(simulator.IsEnding());\n CHECK(simulator.GetTemp(event) == Approx(CColdStorageSimulator::GetDefaultColdStorageTemperature()));\n\n // Pick a date where the queen is laying eggs\n queen.LayEggs(90, 14.0, 12.0, 2000, 1);\n REQUIRE(queen.GetTeggs() > 0);\n\n simulator.DeActivate();\n simulator.Update(event, colony);\n\n CHECK_FALSE(simulator.IsActive());\n CHECK_FALSE(simulator.IsStarting());\n CHECK_FALSE(simulator.IsEnding());\n CHECK(simulator.GetTemp(event) == Approx(25.0));\n\n // cold storage period in a year \n simulator.SetStartDate(COleDateTime(2020, 10, 1, 0, 0, 0));\n simulator.SetEndDate(COleDateTime(2020, 11, 30, 0, 0, 0));\n\n // event date is before cold storage period\n event.SetTime(COleDateTime(2020, 7, 1, 0, 0, 0));\n simulator.Update(event, colony);\n\n CHECK_FALSE(simulator.IsActive());\n CHECK_FALSE(simulator.IsStarting());\n CHECK_FALSE(simulator.IsEnding());\n CHECK(simulator.GetTemp(event) == Approx(25.0));\n\n // event date is on cold storage period start date\n event.SetTime(COleDateTime(2020, 10, 1, 0, 0, 0));\n simulator.Update(event, colony);\n\n CHECK(simulator.IsActive());\n CHECK(simulator.IsStarting());\n CHECK_FALSE(simulator.IsEnding());\n CHECK(simulator.GetTemp(event) == Approx(CColdStorageSimulator::GetDefaultColdStorageTemperature()));\n\n // Pick a date where the queen is not laying eggs (change daylight hours)\n queen.LayEggs(90, 14.0, 9.0, 2000, 1);\n REQUIRE(queen.GetTeggs() == 0);\n\n // event date is during cold storage period \n event.SetTime(COleDateTime(2020, 11, 1, 0, 0, 0));\n simulator.Update(event, 
colony);\n\n CHECK(simulator.IsActive());\n CHECK_FALSE(simulator.IsStarting());\n CHECK_FALSE(simulator.IsEnding());\n CHECK(simulator.GetTemp(event) == Approx(CColdStorageSimulator::GetDefaultColdStorageTemperature()));\n\n // Pick a date where the queen is laying eggs (change daylight hours)\n queen.LayEggs(90, 14.0, 12.0, 2000, 1);\n REQUIRE(queen.GetTeggs() > 0);\n\n // event date is on cold storage period end date\n event.SetTime(COleDateTime(2020, 11, 30, 0, 0, 0));\n simulator.Update(event, colony);\n\n CHECK(simulator.IsActive());\n CHECK_FALSE(simulator.IsStarting());\n CHECK(simulator.IsEnding());\n CHECK(simulator.GetTemp(event) == Approx(CColdStorageSimulator::GetDefaultColdStorageTemperature()));\n\n // event date is after cold storage period \n event.SetTime(COleDateTime(2020, 12, 1, 0, 0, 0));\n simulator.Update(event, colony);\n\n CHECK_FALSE(simulator.IsActive());\n CHECK_FALSE(simulator.IsStarting());\n CHECK_FALSE(simulator.IsEnding());\n CHECK(simulator.GetTemp(event) == Approx(25.0));\n\n // cold storage period on year transition\n simulator.SetStartDate(COleDateTime(2021, 11, 1, 0, 0, 0));\n simulator.SetEndDate(COleDateTime(2022, 2, 29, 0, 0, 0));\n\n // event date is before cold storage period\n event.SetTime(COleDateTime(2021, 7, 1, 0, 0, 0));\n simulator.Update(event, colony);\n\n CHECK(simulator.GetTemp(event) == Approx(25.0));\n\n // event date is on cold storage period start date\n event.SetTime(COleDateTime(2021, 11, 1, 0, 0, 0));\n simulator.Update(event, colony);\n\n CHECK(simulator.GetTemp(event) == Approx(CColdStorageSimulator::GetDefaultColdStorageTemperature()));\n\n // event date is during cold storage period \n event.SetTime(COleDateTime(2021, 12, 1, 0, 0, 0));\n simulator.Update(event, colony);\n\n CHECK(simulator.GetTemp(event) == Approx(CColdStorageSimulator::GetDefaultColdStorageTemperature()));\n\n // event date is during cold storage period \n event.SetTime(COleDateTime(2022, 1, 1, 0, 0, 0));\n simulator.Update(event, 
colony);\n\n CHECK(simulator.GetTemp(event) == Approx(CColdStorageSimulator::GetDefaultColdStorageTemperature()));\n\n // event date is on cold storage period end date\n event.SetTime(COleDateTime(2022, 3, 1, 0, 0, 0));\n simulator.Update(event, colony);\n\n CHECK(simulator.GetTemp(event) == Approx(CColdStorageSimulator::GetDefaultColdStorageTemperature()));\n\n // event date is after cold storage period \n event.SetTime(COleDateTime(2022, 3, 2, 0, 0, 0));\n simulator.Update(event, colony);\n\n CHECK(simulator.GetTemp(event) == Approx(25.0));\n\n simulator.SetEnabled(false);\n\n // event date is on cold storage period end date\n event.SetTime(COleDateTime(2022, 3, 1, 0, 0, 0));\n simulator.Update(event, colony);\n\n CHECK(simulator.GetTemp(event) == Approx(25.0));\n\n simulator.Reset();\n }\n}\n\nTEST_CASE(\"Temperature Data Failing\", \"[hide]\") \n{\n SECTION(\"Hourly Temperature\")\n {\n // JDay 2\n {\n auto previous_daylength = DayLength(46.7298f, 1);\n auto current_daylength = DayLength(46.7298f, 2);\n auto next_daylength = DayLength(46.7298f, 3);\n\n HourlyTempraturesEstimator input;\n input.tmin = -6.71f;\n input.tmax = 1.46f;\n input.sunrise = current_daylength.sunrise;\n input.sunset = current_daylength.sunset;\n input.daylength = current_daylength.daylength;\n\n input.prev_tmin = -2.55f;\n input.prev_tmax = -0.28f;\n input.prev_sunset = previous_daylength.sunset;\n\n input.next_tmin = -6.71f;\n input.next_sunrise = next_daylength.sunrise;\n\n input.compute();\n\n std::array<std::float_t, 24> expectedResult = { -5.444190f,-5.685756f,-5.903585f,-6.101928f,-6.283987f,-6.452233f,-6.608616f,-6.754699f,-6.074356f,-4.088088f,-2.263309f,-0.7124124f,0.4690795f,1.2083956f,1.46f,1.2083956f,0.4690795f,-1.082260f,-2.173649f,-2.915478f,-3.478154f,-3.931593f,-4.311390f,-4.638157f };\n CHECK_THAT(input.hourly_temperatures, EqualsApprox(expectedResult));\n }\n // JDay 148\n {\n auto previous_daylength = DayLength(46.7298f, 147);\n auto current_daylength = 
DayLength(46.7298f, 148);\n auto next_daylength = DayLength(46.7298f, 149);\n\n HourlyTempraturesEstimator input;\n input.tmin = 8.83f;\n input.tmax = 18.02f;\n input.sunrise = current_daylength.sunrise;\n input.sunset = current_daylength.sunset;\n input.daylength = current_daylength.daylength;\n\n input.prev_tmin = 8.33f;\n input.prev_tmax = 17.42f;\n input.prev_sunset = previous_daylength.sunset;\n\n input.next_tmin = 5.62f;\n input.next_sunrise = next_daylength.sunrise;\n\n input.compute();\n\n std::array<std::float_t, 24> expectedResult = { 10.779638f,10.463412f,10.194304f,9.960077f,9.752718f,9.976793f,11.420404f,12.79732f,14.07209f,15.21189f,16.18737f,16.97343f,17.54982f,17.90169f,18.02f,17.90169f,17.54982f,16.97343f,16.18737f,15.21189f,13.56064f,11.23618f,9.791873f,8.741350f };\n CHECK_THAT(input.hourly_temperatures, EqualsApprox(expectedResult));\n }\n // JDay last\n {\n auto previous_daylength = DayLength(46.7298f, 150);\n auto current_daylength = DayLength(46.7298f, 151);\n\n HourlyTempraturesEstimator input;\n input.tmin = 7.64f;\n input.tmax = 13.75f;\n input.sunrise = current_daylength.sunrise;\n input.sunset = current_daylength.sunset;\n input.daylength = current_daylength.daylength;\n\n input.prev_tmin = 7.57f;\n input.prev_tmax = 17.29f;\n input.prev_sunset = previous_daylength.sunset;\n\n input.compute();\n\n std::array<std::float_t, 24> expectedResult = { 9.918212f,9.548716f,9.234689f,8.961630f,8.720073f,8.445311f,9.399063f,10.30797f,11.14885f,11.90026f,12.54306f,13.06085f,13.44042f,13.67211f,13.75f,13.67211f,13.44042f,13.06085f,12.54306f,11.90026f,7.64000f,7.64000f,7.640000f,7.640000f };\n CHECK_THAT(input.hourly_temperatures, EqualsApprox(expectedResult));\n }\n }\n SECTION(\"Hourly temperature from file\") {\n\n struct CSVHourlyTemperatureDataItem\n {\n int Year;\n int Month;\n int Day;\n float Tmax;\n float Tmin;\n float SPH;\n float SRAD;\n float Rmax;\n float Rmin;\n float latitude;\n float longitude;\n float sunrise;\n float sunset;\n float 
daylength;\n float hour_0;\n float hour_1;\n float hour_2;\n float hour_3;\n float hour_4;\n float hour_5;\n float hour_6;\n float hour_7;\n float hour_8;\n float hour_9;\n float hour_10;\n float hour_11;\n float hour_12;\n float hour_13;\n float hour_14;\n float hour_15;\n float hour_16;\n float hour_17;\n float hour_18;\n float hour_19;\n float hour_20;\n float hour_21;\n float hour_22;\n float hour_23;\n\n void parse(const std::string& line)\n {\n std::vector<std::string> elements;\n ba::split(elements, line, [](const char& c) { return c == ','; });\n if (elements.size() == 38)\n {\n size_t index = 0; \n Year = std::atoi(elements[index++].c_str());\n Month = std::atoi(elements[index++].c_str());\n Day = std::atoi(elements[index++].c_str());\n Tmax = std::atof(elements[index++].c_str());\n Tmin = std::atof(elements[index++].c_str());\n SPH = std::atof(elements[index++].c_str());\n SRAD = std::atof(elements[index++].c_str());\n Rmax = std::atof(elements[index++].c_str());\n Rmin = std::atof(elements[index++].c_str());\n latitude = std::atof(elements[index++].c_str());\n longitude = std::atof(elements[index++].c_str());\n sunrise = std::atof(elements[index++].c_str());\n sunset = std::atof(elements[index++].c_str());\n daylength = std::atof(elements[index++].c_str());\n hour_0 = std::atof(elements[index++].c_str());\n hour_1 = std::atof(elements[index++].c_str());\n hour_2 = std::atof(elements[index++].c_str());\n hour_3 = std::atof(elements[index++].c_str());\n hour_4 = std::atof(elements[index++].c_str());\n hour_5 = std::atof(elements[index++].c_str());\n hour_6 = std::atof(elements[index++].c_str());\n hour_7 = std::atof(elements[index++].c_str());\n hour_8 = std::atof(elements[index++].c_str());\n hour_9 = std::atof(elements[index++].c_str());\n hour_10 = std::atof(elements[index++].c_str());\n hour_11 = std::atof(elements[index++].c_str());\n hour_12 = std::atof(elements[index++].c_str());\n hour_13 = std::atof(elements[index++].c_str());\n hour_14 = 
std::atof(elements[index++].c_str());\n hour_15 = std::atof(elements[index++].c_str());\n hour_16 = std::atof(elements[index++].c_str());\n hour_17 = std::atof(elements[index++].c_str());\n hour_18 = std::atof(elements[index++].c_str());\n hour_19 = std::atof(elements[index++].c_str());\n hour_20 = std::atof(elements[index++].c_str());\n hour_21 = std::atof(elements[index++].c_str());\n hour_22 = std::atof(elements[index++].c_str());\n hour_23 = std::atof(elements[index++].c_str());\n }\n }\n\n static CSVHourlyTemperatureDataItem Invalid()\n {\n CSVHourlyTemperatureDataItem invalid;\n invalid.clear();\n return invalid;\n }\n\n bool isValid() const \n {\n static CSVHourlyTemperatureDataItem sInvalid = Invalid();\n return !(*this == sInvalid);\n }\n\n void clear() { memset(this, 0, sizeof(CSVHourlyTemperatureDataItem)); }\n\n bool operator==(const CSVHourlyTemperatureDataItem& other) const\n {\n return memcmp(this, &other, sizeof(CSVHourlyTemperatureDataItem)) == 0;\n }\n };\n\n bfs::path dataDirectory = GetTestsDir();\n dataDirectory /= \"data\";\n\n bfs::path omakFilePath = dataDirectory / \"Omak_HourlyTemperaturesEstimation.csv\";\n std::ifstream omakFile(omakFilePath.string(), std::ios_base::in);\n REQUIRE(omakFile.good());\n\n std::vector<CSVHourlyTemperatureDataItem> data;\n\n std::string line;\n\n std::getline(omakFile, line); // first line is column headers\n\n while (omakFile.good() && !omakFile.eof())\n {\n std::getline(omakFile, line);\n try\n {\n CSVHourlyTemperatureDataItem item;\n item.parse(line);\n if (item.isValid())\n {\n data.push_back(item);\n }\n }\n catch(...)\n {\n std::cerr << \"Line: \" << line << \" cannot be parsed to CSVData\" << std::endl;\n }\n }\n\n REQUIRE(data.size() == 13881);\n\n CSVHourlyTemperatureDataItem expected;\n \n expected = { 
1979,2,6,1.83,-6.25,0.0028,55.4,100,61.23,48.40625,-119.53125,7.17144570027357,16.8285542997264,9.65710859945285,-4.76008948755474,-5.02424441646498,-5.26110578322043,-5.47578928902288,-5.67209480159138,-5.85292220253554,-6.02053491933582,-6.1767335438027,-4.71929728940465,-2.95065729767772,-1.35583489818696,-0.0188493076474865,0.989863742593826,1.61716273634142,1.83,1.61716273634142,0.989863742593828,-0.253972510197426,-1.94308776995746,-2.97982543578956,-3.72995274949267,-4.31809284307809,-4.80193148440331,-5.21294918358566 };\n CHECK(data[36] == expected);\n\n expected = { 1979,7,6,33.07,15.2,0.0085,252.625,89.1,21.58,48.40625,-119.53125,3.93431561602807,20.0656843839719,16.1313687679439,19.7303742452638,19.0075824387806,18.3978010149415,17.8704229878499,15.3831705488828,18.1581893499511,20.861313337576,23.4268467188875,25.7924376556453,27.9005936418054,29.7000787815529,31.1471590088389,32.2066649850338,32.8528468424013,33.07,32.8528468424013,32.2066649850338,31.1471590088389,29.7000787815529,27.9005936418054,25.7924376556453,22.44660797383,20.4268105805484,19.0053998401906 };\n CHECK(data[186] == expected);\n\n expected = { 1982,12,21,1.84,-4.3,0.0036,40.4,100,88,48.40625,-119.53125,7.84458317615193,16.1554168238481,8.31083364769614,-3.48986802737647,-3.64124919147136,-3.77797088437726,-3.90262330827026,-4.01716554711344,-4.12311528772586,-4.22167220701657,-4.31380104697031,-4.05654710188955,-2.51573869605991,-1.09049498277154,0.126872530037326,1.05751625316166,1.64115938998495,1.84,1.64115938998495,1.05751625316166,-0.441263630377153,-1.41451416488852,-2.09137312798071,-2.61083471192319,-3.03246429026715,-3.38733609358046,-3.69372864101575 };\n CHECK(data[1450] == expected);\n\n expected = { 
1999,7,5,27.37,7.29,0.007,325.4,98.69,34.37,48.40625,-119.53125,3.92417750718255,20.0758224928175,16.1516449856349,11.2404514331275,10.6101214416003,10.0784910479075,9.61879479539915,7.52735097636718,10.6420131492972,13.6753726760999,16.5538558131174,19.2076453235285,21.572373885413,23.590685312834,25.2136257227221,26.401830904988,27.1264810963558,27.37,27.1264810963558,26.401830904988,25.2136257227221,23.590685312834,21.572373885413,19.2076453235285,16.4457140770673,14.8022992089702,13.6472623123843 };\n CHECK(data[7490] == expected);\n\n expected = { 2011,9,20,26.26,5.72,0.0048,203.375,89.59,21.7,48.40625,-119.53125,5.83629680945432,18.1637031905457,12.3274063810914,9.85516016703379,9.17640378273531,8.57957434533616,8.04700749581804,7.56619991043564,7.12797599524873,6.36687212441886,10.2807921975313,14.0263807973456,17.4653942986169,20.4709042677994,22.93198218651,24.7577936439678,25.880950888377,26.26,25.880950888377,24.7577936439678,22.93198218651,20.4709042677994,17.4350924662465,15.5906984388493,14.3094462326179,13.3267196995101,12.5293649012352 };\n CHECK(data[11950] == expected);\n\n expected = { 2016,12,31,-4.98,-10.23,0.0021,57.7,100,88.06,48.40625,-119.53125,7.80756225656321,16.1924377434368,8.38487548687358,-8.27525538655417,-8.63957376828067,-8.96851300939355,-9.26834034497371,-9.5437923561646,-9.79853610690149,-10.035468721517,-10.2569189005201,-9.97382585417202,-8.66609472026456,-7.4584550222929,-6.42819685770049,-5.64125773588436,-5.14800251443357,-4.98,-5.14800251443357,-5.64125773588436,-10.23,-10.23,-10.23,-10.23,-10.23,-10.23,-10.23 };\n CHECK(data[13879] == expected);\n\n const size_t sTmaxIndex = 3;\n const size_t sTminIndex = 4;\n const size_t sLatitude = 9;\n const size_t sSunrise = 11;\n const size_t sSunset = 12;\n const size_t sDaylength = 13;\n const size_t sHourlyTemperatureStart = 14;\n\n std::string compareFileName = GetFileInTempDirectory(\"compare.csv\");\n\n std::ofstream output(compareFileName, std::ios_base::out);\n\n output << 
\"cpp, r\" << std::endl;\n\n size_t startIndex = 0;\n size_t endIndex = data.size();\n\n COleDateTime date(1979, 1, 1, 0, 0, 0);\n date += COleDateTimeSpan(startIndex, 0, 0, 0);\n for (size_t dayIndex = startIndex; dayIndex < endIndex; dayIndex++)\n {\n auto& expectedItem = data[dayIndex];\n auto daylengthResult = DayLength(expectedItem.latitude, ComputeJDay(date));\n\n INFO(dayIndex);\n REQUIRE(daylengthResult.sunrise == Approx(expectedItem.sunrise));\n REQUIRE(daylengthResult.sunset == Approx(expectedItem.sunset));\n REQUIRE(daylengthResult.daylength == Approx(expectedItem.daylength));\n\n HourlyTempraturesEstimator estimator;\n estimator.tmin = expectedItem.Tmin;\n estimator.tmax = expectedItem.Tmax;\n estimator.sunrise = expectedItem.sunrise;\n estimator.sunset = expectedItem.sunset;\n estimator.daylength = expectedItem.daylength;\n if (dayIndex > 0)\n {\n estimator.prev_tmin = data[dayIndex - 1].Tmin;\n estimator.prev_tmax = data[dayIndex - 1].Tmax;\n estimator.prev_sunset = data[dayIndex - 1].sunset;\n }\n if (dayIndex < 13880)\n {\n estimator.next_tmin = data[dayIndex + 1].Tmin;\n estimator.next_sunrise = data[dayIndex + 1].sunrise;\n }\n estimator.compute();\n\n std::array<float, 24> expectedResult = {expectedItem.hour_0,expectedItem.hour_1,expectedItem.hour_2,expectedItem.hour_3,expectedItem.hour_4,expectedItem.hour_5,expectedItem.hour_6,expectedItem.hour_7,expectedItem.hour_8,expectedItem.hour_9,expectedItem.hour_10,expectedItem.hour_11,expectedItem.hour_12,expectedItem.hour_13,expectedItem.hour_14,expectedItem.hour_15,expectedItem.hour_16,expectedItem.hour_17,expectedItem.hour_18,expectedItem.hour_19,expectedItem.hour_20,expectedItem.hour_21,expectedItem.hour_22,expectedItem.hour_23 }; \n CHECK_THAT(estimator.hourly_temperatures, EqualsApprox(expectedResult));\n\n for (size_t hourIndex = 0; hourIndex < 24; hourIndex++)\n {\n output << estimator.hourly_temperatures[hourIndex] << \",\" << expectedResult[hourIndex] << std::endl;\n }\n\n date += 
COleDateTimeSpan(1, 0, 0, 0);\n }\n\n std::cout << \"Output file written at \" << compareFileName << std::endl;\n output.close();\n\n std::cout << date.Format(\"%Y/%m/%d\") << std::endl;\n }\n}" }, { "alpha_fraction": 0.6616584062576294, "alphanum_fraction": 0.6675541996955872, "avg_line_length": 19.782608032226562, "blob_id": "e8169c4b5c8bf4ad533b7a78efa1d41f9a700c05", "content_id": "69fc47823dbd8099923c030eb641f59ed6a1efa5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 5258, "license_type": "no_license", "max_line_length": 74, "num_lines": 253, "path": "/Linux/portcode/cstring.cpp", "repo_name": "RobertCurry/VarroaPopLinux", "src_encoding": "UTF-8", "text": "#include \"cstring.h\"\n\n#include \"compat.h\"\n\n#include <boost/algorithm/string/case_conv.hpp>\n#include <boost/algorithm/string/trim.hpp>\n#include <boost/algorithm/string/replace.hpp>\n\n#include <boost/tokenizer.hpp>\n\n#include <cassert>\n#include <limits>\n\nnamespace ba = boost::algorithm;\n\nCString::CString() : m_data()\n{\n}\n\nCString::CString(const std::string& str) : m_data(str)\n{\n}\n\nCString::CString(const char* cStr) : m_data(cStr)\n{\n}\n\nbool CString::operator==(const CString& str) const\n{\n\treturn m_data == str.m_data;\n}\n\nbool CString::operator==(const char* str) const\n{\n\treturn m_data == str;\n}\n\nbool CString::operator!=(const CString& str) const\n{\n\treturn m_data != str.m_data;\n}\n\nbool CString::operator!=(const char* str) const\n{\n\treturn m_data != str;\n}\n\nchar& CString::operator[](const size_t& index)\n{\n\treturn m_data[index];\n}\n\nconst char& CString::operator[](const size_t& index) const\n{\n\treturn m_data[index];\n}\n\nCString& CString::operator+=(const CString& str)\n{\n\tif (&str != this)\n\t{\n\t\tm_data += str.m_data;\n\t}\n\treturn *this;\n}\n\nCString& CString::operator=(const CString& str)\n{\n\tif (&str != this)\n\t{\n\t\tm_data = str.m_data;\n\t}\n\treturn *this;\n}\n\nCString& 
CString::operator+=(const char& c)\n{\n\tm_data += c;\n\treturn *this;\n}\n\nbool CString::operator<(const CString& str) const\n{\n\treturn m_data < str.m_data;\n}\n\nconst std::string& CString::ToString() const\n{\n\treturn m_data;\n}\n\nCString::operator const char*() const\n{\n\treturn m_data.c_str();\n}\n\nint CString::GetLength() const\n{\n\treturn static_cast<int>(m_data.length());\n}\n\nCString& CString::MakeLower()\n{\n\tba::to_lower(m_data);\n\treturn *this;\n}\n\nCString& CString::MakeUpper()\n{\n\tba::to_upper(m_data);\n\treturn *this;\n}\n\nvoid CString::Trim()\n{\n\tba::trim(m_data);\n}\n\nvoid CString::TrimLeft()\n{\n\tba::trim_left(m_data);\n}\n\nvoid CString::TrimRight()\n{\n\tba::trim_right(m_data);\n}\n\nint CString::Find(char element) const\n{\n\tsize_t position = m_data.find(element);\n\treturn (position != std::string::npos)? static_cast<int>(position) : -1;\n}\n\nint CString::ReverseFind(char element) const\n{\n\tsize_t position = m_data.rfind(element);\n\treturn (position != std::string::npos)? static_cast<int>(position) : -1;\n}\n\nint CString::Find(const char* str) const\n{\n\tsize_t position = m_data.find(str);\n\treturn (position != std::string::npos)? static_cast<int>(position) : -1;\n}\n\nint CString::ReverseFind(const char* str) const\n{\n\tsize_t position = m_data.rfind(str);\n\treturn (position != std::string::npos)? 
static_cast<int>(position) : -1;\n}\n\nvoid CString::Replace(const CString& toReplace, const CString& with)\n{\n\tstd::string search = toReplace.ToString();\n\tstd::string format = with.ToString();\n\tba::replace_all(m_data, search, format);\n}\n\nCString CString::Left(int count) const\n{\n // make sure that during size_t to int conversion we don't loose data\n\tassert(std::numeric_limits<int>::max() > m_data.length());\n auto length = static_cast<int>(m_data.length());\n\n\tcount = std::clamp(count, 0, length);\n\treturn m_data.substr(0, count);\n}\n\nCString CString::Right(int count) const\n{\n // make sure that during size_t to int conversion we don't loose data\n assert(std::numeric_limits<int>::max() > m_data.length());\n auto length = static_cast<int>(m_data.length());\n\n\tcount = std::clamp(count, 0, length);\n\treturn m_data.substr(length - count);\n}\n\nCString CString::Mid(int first) const\n{\n // make sure that during size_t to int conversion we don't loose data\n assert(std::numeric_limits<int>::max() > m_data.length());\n auto length = static_cast<int>(m_data.length());\n\n\tfirst = std::clamp(first, 0, length);\n\treturn m_data.substr(first);\n}\n\nCString CString::Mid(int first, int count) const\n{\n // make sure that during size_t to int conversion we don't loose data\n assert(std::numeric_limits<int>::max() > m_data.length());\n auto length = static_cast<int>(m_data.length());\n\n\tfirst = std::clamp(first, 0, length);\n\tcount = std::clamp(count, 0, length);\n\treturn m_data.substr(first, count);\n}\n\nCString CString::Tokenize(const char* delimiter, int& startPosition) const\n{\n CString cResult;\n std::string toTokenize = m_data.substr(startPosition);\n boost::char_separator<char> sep(delimiter);\n boost::tokenizer<boost::char_separator<char>> tokens(toTokenize, sep);\n if (tokens.begin() != tokens.end())\n {\n std::string result = *tokens.begin();\n\n // update startPosition\n auto positionInToTokenize = toTokenize.find(result);\n 
startPosition += positionInToTokenize + result.length();\n\n cResult = result;\n }\n return cResult;\n}\n\nCString CString::SpanExcluding(const char* delimiter) const\n{\n\n CString cResult;\n boost::char_separator<char> sep(delimiter);\n boost::tokenizer<boost::char_separator<char>> tokens(m_data, sep);\n if (tokens.begin() != tokens.end())\n {\n std::string result = *tokens.begin();\n cResult = result;\n }\n return cResult;\n}\n\nCString operator+(const CString& str1, const CString& str2)\n{\n CString cStr1(str1);\n\tcStr1 += str2;\n return cStr1;\n}\n\nCString operator+(const CString& str1, const char* str2)\n{\n CString cStr1(str1);\n\tcStr1 += str2;\n return cStr1;\n}\n\nCString operator+(const char* str1, const CString& str2)\n{\n CString cStr1(str1);\n\tcStr1 += str2;\n return cStr1;\n}\n" }, { "alpha_fraction": 0.4413933753967285, "alphanum_fraction": 0.4902969002723694, "avg_line_length": 28.875362396240234, "blob_id": "dcacd568f753d0e3d4f19c90e10f2573e3bbe8a3", "content_id": "c83aa7bc3331842f57ff558d5c574a7bb1fdaae1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 10306, "license_type": "no_license", "max_line_length": 120, "num_lines": 345, "path": "/Linux/tests/test_coledatetime.cpp", "repo_name": "RobertCurry/VarroaPopLinux", "src_encoding": "UTF-8", "text": "#include \"catch2/catch.hpp\"\n\n//! include explicitely since we are not using the pre-compiled header for tests\n//! 
may change this in the future \n#include \"stdafx.h\" \n\n#include \"coledatetime.h\"\n\nTEST_CASE(\"COleDateTime operations\", \"[port]\") {\n \n SECTION(\"default date\") {\n \n COleDateTime dateTime;\n\n CHECK(dateTime.GetStatus() == COleDateTime::valid);\n\n CHECK(dateTime.GetYear() == 1899);\n CHECK(dateTime.GetMonth() == 12);\n CHECK(dateTime.GetDay() == 30);\n }\n\n SECTION(\"custom date\") {\n\n COleDateTime dateTime (1982, 1, 11, 13, 3, 0);\n \n CHECK(dateTime.GetStatus() == COleDateTime::valid);\n\n CHECK(dateTime.GetYear() == 1982);\n CHECK(dateTime.GetMonth() == 1);\n CHECK(dateTime.GetDay() == 11);\n CHECK(dateTime.GetHour() == 13);\n CHECK(dateTime.GetMinute() == 3);\n CHECK(dateTime.GetDayOfYear() == 11);\n }\n\n SECTION(\"date comparison\") {\n\n COleDateTime first (1982, 1, 11, 13, 3, 0);\n COleDateTime second (1982, 8, 28, 21, 0, 0);\n COleDateTime third (2020, 8, 28, 21, 0, 0);\n COleDateTime fourth (2020, 8, 28, 21, 0, 1);\n COleDateTime fifth (2020, 8, 28, 21, 0, 1);\n \n CHECK(first.GetStatus() == COleDateTime::valid);\n CHECK(second.GetStatus() == COleDateTime::valid);\n CHECK(third.GetStatus() == COleDateTime::valid);\n CHECK(fourth.GetStatus() == COleDateTime::valid);\n CHECK(fifth.GetStatus() == COleDateTime::valid);\n\n CHECK(first < second);\n CHECK(second > first);\n CHECK(first >= first);\n CHECK(second >= second);\n\n CHECK(third < fourth);\n CHECK(fourth > third);\n CHECK(fourth >= fifth);\n CHECK(fourth >= third);\n }\n\n SECTION(\"date operations\") {\n\n COleDateTime first (1982, 1, 11, 13, 3, 0);\n COleDateTime second (1982, 8, 28, 21, 0, 0);\n COleDateTime third (2020, 8, 28, 21, 0, 0);\n COleDateTime fourth (2020, 8, 28, 21, 0, 1);\n COleDateTime fifth (2020, 8, 28, 21, 0, 1);\n \n CHECK(first.GetStatus() == COleDateTime::valid);\n CHECK(second.GetStatus() == COleDateTime::valid);\n CHECK(third.GetStatus() == COleDateTime::valid);\n CHECK(fourth.GetStatus() == COleDateTime::valid);\n CHECK(fifth.GetStatus() == 
COleDateTime::valid);\n\n CHECK((second - first).GetDays() == 229);\n CHECK((fourth - third).GetDays() == 0);\n\n auto sixth = fifth + COleDateTimeSpan(3.0);\n CHECK((sixth - fifth).GetDays() == 3);\n \n auto seventh = sixth - COleDateTimeSpan(1.0);\n CHECK((seventh - fifth).GetDays() == 2);\n }\n\n SECTION(\"ParseDate\") {\n\n COleDateTime dt;\n dt.ParseDateTime(\"toto\");\n CHECK(dt.GetStatus() == COleDateTime::error);\n \n dt.ParseDateTime(\"1/01/2001\", VAR_DATEVALUEONLY);\n INFO(\"This test fails on Linux due to a bug in gnu compiler https://gcc.gnu.org/bugzilla/show_bug.cgi?id=45896\")\n CHECK(dt.GetStatus() == COleDateTime::valid);\n \n dt.ParseDateTime(\"01/01/2001\", VAR_DATEVALUEONLY);\n CHECK(dt.GetStatus() == COleDateTime::valid);\n \n CHECK(dt.GetYear() == 2001);\n CHECK(dt.GetMonth() == 1);\n CHECK(dt.GetDay() == 1);\n CHECK(dt.GetHour() == 0);\n CHECK(dt.GetMinute() == 0);\n \n dt.ParseDateTime(\"12/31/2020 16:34:05\");\n CHECK(dt.GetStatus() == COleDateTime::valid);\n \n CHECK(dt.GetYear() == 2020);\n CHECK(dt.GetMonth() == 12);\n CHECK(dt.GetDay() == 31);\n CHECK(dt.GetHour() == 16);\n CHECK(dt.GetMinute() == 34);\n }\n\n SECTION(\"Format\") {\n\n COleDateTime dt;\n dt.ParseDateTime(\"12/31/2020 16:34:05\");\n CHECK(dt.GetStatus() == COleDateTime::valid);\n \n CHECK(dt.Format(\"%Y\") == \"2020\");\n CHECK(dt.Format(\"%Y-%m-%d\") == \"2020-12-31\");\n CHECK(dt.Format(\"%m/%d/%Y\") == \"12/31/2020\");\n }\n\n SECTION(\"SetDate\") {\n\n COleDateTime dt;\n dt.SetDate(2020, 12, 1);\n CHECK(dt.GetStatus() == COleDateTime::valid);\n \n CHECK(dt.GetYear() == 2020);\n CHECK(dt.GetMonth() == 12);\n CHECK(dt.GetDay() == 1);\n CHECK(dt.GetHour() == 0);\n CHECK(dt.GetMinute() == 0);\n }\n\n SECTION(\"COleDateTimeSpan\") {\n\n SECTION(\"COleDateTimeSpan()\") {\n {\n COleDateTimeSpan span(2.0);\n CHECK(span.GetDays() == 2);\n }\n {\n COleDateTimeSpan span(2.3);\n CHECK(span.GetDays() == 2);\n }\n {\n COleDateTimeSpan span(4.9);\n CHECK(span.GetDays() == 4);\n }\n 
}\n\n SECTION(\"COleDateTimeSpan(...)\") {\n {\n COleDateTimeSpan span(2, 48, 50, 1);\n CHECK(span.GetDays() == 4);\n }\n {\n COleDateTimeSpan span(0, 24, 0, 0);\n CHECK(span.GetDays() == 1);\n }\n {\n COleDateTimeSpan span(0, 48, 60 * 24, 0);\n CHECK(span.GetDays() == 3);\n }\n }\n }\n\n SECTION(\"date MFC like\") {\n {\n COleDateTime dateTime(0.0);\n\n CHECK(dateTime.GetStatus() == COleDateTime::valid);\n\n CHECK(dateTime.GetYear() == 1899);\n CHECK(dateTime.GetMonth() == 12);\n CHECK(dateTime.GetDay() == 30);\n \n CHECK(dateTime.GetHour() == 0);\n CHECK(dateTime.GetMinute() == 0);\n }\n {\n COleDateTime dateTime(1.0);\n\n CHECK(dateTime.GetStatus() == COleDateTime::valid);\n\n CHECK(dateTime.GetYear() == 1899);\n CHECK(dateTime.GetMonth() == 12);\n CHECK(dateTime.GetDay() == 31);\n \n CHECK(dateTime.GetHour() == 0);\n CHECK(dateTime.GetMinute() == 0);\n }\n {\n COleDateTime dateTime(2.0);\n\n CHECK(dateTime.GetStatus() == COleDateTime::valid);\n\n CHECK(dateTime.GetYear() == 1900);\n CHECK(dateTime.GetMonth() == 1);\n CHECK(dateTime.GetDay() == 1);\n \n CHECK(dateTime.GetHour() == 0);\n CHECK(dateTime.GetMinute() == 0);\n }\n {\n COleDateTime dateTime(5.25);\n\n CHECK(dateTime.GetStatus() == COleDateTime::valid);\n\n CHECK(dateTime.GetYear() == 1900);\n CHECK(dateTime.GetMonth() == 1);\n CHECK(dateTime.GetDay() == 4);\n \n CHECK(dateTime.GetHour() == 6);\n CHECK(dateTime.GetMinute() == 0);\n }\n {\n COleDateTime dateTime(5.50);\n\n CHECK(dateTime.GetStatus() == COleDateTime::valid);\n\n CHECK(dateTime.GetYear() == 1900);\n CHECK(dateTime.GetMonth() == 1);\n CHECK(dateTime.GetDay() == 4);\n \n CHECK(dateTime.GetHour() == 12);\n CHECK(dateTime.GetMinute() == 0);\n }\n {\n COleDateTime dateTime(5.875);\n\n CHECK(dateTime.GetStatus() == COleDateTime::valid);\n\n CHECK(dateTime.GetYear() == 1900);\n CHECK(dateTime.GetMonth() == 1);\n CHECK(dateTime.GetDay() == 4);\n \n CHECK(dateTime.GetHour() == 21);\n CHECK(dateTime.GetMinute() == 0);\n }\n {\n COleDateTime 
dateTime(-0.75);\n\n CHECK(dateTime.GetStatus() == COleDateTime::valid);\n\n CHECK(dateTime.GetYear() == 1899);\n CHECK(dateTime.GetMonth() == 12);\n CHECK(dateTime.GetDay() == 30);\n \n CHECK(dateTime.GetHour() == 18);\n CHECK(dateTime.GetMinute() == 0);\n }\n {\n COleDateTime dateTime(-1.75);\n\n CHECK(dateTime.GetStatus() == COleDateTime::valid);\n\n CHECK(dateTime.GetYear() == 1899);\n CHECK(dateTime.GetMonth() == 12);\n CHECK(dateTime.GetDay() == 29);\n \n CHECK(dateTime.GetHour() == 18);\n CHECK(dateTime.GetMinute() == 0);\n }\n }\n\n SECTION(\"To MFC Date\") {\n \n {\n COleDateTime dateTime (1899, 12, 30, 0, 0, 0);\n CHECK(dateTime.GetStatus() == COleDateTime::valid);\n\n DATE dt;\n dateTime.GetAsDATE(dt);\n CHECK(dt == 0.0);\n }\n {\n COleDateTime dateTime (1899, 12, 31, 0, 0, 0);\n CHECK(dateTime.GetStatus() == COleDateTime::valid);\n\n DATE dt;\n dateTime.GetAsDATE(dt);\n CHECK(dt == 1.0);\n }\n {\n COleDateTime dateTime (1900, 1, 1, 0, 0, 0);\n CHECK(dateTime.GetStatus() == COleDateTime::valid);\n\n DATE dt;\n dateTime.GetAsDATE(dt);\n CHECK(dt == 2.0);\n }\n {\n COleDateTime dateTime (1900, 1, 4, 6, 0, 0);\n CHECK(dateTime.GetStatus() == COleDateTime::valid);\n\n DATE dt;\n dateTime.GetAsDATE(dt);\n CHECK(dt == 5.25);\n }\n {\n COleDateTime dateTime (1900, 1, 4, 12, 0, 0);\n CHECK(dateTime.GetStatus() == COleDateTime::valid);\n\n DATE dt;\n dateTime.GetAsDATE(dt);\n CHECK(dt == 5.5);\n }\n {\n COleDateTime dateTime (1900, 1, 4, 21, 0, 0);\n CHECK(dateTime.GetStatus() == COleDateTime::valid);\n\n DATE dt;\n dateTime.GetAsDATE(dt);\n CHECK(dt == 5.875);\n }\n {\n COleDateTime dateTime (1899, 12, 30, 18, 0, 0);\n CHECK(dateTime.GetStatus() == COleDateTime::valid);\n\n DATE dt;\n dateTime.GetAsDATE(dt);\n CHECK(dt == 0.75);\n }\n {\n COleDateTime dateTime (1899, 12, 29, 18, 0, 0);\n CHECK(dateTime.GetStatus() == COleDateTime::valid);\n\n DATE dt;\n dateTime.GetAsDATE(dt);\n CHECK(dt == -1.75);\n }\n {\n COleDateTime dateTime (1899, 12, 27, 0, 0, 0);\n 
CHECK(dateTime.GetStatus() == COleDateTime::valid);\n\n DATE dt;\n dateTime.GetAsDATE(dt);\n CHECK(dt == -3.00);\n }\n }\n}" }, { "alpha_fraction": 0.76888507604599, "alphanum_fraction": 0.7732065916061401, "avg_line_length": 79.89510345458984, "blob_id": "54ac85654aafb9c3b70aacbdccc831f5f8c35111", "content_id": "cac61177912599009d549f81a8ada84916ce283d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 11570, "license_type": "no_license", "max_line_length": 825, "num_lines": 143, "path": "/README.TXT", "repo_name": "RobertCurry/VarroaPopLinux", "src_encoding": "UTF-8", "text": "VarroaPop - Varroa Mite Population Model\n\nVarroaPop is a honeybee colony population model that incorporates the effects of a varroa mite infestation. VarroaPop allows the user to evaluate the effects of a variety of initial conditions and weather on the population growth of a Varroa mites and honeybees. It also allows the user to simulate the effects of varroa immigration into an existing colony.\n\n*********************************************************************\nNew Features in Version 2.3\n\nFixed file location issues. Adapted to more common file browsing method. \nFixed problem with multi-treatment of chemicals\n\n\n*********************************************************************\nNew Features in Version 2.2\n\nIncreased the number of actions that can be simulated \nFixed bug that over committed the graphics resources - used to crash if many (>50) charts were displayed over the life of a session\n\n*********************************************************************\nNew Features in Version 2.1\n\nAdded command-line control capabilities. See VarroaPop Help for details\n\n\n*********************************************************************\nNew Features in Version 2.0\n\nRe-Queening feature\n\tNow you can re-queen the colony to more accurately perform a multi-year simulation. 
Re-queening can either be automatic based on colony parameters or tied to specific dates.\n\n\nMite Treatment\n\tYou can now simulate the treatment of a colony with Varroa pesticides. \n\nMiscellaneous\n\tAdded capability to toggle betweeen weekly and daily results display and storage. This helps if you are looking at multi-year data but don't want to generate reams of paper. Also, the \"Weekly\" selection results in faster simulation runs.\n\t\n\tNow each output graph remains on the screen until the user closes it. This facilitates making several runs with varied initial conditons and graphically comparing the results.\n\n\n*********************************************************************\n\n\n\nSoftware Information\n\n1. System Requirements\n\nIn order to use VarroaPop, you must have Microsoft Windows 95, Windows NT 4.0 , or later versions installed on your computer. The VarroaPop software will consume approximately 1 MB of memory on your hard drive.\n\n2. VarroaPop Files\n\nAfter installing VarroaPop, you will have the following files in the installation directory (e.g. C:\\VarroaPop):\n\nVarroaPop.exe - the VarroaPop program\nMidwest.wth - a weather file containing 1 year of Midwestern US weather date\nSouthwest.wth - a weather file containing 1 year of Southwestern US weather data\nMidwest5yr.wth - a weather file containing five years of Midwestern US weather data\nSthwest5yr.wth - a weather file containing five years of Southwestern US weather data\nExample.col - an example colony file containing a set of initial conditions.\nDefault.vrp - a sample VarropPop session file.\nReadme.txt - this file\n\n3. Running the VarroaPop program\n\nAs with any Windows program, you can run VarroaPop from the Start button or from Windows Explorer. \n\nTo run from the Start button, press Start->Programs and scroll down to and select the VarroaPop folder. When you select this, you will see the VarroaPop icon. 
Double-click on this and VarroaPop will begin.\n\n \nTo run from Windows Explorer, start Windows Explorer (normally listed when you select Start->Programs). Navigate to the folder containing VarroaPop.exe. Double-click on VarroaPop.exe and VarroaPop will start.\n\nAlternately, you can place a shortcut to VarroaPop on your desktop. This can be done by first navigating to the VarroaPop program with Windows Explorer in the same way as described above. If you drag the VarroaPop icon from the Explorer window and drop it on your desktop, Windows will create a shortcut for you. From that point on, you can run VarroaPop by double-clicking the shortcut.\n\n\nUsing VarroaPop\n\n1. Overview\n\nThe basic concept of VarroaPop is to take some initial conditions for a honeybee colony, combine that with some weather data, then step day-by-day over a specific simulation period and monitor the colony's population changes. As you read this file, we will refer to a VarroaPop session. A session is comprised of the weather data and the colony initial conditions.\n\nAfter the simulation has been run, VarroaPop displays the results two ways. First, a graphical result is displayed on the screen. This graphical result can simultaneously show the value of multiple user-selected parameters plotted over time. The other result information consists of tabular data that is shown on the VarroaPop main window. The tabular and graphical results can be saved to a file for later use and it can be printed.\n\nAs a comment on notation, when we refer to a menu selection, the notation \"File->Save->Results\" means, press the \"File\" menu selection, then press the \"Save\" selection on the resulting menu, and finally, press the \"Results\" selection on the next menu.\n\n\n2. Details\n\n2.1 Initial Screen\n\nWhen VarroaPop first starts, the initial screen is displayed:\n\nNo session is loaded yet so no simulation can be performed. 
The first step is to either create a simulation from scratch or load an existing simulation. \n\n2.2 Starting a VarroaPop Session\n\nThe easiest thing to do is to load an existing simulation. That can be done from the menu by selecting File->Open. At that point, a file open dialog is displayed and you can select an existing VarroaPop session file. First time users should choose the DEFAULT.VRP file by navigating to the installation directory. Files can also be opened by pressing the File Open button on the toolbar.\n\nTo see the initial conditions that were just loaded with the file, select View->Model Parameters. You will then see the colony name and the initial numbers of workers and drones at all life stages. The number of days a worker can forage is displayed and can be controlled by sliding the small rectangle horizontally. Information about the queen is also displayed. You can change any of these values but you must press the OK button for the changes to take effect.\n\nA new session can be created by selecting File->New. This will display a blank Colony Initial Conditions screen that can be filled out as desired (more about that later). Again, the same result can be obtained by pressing the New button on the toolbar.\n\n2.3 Running a Simulation\n\nOnce a session is loaded, the simulation start and stop dates on the right side of the screen become active and are initially set to the beginning and ending dates in the weather data. You can change the simulation start and stop times to be any timespan of interest within the limits of the weather data dates. Once you have selected the Simulation Start and Stop dates (e.g. Start 01/01/1999, Stop 12/31/2001) press the Run button (found on the lower right side of the main screen) to begin the simulation. When the simulation is complete, the results are displayed graphically. To better see the tabular form press OK on the graphic screen. 
You may print and/or save the tabular results.\n\nThe simulation may be re-run at any time by just pressing the Run button again. Rather than getting the same results over and over, however, you will usually want to make some changes in your session between successive simulations.\n\n2.4 Modifying the Session\n\nThere are many variables you can modify before running a simulation. These include initial conditions, weather data, graphical display parameters, and immigration profiles. All these modifications are stored when you save the session.\n\nModifying Initial Conditions and Weather Data\n\nInitial conditions and weather data are modified using the Model Parameters dialog box. This display is obtained by selecting View->Model Parameters or by pressing the Model Parameters toggle button on the toolbar. This button looks like a honeycomb.\n\nThe Colony Name field (at the top of the screen) will be blank on a new session. The Colony Name field will contain the name of the current colony data for an existing session. Pressing the down arrow next to the Colony Name field will display all the existing colony files in the current folder.\n\nYou can enter initial conditions for Worker and Drone populations in each of the life stages. You can also enter initial conditions for Varroa mite infestation percentages, mite reproductive rate and survivorship. You can also set the forager lifespan as well as parameters relating to the amount of sperm in the queen's spermacetha and the maximum potential number of eggs the queen can lay per day. In order for any changes made to take effect, you must press the Apply button.\n\nYou can also modify the weather data you are using by pressing the Weather tab. This will display a field identifying the current weather file (blank if this is a new session). Similar to the Colony Name field, pressing the down arrow next to the weather file name field will display all weather files in the folder. 
You must press the Select button to actually choose a weather file. \n\nThe weather files are text files that can be read by any application that can load text (WordPad, Write, MS Word, etc.) If you need to actually change the data in a weather file, you can load the file into an application, edit it, and re-save it as a text file but be sure the file extension is \".wth\".\n\nTo save the changes made to the Session, select File->Save->Save Session. If you want to save the changes to a file with a different name, select File->SaveAs and enter the filename you would like to use.\n\nModifying Parameters to Graph\n\nYou can graphically display all important parameters resulting from the simulation. In order to allow you to focus on just the data you want, a Graph Selection dialog is displayed by either selecting View->Plot Data Selection or by pressing the Plot Data Selection button on the toolbar. \n\nYou can select as many of these parameters as you would like, however with more than four or five, individual lines are difficult to discern. Select the parameter to be included in the graph by placing the pointer n the box next to the parameter and pressing the left mouse button. Also note that the Proportion of Infested Brood values will always be between 0 and 1 while parameters such as Colony Size can get very large. Therefore, you won't see the Proportion parameters if you plot them with population counts. \n\n\nModifying Immigration Profiles\n\nYou can enable or disable immigration of adult Varroa mites by making the proper selection on the main screen. To change or set up the immigration profiles, press the Setup button in the Immigration block on the main screen. The Immigration block is above the simulation block on the right side of the screen. To set up an immigration event, click on Setup. You will see a screen that allows you to select the immigration distribution from six predefined distributions by clicking on the one desired. 
You can define how many mites will immigrate by entering a number in the Number of Mites Immigrating box on the left side of the screen. Finally, you can select the Immigration Start and End dates on the right side of the screen. These dates must lie within the simulation timespan. When you are finished, select OK.\n\nSaving Simulation Results\n\nTo save simulation results in tabular form, choose File->Save->Simulation Results and enter the name for the file.\n\n\n\nYou are now ready to run a VarroaPop simulation. You can obtain additional information any time during the operation of the program by select Help from the main menu.\n\n\n" }, { "alpha_fraction": 0.6818810701370239, "alphanum_fraction": 0.6832641959190369, "avg_line_length": 19.08333396911621, "blob_id": "e671875ccbb9529472ecef0947a6544b2f3ea41a", "content_id": "d24c5bebca46789e295f763c8522430522b9c0be", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 723, "license_type": "no_license", "max_line_length": 95, "num_lines": 36, "path": "/Linux/portcode/varroapop.cpp", "repo_name": "RobertCurry/VarroaPopLinux", "src_encoding": "UTF-8", "text": "#include \"varroapop.h\"\n\n#include <boost/filesystem.hpp>\nnamespace bfs = boost::filesystem;\n\nbool gl_RunGUI = false;\n\nint MyMessageBox( LPCTSTR lpszText, UINT nType, UINT nIDHelp )\n{\n NOT_IMPLEMENTED();\n return -1;\n}\n\nCString SplitPath(CString PathString, PELEMENT PathElement)\n{\n\tbfs::path path(PathString.ToString());\n\tstd::string result;\n\tswitch (PathElement)\n\t{\n\tcase DRV:\n\t\tresult = path.root_name().string();\n\t\tbreak;\n\tcase DIR:\n\t\tresult = path.parent_path().string() + \"/\"; // add slash at the end for Windows compatibility\n\t\tbreak;\n\tcase FNAME:\n\t\tresult = path.stem().string();\n\t\tbreak;\n\tcase EXT:\n\t\tresult = path.extension().string();\n\t\tbreak;\n\tdefault: \n\t\tNOT_IMPLEMENTED();\n\t}\n return CString(result);\n}\n" }, { "alpha_fraction": 
0.6981796622276306, "alphanum_fraction": 0.7119788527488708, "avg_line_length": 20.826923370361328, "blob_id": "487cd1a5e6d0281a6ba26b7f461a1d1e983115ad", "content_id": "bc783106a2eac320a257834257049aab0296cfa1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 3406, "license_type": "no_license", "max_line_length": 131, "num_lines": 156, "path": "/Linux/portcode/stdafx.h", "repo_name": "RobertCurry/VarroaPopLinux", "src_encoding": "UTF-8", "text": "#pragma once\n#ifndef STDAFX_CUSTOM_H\n#define STDAFX_CUSTOM_H\n\n/**\n * Void all MFC macros \n */\n#define DECLARE_DYNCREATE(Class) public:\n#define IMPLEMENT_DYNCREATE(Class, BaseClass) // void\n\n#define DECLARE_SERIAL(Class) // void\n#define IMPLEMENT_SERIAL(Class, BaseClass, Offset) // void\n\n#define DECLARE_MESSAGE_MAP() // void\n#define BEGIN_MESSAGE_MAP(Class, BaseClass) // void\n#define END_MESSAGE_MAP() // void\n\n#define DDX_Text(_1, _2, _3) // void\n#define DDV_MinMaxInt(_1, _2, _3, _4) // void\n#define DDX_Radio(_1, _2, _3) // void\n\n#define afx_msg \n\n#define XSTR(x) STR(x)\n#define STR(x) #x\n\n#include <cassert>\n#define ASSERT assert\n\n#include <memory>\n#define DEBUG_NEW new\n\n#include <cstdio> // stderr\n#include <cstdlib> // stderr\n#include <iostream> // std::cerr\n\n/**\n * Make the TRACE method to work using the fmt dependency\n */\n#include \"fmt/printf.h\"\n#ifdef TRACE\n#define TRACE(...) 
fmt::printf(__VA_ARGS__)\n#else \n#define TRACE(...)\n#endif // DEBUG\n\n#ifndef WINDOWS\n#if _WIN32 || _WIN64\n#define WINDOWS\n#endif\n#endif\n\n/**\n * Typedef MFC types to standard types\n */\n#include <cstdint>\ntypedef int32_t BOOL;\ntypedef unsigned char BYTE;\ntypedef BYTE BOOLEAN;\ntypedef unsigned short WORD;\ntypedef unsigned long DWORD;\ntypedef uint64_t ULONG_PTR;\ntypedef ULONG_PTR DWORD_PTR;\ntypedef uint32_t UINT;\ntypedef char* LPTSTR;\ntypedef const char* LPCTSTR;\ntypedef char TCHAR;\ntypedef intptr_t INT_PTR;\ntypedef uintptr_t UINT_PTR;\ntypedef long LONG;\ntypedef int64_t LONGLONG;\ntypedef uint64_t ULONGLONG;\ntypedef double DATE;\n\n#define VAR_TIMEVALUEONLY ((DWORD)0x00000001) /* return time value */\n#define VAR_DATEVALUEONLY ((DWORD)0x00000002) /* return date value */\n\n/**\n * Redeclare the SYSTEMTIME structure for non-Windows systems\n */\n#ifndef WINDOWS\n#ifndef _SYSTEMTIME_\n#define _SYSTEMTIME_\ntypedef struct _SYSTEMTIME\n{\n\tWORD wYear;\n\tWORD wMonth;\n\tWORD wDayOfWeek;\n\tWORD wDay;\n\tWORD wHour;\n\tWORD wMinute;\n\tWORD wSecond;\n\tWORD wMilliseconds;\n} SYSTEMTIME;\ntypedef struct _SYSTEMTIME *PSYSTEMTIME;\ntypedef struct _SYSTEMTIME *LPSYSTEMTIME;\ntypedef struct {\n SYSTEMTIME st;\n WORD wDayOfYear;\n} UDATE;\n#endif\n#else\n#define NOMINMAX\n#include <windows.h>\n#endif\n\n#define TRUE true\n#define FALSE false\n\n// Replacement of the POSITION struct in the MFC framework\n#include \"position.h\"\n\n#include \"carchive.h\"\n#include \"carray.h\"\n#include \"cobject.h\"\n#include \"cdialog.h\"\n#include \"ccmdtarget.h\"\n#include \"cfile.h\"\n#include \"cmapstringtoob.h\"\n#include \"coblist.h\"\n#include \"coledatetime.h\"\n#include \"cptrlist.h\"\n#include \"cstring.h\"\n#include \"cstringarray.h\"\n#include \"ctime.h\"\n#include \"cuintarray.h\"\n\n/**\n * Make CString serializable in fmt formater\n */\n#include \"fmt/format.h\"\ntemplate<>\nstruct fmt::formatter<CString>\n{\n template<typename ParseContext>\n 
constexpr auto parse(ParseContext& ctx);\n\n template<typename FormatContext>\n auto format(CString const& number, FormatContext& ctx);\n};\n#include \"fmt/ostream.h\"\nstd::ostream &operator<<(std::ostream &stream, const CString& string);\n\n#include \"cstring.format.h\"\n\n/**\n * Define strcpy_s on non-Windows systems\n */\n#if !defined(_WIN32) && !defined(_WIN64) && !defined(__STDC_LIB_EXT1__)\n#define strcpy_s strcpy\n#define strtok_s(_In, _Delim, _SafeToken) strtok(_In, _Delim)\n#endif\n\n#define NOT_IMPLEMENTED() std::cerr << __FUNCTION__ << \": NOT IMPLEMENTED in \" << __FILE__ << \"(\" << __LINE__ << \")\" << std::endl;\n\n#endif // STDAFX_CUSTOM_H\n\n" }, { "alpha_fraction": 0.5271309614181519, "alphanum_fraction": 0.5642802715301514, "avg_line_length": 29.87959861755371, "blob_id": "3d3ac9ab9d920956b276e20751e36c02b4011477", "content_id": "a3c2cd3d8fb7975d06157d8848cbcdccb13a80eb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 9233, "license_type": "no_license", "max_line_length": 244, "num_lines": 299, "path": "/WeatherGridData.cpp", "repo_name": "RobertCurry/VarroaPopLinux", "src_encoding": "UTF-8", "text": "#include \"stdafx.h\"\n#include \"WeatherGridData.h\"\n#include \"WeatherEvents.h\"\n\n#define _USE_MATH_DEFINES\n#include <math.h>\n\n#include <array>\n#include <cmath>\n#include <fstream>\n#include <numeric>\n#include <regex>\n\n/**\n * Define accessors for Binary data structures access\n */\n\ntemplate<>\nstruct DataItemAccessor<ObservedHistoricalItem>\n{\n const ObservedHistoricalItem& m_dataItem;\n\n DataItemAccessor(const ObservedHistoricalItem& dataItem) : m_dataItem(dataItem) {}\n\n double PPT()\n {\n const static double pptMultiplier = 1.0 / 40.0;\n return m_dataItem.PPT * pptMultiplier;\n }\n double TMAX()\n {\n const static double tMultiplier = 1.0 / 100.0;\n return m_dataItem.TMAX * tMultiplier;\n }\n double TMIN()\n {\n const static double tMultiplier = 1.0 / 100.0;\n return 
m_dataItem.TMIN * tMultiplier;\n }\n double WIND()\n {\n const static double wMultiplier = 1.0 / 100.0;\n return m_dataItem.WIND * wMultiplier;\n }\n double SPH()\n {\n const static double sphMultiplier = 1.0 / 1000.0;\n return m_dataItem.SPH * sphMultiplier;\n }\n double SRAD()\n {\n const static double sradMultiplier = 1.0 / 40.0;\n return m_dataItem.SRAD * sradMultiplier;\n }\n double RMAX()\n {\n const static double rMultiplier = 1.0 / 100.0;\n return m_dataItem.RMAX * rMultiplier;\n }\n double RMIN()\n {\n const static double rMultiplier = 1.0 / 100.0;\n return m_dataItem.RMIN * rMultiplier;\n }\n};\n\n/**\n * WeatherGridData Implementation\n */\n\ntemplate<typename GridDataType>\nWeatherGridData<GridDataType>::WeatherGridData()\n{\n\n}\n\ntemplate<typename GridDataType>\nconst typename WeatherGridData<GridDataType>::Data& WeatherGridData<GridDataType>::data() const\n{\n return m_data;\n}\n\ntemplate<typename GridDataType>\nvoid WeatherGridData<GridDataType>::load(const std::string& filename)\n{\n m_data.clear();\n\n std::ifstream istream(filename, std::ifstream::in | std::ifstream::binary);\n if (istream.good())\n {\n GridDataType data;\n while (!istream.eof())\n {\n istream.read(reinterpret_cast<char*>(&data), sizeof(data));\n if (istream.good())\n {\n m_data.push_back(data);\n }\n }\n }\n}\n\ntemplate<>\nCOleDateTime WeatherGridData<ObservedHistoricalItem>::getStartTime() const\n{\n return COleDateTime(1979, 1, 1, 0, 0, 0);\n}\n\ntemplate<>\nCOleDateTime WeatherGridData<ModeledHistoricalItem>::getStartTime() const\n{\n return COleDateTime(1950, 1, 1, 0, 0, 0);\n}\n\ntemplate<>\nCOleDateTime WeatherGridData<Rcp85>::getStartTime() const\n{\n return COleDateTime(2006, 1, 1, 0, 0, 0);\n}\n\ntemplate<typename GridDataType>\nCOleDateTime WeatherGridData<GridDataType>::getEndTime() const\n{\n COleDateTimeSpan span(m_data.size() - 1, 0, 0, 0); // remove start day \n return getStartTime() + span;\n}\n\nnamespace WeatherGridDataNs\n{\n template<typename 
GridDataType>\n WeatherGridData<GridDataType> LoadGridData(const std::string& filename)\n {\n WeatherGridData<GridDataType> data;\n data.load(filename);\n return data;\n }\n\n DayLengthResult DayLength(float latitude, int jDay)\n {\n const double gamma = 2.0 * M_PI / 365.0 * (jDay - 1.0);\n const double delta = 180.0 / M_PI * (0.006918 - 0.399912 * std::cos(gamma) + 0.070257 * std::sin(gamma) - 0.006758 * std::cos(gamma) + 0.000907 * std::sin(gamma) - 0.002697 * std::cos(3.0 * (gamma)) + 0.00148 * std::sin(3.0 * (gamma)));\n const double cosWo = (sin(-0.8333 / 360.0 * 2.0 * M_PI) - sin(latitude / 360.0 * 2.0 * M_PI) *\n sin(delta / 360.0 * 2.0 * M_PI)) / (cos(latitude / 360.0 * 2.0 * M_PI) * cos(delta / 360.0 * 2.0 * M_PI));\n\n DayLengthResult result;\n if (cosWo >= -1.0 && cosWo <= 1.0)\n {\n result.sunrise = 12.0 - std::acos(cosWo) / (15.0 / 360.0 * 2.0 * M_PI);\n result.sunset = 12.0 + std::acos(cosWo) / (15.0 / 360.0 * 2.0 * M_PI);\n result.daylength = result.sunset - result.sunrise;\n }\n else if (cosWo > 1.0)\n {\n result.daylength = 0;\n }\n else if (cosWo < -1.0)\n {\n result.daylength = 24.0;\n }\n if (result.daylength == 24.0)\n {\n result.sunrise = 99.0;\n result.sunset = 99.0;\n }\n return result;\n }\n\n double DayLightHours(float latitude, int JDay)\n {\n int J = JDay;\n double P = std::asin(0.39795 * std::cos(0.2163108 + 2 * std::atan(0.9671396 * std::tan(0.00860 * (J - 186)))));\n double D = 24 - (24 / M_PI) * std::acos((std::sin(0.833 * M_PI / 180) + (std::sin(latitude * M_PI / 180) * std::sin(P))) / (std::cos(latitude * M_PI / 180) * std::cos(P)));\n return D;\n }\n\n int ComputeJDay(const COleDateTime& date)\n {\n const bool leapYear = (date.GetYear() % 4) == 0;\n std::vector<int> months = { 31, (!leapYear)?28:29, 31, 30, 31 , 30, 31 , 31, 30 , 31, 30, 31 };\n const int month = date.GetMonth();\n const int jDay = std::accumulate(months.begin(), months.begin() + month - 1, date.GetDay());\n return jDay;\n }\n\n double ComputeDaylightHours(const 
std::string& filename, const COleDateTime& date)\n {\n const double latitude = GetLatitudeFromFilename(filename);\n const double jDay = ComputeJDay(date);\n const double daylightHours = DayLength(latitude, jDay).daylength;\n return daylightHours;\n }\n\n class malformated_latitude_exception : public std::exception\n {\n public:\n malformated_latitude_exception(const std::string& filename) \n : m_filename(filename) \n {\n }\n virtual ~malformated_latitude_exception()\n {\n }\n virtual const char* what() const noexcept \n { \n std::string message = \"can't find latitude in file named: \" + m_filename; \n return message.c_str(); \n }\n private:\n std::string m_filename;\n };\n\n double GetLatitudeFromFilename(const std::string& filename)\n {\n const std::regex latitudeSearch(\"_([+-]?[0-9]*[.][0-9]+)_[+-]?[0-9]*[.][0-9]+\");\n std::smatch latutudeMatch;\n if (std::regex_search(filename, latutudeMatch, latitudeSearch))\n {\n if (latutudeMatch.size() == 2)\n {\n return std::atof(latutudeMatch[1].str().c_str());\n }\n else\n {\n throw malformated_latitude_exception(filename);\n }\n }\n else\n {\n throw malformated_latitude_exception(filename);\n }\n }\n \n void HourlyTempraturesEstimator::compute()\n {\n std::double_t tsunset = tmin + ((tmax)-tmin) * std::sin(M_PI * (daylength / ((daylength)+4)));\n\n std::double_t prev_tsunset = 0.0;\n if (prev_tmin != (std::numeric_limits<double>::max)())\n {\n prev_tsunset = prev_tmin + ((prev_tmax)-prev_tmin) * std::sin((M_PI * (daylength) / ((daylength)+4)));\n }\n\n for (int h = 0; h < hourly_temperatures.size(); h++)\n {\n if (h <= sunrise)\n {\n if (prev_tmin != (std::numeric_limits<double>::max)())\n {\n hourly_temperatures[h] = prev_tsunset - ((prev_tsunset - tmin) / std::log((std::max)(1.0, 24.0 - ((prev_sunset)-sunrise))) * std::log(h + 24.0 - prev_sunset + 1));\n }\n else\n {\n hourly_temperatures[h] = tmin;\n }\n }\n else if (h > sunrise && h <= sunset)\n {\n hourly_temperatures[h] = tmin + (((tmax)-tmin) * std::sin(M_PI * 
((h)-sunrise) / ((daylength)+4)));\n }\n else if (h > sunset)\n {\n if (next_tmin != (std::numeric_limits<double>::max)())\n {\n hourly_temperatures[h] = tsunset - ((tsunset - next_tmin) / std::log(24.0 - ((sunset)-next_sunrise) + 1) * std::log(h - sunset + 1));\n }\n else\n {\n hourly_temperatures[h] = tmin;\n }\n }\n }\n }\n\n int HourlyTempraturesEstimator::count_dayligth(double temperatureMinThreshold /*= 12.0*/, double temperatureMaxThreshold /*= 43.33*/)\n {\n int count = 0;\n for (size_t h = 0; h < hourly_temperatures.size(); h++)\n {\n if (h > sunrise && h <= sunset)\n {\n if (hourly_temperatures[h] > temperatureMinThreshold && hourly_temperatures[h] < temperatureMaxThreshold)\n count++;\n }\n }\n return count;\n }\n}\n\n\n// Explicit instanciations of template classes\n\n#define INSTANCIATE_WEATHER_GRID_DATA_TEMPLATES(TypeName)\\\n template struct DataItemAccessor<TypeName>;\\\n template class WeatherGridData<TypeName>;\\\n template WeatherGridData<TypeName> WeatherGridDataNs::LoadGridData(const std::string& filename);\n\nINSTANCIATE_WEATHER_GRID_DATA_TEMPLATES(ObservedHistoricalItem);\nINSTANCIATE_WEATHER_GRID_DATA_TEMPLATES(ModeledHistoricalItem);\nINSTANCIATE_WEATHER_GRID_DATA_TEMPLATES(Rcp85);\n" }, { "alpha_fraction": 0.7198085188865662, "alphanum_fraction": 0.7382179498672485, "avg_line_length": 20.90322494506836, "blob_id": "4914e2891606e115cad5486af2a2980f946bf119", "content_id": "3df9b04b7ba8a462189785f43e761c443f7ffa77", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 2716, "license_type": "no_license", "max_line_length": 94, "num_lines": 124, "path": "/Linux/portcode/coledatetime.h", "repo_name": "RobertCurry/VarroaPopLinux", "src_encoding": "UTF-8", "text": "#pragma once\n#ifndef COLEDATETIME_CUSTOM_H\n#define COLEDATETIME_CUSTOM_H\n\n#include \"stdafx.h\"\n\n#include <chrono>\n\n#define GetCurrentTime() GetTickCount()\n\nclass COleDateTimeSpan;\n\n/**\n * Only supports the necessary 
interface for the good behavior of VarroaPop\n * \n * TODO: Current implemenation uses std::chrono which has the desavantage of not supporting\n * dates before January 1st 1970 which is not the case with the COleDateTime of MFC. We should\n * refactor to another library that does.\n */\nclass COleDateTime\n{\npublic:\n\tenum DateTimeStatus\n\t{\n\t\terror = -1,\n\t\tvalid = 0,\n\t\tinvalid = 1, // Invalid date (out of range, etc.)\n\t\tnull = 2, // Literally has no value\n\t};\n\n\tstatic COleDateTime GetTickCount();\n\n\tfriend class CTime;\n\n\tCOleDateTime();\n\t\n\tCOleDateTime(DATE dateSrc);\n\n\tCOleDateTime(int32_t nYear,\n\t\tint32_t nMonth,\n\t\tint32_t nDay,\n\t\tint32_t nHour,\n\t\tint32_t nMin,\n\t\tint32_t nSec);\n\nprotected:\n\n\tCOleDateTime(const std::chrono::system_clock::time_point& timePoint);\n\npublic:\n\n\tint32_t GetYear() const;\n\tint32_t GetMonth() const;\n\tint32_t GetDay() const;\n\tint32_t GetHour() const;\n\tint32_t GetMinute() const;\n\n\tint32_t GetDayOfYear() const;\n\n\tDateTimeStatus GetStatus() const;\n\n\tbool operator < (const COleDateTime& other) const;\n\tbool operator > (const COleDateTime& other) const;\n\tbool operator >= (const COleDateTime& other) const;\n\tbool operator <= (const COleDateTime& other) const;\n\n\tCString Format(const char* format) const;\n\tbool ParseDateTime(const CString& dateTimeStr, DWORD dwFlags = 0);\n\n\t// returns 0 if successful, 1 otherwise\n\tint SetDate(int32_t year, int32_t month, int32_t day);\n\n\tbool GetAsSystemTime(SYSTEMTIME& time) const;\n\tbool GetAsUDATE(UDATE& date) const;\n\tbool GetAsDATE(DATE& date) const;\n\n\tCOleDateTime operator+(const COleDateTimeSpan& span) const;\n\tCOleDateTime operator-(const COleDateTimeSpan& span) const;\n\n\tCOleDateTime& operator+=(const COleDateTimeSpan& span);\n\tCOleDateTime& operator-=(const COleDateTimeSpan& span);\n\n\tCOleDateTimeSpan operator-(const COleDateTime& date) const;\n\nprotected:\n\n\t// here we use a time point to get 
milliseconds precision\n\tstd::chrono::system_clock::time_point m_time_point;\n\tDateTimeStatus m_status;\n};\n\n/**\n * Only supports the necessary interface for the good behavior of VarroaPop\n */\nclass COleDateTimeSpan\n{\npublic:\n\tfriend class COleDateTime;\n\n\tCOleDateTimeSpan();\n\n\tCOleDateTimeSpan(double dblSpanSrc);\n\n\tCOleDateTimeSpan(size_t lDays,\n\t\tint32_t nHours,\n\t\tint32_t nMins,\n\t\tint32_t nSecs);\n\nprotected:\n\n\tCOleDateTimeSpan(const std::chrono::seconds& span);\n\npublic:\n\n\tint32_t GetDays();\n\n\tbool operator!=(const COleDateTimeSpan& other) const;\n\nprotected:\n\n\tstd::chrono::seconds m_span;\n};\n\n#endif // COLEDATETIME_CUSTOM_H\n" }, { "alpha_fraction": 0.6348039507865906, "alphanum_fraction": 0.6384803652763367, "avg_line_length": 29.22222137451172, "blob_id": "65361785fad74b0f95580e20b07c32b416f6f8a2", "content_id": "7a9136d5ab194040125115f31da57347f5997386", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "CMake", "length_bytes": 816, "license_type": "no_license", "max_line_length": 81, "num_lines": 27, "path": "/Linux/portcode/CMakeLists.txt", "repo_name": "RobertCurry/VarroaPopLinux", "src_encoding": "UTF-8", "text": "cmake_minimum_required(VERSION 3.10)\n\nfind_package(fmt CONFIG REQUIRED)\nfind_package(Boost CONFIG REQUIRED system filesystem)\n\nset(SOURCES carchive.cpp\n carray.cpp\n ccmdtarget.cpp\n cdialog.cpp\n cfile.cpp\n cmapstringtoob.cpp\n cobject.cpp\n coblist.cpp\n coledatetime.cpp\n cptrlist.cpp\n cstring.cpp\n cstringarray.cpp\n ctime.cpp\n cuintarray.cpp\n position.cpp\n stdafx.cpp\n varroapop.cpp )\n\nadd_library(VarroaPopLinuxPort ${SOURCES})\ntarget_link_libraries(VarroaPopLinuxPort PRIVATE fmt::fmt)\ntarget_link_libraries(VarroaPopLinuxPort PRIVATE Boost::system Boost::filesystem)\ntarget_precompile_headers(VarroaPopLinuxPort PRIVATE \"${VarroaPopPCH}\")\n" }, { "alpha_fraction": 0.5794212222099304, "alphanum_fraction": 0.6553054451942444, "avg_line_length": 
39.94736862182617, "blob_id": "363b422c8549ac5674ac7698b450eb0efc23068b", "content_id": "682b87c72da569370c08954da4d8b9f80ffd267a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1555, "license_type": "no_license", "max_line_length": 127, "num_lines": 38, "path": "/Simulations/plot.py", "repo_name": "RobertCurry/VarroaPopLinux", "src_encoding": "UTF-8", "text": "ax = output.plot(y = 'Colony Size', color = 'b', legend = False, alpha = 0.75)\nax.axvspan(1, 31, facecolor='gray', alpha=0.3)\nax.axvspan(60, 90, facecolor='gray', alpha=0.3)\nax.axvspan(121, 151, facecolor='gray', alpha=0.3)\nax.axvspan(182, 212, facecolor='gray', alpha=0.3)\nax.axvspan(244, 273, facecolor='gray', alpha=0.3)\nax.axvspan(305, 334, facecolor='gray', alpha=0.3)\nax.axvspan(366, 396, facecolor='gray', alpha=0.3)\nax.axvspan(425, 455, facecolor='gray', alpha=0.3)\nax.axvspan(486, 516, facecolor='gray', alpha=0.3)\n#\n#\n#for year in range(2002, 2011):\n# params[\"SimStart\"] = \"01/01/\" + str(year)\n# params[\"SimEnd\"] = \"06/30/\" + str(year + 1)\n# \n# vp = VarroaPop(parameters = params, weather_file = \"VarroaPy/VarroaPy/files/weather/\" + weatherFile + \".wth\")\n# output = vp.run_model()\n# output = output.drop(output.index[0])\n# output['Inactive Foragers'] = output['Colony Size'] - output['Adult Drones'] -output['Adult Workers'] - output['Foragers']\n# \n# output.plot(y = variable, ax = ax, color = 'b', legend = False, alpha = 0.75)\n\n\n#\ny1 = output['Adult Workers']\ny2 = output['Adult Drones']\ny3 = output['Foragers']\ny4 = output['Inactive Foragers']\n\nx = output.index.values\ny = np.vstack([y1, y2, y3, y4])\nlabels = [\"Adult Workers\", \"Adult Drones\", \"Foragers\", \"Inactive Foragers\"] \nax.stackplot(x, y, labels = labels)\nax.legend(loc='upper left', fontsize = 'x-small')\nax.set_ylim(0, 140000)\nplt.title('Colony Size (2090) vs Time')\nplt.savefig('Plots/' + Location + '/OnePlus_Stacked_Colony_Size_2090.png', dpi 
= 1000)" }, { "alpha_fraction": 0.7272727489471436, "alphanum_fraction": 0.7272727489471436, "avg_line_length": 21, "blob_id": "6dbb3808eae2e7b5d142c8ca112d3a18fb28c783", "content_id": "40adebc013817fbb2d56b9a5c43f0d7457d5194d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 22, "license_type": "no_license", "max_line_length": 21, "num_lines": 1, "path": "/Linux/portcode/cptrlist.cpp", "repo_name": "RobertCurry/VarroaPopLinux", "src_encoding": "UTF-8", "text": "#include \"cptrlist.h\"\n" }, { "alpha_fraction": 0.6688578724861145, "alphanum_fraction": 0.6692686676979065, "avg_line_length": 20.342105865478516, "blob_id": "da3e3c9889921446275e84b380880c0ec83a25ef", "content_id": "46109d48ccce9b15a19800f1f5d40b3b6f4b0388", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 2434, "license_type": "no_license", "max_line_length": 86, "num_lines": 114, "path": "/Linux/portcode/cfile.cpp", "repo_name": "RobertCurry/VarroaPopLinux", "src_encoding": "UTF-8", "text": "#include \"cfile.h\"\n\n#include <boost/filesystem.hpp>\nnamespace bfs = boost::filesystem;\nnamespace bs = boost::system;\n\n#include <iostream>\n\nbool CFile::IsValid() const\n{\n return m_fileStream.good();\n}\n\nvoid CStdioFile::Rename(const CString& original, const CString& target)\n{\n bfs::rename(original.ToString(), target.ToString());\n}\n\nCStdioFile::CStdioFile()\n{\n}\n\nCStdioFile::CStdioFile(LPCTSTR lpszFileName, UINT nOpenFlags)\n{\n m_fileStream.open(lpszFileName, static_cast<std::ios_base::openmode>(nOpenFlags));\n m_fileName = lpszFileName;\n}\n\nBOOL CStdioFile::Open(LPCTSTR lpszFileName, UINT nOpenFlags, CFileException* pError)\n{\n m_fileStream.open(lpszFileName, static_cast<std::ios_base::openmode>(nOpenFlags));\n \n if (!m_fileStream.fail())\n {\n m_fileName = lpszFileName;\n }\n else\n {\n CString message;\n message.Format(\"Cannot open file %s\", lpszFileName);\n 
pError->SetErrorMessage(message.ToString());\n }\n return m_fileStream.is_open();\n}\n\nBOOL CStdioFile::ReadString(CString& str)\n{\n std::string data;\n if (IsValid())\n {\n std::getline(m_fileStream, data);\n }\n str = data;\n return IsValid();\n}\n\nvoid CStdioFile::WriteString(LPCTSTR str)\n{\n std::string lStr (str);\n m_fileStream.write(lStr.c_str(), lStr.length());\n}\n\nCString CStdioFile::GetFileName() const\n{\n return CString(m_fileName);\n}\n\nvoid CStdioFile::Close()\n{\n if (m_fileStream.is_open())\n {\n m_fileStream.close();\n }\n m_fileStream.clear();\n}\n\nvoid CStdioFile::SeekToBegin()\n{\n m_fileStream.seekp(0);\n}\n \nULONGLONG CStdioFile::Seek(LONGLONG lOff, UINT nFrom)\n{\n m_fileStream.seekp(lOff, static_cast<std::ios_base::seekdir>(nFrom));\n return m_fileStream.tellp();\n}\n\nULONGLONG CStdioFile::GetPosition()\n{\n return m_fileStream.tellp();\n}\n\nvoid CStdioFile::GetStatus(CFileStatus& status) const\n{\n // If we need more status information about the file than the size\n // auto fileStatus = bfs::status(m_fileName);\n bs::error_code ec;\n status.m_size = bfs::file_size(m_fileName, ec);\n}\n\nBOOL CFileException::GetErrorMessage(LPTSTR buffer, UINT bufferSize) const\n{\n bool hasErrorMessage = !m_message.empty();\n if (hasErrorMessage)\n {\n snprintf(buffer, bufferSize, \"%s\", m_message.c_str());\n }\n return hasErrorMessage;\n}\n\nvoid CFileException::SetErrorMessage(const std::string& message)\n{\n m_message = message;\n}\n\n" }, { "alpha_fraction": 0.7252964377403259, "alphanum_fraction": 0.7252964377403259, "avg_line_length": 16.44827651977539, "blob_id": "6db3ef479d5f0459655fa420e75bae6df6b2cbd0", "content_id": "01a2c051f176ae0f5d0f7cf5df7bb7b5e077d3df", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 506, "license_type": "no_license", "max_line_length": 75, "num_lines": 29, "path": "/Linux/portcode/ctime.h", "repo_name": "RobertCurry/VarroaPopLinux", "src_encoding": "UTF-8", 
"text": "#pragma once\n#ifndef CTIME_CUSTOM_H\n#define CTIME_CUSTOM_H\n\n#include \"coledatetime.h\"\n\n#include <chrono>\n\n/**\n * Only supports the necessary interface for the good behavior of VarroaPop\n */\nclass CTime\n{\npublic:\n\tCTime();\n\tCTime(const SYSTEMTIME& time);\n\n\tCOleDateTime GetTime() const;\n\n\tvoid FromTimeT(const time_t& time);\n\ttime_t GetAsTimeT() const;\n\nprotected:\n\n\t// here we use a time point to get milliseconds precision\n\tstd::chrono::system_clock::time_point m_time_point;\n};\n\n#endif // CTIME_CUSTOM_H\n" }, { "alpha_fraction": 0.6367475986480713, "alphanum_fraction": 0.6378036141395569, "avg_line_length": 36.880001068115234, "blob_id": "e6b8fbbfcc223fe1761c2084da0a2e831b073ef6", "content_id": "82cba0d09511af5c30a8c6eaceda81eb411087a9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 947, "license_type": "no_license", "max_line_length": 96, "num_lines": 25, "path": "/Linux/tests/test_varroapop.cpp", "repo_name": "RobertCurry/VarroaPopLinux", "src_encoding": "UTF-8", "text": "#include \"catch2/catch.hpp\"\n\n//! include explicitely since we are not using the pre-compiled header for tests\n//! 
may change this in the future\n#include \"stdafx.h\"\n\n#include \"varroapop.h\"\n\nTEST_CASE(\"VarroaPop operations\", \"[port]\")\n{\n SECTION(\"SplitPath\")\n {\n#if defined(WINDOWS)\n CHECK(SplitPath(\"C:/dev/github/something/file.ext\", DRV) == \"C:\");\n#endif\n CHECK(SplitPath(\"C:/dev/github/something/file.ext\", DIR) == \"C:/dev/github/something/\");\n CHECK(SplitPath(\"C:/dev/github/something/file.ext\", FNAME) == \"file\");\n CHECK(SplitPath(\"C:/dev/github/something/file.ext\", EXT) == \".ext\");\n\n CHECK(SplitPath(\"/dev/github/something/file.ext\", DRV) == \"\");\n CHECK(SplitPath(\"/dev/github/something/file.ext\", DIR) == \"/dev/github/something/\");\n CHECK(SplitPath(\"/dev/github/something/file.ext\", FNAME) == \"file\");\n CHECK(SplitPath(\"/dev/github/something/file.ext\", EXT) == \".ext\");\n }\n}\n" }, { "alpha_fraction": 0.7049754858016968, "alphanum_fraction": 0.7245970368385315, "avg_line_length": 17.776315689086914, "blob_id": "af8074243fefd457dc966a9ee3742dfe563c4c7a", "content_id": "df2993bba16333ce0b6e5c0b78e20a72f7e7f782", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1427, "license_type": "no_license", "max_line_length": 62, "num_lines": 76, "path": "/Linux/portcode/cdialog.h", "repo_name": "RobertCurry/VarroaPopLinux", "src_encoding": "UTF-8", "text": "#pragma once\n#ifndef CDIALOG_CUSTOM_H\n#define CDIALOG_CUSTOM_H\n\n#define IDOK 1\n#define IDNO 7\n#define IDD_CREATE_HEADER 134\n#define IDD_PROGRESS 0\n#define IDC_PROGRESS 0\n\n#define IDC_MINTEMP 0\n#define IDC_MAXTEMP 0\n#define IDC_W_BEG_TIME 0\n#define IDC_W_END_TIME 0\n#define IDC_W_BEG_DATE 0\n#define IDC_W_END_DATE 0\n#define IDC_SIM_TIME 0\n#define IDC_SIM_DATE 0\n#define IDC_SOLAR_RAD 0\n\n#define ON_BN_CLICKED(_1, _2) // void\n\n#define _T(x) x\n\n#include \"ccmdtarget.h\"\n#include \"cstring.h\"\n\n#include <cstddef>\n\nclass CDataExchange;\n\n/**\n * Empty class to be able to compile the VarroaPop application\n 
*/\nclass CPoint\n{\npublic:\n\tCPoint(uint32_t x, uint32_t y);\n};\n\n/**\n * Empty class to be able to compile the VarroaPop application\n */\nclass CWnd : public CCmdTarget\n{\npublic:\n\tvirtual size_t DoModal();\n\tvirtual void DoDataExchange(CDataExchange*);\n\tCWnd* GetDlgItem(int nID) const;\n\tbool EnableWindow(bool bEnable = true);\n\tbool UpdateData(bool bSaveAndValidate = true);\n\tvoid DestroyWindow();\n};\n\n/**\n * Empty class to be able to compile the VarroaPop application\n */\nclass CDialog : public CWnd\n{\npublic:\n\tCDialog();\n\tCDialog(int32_t id, CWnd* pParent);\n\tvirtual void Create(int32_t nIDTemplate);\n\tvirtual void Create(int32_t nIDTemplate, CWnd* pParentWnd);\n\tbool SetWindowText(const CString& text);\n\tvoid OnOK();\n\tvoid OnCancel();\n};\n\nclass CProgressCtrl : public CWnd\n{\npublic:\n\tvoid StepIt();\n};\n\n#endif // CDIALOG_CUSTOM_H\n" }, { "alpha_fraction": 0.7110675573348999, "alphanum_fraction": 0.7115572690963745, "avg_line_length": 34.8070182800293, "blob_id": "d654265660efd1f0a952b58fbd0bb9b6d766b95c", "content_id": "9682c1b06e9e66d07fa50f6ae635760cac20c2ee", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 2042, "license_type": "no_license", "max_line_length": 133, "num_lines": 57, "path": "/Linux/portcode/position.h", "repo_name": "RobertCurry/VarroaPopLinux", "src_encoding": "UTF-8", "text": "#pragma once\n#ifndef POSITION_CUSTOM_H\n#define POSITION_CUSTOM_H\n\n#include <memory>\n\n//! In the ATL/MFC framework the POSISION struct basically points to a node on the inner structure.\n//! In our case we want to use the iterator structure provided by standard C++ libraries.\n//! To do that let's define a base interface from which each different structure will derive and provide\n//! the necessary method to:\n//! - create an iterator\n//! 
- copy an iterator\nstruct __POSITION\n{\n\tvirtual ~__POSITION() {}\n\tvirtual __POSITION* copy() = 0;\n};\nstruct POSITION\n{\n //! Iterator reference is unique within a POSITION structure. We want POSITION to work as if it was a\n //! raw pointer on a Node of the inner structure.:\n //! - if POSITION is passed by reference the instance of the POSITION will be modified;\n //! - if POSITION is passed by copy a new instance of the POSITION pointing to the same element is created;\n //! - if POSITION is assigned to another POISITION a copy a new instance of the POSITION pointing to the same element is created.\n\ttypedef std::unique_ptr<__POSITION> POSITION_PTR;\n\tPOSITION_PTR m_position;\n\n\tPOSITION();\n\tPOSITION(const POSITION& other);\n\tPOSITION(const POSITION_PTR& other);\n\tPOSITION(const std::nullptr_t& other);\n\n\tPOSITION& operator = (const POSITION& other);\n\tPOSITION& operator = (const POSITION_PTR& other);\n\tPOSITION& operator = (const std::nullptr_t& other);\n\n\tbool operator == (const POSITION& other) const;\n\tbool operator != (const POSITION& other) const;\n\n\tconst POSITION_PTR& get() const { return m_position; }\n\tPOSITION_PTR& get() { return m_position; }\n};\n\n//! 
Simple extension method to do a dynamic_cast on an object wrapped in a unique_ptr in a single line.\nnamespace ext\n{\n\ttemplate <typename _Tp, typename _Up>\n\tinline _Tp* dynamic_unique_cast(const std::unique_ptr<_Up> &__r) noexcept\n\t{\n\t\tusing _Sp = std::unique_ptr<_Tp>;\n\t\tif (auto *__p = dynamic_cast<typename _Sp::element_type *>(__r.get()))\n\t\t\treturn __p;\n\t\treturn nullptr;\n\t}\n} // namespace\n\n#endif // POSITION_CUSTOM_H\n\n" }, { "alpha_fraction": 0.7107623219490051, "alphanum_fraction": 0.7107623219490051, "avg_line_length": 25.235294342041016, "blob_id": "d51d3fac2b10683dbf7176ba98f7781fe9ee3583", "content_id": "04d7323798c26b5b7d4df681986dd2ddeb455093", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 446, "license_type": "no_license", "max_line_length": 76, "num_lines": 17, "path": "/Linux/portcode/cstring.format.h", "repo_name": "RobertCurry/VarroaPopLinux", "src_encoding": "UTF-8", "text": "template<typename ParseContext>\nconstexpr auto fmt::formatter<CString>::parse(ParseContext& ctx)\n{\n return ctx.begin();\n}\n\ntemplate<typename FormatContext>\nauto fmt::formatter<CString>::format(CString const& str, FormatContext& ctx)\n{\n return fmt::format_to(ctx.out(), \"%s\", str.ToString());\n}\n\ntemplate<typename... Args>\nvoid CString::Format(const char* format, Args... 
args)\n{\n\tm_data = fmt::sprintf(format, std::forward< Args >(args)...);\n}\n" }, { "alpha_fraction": 0.6217273473739624, "alphanum_fraction": 0.6229310631752014, "avg_line_length": 17.353591918945312, "blob_id": "5915076fdc55e146567e109687f97919af6bae95", "content_id": "cdd99accb2511669fa7f39fd50268f66e2196e6f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 3323, "license_type": "no_license", "max_line_length": 85, "num_lines": 181, "path": "/Linux/portcode/coblist.cpp", "repo_name": "RobertCurry/VarroaPopLinux", "src_encoding": "UTF-8", "text": "#include \"coblist.h\"\n\nCObList::CObList()\n{\n}\n\nCObList::~CObList()\n{\n}\n\nINT_PTR CObList::GetCount() const\n{\n return m_data.size(); \n}\n\nBOOL CObList::IsEmpty() const\n{\n return m_data.empty();\n}\n\nPOSITION CObList::FindIndex(INT_PTR index) const\n{\n auto it = m_data.begin();\n std::advance(it, index);\n \n POSITION position; \n position = std::make_unique<CObListNs::InnerPosition>(it);\n return position;\n}\n\nCObject* CObList::GetAt(POSITION position) const\n{\n auto it = ext::dynamic_unique_cast<CObListNs::InnerPosition>(position.get());\n return *it->m_it;\n}\n\nCObject* CObList::GetHead() const\n{\n return m_data.front();\n}\n\nCObject* CObList::GetTail() const\n{\n return m_data.back();\n}\n\nCObject* CObList::GetPrev(POSITION& position) const\n{\n auto it = ext::dynamic_unique_cast<CObListNs::InnerPosition>(position.get());\n CObject* prev = *it->m_it;\n if (it->m_it == m_data.begin())\n {\n position = nullptr;\n }\n else \n {\n it->m_it--;\n }\n return prev;\n}\n\nCObject* CObList::GetNext(POSITION& position) const\n{\n auto it = ext::dynamic_unique_cast<CObListNs::InnerPosition>(position.get());\n CObject* next = *it->m_it;\n it->m_it++;\n if (it->m_it == m_data.end())\n {\n position = nullptr;\n }\n return next;\n}\n\nPOSITION CObList::GetHeadPosition() const\n{\n POSITION position = nullptr;\n if (GetCount() > 0)\n {\n position = 
std::make_unique<CObListNs::InnerPosition>(m_data.begin());\n }\n return position;\n}\n\nPOSITION CObList::GetTailPosition() const\n{\n POSITION position = nullptr;\n if (GetCount() > 0)\n {\n auto it = m_data.begin();\n std::advance(it, m_data.size() - 1);\n position = std::make_unique<CObListNs::InnerPosition>(it);\n }\n return position;\n}\n\nPOSITION CObList::AddHead (CObject* object)\n{\n m_data.push_front(object);\n return GetHeadPosition();\n}\n\nPOSITION CObList::AddTail (CObject* object)\n{\n m_data.push_back(object);\n return GetTailPosition();\n}\n\nvoid CObList::RemoveAt(POSITION position)\n{\n auto it = ext::dynamic_unique_cast<CObListNs::InnerPosition>(position.get());\n it->m_it = m_data.erase(it->m_it);\n}\n\nCObject* CObList::RemoveHead()\n{\n CObject* head = GetHead();\n m_data.pop_front();\n return head;\n}\n\nCObject* CObList::RemoveTail ()\n{\n CObject* tail = GetTail();\n m_data.pop_back();\n return tail;\n}\n\nvoid CObList::RemoveAll()\n{\n m_data.clear();\n}\n\nCStringList::CStringList()\n{\n}\n\nCStringList::~CStringList()\n{\n}\n\nINT_PTR CStringList::GetCount() const\n{\n return m_data.size(); \n}\n\nBOOL CStringList::IsEmpty() const\n{\n return m_data.empty();\n}\n\nconst CString& CStringList::GetNext(POSITION& position) const \n{\n auto it = ext::dynamic_unique_cast<CStringListNs::InnerPosition>(position.get());\n auto next = std::ref(*it->m_it);\n it->m_it++;\n if (it->m_it == m_data.end())\n {\n position = nullptr;\n }\n return next.get();\n}\n\nPOSITION CStringList::GetHeadPosition() const\n{\n POSITION position = nullptr;\n if (GetCount() > 0)\n {\n position = std::make_unique<CStringListNs::InnerPosition>(m_data.begin());\n }\n return position;\n}\t\n\nvoid CStringList::AddTail(const CString& string)\n{\n m_data.push_back(string);\n}\n\t\nvoid CStringList::RemoveAll()\n{\n m_data.clear();\n}\n\n" }, { "alpha_fraction": 0.6457399129867554, "alphanum_fraction": 0.6573991179466248, "avg_line_length": 37.465518951416016, "blob_id": 
"396a58ef47586f5504382465dad3fa24c2356917", "content_id": "7b6cd84a02bf44ab98aa911828e3611c10567549", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2230, "license_type": "no_license", "max_line_length": 123, "num_lines": 58, "path": "/Linux/scripts/utilities.py", "repo_name": "RobertCurry/VarroaPopLinux", "src_encoding": "UTF-8", "text": "import os\nimport re\nfrom threading import Lock\n\nsafe_print_lock = Lock()\n\n\n# Synchronised print to use in multi-threaded script\ndef safe_print(*a, **b):\n # Thread safe print function\n with safe_print_lock:\n print(*a, **b)\n\n\n# Class containing information about weather file information\nclass WeatherFileInfo:\n def __init__(self, location, model, scenario, latitude, longitude):\n self.location = location\n self.model = model\n self.scenario = scenario\n self.latitude = latitude\n self.longitude = longitude\n\n def __str__(self):\n return self.location + ':' + \\\n '\\n\\tmodel: ' + self.model + \\\n '\\n\\tscenario: ' + self.scenario + \\\n '\\n\\tlocation: ' + self.latitude + ',' + self.longitude\n\n\n# Retrieve the WeatherFileInfo from a filename when formatted as <LOCATION>_<MODEL>_<SCENARIO>_<LAT>_<LONG>\ndef parse_weather_filename(filename):\n try:\n location, model, scenario, latitude, longitude = filename.split('_', 5)\n return WeatherFileInfo(location, model, scenario, latitude, longitude)\n except:\n safe_print('Filename: ' + filename + ' is not formatted as <LOCATION>_<MODEL>_<SCENARIO>_<LAT>_<LONG>')\n\n\n# Extracts binary format from filename if specified as <SCENARIO>-<WEATHER_FILE>.txt\ndef parse_binary_format(filename):\n binary_format = {'observed': 'Observed', 'modeled': 'Modeled', 'rcp85': 'Rcp85', 'rcp45': 'Rcp45'}\n filename = os.path.basename(filename)\n for item in re.split('-|/|.txt', filename):\n if item in binary_format.keys():\n return binary_format[item]\n item = os.path.splitext(os.path.basename(filename))[0]\n if item in 
binary_format.keys():\n return binary_format[item]\n raise Exception('No valid binary format specifier in input filename (observed|modeled|Rcp85|Rcp45)-<weather_file>.txt')\n\n\ndef get_valid_binary_format_identifier(binary_format):\n binary_format_identifiers = {'observed': 'Observed', 'modeled': 'Modeled', 'rcp85': 'Rcp85', 'rcp45': 'Rcp45'}\n try:\n return binary_format_identifiers[binary_format.lower()]\n except:\n safe_print(binary_format + ' is not a valid binary format Observed|Modeled|Rcp85|Rcp45')" }, { "alpha_fraction": 0.6471544504165649, "alphanum_fraction": 0.6471544504165649, "avg_line_length": 23.13725471496582, "blob_id": "9590450900aad6c8f118931e8c01f1b317efad0d", "content_id": "a1f25a4c266bde0d8c0e26d75364397837fbd27c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1230, "license_type": "no_license", "max_line_length": 98, "num_lines": 51, "path": "/Linux/tests/helpers/common.cpp", "repo_name": "RobertCurry/VarroaPopLinux", "src_encoding": "UTF-8", "text": "#include \"common.h\"\n\n#include <boost/filesystem.hpp>\nnamespace bfs = boost::filesystem;\n\nstd::string FindSimulationDir()\n{\n bfs::path current = bfs::path(__FILE__).parent_path();\n bfs::path previous;\n bfs::path simulations;\n do \n {\n previous = current;\n simulations = current / \"Simulations\";\n current /= \"..\";\n }\n while (previous != current && (!bfs::exists(simulations) || !bfs::is_directory(simulations)));\n\n std:: string simulationsDir;\n if (bfs::exists(simulations) && bfs::is_directory(simulations))\n {\n simulationsDir = simulations.string();\n }\n return simulationsDir;\n}\n\nstd::string GetSimulationsDir()\n{\n static const std::string sSimulationDir = FindSimulationDir();\n return sSimulationDir;\n}\n\nstd::string FindTestsDir()\n{\n bfs::path current = bfs::path(__FILE__).parent_path();\n bfs::path tests = current / \"..\";\n return tests.string();\n}\n\nstd::string GetTestsDir()\n{\n static const 
std::string sTestsDir = FindTestsDir();\n return sTestsDir;\n}\n\nstd::string GetFileInTempDirectory(const std::string& filename)\n{\n bfs::path file_path = bfs::temp_directory_path();\n file_path = file_path / filename;\n return file_path.string();\n}" }, { "alpha_fraction": 0.67022305727005, "alphanum_fraction": 0.6760426759719849, "avg_line_length": 33.36666488647461, "blob_id": "9299a486a211db29032e74c10b462bf5a54a30fc", "content_id": "7ab3dd6553047f322fb23bed143a6282bb299ee6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "CMake", "length_bytes": 1031, "license_type": "no_license", "max_line_length": 83, "num_lines": 30, "path": "/Linux/tests/CMakeLists.txt", "repo_name": "RobertCurry/VarroaPopLinux", "src_encoding": "UTF-8", "text": "cmake_minimum_required(VERSION 3.10)\n\nfind_package(Catch2 CONFIG REQUIRED)\nfind_package(Boost CONFIG REQUIRED system filesystem)\n\nset(SOURCES main.cpp\n helpers/common.cpp\n helpers/myobject.cpp\n test_carchive.cpp\n test_carray.cpp\n test_cfile.cpp\n test_cmapstringtoob.cpp\n test_coblist.cpp\n test_coledatetime.cpp\n test_cptrlist.cpp\n test_cstring.cpp\n test_cstringarray.cpp \n test_cstringlist.cpp \n test_ctime.cpp \n test_cuintarray.cpp\n test_globaloptions.cpp\n test_temperaturedata.cpp\n test_varroapop.cpp )\n\n\nadd_executable(VarroaPopTests ${SOURCES})\ntarget_link_libraries(VarroaPopTests PRIVATE VarroaPopDataModel VarroaPopLinuxPort)\ntarget_link_libraries(VarroaPopTests PRIVATE Catch2::Catch2)\ntarget_link_libraries(VarroaPopTests PRIVATE Boost::system Boost::filesystem)\ntarget_precompile_headers(VarroaPopTests REUSE_FROM VarroaPopLinuxPort)\n" }, { "alpha_fraction": 0.651059091091156, "alphanum_fraction": 0.654403567314148, "avg_line_length": 15.035714149475098, "blob_id": "c98b47c19df754cd226260d4d734be25cec44056", "content_id": "7ea7e8fd8c5afb978e39cfff3d7ccd417aa0b079", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 
897, "license_type": "no_license", "max_line_length": 65, "num_lines": 56, "path": "/Linux/portcode/cstringarray.cpp", "repo_name": "RobertCurry/VarroaPopLinux", "src_encoding": "UTF-8", "text": "#include \"cstringarray.h\"\n\nINT_PTR CStringArray::GetSize() const\n{\n return m_data.size();\n}\n\nINT_PTR CStringArray::GetCount() const\n{\n return m_data.size();\n}\n\nBOOL CStringArray::IsEmpty() const\n{\n return m_data.empty();\n}\n\nINT_PTR CStringArray::GetUpperBound() const\n{\n INT_PTR upperBound = -1;\n if (m_data.size() > 0)\n {\n upperBound = m_data.size() - 1;\n }\n return upperBound;\n}\n\nvoid CStringArray::SetSize(INT_PTR nNewSize, INT_PTR /*nGrowBy*/)\n{\n m_data.resize(nNewSize);\n}\n\nCString CStringArray::GetAt(INT_PTR position)\n{\n return m_data.at(position);\n}\n\nvoid CStringArray::Add(const CString& str)\n{\n m_data.push_back(str);\n}\n\nconst CString& CStringArray::operator[](INT_PTR index) const\n{\n return m_data[index];\n}\n\nCString& CStringArray::operator[](INT_PTR index)\n{\n return m_data[index];\n}\n\nvoid CStringArray::RemoveAll()\n{\n m_data.clear();\n}" }, { "alpha_fraction": 0.684169590473175, "alphanum_fraction": 0.6853364706039429, "avg_line_length": 23.485713958740234, "blob_id": "8111674a000245f1d21d3ae6ef6bbe8d47c38e83", "content_id": "b124dc531109ef0edc79f822ff65725db16b6474", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 2571, "license_type": "no_license", "max_line_length": 88, "num_lines": 105, "path": "/Linux/portcode/cptrlist.inline.h", "repo_name": "RobertCurry/VarroaPopLinux", "src_encoding": "UTF-8", "text": "template<class BASE_CLASS, class TYPE>\nCTypedPtrList<BASE_CLASS, TYPE>::CTypedPtrList()\n{\n}\n\ntemplate<class BASE_CLASS, class TYPE>\nCTypedPtrList<BASE_CLASS, TYPE>::~CTypedPtrList()\n{\n}\n\ntemplate<class BASE_CLASS, class TYPE>\nBOOL CTypedPtrList<BASE_CLASS, TYPE>::IsEmpty() const\n{\n return m_data.empty();\n}\n\ntemplate<class BASE_CLASS, class 
TYPE>\nINT_PTR CTypedPtrList<BASE_CLASS, TYPE>::GetCount() const\n{\n return m_data.size();\n}\n\ntemplate<class BASE_CLASS, class TYPE>\nTYPE CTypedPtrList<BASE_CLASS, TYPE>::GetAt(POSITION position) const\n{\n auto it = ext::dynamic_unique_cast<CPtrListNs::InnerPosition<TYPE>>(position.get());\n return *it->m_it;\n}\n\ntemplate<class BASE_CLASS, class TYPE>\nPOSITION CTypedPtrList<BASE_CLASS, TYPE>::AddTail(TYPE object)\n{\n m_data.push_back(object);\n return GetTailPosition();\n}\n\ntemplate<class BASE_CLASS, class TYPE>\nvoid CTypedPtrList<BASE_CLASS, TYPE>::RemoveAt(POSITION position)\n{\n auto it = ext::dynamic_unique_cast<CPtrListNs::InnerPosition<TYPE>>(position.get());\n it->m_it = m_data.erase(it->m_it);\n}\n\ntemplate<class BASE_CLASS, class TYPE>\nPOSITION CTypedPtrList<BASE_CLASS, TYPE>::GetHeadPosition() const\n{\n POSITION position = nullptr;\n if (GetCount() > 0)\n {\n position = std::make_unique<CPtrListNs::InnerPosition<TYPE>>(m_data.begin());\n }\n return position;\n}\n\ntemplate<class BASE_CLASS, class TYPE>\nPOSITION CTypedPtrList<BASE_CLASS, TYPE>::GetTailPosition() const\n{\n POSITION position = nullptr;\n if (GetCount() > 0)\n {\n auto it = m_data.begin();\n std::advance(it, m_data.size() - 1);\n position = std::make_unique<CPtrListNs::InnerPosition<TYPE>>(it);\n }\n return position;\n}\n\ntemplate<class BASE_CLASS, class TYPE>\nTYPE CTypedPtrList<BASE_CLASS, TYPE>::GetHead() const\n{\n return m_data.front();\n}\n\ntemplate<class BASE_CLASS, class TYPE>\nTYPE CTypedPtrList<BASE_CLASS, TYPE>::GetTail() const\n{\n return m_data.back();\n}\n\ntemplate<class BASE_CLASS, class TYPE>\nTYPE CTypedPtrList<BASE_CLASS, TYPE>::GetNext(POSITION& position) const\n{\n auto it = ext::dynamic_unique_cast<CPtrListNs::InnerPosition<TYPE>>(position.get());\n TYPE next = *it->m_it;\n it->m_it++;\n if (it->m_it == m_data.end())\n {\n position = nullptr;\n }\n return next;\n}\n\ntemplate<class BASE_CLASS, class TYPE>\nTYPE CTypedPtrList<BASE_CLASS, 
TYPE>::RemoveHead() \n{\n TYPE head = GetHead();\n m_data.pop_front();\n\treturn head;\n}\n\ntemplate<class BASE_CLASS, class TYPE>\nvoid CTypedPtrList<BASE_CLASS, TYPE>::RemoveAll() \n{\n m_data.clear();\n}\n" }, { "alpha_fraction": 0.6306374073028564, "alphanum_fraction": 0.6309380531311035, "avg_line_length": 37.005714416503906, "blob_id": "e1dbbc697f0820aeb5f02fc13b4f3a25c5a40101", "content_id": "3c82fe5f63c6465495d14752a2348a9a7c730869", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6652, "license_type": "no_license", "max_line_length": 153, "num_lines": 175, "path": "/Linux/setup.py", "repo_name": "RobertCurry/VarroaPopLinux", "src_encoding": "UTF-8", "text": "import filecmp, os, re, shutil, stat\nfrom string import Template\n\ndef get_linux_root():\n return os.path.dirname(os.path.abspath(__file__))\n\n\ndef get_data_model_root():\n return os.path.abspath(os.path.join(get_linux_root(), \"datamodel\"))\n\n\ndef get_port_code_root():\n return os.path.abspath(os.path.join(get_linux_root(), \"portcode\"))\n\n\ndef get_project_root():\n return os.path.abspath(os.path.join(get_linux_root(), \"..\"))\n\n\ndef is_source_file(filename):\n return os.path.splitext(filename)[1].lower() in [\".cpp\", \".cxx\", \".c\"]\n\n\ndef is_header_file(filename):\n return os.path.splitext(filename)[1].lower() in [\".h\", \".hpp\"]\n\n\ndef is_code_file(filename):\n return is_header_file(filename) or is_source_file(filename)\n\n\ndef set_file_as_readonly(path):\n if os.path.exists(path):\n readonly_mode = stat.S_IREAD | stat.S_IRGRP\n os.chmod(path, readonly_mode)\n \n\ndef set_file_as_writable(path):\n if os.path.exists(path):\n readonly_mode = stat.S_IREAD | stat.S_IRGRP\n readwrite_mode = readonly_mode | stat.S_IWRITE | stat.S_IWGRP\n os.chmod(path, readwrite_mode)\n\n\ndef get_data_model_files():\n return [\n \"Adult.h\",\n \"Adult.cpp\",\n \"Bee.h\",\n \"Bee.cpp\",\n \"Brood.h\",\n \"Brood.cpp\",\n \"Colony.h\",\n 
\"Colony.cpp\",\n \"ColdStorageSimulator.h\",\n \"ColdStorageSimulator.cpp\",\n \"ColonyResource.h\",\n \"ColonyResource.cpp\",\n \"CreateWeatherHdr.h\",\n \"CreateWeatherHdr.cpp\",\n \"DateRangeValues.h\",\n \"DateRangeValues.cpp\",\n \"EGG.H\",\n \"EGG.CPP\",\n \"EPAData.h\",\n \"EPAData.cpp\",\n \"GlobalOptions.h\",\n \"GlobalOptions.cpp\",\n \"IEDItem.h\",\n \"IEDItem.cpp\",\n \"LARVA.H\",\n \"LARVA.CPP\",\n \"Matrix.h\",\n \"Matrix.cpp\",\n \"Mite.h\",\n \"Mite.cpp\",\n \"MiteTreatmentItem.h\",\n \"MiteTreatmentItem.cpp\",\n \"MiteTreatments.h\",\n \"MiteTreatments.cpp\",\n \"NutrientContaminationTable.h\",\n \"NutrientContaminationTable.cpp\",\n \"Queen.h\",\n \"Queen.cpp\",\n \"Spores.h\",\n \"Spores.cpp\",\n \"VarroaPopSession.h\",\n \"VarroaPopSession.cpp\",\n \"WeatherEvents.h\",\n \"WeatherEvents.cpp\",\n \"WeatherGridData.h\",\n \"WeatherGridData.cpp\"\n ]\n\ndef get_port_code_headers():\n headers = []\n dir_it = os.scandir(get_port_code_root()) \n for dir_item in dir_it:\n if dir_item.is_file and dir_item.name.endswith('.h'):\n headers.append(dir_item.name)\n return headers\n\n\ndef fix_include_directives(source, target):\n with open(source, 'r') as code_file:\n code_file_content = code_file.read()\n result = re.findall(r'#include\\s+\\\"(.*)\\\"', code_file_content)\n set_file_as_writable(target)\n if len(result):\n with open(target, 'w') as code_file:\n port_code_headers = get_port_code_headers()\n for include in result:\n if any(file.lower() == include.lower() for file in get_data_model_files()) or any(file == include.lower() for file in port_code_headers):\n code_file_content = code_file_content.replace(include, include.lower())\n code_file.write(code_file_content)\n else:\n shutil.copy(source, target)\n set_file_as_readonly(target)\n\n\ndef copy_data_model():\n # check existance of data model file and if they changed\n data_model_dir = get_project_root()\n # data_model_target_dir will be the location of the working header files\n 
data_model_target_dir = get_data_model_root()\n # data_model_target_original_dir will be the location of the original copied files used for comparison \n data_model_target_original_dir = os.path.join(data_model_target_dir, \".hidden\")\n if os.path.exists(data_model_target_original_dir) == False:\n os.mkdir(data_model_target_original_dir)\n for data_model_file in get_data_model_files():\n data_model_path = os.path.join(data_model_dir, data_model_file)\n data_model_target_path = os.path.join(data_model_target_dir, data_model_file.lower())\n data_model_target_path_original = os.path.join(data_model_target_original_dir, data_model_file.lower())\n if os.path.exists(data_model_dir):\n target_file_exists = os.path.exists(data_model_target_path_original)\n if target_file_exists == False or filecmp.cmp(data_model_path, data_model_target_path_original) == False:\n set_file_as_writable(data_model_target_path_original)\n shutil.copy(data_model_path, data_model_target_path_original)\n set_file_as_readonly(data_model_target_path_original)\n fix_include_directives(data_model_target_path_original, data_model_target_path)\n print(\"Copying file {} to {}\".format(data_model_path, data_model_target_path))\n else:\n print(\"Cannot copy file {} to {} because it is missing\".format(data_model_path, data_model_target_path))\n\n\ndef make_data_model_cmakelists():\n cmake_filename = \"CMakeLists.txt\"\n cmake_template_filename = \"CMakeLists.txt.template\"\n # extract source files from the files to be added to datamodel\n source_files = [file for file in get_data_model_files() if is_source_file(file)]\n source_files = [s.lower() for s in source_files] # set all source file name lowercase\n # load the template CMakeLists.txt.template\n cmake_template = os.path.join(get_data_model_root(), cmake_template_filename)\n with open(cmake_template) as cmake_template_file:\n cmake_template_content = Template(cmake_template_file.read())\n cmake_template_content = 
cmake_template_content.substitute(VarroaPopDataModelSourceFiles=str(' '.join(source_files)))\n # keep track weither or not we should replace the content of the cmake file\n write_cmake_target_file = True\n cmake_target = os.path.join(get_data_model_root(), cmake_filename)\n if os.path.exists(cmake_target):\n with open(cmake_target) as cmake_target_file: # check content\n cmake_target_content = cmake_target_file.read()\n if cmake_target_content == cmake_template_content:\n write_cmake_target_file = False\n if write_cmake_target_file == True: # write CMakeLists.txt\n with open(cmake_target, 'w') as cmake_target_file:\n cmake_target_file.write(cmake_template_content)\n print(\"Writing file {} to {}\".format(cmake_filename, get_data_model_root()))\n else:\n print(\"{} is up-to-date\".format(cmake_filename))\n\n\nif __name__ == \"__main__\":\n copy_data_model()\n make_data_model_cmakelists()\n\n" }, { "alpha_fraction": 0.7082601189613342, "alphanum_fraction": 0.7113356590270996, "avg_line_length": 19.880733489990234, "blob_id": "4a2e03a3909537e9a092cd3f3d7e514172008626", "content_id": "31e4df5cb8571f307a4343bbba4c1e4c4646b9ff", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 2276, "license_type": "no_license", "max_line_length": 92, "num_lines": 109, "path": "/Linux/portcode/cfile.h", "repo_name": "RobertCurry/VarroaPopLinux", "src_encoding": "UTF-8", "text": "#pragma once\n#ifndef CFILE_CUSTOM_H\n#define CFILE_CUSTOM_H\n\n#include \"cstring.h\"\n\n#include <fstream>\n\nstruct CFileStatus\n{\n\tULONGLONG m_size; // size in Bytes\n};\n\n/**\n * Only supports the necessary interface for the good behavior of VarroaPop\n */\nclass CFile\n{\npublic:\n\n\t// Flag values\n\tenum OpenFlags \n\t{\n\t\tmodeRead = std::ios_base::in,\n\t\tmodeWrite = std::ios_base::out,\n\t\tmodeReadWrite = modeRead | modeWrite,\n\t\tmodeCreate = 0, // default is C++\n\t\tmodeNoTruncate = std::ios_base::app,\n\t\ttypeText = 0, // used in 
derived classes (e.g. CStdioFile) only\n\t\ttypeBinary = std::ios_base::binary, // used in derived classes (e.g. CStdioFile) only\n\n\t\t// shared access to file will be defaulted to C++ standard behavior which is shareDenyNone\n\t\tshareCompat = 0, \n\t\tshareExclusive = 0,\n\t\tshareDenyWrite = 0,\n\t\tshareDenyRead = 0,\n\t\tshareDenyNone = 0\n\t};\n \n enum SeekPosition \n\t{ \n\t\tbegin = std::ios_base::beg, \n\t\tcurrent = std::ios_base::cur, \n\t\tend = std::ios_base::end \n\t};\n\n\tfriend class CArchive;\n\nprotected:\n\n\tconst std::fstream& GetStream() const {return m_fileStream;}\n\tstd::fstream& GetStream() {return m_fileStream;}\n\n\t// Returns true if the stream is valid\n\tbool IsValid() const;\n\nprotected:\n\n\tstd::fstream m_fileStream;\n\tstd::string m_fileName;\n};\n\n/**\n * Only supports the necessary interface for the good behavior of VarroaPop\n */\nclass CFileException\n{\npublic:\n\tBOOL GetErrorMessage(LPTSTR buffer, UINT bufferSize) const;\n\n\tvoid SetErrorMessage(const std::string& message);\n\nprotected:\n\tstd::string m_message;\n};\n\n/**\n * Only supports the necessary interface for the good behavior of VarroaPop\n */\nclass CStdioFile : public CFile\n{\npublic:\n\tstatic void Rename(const CString& original, const CString& target);\n\n\tCStdioFile();\n\tCStdioFile(LPCTSTR lpszFileName, UINT nOpenFlags);\n\n\tBOOL Open(LPCTSTR lpszFileName, UINT nOpenFlags, CFileException* pError);\n\n\tvoid GetStatus(CFileStatus& status) const;\n\n\tBOOL ReadString(CString& str);\n\t\n\tvoid WriteString(LPCTSTR str);\n\n\tCString GetFileName() const;\n\t\n\tvoid Close();\n\n\tvoid SeekToBegin();\n\n ULONGLONG Seek(LONGLONG lOff, UINT nFrom);\n\t\t\n\t// Here we removed the constness of the method since in std c++ the rdstate of the stream\n\t// will be change is tellg fails\n\tULONGLONG GetPosition();\n};\n\n#endif // CFILE_CUSTOM_H\n" }, { "alpha_fraction": 0.7710508108139038, "alphanum_fraction": 0.7974947690963745, "avg_line_length": 
22.950000762939453, "blob_id": "19de67ab65a195db35a5922b389096a536e40b5b", "content_id": "dab92a5c71585362ed55d26691ea56175c716347", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "CMake", "length_bytes": 1437, "license_type": "no_license", "max_line_length": 78, "num_lines": 60, "path": "/Linux/CMakeLists.txt", "repo_name": "RobertCurry/VarroaPopLinux", "src_encoding": "UTF-8", "text": "cmake_minimum_required(VERSION 3.10)\n\nset (CMAKE_CXX_STANDARD 17)\n\n## package manager\n\ninclude(\"cmake/HunterGate.cmake\")\n\nHunterGate(\n URL \"https://github.com/cpp-pm/hunter/archive/v0.23.235.tar.gz\"\n SHA1 \"d8992c8a66c1ff2f68566c658ddaa465eca844e4\"\n)\n\nproject(VarroaPop)\n\n## download dependencies\n\n# fmt library is included from the vcpck install of it \n# follow instructions https://github.com/microsoft/vcpkg/\nhunter_add_package(fmt)\n\n# unit tests framework\nhunter_add_package(Catch)\n\n# Boost framework\nhunter_add_package(Boost COMPONENTS system filesystem)\n\n# Add CXXOPTS for command line options\nhunter_add_package(cxxopts)\n\n## precompiled header\n\nset(VarroaPopPCH \"${CMAKE_SOURCE_DIR}/portcode/stdafx.h\")\n\n## include directories\n\ninclude_directories(BEFORE . 
portcode datamodel)\n\n## specific definitions\n\n## sub directories\n\nadd_subdirectory(portcode)\nadd_subdirectory(datamodel)\nadd_subdirectory(tests)\n\n## build application\n\nfind_package(Boost CONFIG REQUIRED system filesystem)\nfind_package(cxxopts CONFIG REQUIRED)\n\nset (SOURCES main.cpp varroapopcmdbridge.cpp)\n\n# add_executable(VarroaPop WIN32 ${SOURCES})\nadd_executable(VarroaPop ${SOURCES})\n\ntarget_link_libraries(VarroaPop PRIVATE VarroaPopDataModel VarroaPopLinuxPort)\ntarget_link_libraries(VarroaPop PRIVATE Boost::system Boost::filesystem)\ntarget_link_libraries(VarroaPop PRIVATE cxxopts::cxxopts)\ntarget_precompile_headers(VarroaPop REUSE_FROM VarroaPopLinuxPort)\n" }, { "alpha_fraction": 0.7173818945884705, "alphanum_fraction": 0.7217165231704712, "avg_line_length": 23.80645179748535, "blob_id": "0b7800a18b3f01eb6904f75d5abcb329ac03e7d0", "content_id": "db82ef09c1b412b40bc182a8c73c6b2a0256641d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 2307, "license_type": "no_license", "max_line_length": 75, "num_lines": 93, "path": "/Linux/portcode/cstring.h", "repo_name": "RobertCurry/VarroaPopLinux", "src_encoding": "UTF-8", "text": "#pragma once\n#ifndef CSTRING_CUSTOM_H\n#define CSTRING_CUSTOM_H\n\n#include <string>\n\n/**\n * Only supports the necessary interface for the good behavior of VarroaPop\n */\nclass CString\n{\npublic:\n \n\tCString();\n\tCString(const std::string& cStr);\n CString(const char* cStr);\n\n\tbool operator==(const CString& str) const;\n\tbool operator==(const char* str) const;\n\n\tbool operator!=(const CString& str) const;\n\tbool operator!=(const char* str) const;\n\n\tchar& operator[](const size_t& index);\n\tconst char& operator[](const size_t& index) const;\n\n\tCString& operator+=(const CString& str);\n\tCString& operator=(const CString& str);\n\t\n\tCString& operator+=(const char& c);\n\n\tbool operator<(const CString& str) const;\n\n\tconst std::string& 
ToString() const;\n\n\toperator const char* () const;\n\n\tint GetLength() const;\n\n\tCString& MakeLower();\n\tCString& MakeUpper();\n\n\tvoid Trim();\n\tvoid TrimLeft();\n\tvoid TrimRight();\n\t\n\ttemplate<typename... Args>\n\tvoid Format(const char* format, Args... args);\n\n\t// Find the first occurence of element \n\t// return -1 if not found and index otherwise\n\tint Find(char element) const;\n\n\t// Find the last occurence of element\n\t// return -1 if not found and index otherwise\n\tint ReverseFind(char element) const;\n\t\n\t// Find the first occurence of str \n\t// return -1 if not found and index otherwise\n\tint Find(const char* str) const;\n\n\t// Find the last occurence of str\n\t// return -1 if not found and index otherwise\n\tint ReverseFind(const char* str) const;\n\n\tvoid Replace(const CString& toReplace, const CString& with);\n\n\t// Return the substring consisting of the leftmost 'count' characters\n\tCString Left(int count) const;\n\n\t// Return the substring consisting of the rightmost 'count' characters\n\tCString Right(int count) const;\n\n\t// Return the substring starting at index 'first'\n\tCString Mid(int first) const;\n\n\t// Return the substring starting at index 'first', with length 'count'\n\tCString Mid(int first, int count) const;\n\n\tCString Tokenize(const char* delimiter, int& startPosition) const;\n\n\tCString SpanExcluding(const char* delimiter) const;\n\nprotected:\n\n\tstd::string m_data;\n}; \n\nCString operator+(const CString& str1, const CString& str2);\nCString operator+(const CString& str1, const char* str2);\nCString operator+(const char* str1, const CString& str2);\n\n#endif // CSTRING_CUSTOM_H\n" }, { "alpha_fraction": 0.7462202906608582, "alphanum_fraction": 0.7473002076148987, "avg_line_length": 33.296295166015625, "blob_id": "f88c24013db7d3ff57908b01e2ab22b21aa5792b", "content_id": "19902f1d11666fd1ee9152651ae980be91c782b3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", 
"length_bytes": 926, "license_type": "no_license", "max_line_length": 83, "num_lines": 27, "path": "/Linux/tests/test_globaloptions.cpp", "repo_name": "RobertCurry/VarroaPopLinux", "src_encoding": "UTF-8", "text": "#include \"catch2/catch.hpp\"\n\n//! include explicitely since we are not using the pre-compiled header for tests\n//! may change this in the future\n#include \"stdafx.h\" \n\n#include \"globaloptions.h\"\n\nTEST_CASE(\"GlobalOptions operations\", \"[settings]\") \n{ \n SECTION(\"Foragers aging behavior\") \n {\n GlobalOptions options;\n\n CHECK(options.ShouldForageDayElectionBasedOnTemperatures());\n CHECK_FALSE(options.ShouldComputeHourlyTemperatureEstimation());\n CHECK_FALSE(options.ShouldForagersAlwaysAgeBasedOnForageInc());\n\n CHECK_THROWS(options.ShouldForagerAgingBasedOnHourlyTemperatureEstimate());\n\n options.ShouldForagerAgingBasedOnHourlyTemperatureEstimate.Set(true);\n\n CHECK_FALSE(options.ShouldForageDayElectionBasedOnTemperatures());\n CHECK(options.ShouldComputeHourlyTemperatureEstimation());\n CHECK(options.ShouldForagersAlwaysAgeBasedOnForageInc());\n }\n}\n" }, { "alpha_fraction": 0.5777904987335205, "alphanum_fraction": 0.5825688242912292, "avg_line_length": 28.559322357177734, "blob_id": "dccb3ccb183db1d29aa891eef6f5ca49e064afe6", "content_id": "a54959dd951575e56d35c31e7d46e1a1fdcf9a77", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 5232, "license_type": "no_license", "max_line_length": 116, "num_lines": 177, "path": "/Linux/tests/test_cfile.cpp", "repo_name": "RobertCurry/VarroaPopLinux", "src_encoding": "UTF-8", "text": "#include \"catch2/catch.hpp\"\n\n//! include explicitely since we are not using the pre-compiled header for tests\n//! 
may change this in the future\n#include \"stdafx.h\" \n\n#include \"cfile.h\"\n\n#include \"helpers/common.h\"\n\n#include <boost/filesystem.hpp>\nnamespace bfs = boost::filesystem;\n\n// On Windows the newline character is still written \\n -> \\r\\n\n// to handle this difference we are going to explicitely add 1 character for each \\n\nint AdjustedStrlen(const char* str) {\n int count = std::strlen(str);\n#ifdef WINDOWS \n std::for_each(str, str+count, [&count](const char& c)\n {\n if (c == '\\n') count++;\n }\n );\n#endif\n return count;\n}\n\nTEST_CASE(\"CFile operations\", \"[port]\") {\n \n CStdioFile file (GetFileInTempDirectory(\"dummy.txt\").c_str(), CFile::modeWrite);\n \n const char* fileContent = \"This is a single line file\\n\";\n int fileContentSize = AdjustedStrlen(fileContent);\n\n file.WriteString(fileContent);\n file.Close();\n\n SECTION(\"Read dummy file\") {\n \n CStdioFile file (GetFileInTempDirectory(\"dummy.txt\").c_str(), CFile::modeRead);\n\n CString content;\n file.ReadString(content);\n CHECK(content == \"This is a single line file\");\n\n file.Close();\n }\n\n SECTION(\"Tell and Seek in read mode\") {\n\n CStdioFile file (GetFileInTempDirectory(\"dummy.txt\").c_str(), CFile::modeRead);\n\n CHECK(file.GetPosition() == 0);\n\n file.SeekToBegin(); \n CHECK(file.GetPosition() == 0);\n \n CHECK(file.Seek(0, CFile::end) == fileContentSize);\n CHECK(file.GetPosition() == fileContentSize);\n \n CHECK(file.Seek(10, CFile::begin) == 10);\n CHECK(file.GetPosition() == 10);\n \n CString content;\n file.ReadString(content);\n CHECK(content == \"single line file\");\n\n file.Close();\n }\n\n SECTION(\"Tell and Seek in write mode\") {\n\n CStdioFile file (GetFileInTempDirectory(\"dummy.txt\").c_str(), CFile::modeWrite);\n\n CHECK(file.GetPosition() == 0);\n CHECK(file.Seek(10, CFile::begin) == 10);\n CHECK(file.GetPosition() == 10);\n\n const char* additionalContent = \"double single line file\";\n\n file.WriteString(additionalContent);\n 
CHECK(file.GetPosition() == 10 + AdjustedStrlen(additionalContent));\n \n file.Close();\n }\n\n SECTION(\"Tell and Seek in read/write mode\") {\n\n CStdioFile file (GetFileInTempDirectory(\"dummy.txt\").c_str(), CFile::modeReadWrite);\n\n CString content;\n file.ReadString(content);\n CHECK(content == \"This is a single line file\"); \n\n CHECK(file.GetPosition() == fileContentSize);\n CHECK(file.Seek(0, CFile::end) == fileContentSize);\n CHECK(file.GetPosition() == fileContentSize);\n\n const char* additionalContent = \"This is a second line in the file\\n\";\n int additionalContentSize = AdjustedStrlen(additionalContent);\n\n file.WriteString(additionalContent);\n CHECK(file.GetPosition() == fileContentSize + additionalContentSize);\n\n file.Close();\n\n SECTION(\"Iterate on the file content\") {\n \n CStdioFile file (GetFileInTempDirectory(\"dummy.txt\").c_str(), CFile::modeRead);\n\n std::vector<std::string> fileContent = {\n \"This is a single line file\",\n \"This is a second line in the file\"\n };\n\n size_t lineIt = 0;\n \n CString content;\n while(file.ReadString(content)) {\n\n CHECK(content == fileContent[lineIt]);\n lineIt++;\n }\n\n CHECK(lineIt == 2);\n\n file.Close();\n }\n\n SECTION(\"Get file status\") {\n\n CStdioFile file;\n CFileException error;\n\n CHECK(file.Open(GetFileInTempDirectory(\"dummy.txt\").c_str(), CFile::modeRead, &error));\n CHECK(file.GetFileName() == GetFileInTempDirectory(\"dummy.txt\"));\n\n CFileStatus status;\n file.GetStatus(status);\n CHECK(status.m_size == fileContentSize + additionalContentSize);\n \n file.Close();\n }\n\n SECTION(\"Get file exception\") {\n\n CStdioFile file;\n CFileException error;\n\n CHECK_FALSE(file.Open(GetFileInTempDirectory(\"dummy-not-exists.txt\").c_str(), CFile::modeRead, &error));\n CHECK(file.GetFileName() == \"\");\n\n char buffer[12];\n CHECK(error.GetErrorMessage(buffer, sizeof(buffer)));\n \n file.Close();\n }\n\n SECTION(\"Rename\") {\n\n 
CStdioFile::Rename(GetFileInTempDirectory(\"dummy.txt\"), GetFileInTempDirectory(\"dummy-rename.txt\"));\n\n CStdioFile file;\n CFileException error;\n\n CHECK_FALSE(file.Open(GetFileInTempDirectory(\"dummy.txt\").c_str(), CFile::modeRead, &error));\n CHECK(file.GetFileName() == \"\");\n\n file.Close();\n\n CHECK(file.Open(GetFileInTempDirectory(\"dummy-rename.txt\").c_str(), CFile::modeRead, &error));\n CHECK(file.GetFileName() == GetFileInTempDirectory(\"dummy-rename.txt\"));\n \n file.Close();\n }\n }\n}\n" }, { "alpha_fraction": 0.5026488900184631, "alphanum_fraction": 0.5054036974906921, "avg_line_length": 26.436046600341797, "blob_id": "055859cf37eb37aae7e5b56c9b1b4465f436acb8", "content_id": "15c2e65e0523791f45539d5206360eddc0d6d74e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 4719, "license_type": "no_license", "max_line_length": 92, "num_lines": 172, "path": "/Linux/tests/test_cmapstringtoob.cpp", "repo_name": "RobertCurry/VarroaPopLinux", "src_encoding": "UTF-8", "text": "#include \"catch2/catch.hpp\"\n\n//! include explicitely since we are not using the pre-compiled header for tests\n//! 
may change this in the future\n#include \"stdafx.h\" \n\n#include \"cmapstringtoob.h\"\n\nclass CMyObject : public CObject {\npublic:\n CMyObject()\n : m_data(\"nodata\")\n , m_initialized(false){\n }\n virtual ~CMyObject(){\n m_data = \"badfood\";\n m_initialized = false;\n }\n void SetInitialized(){m_initialized = true;}\n bool IsInitialized()const{return m_initialized;}\n void UpdateData(const std::string& data){m_data = data;}\n const std::string& GetData()const{return m_data;}\nprotected:\n std::string m_data;\n bool m_initialized;\n};\n\nTEST_CASE(\"CMapStringToOb operations\", \"[port]\") {\n \n CMapStringToOb map;\n\n SECTION(\"IsEmpty\") {\n \n CHECK(map.IsEmpty());\n }\n\n std::vector<std::string> keys = {\n \"a\",\n \"b\",\n \"c\"\n };\n\n std::vector<std::pair<bool, std::string>> values = {\n {false, \"nodata\"},\n {true, \"nodata\"},\n {false, \"code\"}\n };\n\n std::map<std::string, std::unique_ptr<CMyObject>> data;\n for (auto it=keys.begin(); it != keys.end(); it++)\n {\n data.insert(std::make_pair(it->c_str(), std::unique_ptr<CMyObject>(new CMyObject)));\n }\n\n for (auto it = data.begin(); it != data.end(); it++)\n {\n map.SetAt(it->first.c_str(), it->second.get());\n }\n\n SECTION(\"Iteration\") {\n\n size_t mapIdx = 0;\n\n CString key;\n CMyObject* value = nullptr;\n POSITION position = map.GetStartPosition();\n for (POSITION position = map.GetStartPosition(); position != nullptr; ) \n {\n map.GetNextAssoc(position, key, (CObject*&)value);\n CHECK(key == keys[mapIdx++]);\n }\n\n CHECK(mapIdx == 3);\n }\n\n SECTION(\"Lookup\") {\n\n CMyObject* value = nullptr;\n REQUIRE(map.Lookup(\"b\", (CObject*&)value));\n value->SetInitialized();\n\n value = nullptr;\n REQUIRE(map.Lookup(\"c\", (CObject*&)value));\n value->UpdateData(\"code\");\n\n value = nullptr;\n CHECK_FALSE(map.Lookup(\"d\", (CObject*&)value));\n CHECK(value == nullptr);\n \n SECTION(\"Iteration\") {\n\n size_t mapIdx = 0;\n\n CString key;\n CMyObject* value = nullptr;\n POSITION 
position = map.GetStartPosition();\n for (POSITION position = map.GetStartPosition(); position != nullptr; ) \n {\n map.GetNextAssoc(position, key, (CObject*&)value);\n CHECK(key == keys[mapIdx]);\n CHECK(value->IsInitialized() == values[mapIdx].first);\n CHECK(value->GetData() == values[mapIdx].second);\n mapIdx++;\n }\n\n CHECK(mapIdx == 3);\n }\n\n SECTION(\"RemoveKey\") {\n\n size_t mapIdx = 0;\n\n CString key;\n CMyObject* value = nullptr;\n POSITION position = map.GetStartPosition();\n for (POSITION position = map.GetStartPosition(); position != nullptr; ) \n {\n map.GetNextAssoc(position, key, (CObject*&)value);\n CHECK(key == keys[mapIdx]);\n map.RemoveKey(key);\n mapIdx++;\n }\n \n CHECK(mapIdx == 3);\n\n // second iteration should have no element\n mapIdx = 0;\n\n position = map.GetStartPosition();\n for (POSITION position = map.GetStartPosition(); position != nullptr; ) \n {\n map.GetNextAssoc(position, key, (CObject*&)value);\n CHECK(key == keys[mapIdx]);\n mapIdx++;\n }\n \n CHECK(mapIdx == 0);\n }\n\n SECTION(\"RemovalAll\") {\n\n size_t mapIdx = 0;\n\n CString key;\n CMyObject* value = nullptr;\n POSITION position = map.GetStartPosition();\n for (POSITION position = map.GetStartPosition(); position != nullptr; ) \n {\n map.GetNextAssoc(position, key, (CObject*&)value);\n CHECK(key == keys[mapIdx]);\n mapIdx++;\n }\n \n CHECK(mapIdx == 3);\n\n map.RemoveAll();\n\n // second iteration should have no element\n mapIdx = 0;\n\n position = map.GetStartPosition();\n for (POSITION position = map.GetStartPosition(); position != nullptr; ) \n {\n map.GetNextAssoc(position, key, (CObject*&)value);\n CHECK(key == keys[mapIdx]);\n mapIdx++;\n }\n \n CHECK(mapIdx == 0);\n }\n }\n}\n" }, { "alpha_fraction": 0.5502728223800659, "alphanum_fraction": 0.5931410789489746, "avg_line_length": 28.494253158569336, "blob_id": "ed75dc2076bc0b23c2ab99b0f9eacab21de1ddd1", "content_id": "4ecc9c6e809e8c86ee9f19f7556478685dab157e", "detected_licenses": [], "is_generated": false, 
"is_vendor": false, "language": "C++", "length_bytes": 2566, "license_type": "no_license", "max_line_length": 154, "num_lines": 87, "path": "/Linux/tests/test_carchive.cpp", "repo_name": "RobertCurry/VarroaPopLinux", "src_encoding": "UTF-8", "text": "#include \"catch2/catch.hpp\"\n\n//! include explicitely since we are not using the pre-compiled header for tests\n//! may change this in the future\n#include \"stdafx.h\" \n\n#include \"carchive.h\"\n\nTEST_CASE(\"CArchive operations\", \"[port]\") {\n \n struct ToSerialize\n {\n float aFloat;\n double aDouble;\n int32_t aInt32;\n uint32_t aUInt32;\n int8_t aInt8;\n COleDateTime aDateTime;\n CString aString;\n CTime aTime;\n };\n\n SYSTEMTIME systemTime;\n systemTime.wYear = 1982;\n systemTime.wMonth = 01;\n systemTime.wDay = 11;\n systemTime.wHour = 11;\n systemTime.wMinute = 30;\n systemTime.wSecond = 18;\n\n ToSerialize data {23.3f, 89.0, -2147483647, 4294967295, -127, COleDateTime(2020, 01, 11, 13, 30, 43), CString(\"This is a string\"), CTime(systemTime)};\n\n SECTION(\"Store\") {\n\n CStdioFile file(\"store.bin\", CFile::modeWrite | CFile::typeBinary);\n CArchive archive(&file, CArchive::store);\n\n archive << data.aFloat;\n archive << data.aDouble;\n archive << data.aInt32;\n archive << data.aUInt32;\n archive << data.aInt8;\n archive << data.aDateTime;\n archive << data.aString;\n archive << data.aTime;\n\n file.Close();\n }\n\n SECTION(\"Load\") {\n\n CStdioFile file(\"store.bin\", CFile::modeRead | CFile::typeBinary);\n CArchive archive(&file, CArchive::load);\n\n ToSerialize loaded;\n archive >> loaded.aFloat;\n archive >> loaded.aDouble;\n archive >> loaded.aInt32;\n archive >> loaded.aUInt32;\n archive >> loaded.aInt8;\n archive >> loaded.aDateTime;\n archive >> loaded.aString;\n archive >> loaded.aTime;\n\n CHECK(loaded.aFloat == data.aFloat);\n CHECK(loaded.aDouble == data.aDouble);\n CHECK(loaded.aInt32 == data.aInt32);\n CHECK(loaded.aUInt32 == data.aUInt32);\n CHECK(loaded.aInt8 == data.aInt8);\n\n 
CHECK(loaded.aDateTime.GetYear() == 2020);\n CHECK(loaded.aDateTime.GetMonth() == 1);\n CHECK(loaded.aDateTime.GetDay() == 11);\n CHECK(loaded.aDateTime.GetHour() == 13);\n CHECK(loaded.aDateTime.GetMinute() == 30);\n\n CHECK(loaded.aString == \"This is a string\");\n \n CHECK(loaded.aTime.GetTime().GetYear() == 1982);\n CHECK(loaded.aTime.GetTime().GetMonth() == 1);\n CHECK(loaded.aTime.GetTime().GetDay() == 11);\n CHECK(loaded.aTime.GetTime().GetHour() == 11);\n CHECK(loaded.aTime.GetTime().GetMinute() == 30);\n\n file.Close();\n }\n}\n" }, { "alpha_fraction": 0.8302752375602722, "alphanum_fraction": 0.8302752375602722, "avg_line_length": 28.68181800842285, "blob_id": "f10ecc3208e3f61f49fcc530745c507c1b5bda65", "content_id": "f58b5a5787e4f3c976ec068d0fdf7e877ce190b3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 654, "license_type": "no_license", "max_line_length": 171, "num_lines": 22, "path": "/GlobalOptions.cpp", "repo_name": "RobertCurry/VarroaPopLinux", "src_encoding": "UTF-8", "text": "#include \"stdafx.h\"\n#include \"GlobalOptions.h\"\n\nGlobalOptions& GlobalOptions::Get()\n{\n\tstatic GlobalOptions sGlobalOptions;\n\treturn sGlobalOptions;\n}\n\nGlobalOptions::GlobalOptions()\n\t: ShouldForagerAgingBasedOnHourlyTemperatureEstimate(*this)\n{\n}\n\ntemplate<>\nvoid GlobalOptions::AggregateOption<GlobalOptions::ForagerAgingBasedHourlyTemperatureEstimate>::Set(const GlobalOptions::ForagerAgingBasedHourlyTemperatureEstimate& value)\n{\n\tm_value = value;\n\tm_options.ShouldForageDayElectionBasedOnTemperatures.Set(!m_value);\n\tm_options.ShouldComputeHourlyTemperatureEstimation.Set(m_value);\n\tm_options.ShouldForagersAlwaysAgeBasedOnForageInc.Set(m_value);\n}\n\n" }, { "alpha_fraction": 0.7197916507720947, "alphanum_fraction": 0.731249988079071, "avg_line_length": 39, "blob_id": "0d546dcd072b5013b4adb27de31e2d81bb17be8e", "content_id": "5ad9d4d124e034a450cc77cbbd3b7e08367fea68", "detected_licenses": 
[], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 960, "license_type": "no_license", "max_line_length": 83, "num_lines": 24, "path": "/Linux/portcode/cdialog.cpp", "repo_name": "RobertCurry/VarroaPopLinux", "src_encoding": "UTF-8", "text": "#include \"cdialog.h\"\n\nCPoint::CPoint(uint32_t x, uint32_t y)\n{\n NOT_IMPLEMENTED();\n}\n\nsize_t CWnd::DoModal() {NOT_IMPLEMENTED(); return 0;}\nvoid CWnd::DoDataExchange(CDataExchange*) {NOT_IMPLEMENTED();}\nCWnd* CWnd::GetDlgItem(int /*nID*/) const {NOT_IMPLEMENTED(); return nullptr;}\nbool CWnd::EnableWindow(bool /*bEnable*/) {NOT_IMPLEMENTED(); return false;}\nbool CWnd::UpdateData(bool /*bSaveAndValidate*/) {NOT_IMPLEMENTED(); return false;}\nvoid CWnd::DestroyWindow() {NOT_IMPLEMENTED();}\n\n\nCDialog::CDialog(){NOT_IMPLEMENTED();}\nCDialog::CDialog(int32_t id, CWnd* pParent){NOT_IMPLEMENTED();}\nvoid CDialog::Create(int32_t nIDTemplate){NOT_IMPLEMENTED();}\nvoid CDialog::Create(int32_t nIDTemplate, CWnd* pParentWnd){NOT_IMPLEMENTED();}\nbool CDialog::SetWindowText(const CString& text){NOT_IMPLEMENTED(); return false;}\nvoid CDialog::OnOK(){NOT_IMPLEMENTED();}\nvoid CDialog::OnCancel(){NOT_IMPLEMENTED();}\n\nvoid CProgressCtrl::StepIt(){NOT_IMPLEMENTED();}\n" }, { "alpha_fraction": 0.6757844090461731, "alphanum_fraction": 0.6765888929367065, "avg_line_length": 20.807018280029297, "blob_id": "ae89a799c2d631ed6457ed3c7dce01a27f254f40", "content_id": "3e3ac1b6660808f0d1625de05f5edc8a61ef8ae8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 2486, "license_type": "no_license", "max_line_length": 98, "num_lines": 114, "path": "/Linux/portcode/carray.inline.h", "repo_name": "RobertCurry/VarroaPopLinux", "src_encoding": "UTF-8", "text": "template<class TYPE, class ARG_TYPE>\nBOOL CArray<TYPE, ARG_TYPE>::IsEmpty() const\n{\n return m_data.empty();\n}\n\ntemplate<class TYPE, class ARG_TYPE>\nINT_PTR CArray<TYPE, ARG_TYPE>::GetCount() 
const\n{\n return m_data.size();\n}\n\ntemplate<class TYPE, class ARG_TYPE>\nvoid CArray<TYPE, ARG_TYPE>::RemoveAll()\n{\n return m_data.clear();\n}\n\ntemplate<class TYPE, class ARG_TYPE>\nvoid CArray<TYPE, ARG_TYPE>::Copy(const CArray<TYPE, ARG_TYPE>& array)\n{\n if (this != &array)\n {\n m_data = array.m_data;\n }\n}\n\ntemplate<class TYPE, class ARG_TYPE>\nvoid CArray<TYPE, ARG_TYPE>::Add(const TYPE& element)\n{\n return m_data.push_back(element);\n}\n\ntemplate<class TYPE, class ARG_TYPE>\nconst TYPE& CArray<TYPE, ARG_TYPE>::operator[](INT_PTR index) const\n{\n return m_data.at(index);\n}\n\ntemplate<class TYPE, class ARG_TYPE>\nTYPE& CArray<TYPE, ARG_TYPE>::operator[](INT_PTR index)\n{\n return m_data.at(index);\n}\n\ntemplate<class TYPE, class ARG_TYPE>\nvoid CArray<TYPE, ARG_TYPE>::RemoveAt(INT_PTR index)\n{\n auto it = m_data.begin();\n std::advance(it, index);\n m_data.erase(it);\n}\n\ntemplate<class TYPE, class ARG_TYPE>\nvoid CArray<TYPE, ARG_TYPE>::SetSize(INT_PTR size, INT_PTR /*growBy*/)\n{\n m_data.resize(size);\n}\n\ntemplate<class TYPE>\nvoid SerializeElements(CArchive& ar, TYPE* pElements, INT_PTR nCount)\n{\n\t// default is bit-wise read/write\n\tif (ar.IsStoring())\n\t{\n\t\tTYPE* pData;\n\t\tUINT_PTR nElementsLeft;\n\n\t\tnElementsLeft = nCount;\n\t\tpData = pElements;\n\t\twhile( nElementsLeft > 0 )\n\t\t{\n\t\t\tUINT nElementsToWrite;\n\n\t\t\tnElementsToWrite = UINT(std::min(nElementsLeft, std::numeric_limits<int>::max()/sizeof(TYPE)));\n\t\t\tar.Write(pData, nElementsToWrite*sizeof(TYPE));\n\t\t\tnElementsLeft -= nElementsToWrite;\n\t\t\tpData += nElementsToWrite;\n\t\t}\n\t}\n\telse\n\t{\n\t\tTYPE* pData;\n\t\tUINT_PTR nElementsLeft;\n\n\t\tnElementsLeft = nCount;\n\t\tpData = pElements;\n\t\twhile( nElementsLeft > 0 )\n\t\t{\n\t\t\tUINT nElementsToRead;\n\n\t\t\tnElementsToRead = UINT(std::min(nElementsLeft, std::numeric_limits<int>::max()/sizeof(TYPE)));\n\t\t\tar.Read(pData, nElementsToRead*sizeof(TYPE));\n\t\t\tnElementsLeft -= 
nElementsToRead;\n\t\t\tpData += nElementsToRead;\n\t\t}\n\t}\n}\n\ntemplate<class TYPE, class ARG_TYPE>\nvoid CArray<TYPE, ARG_TYPE>::Serialize(CArchive& ar)\n{\n\t// CObject::Serialize(ar); // Julien should be doing nothing\n\tif (ar.IsStoring())\n\t{\n ar.WriteCount(m_data.size());\n\t}\n\telse\n\t{\n\t\tDWORD_PTR size = ar.ReadCount();\n\t\tSetSize(size);\n\t}\n\tSerializeElements<TYPE>(ar, m_data.data(), m_data.size());\n}\n" }, { "alpha_fraction": 0.5904095768928528, "alphanum_fraction": 0.6081917881965637, "avg_line_length": 23.900497436523438, "blob_id": "3d40b5fe8a4514e75c35213b1c4e15347332c161", "content_id": "30595aca13022edaabbd4886768a1ad53a1e4963", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 5005, "license_type": "no_license", "max_line_length": 80, "num_lines": 201, "path": "/Linux/tests/test_cstring.cpp", "repo_name": "RobertCurry/VarroaPopLinux", "src_encoding": "UTF-8", "text": "#include \"catch2/catch.hpp\"\n\n//! include explicitely since we are not using the pre-compiled header for tests\n//! 
may change this in the future\n#include \"stdafx.h\" \n\n#include \"cstring.h\"\n\nTEST_CASE(\"CString operations\", \"[port]\") {\n\n\t// check creation and comparison\n\tCString myString(\"ThisIsAString\");\n\tCHECK(myString == \"ThisIsAString\");\n\tCHECK(myString == CString(\"ThisIsAString\"));\n\t\n\tSECTION(\"append data\") {\n\n\t\tmyString += \" - SomeAdditional Data\";\n\t\tCHECK(myString == \"ThisIsAString - SomeAdditional Data\");\n\t}\n\n\tSECTION(\"overwrite data\") {\n\n\t\tmyString = \"Some New Data\";\n\t\tCHECK(myString == \"Some New Data\");\n\t}\n\n\tSECTION(\"conversions\") {\n\n\t\tCHECK(myString.ToString() == std::string(\"ThisIsAString\"));\n\n\t\tchar* buffer = new char[myString.GetLength()+1];\n\t\tsprintf(buffer, \"%s\", (const char*)myString);\n\t\tCHECK(myString == buffer);\n\t\tdelete[] buffer;\n\t}\n\t\n\tSECTION(\"casing\") {\n\t\t\n\t\tCHECK(myString.MakeLower() == \"thisisastring\");\n\t\tCHECK(myString.MakeUpper() == \"THISISASTRING\");\t\n\t}\n\n\tSECTION(\"triming\") {\n\t\t\n\t\tmyString = \" With Some Leading Spaces and Trailing Spaces \";\n\n\t\tSECTION(\"both\") {\n\n\t\t\tmyString.Trim();\n\t\t\tCHECK(myString == \"With Some Leading Spaces and Trailing Spaces\");\n\t\t}\n\t\t\n\t\tSECTION(\"left\") {\n\n\t\t\tmyString.TrimLeft();\n\t\t\tCHECK(myString == \"With Some Leading Spaces and Trailing Spaces \");\n\t\t}\n\t\t\n\t\tSECTION(\"right\") {\n\n\t\t\tmyString.TrimRight();\n\t\t\tCHECK(myString == \" With Some Leading Spaces and Trailing Spaces\");\n\t\t}\n\t}\n\n\tSECTION(\"format\") {\n\n\t\tmyString.Format(\"This is an integer %d\", 10);\n\t\tCHECK(myString == \"This is an integer 10\");\n\t\tstd::string fmtString = fmt::sprintf(\"This is an integer %d\", 10);\n\t\tCHECK(fmtString == \"This is an integer 10\");\n\t}\n\n\tSECTION(\"Access operator\"){\n\t\t\n\t\tCHECK(myString == \"ThisIsAString\");\n\t\tCHECK(myString[8] == 't');\n\t\tmyString[6] = ' ';\n\t\tCHECK(myString == \"ThisIs 
String\");\n\t}\n\t\n\tSECTION(\"Search\") {\\\n\t\n\t\tmyString = \"ThisIsAStringThisIsAString\";\n\n\t\tCHECK(myString.Find('z') == -1);\n\t\tCHECK(myString.Find('\\0') == -1);\n\t\tCHECK(myString.Find('i') == 2);\n\t\t\n\t\tCHECK(myString.Find(\"toto\") == -1);\n\t\tCHECK(myString.Find(\"is\") == 2);\n\t\tCHECK(myString.Find(\"Is\") == 4);\n\n\t\tCHECK(myString.ReverseFind('z') == -1);\n\t\tCHECK(myString.ReverseFind('\\0') == -1);\n\t\tCHECK(myString.ReverseFind('i') == 23);\n\t\t\n\t\tCHECK(myString.ReverseFind(\"toto\") == -1);\n\t\tCHECK(myString.ReverseFind(\"is\") == 15);\n\t\tCHECK(myString.ReverseFind(\"Is\") == 17);\n\t}\n\n\tSECTION(\"Replace\") {\n\n\t\tmyString.Replace(\"ThisIsAString\", \"My new string is actually a sentence\");\n\t\tCHECK(myString == \"My new string is actually a sentence\");\n\n\t\tmyString.Replace(\"Toto\", \"Not in string\");\n\t\tCHECK(myString == \"My new string is actually a sentence\");\n\t\t\n\t\tmyString.Replace(\"\", \"Not\");\n\t\tCHECK(myString == \"My new string is actually a sentence\");\n\t\t\n\t\tmyString.Replace(\" \", \"Not\");\n\t\tCHECK(myString == \"MyNotnewNotstringNotisNotactuallyNotaNotsentence\");\n\t\t\n\t\tmyString = \"This is a sentence with a several a thingy!\";\n\t\tmyString.Replace(\"a\", \"Euh\");\n\t\tCHECK(myString == \"This is Euh sentence with Euh severEuhl Euh thingy!\");\n\t}\n\n\tSECTION(\"Substring\") {\n\n\t\tCString left = myString.Left(100);\n\t\tCHECK(left == \"ThisIsAString\");\n\t\tleft = myString.Left(3);\n\t\tCHECK(left == \"Thi\");\n\t\tleft = myString.Left(-100);\n\t\tCHECK(left == \"\");\n\n\t\tCString right = myString.Right(100);\n\t\tCHECK(right == \"ThisIsAString\");\n\t\tright = myString.Right(4);\n\t\tCHECK(right == \"ring\");\n\t\tright = myString.Right(-100);\n\t\tCHECK(right == \"\");\n\n\t\tCString mid = myString.Mid(100);\n\t\tCHECK(mid == \"\");\n\t\tmid = myString.Mid(-100);\n\t\tCHECK(mid == \"ThisIsAString\");\n\n\t\tmid = myString.Mid(4, 4);\n\t\tCHECK(mid == 
\"IsAS\");\n\t\tmid = myString.Mid(7, 100);\n\t\tCHECK(mid == \"String\");\n\t}\n\n SECTION(\"Tokenize\") {\n\n myString = \"%First Second#Third\";\n\n CString resToken;\n int curPos = 0;\n\n resToken = myString.Tokenize(\"% #\", curPos);\n CHECK(resToken == \"First\");\n CHECK(curPos == 6);\n resToken = myString.Tokenize(\"% #\", curPos);\n CHECK(resToken == \"Second\");\n CHECK(curPos == 13);\n resToken = myString.Tokenize(\"% #\", curPos);\n CHECK(resToken == \"Third\");\n CHECK(curPos == 19);\n resToken = myString.Tokenize(\"% #\", curPos);\n CHECK(resToken == \"\");\n CHECK(curPos == 19);\n }\n\n SECTION(\"Span Excluding\") {\n\n myString = \"World Cup '98\";\n\n CString spaned = myString.SpanExcluding(\";,.-'\");\n CHECK(spaned == \"World Cup \");\n\n CString str1 (\"Hello World! Goodbye!\");\n CString str2 = str1.SpanExcluding(\".!?\");\n CHECK(str2 == \"Hello World\");\n\n CString str3 (\"Hello World Goodbye\");\n CString str4 = str3.SpanExcluding(\".!?\");\n CHECK(str4 == \"Hello World Goodbye\");\n }\n\n SECTION(\"Operations\") {\n\n myString = \"World Cup '98\";\n\n CString added = myString + \" toto\";\n CHECK(added == \"World Cup '98 toto\");\n }\n\n SECTION(\"Format\") {\n\n CString added;\n\t\tadded.Format(\"%7.2f\", 6.432432);\n CHECK(added == \" 6.43\");\n }\n}\n" }, { "alpha_fraction": 0.734929084777832, "alphanum_fraction": 0.734929084777832, "avg_line_length": 25.23255729675293, "blob_id": "8987b80f1d34ac146902c573c278402e0040abe8", "content_id": "b8971b031e8848ccab1b2d3733e3b8d0df6f1abd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1128, "license_type": "no_license", "max_line_length": 92, "num_lines": 43, "path": "/Linux/portcode/cmapstringtoob.h", "repo_name": "RobertCurry/VarroaPopLinux", "src_encoding": "UTF-8", "text": "#pragma once\n#ifndef CMAPSTRINGTOOB_CUSTOM_H\n#define CMAPSTRINGTOOB_CUSTOM_H\n\n#include \"cobject.h\"\n#include \"cstring.h\"\n\n#include 
\"stdafx.h\"\n\n#include <cstddef>\n#include <map>\n\n/**\n * TODO: Improve the way to replace the behavior the of MFC POSITION structure since\n * this implementation is not efficient (lots of allocations)\n */\nnamespace CMapStringToObNs { struct InnerPosition : public __POSITION {\n\tInnerPosition(const typename std::map<CString, CObject*>::const_iterator& it) : m_it(it) {}\n\tInnerPosition* copy() { return new InnerPosition(m_it); }\n std::map<CString, CObject*>::const_iterator m_it;\n}; }\n\n/**\n * Only supports the necessary interface for the good behavior of VarroaPop\n */\nclass CMapStringToOb : public CObject\n{\npublic:\n\n\tBOOL IsEmpty() const;\n\tPOSITION GetStartPosition() const;\n\tvoid SetAt(LPCTSTR string, CObject* value);\n\tvoid GetNextAssoc(POSITION& position, CString& string, CObject*& value) const;\n\tBOOL Lookup(LPCTSTR string, CObject*& value) const;\n\tBOOL RemoveKey(LPCTSTR string);\n\tvoid RemoveAll();\n\nprotected:\n\n std::map<CString, CObject*> m_map;\n};\n\n#endif // CMAPSTRINGTOOB_CUSTOM_H\n" }, { "alpha_fraction": 0.7265917658805847, "alphanum_fraction": 0.7265917658805847, "avg_line_length": 15.6875, "blob_id": "faf680357e6a7725d8384d376149a124b38c4ff7", "content_id": "0ade24e79b042a3c27629feab904aa16ebd1e8a0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 267, "license_type": "no_license", "max_line_length": 62, "num_lines": 16, "path": "/Linux/portcode/ccmdtarget.h", "repo_name": "RobertCurry/VarroaPopLinux", "src_encoding": "UTF-8", "text": "#pragma once\n#ifndef CCMDTARGET_CUSTOM_H\n#define CCMDTARGET_CUSTOM_H\n\n#include \"cobject.h\"\n\n/**\n * Empty class to be able to compile the VarroaPop application\n */\nclass CCmdTarget : public CObject\n{\npublic:\n\tvirtual ~CCmdTarget() {}\n};\n\n#endif // CCMDTARGET_CUSTOM_H\n" }, { "alpha_fraction": 0.5468243360519409, "alphanum_fraction": 0.5982479453086853, "avg_line_length": 37.694915771484375, "blob_id": 
"6fbba23ea24eeb6fa7931761b3e4280fc6b1630a", "content_id": "05b33fdd5966a6037ad9f23e06e18d3186bba99a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 11415, "license_type": "no_license", "max_line_length": 180, "num_lines": 295, "path": "/Simulations/Simulate.py", "repo_name": "RobertCurry/VarroaPopLinux", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Oct 6 18:27:47 2019\n\n@author: Reet Barik\n\"\"\"\n\nimport os\nos.chdir(\"D:/Coursework/PACCAR/VarroaPOP/Beepop/Simulations/\")\n\nimport numpy as np\nimport pandas as pd\nfrom datetime import datetime\nimport math\nimport json\nfrom VarroaPy.VarroaPy.RunVarroaPop import VarroaPop\nimport matplotlib.pyplot as plt\n\ndef photoperiod(latitude, date):\n day = datetime.strptime(str(date), \"%Y-%m-%d %H:%M:%S\")\n J = day.timetuple().tm_yday\n \n P = math.asin(0.39795 * math.cos(0.2163108 + 2 * math.atan(0.9671396 * math.tan(0.00860 * (J - 186)))))\n D = 24 - (24 / math.pi) * math.acos((math.sin(0.833 * math.pi / 180) + (math.sin(latitude * math.pi / 180) * math.sin(P))) / (math.cos(latitude * math.pi / 180) * math.cos(P)))\n return D\n \n\nobsHist = True\nmodHist = False\nrcp = False\n\nweatherFile = \"\"\nLocation = 'Richland'\n\nif Location == 'Omak':\n weatherFile = 'data_48.40625_-119.53125'\nif Location == 'Wenatchee':\n weatherFile = 'data_47.40625_-120.34375'\nif Location == 'Richland':\n weatherFile = 'data_46.28125_-119.34375'\nif Location == 'WallaWalla':\n weatherFile = 'data_46.03125_-118.34375'\n \nif obsHist: \n dt = np.dtype([('PPT', 'ushort'), ('TMAX', 'short'), ('TMIN', 'short'), ('WIND', 'short'), ('SPH', 'short'), ('SRAD', 'short'), ('RMAX', 'short'), ('RMIN', 'short')])\n data = np.fromfile('ObservedHistoricalBinary/' + weatherFile, dtype=dt)\nif modHist: \n dt = np.dtype([('PPT', 'ushort'), ('TMAX', 'short'), ('TMIN', 'short'), ('WIND', 'short')])\n data = np.fromfile('ModeledHistoricalBinary/' + 
weatherFile, dtype=dt)\nif rcp: \n dt = np.dtype([('PPT', 'ushort'), ('TMAX', 'short'), ('TMIN', 'short'), ('WIND', 'short')])\n data = np.fromfile('Rcp85Binary/' + weatherFile, dtype=dt)\n \n\ndf = pd.DataFrame(data)\n\nif obsHist:\n df['PPT'] = df['PPT'] / 40\n df['TMAX'] = df['TMAX'] / 100\n df['TMIN'] = df['TMIN'] / 100\n df['WIND'] = df['WIND'] / 100\n df['SPH'] = df['SPH'] / 1000\n df['SRAD'] = df['SRAD'] /40\n df['RMAX'] = df['RMAX'] / 100\n df['RMIN'] = df['RMIN'] /100\nelse:\n df['PPT'] = df['PPT'] / 40\n df['TMAX'] = df['TMAX'] / 100\n df['TMIN'] = df['TMIN'] / 100\n df['WIND'] = df['WIND'] / 100\n\n\ncolumns = ['Date', 'Max', 'Min', 'Ave', 'Wind', 'Rain']\n\nweather = pd.DataFrame(columns=columns)\n\nif obsHist:\n weather['Date'] = pd.date_range(start='1/1/1979', periods=len(df), freq='D')\nif modHist:\n weather['Date'] = pd.date_range(start='1/1/1950', periods=len(df), freq='D')\nif rcp:\n weather['Date'] = pd.date_range(start='1/1/2006', periods=len(df), freq='D')\n\nweather['Max'] = df['TMAX']\nweather['Min'] = df['TMIN']\nweather['Ave'] = (weather['Max'] + weather['Min']) / 2\nweather['Wind'] = df['WIND']\nweather['Rain'] = df['PPT']\n\nlatitude = float(weatherFile.split('_')[1])\n\nweather['Hrs light'] = weather['Date'].apply(lambda x: photoperiod(latitude, x))\n\nf = open(\"VarroaPy/VarroaPy/files/weather/\" + weatherFile + \".wth\",\"w\") \nf.write(\"WEATHER_FILE\\n\")\nf.write(\"Temperature Scale . . . . . . . . . = C\\n\")\nf.write(\"Weather File Name . . . . . . . . . = \" + weatherFile + \".wth\\n\")\nf.write(\"Format of the Weather File . . . . = MINMAX\\n\")\nf.write(\"Begin Time of the Weather . . . . . = 12:00\\n\")\nf.write(\"Begin Date of the Weather . . . . . = \" + weather['Date'][0].strftime(\"%#m/%#d/%y\") + \"\\n\")\nf.write(\"Ending Time of the Weather . . . . = 12:00\\n\")\nf.write(\"Ending Date of the Weather . . . . 
= \" + weather['Date'][len(weather) - 1].strftime(\"%#m/%#d/%y\") + \"\\n\")\nf.write(\"Start Time of the Simulation . . . = 12:00\\n\")\nf.write(\"Start Date of the Simulation . . . = \" + weather['Date'][0].strftime(\"%#m/%#d/%y\") + \"\\n\")\nf.write(\"Column # of the Temperature . . . . = 4\\n\")\nf.write(\"Column # of the Max Temperature . . = 2\\n\")\nf.write(\"Column # of the Min Temperature . . = 3\\n\")\nf.write(\"Column # of the Daylight . . . . . . = 7\\n\")\nf.write(\"Column # of the Wind Speed . . . . = 5\\n\")\nf.write(\"Column # of the Rain . . . . . . . = 6\\n\")\nf.write(\"Interval Between Readings . . . . . = 1440\\n\")\nf.write(\"----------begin---------\\n\")\n\nweather['Date'] = weather['Date'].apply(lambda x: x.strftime(\"%#m/%#d/%Y\"))\nweather = weather.set_index('Date')\nf.write(weather.to_string()) \nf.close()\n\nwith open('parameters.json', 'r') as f:\n params = json.load(f)\n \n \nvp = VarroaPop(parameters = params, weather_file = \"VarroaPy/VarroaPy/files/weather/\" + weatherFile + \".wth\")\noutput = vp.run_model()\noutput = output.drop(output.index[0])\noutput['Inactive Foragers'] = output['Colony Size'] - output['Adult Drones'] - output['Adult Workers'] - output['Foragers']\n\nvariableList = ['Colony Size', 'Total Eggs', 'DD', 'L', 'N', 'P', 'dd', 'l', 'n']\nvariable = variableList[0]\n\noutput['%Colony'] = output['Colony Size'] / output['Colony Size'] * 100 \n\nax = output.plot(y = variable, color = 'b', legend = False, alpha = 0.75)\nax.axvspan(1, 31, facecolor='gray', alpha=0.3)\nax.axvspan(60, 90, facecolor='gray', alpha=0.3)\nax.axvspan(121, 151, facecolor='gray', alpha=0.3)\nax.axvspan(182, 212, facecolor='gray', alpha=0.3)\nax.axvspan(244, 273, facecolor='gray', alpha=0.3)\nax.axvspan(305, 334, facecolor='gray', alpha=0.3)\nax.axvspan(366, 396, facecolor='gray', alpha=0.3)\nax.axvspan(425, 455, facecolor='gray', alpha=0.3)\nax.axvspan(486, 516, facecolor='gray', alpha=0.3)\n\n\nfor year in range(2002, 2011):\n 
params[\"SimStart\"] = \"01/01/\" + str(year)\n params[\"SimEnd\"] = \"06/30/\" + str(year + 1)\n \n vp = VarroaPop(parameters = params, weather_file = \"VarroaPy/VarroaPy/files/weather/\" + weatherFile + \".wth\")\n output = vp.run_model()\n output = output.drop(output.index[0])\n output['Inactive Foragers'] = output['Colony Size'] - output['Adult Drones'] -output['Adult Workers'] - output['Foragers']\n \n output.plot(y = variable, ax = ax, color = 'b', legend = False, alpha = 0.75)\n\n#ax.set_ylim(0, 150)\nplt.title(variable + ' vs Time')\n#plt.savefig('Plots/' + Location + '/OnePlus_Stacked_Colony_Size_2001%.png', dpi = 1000)\n#plt.close()\n \n \nobsHist = False\nmodHist = False\nrcp = True\n\n \nif Location == 'Omak':\n weatherFile = 'data_48.40625_-119.53125'\nif Location == 'Wenatchee':\n weatherFile = 'data_47.40625_-120.34375'\nif Location == 'Richland':\n weatherFile = 'data_46.28125_-119.34375'\nif Location == 'WallaWalla':\n weatherFile = 'data_46.03125_-118.34375'\n \nif obsHist: \n dt = np.dtype([('PPT', 'ushort'), ('TMAX', 'short'), ('TMIN', 'short'), ('WIND', 'short'), ('SPH', 'short'), ('SRAD', 'short'), ('RMAX', 'short'), ('RMIN', 'short')])\n data = np.fromfile('ObservedHistoricalBinary/' + weatherFile, dtype=dt)\nif modHist: \n dt = np.dtype([('PPT', 'ushort'), ('TMAX', 'short'), ('TMIN', 'short'), ('WIND', 'short')])\n data = np.fromfile('ModeledHistoricalBinary/' + weatherFile, dtype=dt)\nif rcp: \n dt = np.dtype([('PPT', 'ushort'), ('TMAX', 'short'), ('TMIN', 'short'), ('WIND', 'short')])\n data = np.fromfile('Rcp85Binary/' + weatherFile, dtype=dt)\n \n\ndf = pd.DataFrame(data)\n\nif obsHist:\n df['PPT'] = df['PPT'] / 40\n df['TMAX'] = df['TMAX'] / 100\n df['TMIN'] = df['TMIN'] / 100\n df['WIND'] = df['WIND'] / 100\n df['SPH'] = df['SPH'] / 1000\n df['SRAD'] = df['SRAD'] /40\n df['RMAX'] = df['RMAX'] / 100\n df['RMIN'] = df['RMIN'] /100\nelse:\n df['PPT'] = df['PPT'] / 40\n df['TMAX'] = df['TMAX'] / 100\n df['TMIN'] = df['TMIN'] / 100\n 
df['WIND'] = df['WIND'] / 100\n\n\ncolumns = ['Date', 'Max', 'Min', 'Ave', 'Wind', 'Rain']\n\nweather = pd.DataFrame(columns=columns)\n\nif obsHist:\n weather['Date'] = pd.date_range(start='1/1/1979', periods=len(df), freq='D')\nif modHist:\n weather['Date'] = pd.date_range(start='1/1/1950', periods=len(df), freq='D')\nif rcp:\n weather['Date'] = pd.date_range(start='1/1/2006', periods=len(df), freq='D')\n\nweather['Max'] = df['TMAX']\nweather['Min'] = df['TMIN']\nweather['Ave'] = (weather['Max'] + weather['Min']) / 2\nweather['Wind'] = df['WIND']\nweather['Rain'] = df['PPT']\n\nlatitude = float(weatherFile.split('_')[1])\n\nweather['Hrs light'] = weather['Date'].apply(lambda x: photoperiod(latitude, x))\n\nf = open(\"VarroaPy/VarroaPy/files/weather/\" + weatherFile + \".wth\",\"w\") \nf.write(\"WEATHER_FILE\\n\")\nf.write(\"Temperature Scale . . . . . . . . . = C\\n\")\nf.write(\"Weather File Name . . . . . . . . . = \" + weatherFile + \".wth\\n\")\nf.write(\"Format of the Weather File . . . . = MINMAX\\n\")\nf.write(\"Begin Time of the Weather . . . . . = 12:00\\n\")\nf.write(\"Begin Date of the Weather . . . . . = \" + weather['Date'][0].strftime(\"%#m/%#d/%y\") + \"\\n\")\nf.write(\"Ending Time of the Weather . . . . = 12:00\\n\")\nf.write(\"Ending Date of the Weather . . . . = \" + weather['Date'][len(weather) - 1].strftime(\"%#m/%#d/%y\") + \"\\n\")\nf.write(\"Start Time of the Simulation . . . = 12:00\\n\")\nf.write(\"Start Date of the Simulation . . . = \" + weather['Date'][0].strftime(\"%#m/%#d/%y\") + \"\\n\")\nf.write(\"Column # of the Temperature . . . . = 4\\n\")\nf.write(\"Column # of the Max Temperature . . = 2\\n\")\nf.write(\"Column # of the Min Temperature . . = 3\\n\")\nf.write(\"Column # of the Daylight . . . . . . = 7\\n\")\nf.write(\"Column # of the Wind Speed . . . . = 5\\n\")\nf.write(\"Column # of the Rain . . . . . . . = 6\\n\")\nf.write(\"Interval Between Readings . . . . . 
= 1440\\n\")\nf.write(\"----------begin---------\\n\")\n\nweather['Date'] = weather['Date'].apply(lambda x: x.strftime(\"%#m/%#d/%Y\"))\nweather = weather.set_index('Date')\nf.write(weather.to_string()) \nf.close()\n\nwith open('parameters.json', 'r') as f:\n params = json.load(f)\n \nparams[\"SimStart\"] = \"01/01/2090\"\nparams[\"SimEnd\"] = \"06/30/2091\" \n\nvp = VarroaPop(parameters = params, weather_file = \"VarroaPy/VarroaPy/files/weather/\" + weatherFile + \".wth\")\noutput = vp.run_model()\noutput = output.drop(output.index[0])\noutput['Inactive Foragers'] = output['Colony Size'] - output['Adult Drones'] - output['Adult Workers'] - output['Foragers']\n\nvariableList = ['Colony Size', 'Total Eggs', 'DD', 'L', 'N', 'P', 'dd', 'l', 'n']\nvariable = variableList[0]\n\noutput['%Colony'] = output['Colony Size'] / output['Colony Size'] * 100 \n\nax = output.plot(y = variable, color = 'b', legend = False, alpha = 0.75)\nax.axvspan(1, 31, facecolor='gray', alpha=0.3)\nax.axvspan(60, 90, facecolor='gray', alpha=0.3)\nax.axvspan(121, 151, facecolor='gray', alpha=0.3)\nax.axvspan(182, 212, facecolor='gray', alpha=0.3)\nax.axvspan(244, 273, facecolor='gray', alpha=0.3)\nax.axvspan(305, 334, facecolor='gray', alpha=0.3)\nax.axvspan(366, 396, facecolor='gray', alpha=0.3)\nax.axvspan(425, 455, facecolor='gray', alpha=0.3)\nax.axvspan(486, 516, facecolor='gray', alpha=0.3)\n\n\nfor year in range(2090, 2100):\n params[\"SimStart\"] = \"01/01/\" + str(year)\n params[\"SimEnd\"] = \"06/30/\" + str(year + 1)\n \n vp = VarroaPop(parameters = params, weather_file = \"VarroaPy/VarroaPy/files/weather/\" + weatherFile + \".wth\")\n output = vp.run_model()\n output = output.drop(output.index[0])\n output['Inactive Foragers'] = output['Colony Size'] - output['Adult Drones'] -output['Adult Workers'] - output['Foragers']\n \n output.plot(y = variable, ax = ax, color = 'b', legend = False, alpha = 0.75)\n\n#ax.set_ylim(0, 150)\nplt.title(variable + ' vs Time')\n#plt.savefig('Plots/' + 
Location + '/OnePlus_Stacked_Colony_Size_2001%.png', dpi = 1000)\n#plt.close()\nplt.savefig('Plots/' + Location + '/DD_ObsHist_OnePlus_Overlay_Superimposed_2001_2092.png', dpi = 1000)\nplt.close()\n" }, { "alpha_fraction": 0.5358209013938904, "alphanum_fraction": 0.5850746035575867, "avg_line_length": 19.9375, "blob_id": "05c6abca672984633f7ca9b56d666179b008743c", "content_id": "08cf36cac4c07473bc74ef9df813bbca718f0393", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 670, "license_type": "no_license", "max_line_length": 80, "num_lines": 32, "path": "/Linux/tests/test_cuintarray.cpp", "repo_name": "RobertCurry/VarroaPopLinux", "src_encoding": "UTF-8", "text": "#include \"catch2/catch.hpp\"\n\n//! include explicitely since we are not using the pre-compiled header for tests\n//! may change this in the future\n#include \"stdafx.h\" \n\n#include \"cuintarray.h\"\n\nTEST_CASE(\"CUIntArray operations\", \"[port]\") {\n \n CUIntArray array;\n \n CHECK(array.GetSize() == 0);\n\n for (UINT i=15; i<100; i++)\n array.Add(i);\n \n CHECK(array.GetSize() == (100-15));\n\n CHECK(array.GetAt(0) == 15);\n CHECK(array.GetAt(10) == 25);\n\n array.RemoveAt(10);\n \n CHECK(array.GetAt(0) == 15);\n CHECK(array.GetAt(10) == 26);\n CHECK(array.GetAt(20) == 36);\n \n array.RemoveAll();\n \n CHECK(array.GetSize() == 0);\n}\n" }, { "alpha_fraction": 0.5334292054176331, "alphanum_fraction": 0.5384615659713745, "avg_line_length": 22.576271057128906, "blob_id": "84223170397e82adefeba9aadabe29b4da8f2982", "content_id": "a18913383593433ff23f13cd07ee13bc9e77a74e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1391, "license_type": "no_license", "max_line_length": 80, "num_lines": 59, "path": "/Linux/tests/test_cstringlist.cpp", "repo_name": "RobertCurry/VarroaPopLinux", "src_encoding": "UTF-8", "text": "#include \"catch2/catch.hpp\"\n\n//! 
include explicitely since we are not using the pre-compiled header for tests\n//! may change this in the future\n#include \"stdafx.h\" \n\n#include \"coblist.h\"\n\n#include <array>\n#include <sstream>\n\nTEST_CASE(\"CStringList operations\", \"[port]\") {\n \n CStringList list;\n\n SECTION(\"Empty list checks\") {\n \n CHECK(list.GetCount() == 0);\n CHECK(list.IsEmpty());\n CHECK(list.GetHeadPosition() == nullptr);\n }\n\n SECTION(\"Insert and Find - 1 element\") {\n\n list.AddTail(\"tail\");\n\n CHECK(list.GetCount() == 1);\n CHECK_FALSE(list.IsEmpty());\n\n POSITION head = list.GetHeadPosition();\n auto cObj = list.GetNext(head);\n CHECK(cObj == \"tail\");\n }\n\n SECTION(\"Insert and Find - 2 elements\") {\n\n list.AddTail(\"head\");\n list.AddTail(\"tail\");\n\n CHECK(list.GetCount() == 2);\n CHECK_FALSE(list.IsEmpty());\n\n POSITION it = list.GetHeadPosition();\n {\n auto cObj = list.GetNext(it);\n CHECK(cObj == \"head\");\n CHECK(it != nullptr);\n }\n {\n auto cObj = list.GetNext(it);\n CHECK(cObj == \"tail\");\n CHECK(it == nullptr);\n }\n list.RemoveAll();\n CHECK(list.GetCount() == 0);\n CHECK(list.IsEmpty());\n CHECK(list.GetHeadPosition() == nullptr);\n }\n}\n" }, { "alpha_fraction": 0.5677139759063721, "alphanum_fraction": 0.5850487351417542, "avg_line_length": 23.289474487304688, "blob_id": "da894f28363686ecdda956166b773b2971f36705", "content_id": "c7e62f6074e73e35bc56707d71f5f701ba328d4e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 923, "license_type": "no_license", "max_line_length": 80, "num_lines": 38, "path": "/Linux/tests/test_ctime.cpp", "repo_name": "RobertCurry/VarroaPopLinux", "src_encoding": "UTF-8", "text": "#include \"catch2/catch.hpp\"\n\n//! include explicitely since we are not using the pre-compiled header for tests\n//! 
may change this in the future\n#include \"stdafx.h\" \n\n#include \"ctime.h\"\n\n#include <ctime>\n\nTEST_CASE(\"CTime operations\", \"[port]\") {\n \n SECTION(\"construct\") {\n\n COleDateTime dateTime(2020, 1, 11, 0, 0, 0);\n\n SYSTEMTIME systemTime;\n dateTime.GetAsSystemTime(systemTime);\n\n CTime time(systemTime);\n\n CHECK(time.GetTime() >= dateTime);\n }\n \n SECTION(\"now\") {\n\n CTime time;\n auto now = std::time(nullptr);\n auto tm = std::localtime(&now);\n\n auto oleTime = time.GetTime();\n CHECK(oleTime.GetYear() == tm->tm_year + 1900);\n CHECK(oleTime.GetMonth() == tm->tm_mon + 1);\n CHECK(oleTime.GetDay() == tm->tm_mday);\n CHECK(oleTime.GetHour() == tm->tm_hour);\n CHECK(oleTime.GetMinute() == tm->tm_min);\n }\n}\n" }, { "alpha_fraction": 0.7269399762153625, "alphanum_fraction": 0.7269399762153625, "avg_line_length": 21.393442153930664, "blob_id": "cd71d088ef589a6bd0fe522e914ff820152b1088", "content_id": "f47d17f55511dbccd443b24ed2a41c8dbf07dbbe", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1366, "license_type": "no_license", "max_line_length": 86, "num_lines": 61, "path": "/Linux/portcode/cptrlist.h", "repo_name": "RobertCurry/VarroaPopLinux", "src_encoding": "UTF-8", "text": "#pragma once\n#ifndef CPTRLIST_CUSTOM_H\n#define CPTRLIST_CUSTOM_H\n\n#include \"cobject.h\"\n\n#include <cstddef>\n#include <memory>\n#include <list>\n\n/**\n * TODO: Improve the way to replace the behavior the of MFC POSITION structure since\n * this implementation is not efficient (lots of allocations)\n */\nnamespace CPtrListNs { template<class TYPE> struct InnerPosition : public __POSITION {\n\tInnerPosition(const typename std::list<TYPE>::const_iterator& it) : m_it(it) {}\n\tInnerPosition* copy() { return new InnerPosition(m_it); }\n typename std::list<TYPE>::const_iterator m_it;\n}; }\n\n/**\n * Only supports the necessary interface for the good behavior of VarroaPop\n */\nclass CPtrList : public CObject\n{\n 
public:\n \n};\n\n/**\n * Only supports the necessary interface for the good behavior of VarroaPop\n */\ntemplate<class BASE_CLASS, class TYPE>\nclass CTypedPtrList\n{\npublic:\n\n\tCTypedPtrList();\n\t~CTypedPtrList();\n\n\tBOOL IsEmpty() const;\n\tINT_PTR GetCount() const;\n\tTYPE GetAt(POSITION position) const;\n\tPOSITION AddTail(TYPE object);\n\tvoid RemoveAt(POSITION position);\n\tPOSITION GetHeadPosition() const;\n\tPOSITION GetTailPosition() const;\n\tTYPE GetHead() const;\n\tTYPE GetTail() const;\n\tTYPE GetNext(POSITION& position) const;\n\tTYPE RemoveHead();\n\tvoid RemoveAll();\n\nprotected:\n\n std::list<TYPE> m_data;\n};\n\n#include \"cptrlist.inline.h\"\n\n#endif // CPTRLIST_CUSTOM_H\n" }, { "alpha_fraction": 0.618298351764679, "alphanum_fraction": 0.6480186581611633, "avg_line_length": 29.64285659790039, "blob_id": "bfb55bd2a70e964236b2e0a934d86be9eaf6268d", "content_id": "78292585d13e5047c2b481e2a9c45f55e56a0163", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1716, "license_type": "no_license", "max_line_length": 113, "num_lines": 56, "path": "/Linux/run-simulations.py", "repo_name": "RobertCurry/VarroaPopLinux", "src_encoding": "UTF-8", "text": "import datetime\nimport os\nimport subprocess\n\nclass Location:\n\n def __init__(self, city, weather_filename):\n self.city = city\n self.weather_filename = weather_filename\n\n\nclass Weather:\n\n def __init__(self, type, input_filename):\n self.type = type\n self.input_filename = input_filename\n\n def input_filename_without_extension(self):\n return os.path.splitext(self.input_filename)[0]\n\n\nplaces = [\n Location('walla-walla', 'data_46.03125_-118.34375'),\n Location('omak', 'data_48.40625_-119.53125'),\n Location('wenatchee', 'data_47.40625_-120.34375')\n]\n\nweathers = [\n Weather('ObservedHistoricalBinary', 'observed.txt'),\n Weather('Rcp85Binary', 'rcp85.txt')\n]\n\nbuild_directory = 'build/'\n\nnow = 
datetime.datetime.now()\noutput_directory = now.strftime(\"results-%Y%m%d%H%M%S\")\n\npython = 'py'\nprefix_executable = ''\nexecutable = 'VarroaPop.exe'\nif os.name != 'nt':\n python = 'python3'\n prefix_executable = './'\n executable = 'VarroaPop'\n\nfull_executable = prefix_executable + build_directory + executable\n\nif __name__ == '__main__':\n for place in places:\n for weather in weathers:\n sub_command = python + ' scripts/simulations.py --exe ' + full_executable\n sub_command += ' --vrp ../Simulations/VarroaPy/VarroaPy/files/exe/simplified.vrp'\n sub_command += ' --output_directory ' + os.path.join(output_directory, place.city)\n sub_command += ' --input_file ../Simulations/VarroaPy/VarroaPy/files/input/' + weather.input_filename\n sub_command += ' --weather_file ../Simulations/' + weather.type + '/' + place.weather_filename\n subprocess.call(os.path.normpath(sub_command), shell=True)\n" }, { "alpha_fraction": 0.659681499004364, "alphanum_fraction": 0.6789606213569641, "avg_line_length": 28.09756088256836, "blob_id": "c87486e08e38473c584c37977f97e7a536cdd8b5", "content_id": "f77a3bd0102b45aef84048be05a0e8c13d884df8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1193, "license_type": "no_license", "max_line_length": 101, "num_lines": 41, "path": "/Adult.h", "repo_name": "RobertCurry/VarroaPopLinux", "src_encoding": "UTF-8", "text": "// Adult.h: interface for the CAdult class.\n//\n//////////////////////////////////////////////////////////////////////\n\n// For some reason the guard in Adult.h AFX_ADULT_H__8C6C41B6_7899_11D2_8D9A_0020AF233A70__INCLUDED_ \n// was conflicting on some Linux machines so let's get rid of it\n#pragma once\n\n#include \"Bee.h\"\n#include \"Mite.h\"\n\nclass CAdult : public CBee {\n private:\n\tfloat m_Lifespan;\n\tfloat m_CurrentAge;\n\tCMite m_Mites;\n\tdouble m_Virgins;\n\tdouble m_ForageInc;\n\n public:\n\tCAdult();\n\tCAdult(int Num);\n\tCAdult(CAdult* oldAdult);\n\tvoid 
SetLifespan(int span) {m_Lifespan = (float)span;}\n\tvoid SetCurrentAge(float age) {m_CurrentAge = age;}\n\tvoid SetPropVirgins(double prop) {m_Virgins = prop;}\n\tdouble GetPropVirgins() {return m_Virgins;}\n\tvoid IncrementAge(float increment) {m_CurrentAge += increment;}\n\tfloat GetCurrentAge() {return m_CurrentAge;}\n\tint GetLifespan() {return int(m_Lifespan);}\n\tvoid SetForageInc( double Inc ) {m_ForageInc = Inc;}\n\tdouble GetForageInc() {return m_ForageInc;}\n\tvoid Serialize(CArchive &ar);\n\tvirtual ~CAdult();\n\n\tCAdult& operator=(const CAdult& theAdult);\n\n\tvoid SetMites(CMite theMites) {m_Mites = theMites;}\n\tCMite GetMites() {return m_Mites;}\n\n};\n" }, { "alpha_fraction": 0.7672955989837646, "alphanum_fraction": 0.7672955989837646, "avg_line_length": 16.77777862548828, "blob_id": "f10429f1632653446a289b101ee4cd18ed4793e3", "content_id": "b13e5a771f1e3b8c22a8ff3dc9606e51c8bc18a2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 159, "license_type": "no_license", "max_line_length": 64, "num_lines": 9, "path": "/Linux/tests/helpers/common.h", "repo_name": "RobertCurry/VarroaPopLinux", "src_encoding": "UTF-8", "text": "#pragma once\n\n#include <string>\n\nstd::string GetSimulationsDir();\n\nstd::string GetTestsDir();\n\nstd::string GetFileInTempDirectory(const std::string& filename);" }, { "alpha_fraction": 0.6427379250526428, "alphanum_fraction": 0.6427379250526428, "avg_line_length": 20.39285659790039, "blob_id": "dd15cbf779361e795872df87157361bcc3675da5", "content_id": "474ce4abe8ea0beb8a18b84bc617e3b6fe09289e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 599, "license_type": "no_license", "max_line_length": 60, "num_lines": 28, "path": "/Linux/tests/helpers/myobject.h", "repo_name": "RobertCurry/VarroaPopLinux", "src_encoding": "UTF-8", "text": "#pragma once\n#ifndef MYOBJECT_H\n#define MYOBJECT_H\n\n#include \"cobject.h\" 
\n\n#include <string>\n\nclass MyObject : public CObject {\npublic:\n MyObject()\n : m_data(\"nodata\")\n , m_initialized(false){\n }\n virtual ~MyObject(){\n m_data = \"badfood\";\n m_initialized = false;\n }\n void SetInitialized(){m_initialized = true;}\n bool IsInitialized()const{return m_initialized;}\n void UpdateData(const std::string& data){m_data = data;}\n const std::string& GetData()const{return m_data;}\nprotected:\n std::string m_data;\n bool m_initialized;\n};\n\n#endif // MYOBJECT_H\n" }, { "alpha_fraction": 0.7894981503486633, "alphanum_fraction": 0.7894981503486633, "avg_line_length": 32.640625, "blob_id": "cc7ae3d7b9571a7c148a3a04964af8c4a22cc85b", "content_id": "1b18179cf4859c53707b1342956513ba4b6d6fb3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 2152, "license_type": "no_license", "max_line_length": 97, "num_lines": 64, "path": "/Linux/varroapopcmdbridge.h", "repo_name": "RobertCurry/VarroaPopLinux", "src_encoding": "UTF-8", "text": "#pragma once\n#ifndef VARROAPOPCMDBRIDGE_H\n#define VARROAPOPCMDBRIDGE_H\n\n#include \"varroapopsession.h\"\n\nclass VarroaPopCmdBridge : public CVarroaPopSessionBridge\n{\npublic:\n\n\tVarroaPopCmdBridge(CVarroaPopSession& session);\n\t~VarroaPopCmdBridge();\n\n\tvoid SetResultsFileName(const CString& resultsFileName) { m_ResultsFileName = resultsFileName; }\n\n\t//! method called when Simulation start time is present in the input file\n\tvirtual void SimulationStartUpdated();\n\n\t//! method called when Simulation end time is present in the input file\n\tvirtual void SimulationEndUpdated();\n\n\t//! method called before the main loop of the simulation starts\n\tvirtual void StartSimulation(CVarroaPopSession& session);\n\n\t//! method called after the main loop of the simulation\n\tvirtual void EndSimulation(CVarroaPopSession& session);\n\n\t//! 
method called after the immigration flag was read in the session\n\tvirtual void ImmigrationEnabled(bool enabled);\n\n\t//! method called if a weather filename was missing in the session\n\tvirtual void WeatherFileMissing();\n\n\t//! method called if a weather filename was specified in the session\n\tvirtual void WeatherFileLoaded(bool loaded, const CString& filename);\n\n\t//! method called when the session file was fully loaded\n\tvirtual void SessionFileLoaded(CArchive& ar);\n\n\tvirtual CString GetDefaultPathName(CArchive& ar);\n\n\t//! method called when a variable defined in the input file is not handled\n\tvirtual void InputFileUnknownVariable(const CString& name);\n\n\t//! method called when an exception occured trying to load the input file\n\tvirtual void InputFileException(const CString& name);\n\n\t//! method called when an exception occured trying to write the output file\n\tvirtual void OutputFileException(const CString& name);\n\n\t//! method called to get the version number to display in the result file\n\tvirtual CString GetVersion();\n\n\t//! method called is the ShowWarning option is on and dates are incompatible\n\t//! if the method returns True the simulation will proceed \n\tvirtual BOOL CheckDateConsistencyFailed(const CString& warning);\n\nprotected:\n\n\tCVarroaPopSession& m_Session;\n\tCString m_ResultsFileName;\n};\n\n#endif // VARROAPOPCMD_H" } ]
84
jonathanhunsucker/amtrak-on-time
https://github.com/jonathanhunsucker/amtrak-on-time
316d197781e615dec28b864a1786e24417cf2e7f
e8ff7a4b0db7cd2649ccaf59d74dc1806ec2d37d
c25ca05e96b7644cac6a2492e1bd832edfca17e1
refs/heads/main
2022-12-10T05:24:16.766576
2019-12-10T04:48:36
2019-12-10T04:48:36
225,792,425
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "6d914e82e47cc26199a13862423f604b49a1f43d", "content_id": "efd1952a71248e69c9903bd1c3a079a5caea7026", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/536.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 536\n---\n" }, { "alpha_fraction": 0.31578946113586426, "alphanum_fraction": 0.42105263471603394, "avg_line_length": 5.333333492279053, "blob_id": "1c5b6fd271d32e9e5bd9226c5f106a4ad45221a5", "content_id": "5fded68095fe7f91965887e6ced693e9b6980564", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 19, "license_type": "no_license", "max_line_length": 10, "num_lines": 3, "path": "/content/trains/56.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 56\n---\n" }, { "alpha_fraction": 0.540229856967926, "alphanum_fraction": 0.6781609058380127, "avg_line_length": 16.399999618530273, "blob_id": "3fa3cee69f98f95350953121647d03f1bc0970a1", "content_id": "c8fbda31f1281c196f2470a00ef6eee4348a41d3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 87, "license_type": "no_license", "max_line_length": 26, "num_lines": 5, "path": "/content/routes/city-of-new-orleans.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nroute: City of New Orleans\nslug: city-of-new-orleans\ntrains: [1159,59,58,1158]\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "ddcb6000d8fd49f07a9daef42aa77b7c2b2468c9", "content_id": "9cf745335bd705dcc3024db4eafbca2e765d571f", 
"detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/179.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 179\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "6dd215e1add48e25762b256047e632ec2139a3e1", "content_id": "0175e9d7c83270802b7b9eb040c701347dbfb4fe", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/291.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 291\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "a9353c10c876c87e2fbadd97971d6d08579950d6", "content_id": "96b898e03a86fea3167be9a2e234a81e7863012d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/590.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 590\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "b97c7b1ec180ec1819ece03a1c0e6504bfb4dc39", "content_id": "f68651979ec77f595899ff186785b9e88c912816", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/525.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 
525\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "e75a0d2319ffff63c84e6b27b93be2dc3cc6824e", "content_id": "72345664ac2fe4f7e3282974cb3b2dbb85e0bc42", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/284.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 284\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "baa00e312eb2e2ba9603abc5279d6815cbdbf492", "content_id": "093e43c9916a28d82336b1dba95c6297766fb85d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/382.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 382\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "90d65b82cf87fae05a8683231ad373716206c929", "content_id": "2d6af236badeed45ed5a5014d61cb2584affee5a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/290.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 290\n---\n" }, { "alpha_fraction": 0.4878048896789551, "alphanum_fraction": 0.5975610017776489, "avg_line_length": 26.33333396911621, "blob_id": "0f7c0bcdf02aa9d1416659d012df71d3dbca6f7a", "content_id": "bed827e03b7287cb63888d34cbc6b481929c7cc8", "detected_licenses": [], "is_generated": false, "is_vendor": false, 
"language": "Shell", "length_bytes": 82, "license_type": "no_license", "max_line_length": 70, "num_lines": 3, "path": "/script/analyze", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "#!/bin/sh\n\ngrep NYP ./data/2018/2100/* | awk '{ print $7 }' | php src/toDelay.php\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "aa5dd5dde433e9754b4e0984fa7f5b48dabe3e96", "content_id": "bbd485eb0dc12ac67a6ac0a19122de0b804cced0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/720.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 720\n---\n" }, { "alpha_fraction": 0.31578946113586426, "alphanum_fraction": 0.42105263471603394, "avg_line_length": 5.333333492279053, "blob_id": "b2d3f68199469acfc87b2af9270fbbc02929fe94", "content_id": "3f23cde7faf13537a4c63a3815d4137b1be030f0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 19, "license_type": "no_license", "max_line_length": 10, "num_lines": 3, "path": "/content/trains/49.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 49\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "56662f18ac4c7ef815e4831858952ede000a0f94", "content_id": "603845301860b2fd6db77e3cdd87d8e8833c68f2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/175.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 175\n---\n" }, { 
"alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "389ccd5c1f6fa7757831dab952617cc02e55e242", "content_id": "f807f5160bb565ed7222202571d33cb22de9ddec", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/646.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 646\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "3c81d27a4f3b117a0d7d4e17340b4a55bea1f6f2", "content_id": "f824dd96995bb9e4167264cda40e689408516f0d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/651.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 651\n---\n" }, { "alpha_fraction": 0.523809552192688, "alphanum_fraction": 0.6507936716079712, "avg_line_length": 11.600000381469727, "blob_id": "f6aeac2559310227b32828cf5ddb5db20446279d", "content_id": "0ed55bf13c8b7d2c310bd247342d3d2e0ec7deda", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 63, "license_type": "no_license", "max_line_length": 21, "num_lines": 5, "path": "/content/routes/vermonter.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nroute: Vermonter\nslug: vermonter\ntrains: [57,56,55,54]\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "70802fa59d3469e8c7ef61e48647f7441e9bac09", "content_id": "134a3345d25c85a0f3168512d193a9e71ebd8139", "detected_licenses": [], 
"is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/685.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 685\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "3f48af05a6d829d4cee88cc19663a95e521cbec4", "content_id": "b7dc04bff382a243942bbda079dec0a5621eab21", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/161.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 161\n---\n" }, { "alpha_fraction": 0.2857142984867096, "alphanum_fraction": 0.4761904776096344, "avg_line_length": 6, "blob_id": "27f636fafc0af978b2de1a265acb6e186766be63", "content_id": "b0bc63ff20b2b98aaa986f79a5c06852f59cc481", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 21, "license_type": "no_license", "max_line_length": 12, "num_lines": 3, "path": "/content/trains/1143.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 1143\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "b0173c4d84833eb1ad8e251100da2204970b9d86", "content_id": "7150e6cd8a00267943194ba257088d0157c9da7b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/591.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 591\n---\n" }, { "alpha_fraction": 
0.2857142984867096, "alphanum_fraction": 0.4761904776096344, "avg_line_length": 6, "blob_id": "65743459b9931a7c642ad257169b23e773074739", "content_id": "00e531e2d7d6926f38c8320dd383f937575d4beb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 21, "license_type": "no_license", "max_line_length": 12, "num_lines": 3, "path": "/content/trains/1063.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 1063\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "0d49103d3bfbec33a5af5a6190e20e9855f4fba3", "content_id": "71dbc92bd9a4dc0d464feaa183f016943157fcd3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/371.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 371\n---\n" }, { "alpha_fraction": 0.6000000238418579, "alphanum_fraction": 0.6615384817123413, "avg_line_length": 12, "blob_id": "633658b7fa5c0c8cce5c660ae594f602001603c3", "content_id": "f324b760415160b107541580c282a42c7b5046c4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 65, "license_type": "no_license", "max_line_length": 20, "num_lines": 5, "path": "/content/routes/silver-meteor.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nroute: Silver Meteor\nslug: silver-meteor\ntrains: [98,97]\n---\n" }, { "alpha_fraction": 0.2857142984867096, "alphanum_fraction": 0.4761904776096344, "avg_line_length": 6, "blob_id": "e5d89868e905b7dae1b79ac375ce24c42f2ad4e7", "content_id": "11e7c1559f9105ca603fbb50ca58d37e14be2f7c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", 
"length_bytes": 21, "license_type": "no_license", "max_line_length": 12, "num_lines": 3, "path": "/content/trains/2220.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 2220\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "2d50f2eec7851a9db05b466dbb2b1e742f5c5e2c", "content_id": "14962e92162613b7b80b8b6e755587afb246d507", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/355.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 355\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "00c9c54750f71034ecf90141fd66d1a5e575acef", "content_id": "22da4046b83cc66e2b5f5493c1a442f6ebeb8510", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/567.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 567\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "cb697d78489af9a464c4b14f46c911060774f814", "content_id": "3fdd0b861b46e4e82dbf3a017b17a891d3abdb93", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/306.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 306\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, 
"avg_line_length": 5.666666507720947, "blob_id": "b9672fc78ed4f65a6b5abbbcb557f7a57b83e4d5", "content_id": "8869d4525138984f70f9981509367c6f1c2df6be", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/501.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 501\n---\n" }, { "alpha_fraction": 0.2857142984867096, "alphanum_fraction": 0.4761904776096344, "avg_line_length": 6, "blob_id": "115ff15fc1f7c1be06f9896d83bdb359c294d312", "content_id": "1d31257b0aae4068b7dee8dcacc59abd79baa89c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 21, "license_type": "no_license", "max_line_length": 12, "num_lines": 3, "path": "/content/trains/2254.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 2254\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "868f8e17002dc4edbfa671190e210be555252ea6", "content_id": "1e178c1b66e217ac9b2a4512516b467d93947c28", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/644.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 644\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "65225e14e86a940fb27fa7686c6d7546d3f3e1de", "content_id": "864a060a8ef215dd0e17ca28ae9d01dfa23860cb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, 
"path": "/content/trains/300.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 300\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "fe5a0dd5ac22a9385adea601273daebe9d611131", "content_id": "a7c0262782e2660f50542cdbc131dbb174510bd6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/256.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 256\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "9b2a5a6d95f95452642b7399f41103da19f6e9d3", "content_id": "ad710cf88a67630cf5d9d305f9b99ded260a54b7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/385.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 385\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "3e4e1645d6a66455912d6ec31fb33ac4e1e2b298", "content_id": "6362aa1a3c2f8d721cfb7822ce19057423a5c20f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/250.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 250\n---\n" }, { "alpha_fraction": 0.3333333432674408, "alphanum_fraction": 0.3888888955116272, "avg_line_length": 5, "blob_id": "62fa609ee6593982ef7ce105a9f686c342364f4f", "content_id": 
"754b111193cae7df6d2fed0ffdd28f7d855f6fd8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 18, "license_type": "no_license", "max_line_length": 9, "num_lines": 3, "path": "/content/trains/8.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 8\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "9212160bcb7713ec4fe25dc0b393da6444d2b6b6", "content_id": "b6080960aeb630c4099d61d92b5a500d867ad0c8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/244.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 244\n---\n" }, { "alpha_fraction": 0.2864077687263489, "alphanum_fraction": 0.7718446850776672, "avg_line_length": 40.20000076293945, "blob_id": "cfabda8141fe5ca071558ecd69cea1eecf032d27", "content_id": "510ec40496d5a7d6a43ae99c5303bc7e2cac9721", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 206, "license_type": "no_license", "max_line_length": 134, "num_lines": 5, "path": "/content/routes/northeast-regional-extra.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nroute: Northeast Regional Extra\nslug: northeast-regional-extra\ntrains: [1198,1136,1194,1182,1141,1140,1195,1121,1184,1171,1167,1193,1186,1173,1164,1129,1168,1134,1175,1143,1196,1179,1139,1174,1135]\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "8ead0b852da53f28d93c037f0314fadfca3d0acf", "content_id": "5f4fff49710f38b87aa934b5bf78df3ef876d757", "detected_licenses": [], "is_generated": false, "is_vendor": false, 
"language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/727.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 727\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "cc3e1aff96b050a1cfb62ce0c536b6ad91cc4911", "content_id": "a83f813231fd6e708ac076671eda74d106e7fbdf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/173.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 173\n---\n" }, { "alpha_fraction": 0.5932203531265259, "alphanum_fraction": 0.6610169410705566, "avg_line_length": 10.800000190734863, "blob_id": "8c770da19faca71e3410d83612be9ddabe458871", "content_id": "8f00fc46dbd4a65fbe1b942442c17ecdfb2c0651", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 59, "license_type": "no_license", "max_line_length": 17, "num_lines": 5, "path": "/content/routes/carolinian.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nroute: Carolinian\nslug: carolinian\ntrains: [80,79]\n---\n" }, { "alpha_fraction": 0.2857142984867096, "alphanum_fraction": 0.4761904776096344, "avg_line_length": 6, "blob_id": "460a7e52cf238fb7081172b7229337c513bec256", "content_id": "6e2eb4f6b1d140d8234a016c8126f7288929047d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 21, "license_type": "no_license", "max_line_length": 12, "num_lines": 3, "path": "/content/trains/2261.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 2261\n---\n" }, { "alpha_fraction": 
0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "df20477358f18cdccedba87a08442307fddf7166", "content_id": "723163c1425e641d9847d11bafb430ad997cd6ca", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/236.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 236\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "58a791ea15b09b850655408d4dbf9f1d3c236fae", "content_id": "9afc0dfb1f5f2e2236b3e9fc5762fb9fc5cba8a4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/185.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 185\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "a0122a8573a4a3c14d0cbbd50718b2a4ecf15ca8", "content_id": "3bb53b33385556fae9b7a03725fb76f550317df1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/373.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 373\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "fd62dc3ca00682529d36c202a3fe99a1176a8700", "content_id": "3ec109fde3e88ca3770ea2d0985c0a03ff619f98", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", 
"length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/710.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 710\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "9680dfcbf9cf410a8ffdf25b45fa349a4d613512", "content_id": "ed80c39094c265a165218e965ed232bacff791ef", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/697.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 697\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "56cd2fd8caf463b81aa6678223dd4f90f71760eb", "content_id": "ff603e3c0510ca3fabd98efe8980423442b98f49", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/184.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 184\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "62366dcc7170086c4d350fb499701b0868f55df6", "content_id": "163b0c0ca817fec0ed3b53549e7ad998a6ea2424", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/620.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 620\n---\n" }, { "alpha_fraction": 0.2857142984867096, "alphanum_fraction": 0.4761904776096344, 
"avg_line_length": 6, "blob_id": "4b25a7c8b6f93f27df1efd7ca8066780d0721322", "content_id": "1140195ea1b82c294e94c37e1df4b4fc691d1e71", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 21, "license_type": "no_license", "max_line_length": 12, "num_lines": 3, "path": "/content/trains/1986.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 1986\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "489dfa22971532f0eff63f885b7d558b4f8e75c3", "content_id": "e00ab4d65ae5d1c6d020d3ed172d94a86f752fc5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/540.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 540\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "ff90e5e6b5dcbd1de399b7ebd415dfae99919721", "content_id": "fd40bea27960395a69d3e12635e0c5d548ec310a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/612.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 612\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "5458967b5445ef551787f02cbc3cb3be5903adfe", "content_id": "72c2d0280a4d604e6c0106593e8f31339831012e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, 
"path": "/content/trains/527.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 527\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "91688dbd4f86c83101653b10ea8f614ad6fe91d3", "content_id": "0be0234dc24603542b4eb36ef70b241b991e332f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/688.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 688\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "e992242e8ad2b2a96b91d78664c80cf01644f2b5", "content_id": "e76142e991b83cf951eff9f92913e717d21360ea", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/503.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 503\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "009ce27d8b7594c9990bf8bd92a032fcef662ff9", "content_id": "3cf245dbecb1fc17c6b133dedcf50661fc2c61ac", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/782.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 782\n---\n" }, { "alpha_fraction": 0.2857142984867096, "alphanum_fraction": 0.4761904776096344, "avg_line_length": 6, "blob_id": "381c58c34f176245c3c603e2055c3a02d4820e71", "content_id": 
"e131130b985e168ddb919196f89e310254f01f53", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 21, "license_type": "no_license", "max_line_length": 12, "num_lines": 3, "path": "/content/trains/1168.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 1168\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "6cb8e7104e16bb5731a525ee1c65b5fdadc1b6bd", "content_id": "f63bb5556e807feaee135cb7abc661fca1eb925c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/293.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 293\n---\n" }, { "alpha_fraction": 0.604938268661499, "alphanum_fraction": 0.7037037014961243, "avg_line_length": 15.199999809265137, "blob_id": "1842ed2f14ca5bddb820aa96e0f7b960d36d07fc", "content_id": "5223c3a3b08ebbe5441a1156b014af6484987b5d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 81, "license_type": "no_license", "max_line_length": 26, "num_lines": 5, "path": "/content/routes/winter-park-express.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nroute: Winter Park Express\nslug: winter-park-express\ntrains: [1005,1006]\n---\n" }, { "alpha_fraction": 0.31578946113586426, "alphanum_fraction": 0.42105263471603394, "avg_line_length": 5.333333492279053, "blob_id": "10b427ce1c5f6433c6fb0e2baf96acbcd90a6a7d", "content_id": "1397afc9354d7ccece08904ceac576db8c6a820b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 19, "license_type": "no_license", "max_line_length": 10, "num_lines": 3, "path": 
"/content/trains/20.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 20\n---\n" }, { "alpha_fraction": 0.3333333432674408, "alphanum_fraction": 0.3888888955116272, "avg_line_length": 5, "blob_id": "95d854633e65e663e016489d8915674075d6e6a6", "content_id": "c47bc7cd7456718b603c8441237f8119679e9445", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 18, "license_type": "no_license", "max_line_length": 9, "num_lines": 3, "path": "/content/trains/5.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 5\n---\n" }, { "alpha_fraction": 0.2857142984867096, "alphanum_fraction": 0.4761904776096344, "avg_line_length": 6, "blob_id": "3005c0799522fb2ccebccac9d5e1c8f8905d67b7", "content_id": "926b3609bf3c59a9ecdacf2b82eb14cdf66bb1df", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 21, "license_type": "no_license", "max_line_length": 12, "num_lines": 3, "path": "/content/trains/2205.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 2205\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "7f1a24fd6f1e8c6c4eb55e6ded34cbd5c4300cc1", "content_id": "b6e87e79e252911099a4454944f6a92b9bab6db6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/822.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 822\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "3a685e5ffd115efb42d155836d6e4761ef7bbc1e", "content_id": 
"7c73c1b8c818dba4344a87534a61d145bdbec030", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/639.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 639\n---\n" }, { "alpha_fraction": 0.2857142984867096, "alphanum_fraction": 0.4761904776096344, "avg_line_length": 6, "blob_id": "5c2321ed959fea75827111caf99497da6da15a03", "content_id": "026f6e5c7aaf2e40a1e35e5b60d3db2611f884e8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 21, "license_type": "no_license", "max_line_length": 12, "num_lines": 3, "path": "/content/trains/2239.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 2239\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "626be62ce42ea03d07e726525307d316b1d3b008", "content_id": "6a74d794f4d32a86d05595e700f6c23982552203", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/448.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 448\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "7ea2f8ca4245510560382fa66f95ee7ae78aa31a", "content_id": "0694af19364faa307c6f4c40112f2b5bc7c6e41f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/513.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", 
"text": "---\nnumber: 513\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "c5acecfdfeeb9295b34cabf611502c09bf2d0332", "content_id": "197171d6f63e5d9ae1852141ff0e3196fb28b424", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/141.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 141\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "ad8e96bba55fa7dcd6748e4a8120a0dfb2f40531", "content_id": "d6613b7ac191b04482ce0f5fa0f79afcc61c5223", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/532.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 532\n---\n" }, { "alpha_fraction": 0.2857142984867096, "alphanum_fraction": 0.4761904776096344, "avg_line_length": 6, "blob_id": "7eddc85425dec21b51c8c81fb0b0ddaee0bf6c17", "content_id": "861b557d0c473343c082837c226aab842652077d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 21, "license_type": "no_license", "max_line_length": 12, "num_lines": 3, "path": "/content/trains/1193.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 1193\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "6f47f938cefa33f627298ef759d2b0b34d992947", "content_id": "d8ec2faf5cd1eab59ac049e2f11ebb74b00e7685", "detected_licenses": [], "is_generated": false, "is_vendor": 
false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/254.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 254\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "48427ad8d122746a5d4681ef6dcead78809fe9e1", "content_id": "058831dc0129d1d2e2d2315824b329c2a7ff574f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/187.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 187\n---\n" }, { "alpha_fraction": 0.2857142984867096, "alphanum_fraction": 0.4761904776096344, "avg_line_length": 6, "blob_id": "9d197be38b5c7368db505308ec8472e9240331c9", "content_id": "f2279ddcfcca1f7fea85f4557b13fd3d4a79ad64", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 21, "license_type": "no_license", "max_line_length": 12, "num_lines": 3, "path": "/content/trains/2150.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 2150\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "5f98169b9450f474f7e099c244fe1dfb240da011", "content_id": "f8174db46ebc94f67aff83630bf43dc06d316bca", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/658.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 658\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 
0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "aefa8a21abb767e07d49b94b2687d3849b4f8813", "content_id": "45379cdfeba78dd4728106ee5e667b5c05927d1d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/529.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 529\n---\n" }, { "alpha_fraction": 0.7131147384643555, "alphanum_fraction": 0.7540983557701111, "avg_line_length": 23.399999618530273, "blob_id": "f5ccfa78e6956fca572dda19271310814a4b0455", "content_id": "446ff1b437366ea5ecf6f60b5a811dae6204d89e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 122, "license_type": "no_license", "max_line_length": 48, "num_lines": 5, "path": "/content/routes/this-train-has-experienced-cancellations..md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nroute: THIS TRAIN HAS EXPERIENCED CANCELLATIONS.\nslug: this-train-has-experienced-cancellations.\ntrains: [83,133]\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "da54d33e7a505931dec72ef0280037d71f481777", "content_id": "c61197b5f68e17198a3b3f3dfbf8e3f80ec4ede7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/821.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 821\n---\n" }, { "alpha_fraction": 0.2857142984867096, "alphanum_fraction": 0.4761904776096344, "avg_line_length": 6, "blob_id": "3c1389c523852be3e3a1dcba9f912cf7cde0070f", "content_id": "0d4d79997014c605d87439f87a667a70514abc5d", "detected_licenses": 
[], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 21, "license_type": "no_license", "max_line_length": 12, "num_lines": 3, "path": "/content/trains/2128.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 2128\n---\n" }, { "alpha_fraction": 0.2857142984867096, "alphanum_fraction": 0.4761904776096344, "avg_line_length": 6, "blob_id": "230d38ec1987ca28e81aef3a0f2e6eb2e561c356", "content_id": "e11139a5e64f67d26e0763f15a0ccbb62e2b2825", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 21, "license_type": "no_license", "max_line_length": 12, "num_lines": 3, "path": "/content/trains/1065.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 1065\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "c7a0a58f7f37fc166d1e2f6faa511712f9ab3828", "content_id": "e36bc0a7f6b2d4bf9309d88110736cb63a97ac3a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/307.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 307\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "652497b03f3c4d028f299164b230d63fa0a3f3ae", "content_id": "7f8b3ebcda876610c43bc4e73aea01fc9f3e41c5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/364.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 364\n---\n" }, { "alpha_fraction": 
0.80859375, "alphanum_fraction": 0.80859375, "avg_line_length": 50.20000076293945, "blob_id": "917671e03ca24ef332ad35ae98a4afc6820d33b0", "content_id": "806d8405bdd995676316d1e9bc0e5a5e78336792", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 256, "license_type": "no_license", "max_line_length": 170, "num_lines": 5, "path": "/README.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "# Amtrak On Time\n\nReported timetables aside, when do Amtrak trains actually arrive?\n\nCode supporting a post at https://jonathanhunsucker.com/posts/amtrak-acela-on-time-probability-distribution/, and website at https://amtrak-on-time.jonathanhunsucker.com.\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "e12806eab474edc6bee2dd64ed3437128410fcd2", "content_id": "5e6efec5e21409c114e00996fd2c04cfecc5e014", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/593.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 593\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "73a0392e1c452d3885bf1f4b7b3c1a386d127b14", "content_id": "9681fda2c29070e64435ec11d9e519c5fe899a00", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/495.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 495\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, 
"blob_id": "6c9b080d1f8d6ea9b70be40ae95201b255869cb5", "content_id": "4bd7f6b1b8fe8a19cb68fce1fbd9b94df8201646", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/615.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 615\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "bdd184fb5454efa26a6341bf4f88cb312838fbee", "content_id": "ca336496c924bcf4b3b0bfea22e18db5386160c6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/281.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 281\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "a20b547c0d6c9652f6a7896deffe6d3e51280c68", "content_id": "0b83f8832cb8c4e47d447cf123731806fbb167d6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/714.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 714\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "c7378433f7369ba7b9cef118291db359c0152a5d", "content_id": "523b83a022683da9c5b697ab43a771951825a3b8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": 
"/content/trains/465.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 465\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "7fa13973212a1a1bc4588d1d1a64dc788240e005", "content_id": "a5f05f3e0432e6408b0557a1dd14993dd33f9c6b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/543.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 543\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "9f91a7c06647ded2e72954304d256be6f21e7782", "content_id": "25aa9584a00dcf4b5d90a4a9453081fe62622679", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/767.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 767\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "7faa9623ed5e1503706d24c55083cd922c27c7a0", "content_id": "056607aa0d0413e739404ccbb934f6530091b50e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/451.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 451\n---\n" }, { "alpha_fraction": 0.5636363625526428, "alphanum_fraction": 0.6363636255264282, "avg_line_length": 10, "blob_id": "fce123506e79b8ea98a37f51b9b1d467bb0cfedf", "content_id": 
"ac379a4871d5777e14731ce901076aeb30be4e7f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 55, "license_type": "no_license", "max_line_length": 15, "num_lines": 5, "path": "/content/routes/crescent.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nroute: Crescent\nslug: crescent\ntrains: [20,19]\n---\n" }, { "alpha_fraction": 0.2857142984867096, "alphanum_fraction": 0.4761904776096344, "avg_line_length": 6, "blob_id": "78fa34404b70ff1af93d016fb99ebb89bb71f7dd", "content_id": "f4309710b5c105efcb489e1810f1fe2f173ae08f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 21, "license_type": "no_license", "max_line_length": 12, "num_lines": 3, "path": "/content/trains/2108.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 2108\n---\n" }, { "alpha_fraction": 0.2857142984867096, "alphanum_fraction": 0.4761904776096344, "avg_line_length": 6, "blob_id": "a175618bcf0ba88745c0318613e2fba0f2b5dc17", "content_id": "b2777cceab45ba8ca4bb29bfb1e6c8bf594004b0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 21, "license_type": "no_license", "max_line_length": 12, "num_lines": 3, "path": "/content/trains/2208.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 2208\n---\n" }, { "alpha_fraction": 0.31578946113586426, "alphanum_fraction": 0.42105263471603394, "avg_line_length": 5.333333492279053, "blob_id": "3c8e22a88a52fa39afd76b5243c53857419572b3", "content_id": "c9b90720c537279636241415da7693b17df67eb8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 19, "license_type": "no_license", "max_line_length": 10, "num_lines": 3, "path": "/content/trains/69.md", "repo_name": "jonathanhunsucker/amtrak-on-time", 
"src_encoding": "UTF-8", "text": "---\nnumber: 69\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "afd91ae78ca622888c61479c5bf995c487de0770", "content_id": "b5a9323b87e4a1b3309b5f0767a8be5557c346e6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/137.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 137\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "b1e2cf1c545c3bd771803838d9bba7989c08e208", "content_id": "c9cafd35ebd98145f9cd68c797d1e09bd1b34616", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/703.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 703\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "ccf99eadab29e350dd6bdff62b501d54d896831d", "content_id": "1654673f17f4ac0c3e5b116022637e97942cc21e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/715.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 715\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "8c0374b0c4d35eb80b8bb8608002cf26b52dfe26", "content_id": "19454a3d8f6b0f6ca132d65aa8d5deed37608aa8", "detected_licenses": [], 
"is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/509.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 509\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "ebd5d7d68dd9af3459f53226dd2c1f4452251651", "content_id": "778df3aca0b14766c61d223e53c9ae3cf8388347", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/332.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 332\n---\n" }, { "alpha_fraction": 0.2857142984867096, "alphanum_fraction": 0.4761904776096344, "avg_line_length": 6, "blob_id": "65490cefd41ea996065d2cdbed8560a6b4a5538c", "content_id": "aa100a9957707d6c74f904b00cb0a1316bdad492", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 21, "license_type": "no_license", "max_line_length": 12, "num_lines": 3, "path": "/content/trains/2195.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 2195\n---\n" }, { "alpha_fraction": 0.5094339847564697, "alphanum_fraction": 0.6226415038108826, "avg_line_length": 9.600000381469727, "blob_id": "7cd117654f9a0125bfaa168d790cf83895c1677a", "content_id": "ae8f11debd780b7fdc150e4ff7b5cb6d04201501", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 53, "license_type": "no_license", "max_line_length": 17, "num_lines": 5, "path": "/content/routes/illini.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nroute: Illini\nslug: illini\ntrains: [392,393]\n---\n" 
}, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "2c41c398ea92c6c45e8bdc48d31d6f6d6cc5a9a1", "content_id": "c65d015587d76e1f4b2170732ff0be9d675a1643", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/158.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 158\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "2b92cec2f5410be161061b88243a878dbed97864", "content_id": "3e400a97ad70cf05b219e849ea12b2ac2b27b662", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/392.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 392\n---\n" }, { "alpha_fraction": 0.2857142984867096, "alphanum_fraction": 0.4761904776096344, "avg_line_length": 6, "blob_id": "53b65026b29a17788cea4e5324ab230290ad49a0", "content_id": "acb2d1674474c11d89885592dc434716311be4e5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 21, "license_type": "no_license", "max_line_length": 12, "num_lines": 3, "path": "/content/trains/2257.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 2257\n---\n" }, { "alpha_fraction": 0.49367088079452515, "alphanum_fraction": 0.6962025165557861, "avg_line_length": 14.800000190734863, "blob_id": "5e95b99d955382f6c4e2270c7b6129990a6727de", "content_id": "e48738fa43afe3cd287634fd874857079da875de", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", 
"length_bytes": 79, "license_type": "no_license", "max_line_length": 29, "num_lines": 5, "path": "/content/routes/holiday-extra.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nroute: Holiday Extra\nslug: holiday-extra\ntrains: [1065,1063,1054,1056]\n---\n" }, { "alpha_fraction": 0.2857142984867096, "alphanum_fraction": 0.4761904776096344, "avg_line_length": 6, "blob_id": "38f8cd097aeb6efac2c4643812c46d186544a822", "content_id": "8159169616d239a0f8f61509fe7affd967c7b591", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 21, "license_type": "no_license", "max_line_length": 12, "num_lines": 3, "path": "/content/trains/2171.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 2171\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "bf7cc025c9875219004f65df6609208182f87802", "content_id": "a458af34fbaaab3abeb9eeaba9ecf6bd2b835652", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/243.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 243\n---\n" }, { "alpha_fraction": 0.2857142984867096, "alphanum_fraction": 0.4761904776096344, "avg_line_length": 6, "blob_id": "f19ad4eda3e31f2c3d341fb23355d3511e00a94d", "content_id": "99c6da1a7b9263f173ff86600dd750b47992b223", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 21, "license_type": "no_license", "max_line_length": 12, "num_lines": 3, "path": "/content/trains/2246.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 2246\n---\n" }, { "alpha_fraction": 0.30000001192092896, 
"alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "8e5654bc9cf49fafcc6c8c855127d916ec272f79", "content_id": "20bd17704bfb476e68bfb30944848d016547c344", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/696.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 696\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "769df1dc0216bce1b9e63a06222c387264d1d96d", "content_id": "f21b7d39211d1f66219b5063c660062e666ebd5d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/339.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 339\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "ee34a188bb2c10b498352139626a6a7623776e33", "content_id": "18a9b8701831ba7b71b98c294cff4670a41edd9e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/313.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 313\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "f05cdb0d4280db9d54a9c83dae6c2a34a6d7aaeb", "content_id": "8e49a8f376f7721542bee72920d159ea16251796", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, 
"license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/521.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 521\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "66a1f8496a6441ed77b76e534ac9506ef24aeb62", "content_id": "d34036e2868e6ff5e36e8553a2792907f008c06e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/390.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 390\n---\n" }, { "alpha_fraction": 0.2857142984867096, "alphanum_fraction": 0.4761904776096344, "avg_line_length": 6, "blob_id": "2bc233d5fb8b03cb30ef557c63743f355639059c", "content_id": "da0398b0a6c0cfb11d2ddc031b3d57c495f31efa", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 21, "license_type": "no_license", "max_line_length": 12, "num_lines": 3, "path": "/content/trains/1565.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 1565\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "8ab57ff969531a6fa693aaad88aafc8a217a7990", "content_id": "eea3490245c8f25bb379119c28b512568ca7d2e7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/155.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 155\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, 
"blob_id": "d0525f162aece5110c867f7451596c17863dfcfb", "content_id": "da66730dc217240297be04dd5277933fef1250c0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/531.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 531\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "60252650ea4b9c4e4b0dc39054bfbb8ab9481a66", "content_id": "94927cd23a260ab779bd530efd4e22fcd03d9079", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/380.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 380\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "86cf4b137ce6b010acb57d0acfbc760b63aeee29", "content_id": "cdc5fbbd5babd8dd5a7c160ac23a59c44ed4a7aa", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/471.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 471\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "b4feb982733211ca8ab81a4dbf6e8f82a2966485", "content_id": "4d112c64d10aa63f38f510ad95a6300464948499", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": 
"/content/trains/661.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 661\n---\n" }, { "alpha_fraction": 0.577464759349823, "alphanum_fraction": 0.6619718074798584, "avg_line_length": 13.199999809265137, "blob_id": "175bc44a8d7234bb534e4355220575eb7a5be6cc", "content_id": "490dafa6a02ae2aa73b5f9b3aea5df93defc4b3a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 71, "license_type": "no_license", "max_line_length": 21, "num_lines": 5, "path": "/content/routes/empire-builder.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nroute: Empire Builder\nslug: empire-builder\ntrains: [7,27,28,8]\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "faffa643ca4c3977a932cc0004da5b45f7050fa5", "content_id": "70f11ef4d34d6f7f722c530e975681e8b905b73e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/547.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 547\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "c8d070aa0f3e1698cd7bdd95a444ee1ecc9fccf3", "content_id": "42740815fa89fe050369135c28a8b5d680815b91", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/253.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 253\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, 
"blob_id": "36e2442d2523a7a4677ca95ec861ca38289d371f", "content_id": "e9a68de91db871157785b712e04276763e204c9c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/528.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 528\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "c819409c8fba1a637ee815698075e1016dc3e5f4", "content_id": "2db749624c38192ce9c72b0c99444775d89c9712", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/166.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 166\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "c3e5ce9694c9b6651e4ddd526d69d8bcaf0846e4", "content_id": "186e34066261ca32046b48ff613b46ec4adccb59", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/692.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 692\n---\n" }, { "alpha_fraction": 0.2857142984867096, "alphanum_fraction": 0.4761904776096344, "avg_line_length": 6, "blob_id": "3bafb743d539a3cf89e9bc1ef7821fea98f81086", "content_id": "e3c083d6ec1ccf3a047e6f799ec71cb8aa2734d9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 21, "license_type": "no_license", "max_line_length": 12, "num_lines": 3, "path": "/content/trains/2193.md", 
"repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 2193\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "e282cebb8cf23a70be96dc5c9757375c780044ad", "content_id": "6f8c6c76bdfe94af458d7128a2e7a795f6b8c019", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/745.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 745\n---\n" }, { "alpha_fraction": 0.2857142984867096, "alphanum_fraction": 0.4761904776096344, "avg_line_length": 6, "blob_id": "33bc8a8a8da638d822da7ce70d915139ca292a6b", "content_id": "80e91dafff3aa621a41f7ad87aa954883a55f0f1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 21, "license_type": "no_license", "max_line_length": 12, "num_lines": 3, "path": "/content/trains/1568.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 1568\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "34fb2b3d665c98704cca637443579b8f80754aca", "content_id": "b91618c57bd035a973c45f2a2ac9c166676f3eac", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/145.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 145\n---\n" }, { "alpha_fraction": 0.2857142984867096, "alphanum_fraction": 0.4761904776096344, "avg_line_length": 6, "blob_id": "67c0dc6b6c0327918bae9750d113cf09634cc598", "content_id": "23d045de35e379d3d69299d649c09cab14cb22e8", 
"detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 21, "license_type": "no_license", "max_line_length": 12, "num_lines": 3, "path": "/content/trains/1182.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 1182\n---\n" }, { "alpha_fraction": 0.2857142984867096, "alphanum_fraction": 0.4761904776096344, "avg_line_length": 6, "blob_id": "6d550ed9f227b46ce0c2b62dde77a69e7366bb52", "content_id": "be53a60e76e03ff63c869535c32659f71984191e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 21, "license_type": "no_license", "max_line_length": 12, "num_lines": 3, "path": "/content/trains/2170.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 2170\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "0475ab419f73ad7257e1cd32ab22aade8e245499", "content_id": "05bbcaf9a5a7daac4940b426e78cca4132158453", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/522.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 522\n---\n" }, { "alpha_fraction": 0.2857142984867096, "alphanum_fraction": 0.4761904776096344, "avg_line_length": 6, "blob_id": "cf840352d0e70aaea7ed1ceb8f7fce96a5da4d27", "content_id": "87831deb8a0efbc4a830f148e34bd38fe2a3cfa3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 21, "license_type": "no_license", "max_line_length": 12, "num_lines": 3, "path": "/content/trains/1761.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 1761\n---\n" }, { "alpha_fraction": 
0.5632184147834778, "alphanum_fraction": 0.7011494040489197, "avg_line_length": 16.399999618530273, "blob_id": "d74998ce9c7ced60498824f66e5c6408485b9100", "content_id": "6b4237c39fcfcf88926dadfc36da57509e6aad99", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 87, "license_type": "no_license", "max_line_length": 26, "num_lines": 5, "path": "/content/routes/ethan-allen-express.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nroute: Ethan Allen Express\nslug: ethan-allen-express\ntrains: [292,293,295,296]\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "aa556020b3a7addf5f9f40c1792e2a7183c7de51", "content_id": "1d732a3dcd3b0decd45a167a923915f18e6301f9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/167.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 167\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "e46b6eba4e637ee96613f9224cd87d9f3aef6d5c", "content_id": "53594ea8de3a62bc0b17f9ec1381dbc5d6a72a33", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/138.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 138\n---\n" }, { "alpha_fraction": 0.2857142984867096, "alphanum_fraction": 0.4761904776096344, "avg_line_length": 6, "blob_id": "8d958beab1e0a34c08c70c507e1111c22a024a3f", "content_id": "80573bdc6d53b096a10deb3389ae08e9d871b287", "detected_licenses": [], 
"is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 21, "license_type": "no_license", "max_line_length": 12, "num_lines": 3, "path": "/content/trains/2249.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 2249\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "5756c02dfc3f8d097461592b0ca5b3c9be6158e1", "content_id": "0284e833fe6e7f745dd901f14786818abc9013ba", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/759.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 759\n---\n" }, { "alpha_fraction": 0.2857142984867096, "alphanum_fraction": 0.4761904776096344, "avg_line_length": 6, "blob_id": "56271297fba6883541a4acd1623041e0a6608e66", "content_id": "5a01d736a32918b405b0e72b1c21fed4f28334e0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 21, "license_type": "no_license", "max_line_length": 12, "num_lines": 3, "path": "/content/trains/2253.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 2253\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "e71da30b04e6e9e647088aa05faae2e0dd0179b1", "content_id": "0685cb12ebff917590726dff9e0500a4a3432c8f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/746.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 746\n---\n" }, { "alpha_fraction": 
0.31578946113586426, "alphanum_fraction": 0.42105263471603394, "avg_line_length": 5.333333492279053, "blob_id": "a39c0aff41b6abeee67ef38605b60c9f4ab1ec55", "content_id": "66cca1c59004f862e73364db75da0a118e788ed4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 19, "license_type": "no_license", "max_line_length": 10, "num_lines": 3, "path": "/content/trains/19.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 19\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "c41bf43b0e76578c4dd85ae39abad7bb1a9214a9", "content_id": "ba9ee2af37cfb06bf6ae7d109c06f1d0647001a7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/665.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 665\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "25efc6f8183febaaafd4a654f4f7c5155077418c", "content_id": "267026836e313b613136ae55abf6841e32603c0e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/643.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 643\n---\n" }, { "alpha_fraction": 0.2857142984867096, "alphanum_fraction": 0.4761904776096344, "avg_line_length": 6, "blob_id": "f3fe0265f84da861c7b56a751f9355e636ed8cbc", "content_id": "7b184ce7a163bcce430a0e1e27760c2aa2484c5a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 21, 
"license_type": "no_license", "max_line_length": 12, "num_lines": 3, "path": "/content/trains/3568.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 3568\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "c475ba8acb840379bc9735828f154cd91ff27d1f", "content_id": "2c78861ab2f520c6ed74d24588644d31031360a0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/530.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 530\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "b12b29043e8e5b8089c64f0f6582e26659eda174", "content_id": "803f74aa6763a705d6dd80c7e08d263e1a55e6f3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/127.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 127\n---\n" }, { "alpha_fraction": 0.2857142984867096, "alphanum_fraction": 0.4761904776096344, "avg_line_length": 6, "blob_id": "f566dd3505497965f5dba5c46946ae80cd3fb245", "content_id": "1695dbcfeb6691c2ac035a567b049522c7680a2c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 21, "license_type": "no_license", "max_line_length": 12, "num_lines": 3, "path": "/content/trains/2258.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 2258\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 
5.666666507720947, "blob_id": "aa2d2914888aef6c5dc01637ff76b6f85e4bda5c", "content_id": "27f4a6d270c39bc0d0deba29437d4bcc10593015", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/579.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 579\n---\n" }, { "alpha_fraction": 0.2857142984867096, "alphanum_fraction": 0.4761904776096344, "avg_line_length": 6, "blob_id": "e13cb37e001d6983bf63c5e53c8b9a1239d44fe3", "content_id": "6f1e55592927ce92839f44eadb172261d216da0f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 21, "license_type": "no_license", "max_line_length": 12, "num_lines": 3, "path": "/content/trains/1573.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 1573\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "423eba4e63a951f4fbd1d53b26f7da2e7a814fbe", "content_id": "302f4727ca9d2cf157cc8e94ef09d0a5f994073d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/653.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 653\n---\n" }, { "alpha_fraction": 0.31578946113586426, "alphanum_fraction": 0.42105263471603394, "avg_line_length": 5.333333492279053, "blob_id": "7c21d7c9d02548059bb62c27a75cd3945d7ba574", "content_id": "2483a154920c2c96031289f4e2f8ddcadfb5e902", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 19, "license_type": "no_license", "max_line_length": 10, "num_lines": 3, "path": 
"/content/trains/58.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 58\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "015071750bd21f5efcec2dc1c02aedca35879d6a", "content_id": "a6cd5d759d4498500c47d2397c29bafd205135e2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/334.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 334\n---\n" }, { "alpha_fraction": 0.31578946113586426, "alphanum_fraction": 0.42105263471603394, "avg_line_length": 5.333333492279053, "blob_id": "e58057446f7f10c1f7b819a3cd420995ee552fe2", "content_id": "873209bf54793d8196d358b7019d9b53a4586392", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 19, "license_type": "no_license", "max_line_length": 10, "num_lines": 3, "path": "/content/trains/30.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 30\n---\n" }, { "alpha_fraction": 0.2857142984867096, "alphanum_fraction": 0.4761904776096344, "avg_line_length": 6, "blob_id": "fc871bc2613c483aa95bc7e69071b4ee3ba016af", "content_id": "c604ebf5385508f4df4cc93fadbc93b1f627d0f2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 21, "license_type": "no_license", "max_line_length": 12, "num_lines": 3, "path": "/content/trains/2262.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 2262\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "ffcaf1a1c21c3d2135f6eb8cc4517b7920675f69", "content_id": 
"30c3c58036353ef96a2e8093103a2346b57faddf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/301.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 301\n---\n" }, { "alpha_fraction": 0.2857142984867096, "alphanum_fraction": 0.4761904776096344, "avg_line_length": 6, "blob_id": "76cca31ad7aa5687edab931e44f2cc1ba7b6eb6f", "content_id": "8b6d602d2fcbe7a0fc84c086747a743f35972672", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 21, "license_type": "no_license", "max_line_length": 12, "num_lines": 3, "path": "/content/trains/2151.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 2151\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "0c9fa6e75705740f01b70ad985f3da6c349f3167", "content_id": "201868bf518c261f0c472029b844a8a484b5dfb8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/164.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 164\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "977ecb7df82ae24ca522e68bd5067e86741e9903", "content_id": "d56408b58f5aea2c265144c34ed27bf2f5adb017", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/850.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", 
"text": "---\nnumber: 850\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "bbbf53170749cb61f4b8e7bbc1eb19cfeee9bed3", "content_id": "9c5b2d140e631187d0ff2c9cce8857214781eb00", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/470.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 470\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "4818de4df268b58e25746baba6a5aabd976ca76c", "content_id": "6797f07d8c3ee0bedc087f73f3a1c901620ac373", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/238.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 238\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "678fa1a436ab4c90af81c04ba0c2267fa8521f05", "content_id": "36b9c4cdae3ae94533f9e4f0260e29f302da0205", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/768.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 768\n---\n" }, { "alpha_fraction": 0.30693069100379944, "alphanum_fraction": 0.6930692791938782, "avg_line_length": 19.200000762939453, "blob_id": "a857692fa60c1c3148c06503ec14cae802351d62", "content_id": "fc2146dbb56ff34adb832e3e6edde8e8fe6dcb9b", "detected_licenses": [], "is_generated": false, 
"is_vendor": false, "language": "Markdown", "length_bytes": 101, "license_type": "no_license", "max_line_length": 61, "num_lines": 5, "path": "/content/routes/hiawatha.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nroute: Hiawatha\nslug: hiawatha\ntrains: [338,343,334,335,342,339,333,330,331,341,336,337,340]\n---\n" }, { "alpha_fraction": 0.2857142984867096, "alphanum_fraction": 0.4761904776096344, "avg_line_length": 6, "blob_id": "fb74f4909be3ab33df44accd5f1b6678d25aaf98", "content_id": "8308f1c791e0f35c5f3c8bb07d2ed6776b8f0269", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 21, "license_type": "no_license", "max_line_length": 12, "num_lines": 3, "path": "/content/trains/2267.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 2267\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "33d4cfd81d410e38c33e75ae5183c1dda27e1300", "content_id": "e06290ddda2499a648ca2d1c2e65d4f4434dc3f2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/125.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 125\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "02dd9f87be86cf072d2e9ce00a24d71a9b5ccd53", "content_id": "02e4a2511c630292b54f0c7fc5c6b525eeb44b1f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/748.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", 
"text": "---\nnumber: 748\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "b4462ef24d8f10309fff0bf60222127cc9f8c130", "content_id": "9322c669475db7dc0b816c567fc8eecaadeab034", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/136.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 136\n---\n" }, { "alpha_fraction": 0.2857142984867096, "alphanum_fraction": 0.4761904776096344, "avg_line_length": 6, "blob_id": "0933e535c48d2e196874ed80d0d2b521dc9bbfa0", "content_id": "57da3d17ff9520c18f59fff265147a69942fd621", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 21, "license_type": "no_license", "max_line_length": 12, "num_lines": 3, "path": "/content/trains/2235.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 2235\n---\n" }, { "alpha_fraction": 0.2857142984867096, "alphanum_fraction": 0.4761904776096344, "avg_line_length": 6, "blob_id": "7d7d3b780a087a7bdfd7fc86e791327ba6e13ced", "content_id": "f53efaa3d3930c2cfe4062aba1e2a5be45d011a0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 21, "license_type": "no_license", "max_line_length": 12, "num_lines": 3, "path": "/content/trains/1550.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 1550\n---\n" }, { "alpha_fraction": 0.2857142984867096, "alphanum_fraction": 0.4761904776096344, "avg_line_length": 6, "blob_id": "865d6f92d7b40dd93897877f4ce54383cfe326f5", "content_id": "0f607156fefe600fe740cfd84ae617ac0b5956da", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", 
"length_bytes": 21, "license_type": "no_license", "max_line_length": 12, "num_lines": 3, "path": "/content/trains/2103.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 2103\n---\n" }, { "alpha_fraction": 0.2857142984867096, "alphanum_fraction": 0.4761904776096344, "avg_line_length": 6, "blob_id": "4e02c2abf1cdf874adf70e3289f31deeb90a8c28", "content_id": "2a1dfcef3acea925b002ee54363cbc6aef0645d0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 21, "license_type": "no_license", "max_line_length": 12, "num_lines": 3, "path": "/content/trains/2297.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 2297\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "4a56873441d8b7c99b683c062235e8d07040ffa8", "content_id": "0edb7c289b342a50ec3ce6f1911296ed6993ab7f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/460.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 460\n---\n" }, { "alpha_fraction": 0.2857142984867096, "alphanum_fraction": 0.4761904776096344, "avg_line_length": 6, "blob_id": "c199e403d6ebb5b5354d60d21046fe7ecb7c37d0", "content_id": "0ce0dd440ea6815d1fc64ba0cac80e7b53cc093c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 21, "license_type": "no_license", "max_line_length": 12, "num_lines": 3, "path": "/content/trains/2252.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 2252\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 
5.666666507720947, "blob_id": "e5ea6ff1d25a70ebb1a19032e5a5aa4191b30d23", "content_id": "7826e920b9c308fd7432f1a8a111c08bd2964dec", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/331.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 331\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "52913e9879039ff2dea79544302281d7ec044873", "content_id": "643a4865873ec9f9b3b141082516a4e0efb96194", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/343.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 343\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "337833212b613c2096874399064ed21a53ef012f", "content_id": "d22fb48ae49a68daadc517a46f3cf839f059dd23", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/150.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 150\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "dcd6728395dba141a84e601805adbd9255044d59", "content_id": "91ac271e3ef5ea80e591797a286bf075412bc6b1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, 
"path": "/content/trains/473.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 473\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "6b22f41f51ae1ac6fe5ca89de81369cbf17735d5", "content_id": "2d3a238127bce8f1ef927717ec0634f448237d8b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/479.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 479\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "059c3c4529ecd0c87a1ecb335b94a7f7b93e907a", "content_id": "2932651a143c97cb6d67a9ebbefb20804d57b2c5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/605.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 605\n---\n" }, { "alpha_fraction": 0.31578946113586426, "alphanum_fraction": 0.42105263471603394, "avg_line_length": 5.333333492279053, "blob_id": "35b22b2c31437a70278241528780c89c09099e4f", "content_id": "ad23c8a2ced532ae9614f01dca2563e1c4ee7b4f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 19, "license_type": "no_license", "max_line_length": 10, "num_lines": 3, "path": "/content/trains/78.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 78\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "7ea33763c5c20308b61081dc415b0a3b9927e4f6", 
"content_id": "f17c29ab4b0de8f07bc704cc83f03e32720fea68", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/252.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 252\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "c30723697989dd775249f29c516ea7ff357b3f71", "content_id": "7e766cda7d334e41fe556e0a079692bcbe42932d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/718.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 718\n---\n" }, { "alpha_fraction": 0.2857142984867096, "alphanum_fraction": 0.4761904776096344, "avg_line_length": 6, "blob_id": "077282471741c4a68672564e95829e600874b5f8", "content_id": "5a3947f8a072ff3dbef1edbac8fc84c8080824d0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 21, "license_type": "no_license", "max_line_length": 12, "num_lines": 3, "path": "/content/trains/2122.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 2122\n---\n" }, { "alpha_fraction": 0.31578946113586426, "alphanum_fraction": 0.42105263471603394, "avg_line_length": 5.333333492279053, "blob_id": "bb28794ec5b638bd9f398b80091fef5060fd684e", "content_id": "77598955facbb5e8a4a4624dfcc7907a6fd9af69", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 19, "license_type": "no_license", "max_line_length": 10, "num_lines": 3, "path": "/content/trains/71.md", "repo_name": "jonathanhunsucker/amtrak-on-time", 
"src_encoding": "UTF-8", "text": "---\nnumber: 71\n---\n" }, { "alpha_fraction": 0.48924732208251953, "alphanum_fraction": 0.7473118305206299, "avg_line_length": 36.20000076293945, "blob_id": "f06bcf9a92f973e20db2592320578386729d326a", "content_id": "9d25f802891a000dcaf1348d5640d13821b8b42d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 186, "license_type": "no_license", "max_line_length": 74, "num_lines": 5, "path": "/content/routes/this-train-experienced-a-service-disruption..md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nroute: THIS TRAIN EXPERIENCED A SERVICE DISRUPTION.\nslug: this-train-experienced-a-service-disruption.\ntrains: [355,821,354,236,332,467,353,329,1,50,127,170,242,235,290,822,291]\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "26d23209f1d16bedbb55e825e1ef91b64a1082fa", "content_id": "ee533b19e081bb6c31033e3b3aa480165948e2ad", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/351.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 351\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "6ec2aaaaae9f2e794651e4a472acf08da6958722", "content_id": "5d66685a4f51852d65435e8476fa4ff9c76c6f70", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/330.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 330\n---\n" }, { "alpha_fraction": 0.30000001192092896, 
"alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "820f3e1c402c685d419945bb3506664d2fa05559", "content_id": "bbe162330acf6c9ad8e3c5dc13cdd370734e6b5a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/378.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 378\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "6690bc7a8854e89c80e2932ec402c11173095aa6", "content_id": "4af9a12fbddac1a68aebe3433558a64d7b481f9f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/292.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 292\n---\n" }, { "alpha_fraction": 0.6385542154312134, "alphanum_fraction": 0.7108433842658997, "avg_line_length": 15.600000381469727, "blob_id": "00fc99b7d441e3898bd18cbc7e289b787762c8dc", "content_id": "1555f9cc05f9a947bbfd69b6c270d6c1586a4406", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 83, "license_type": "no_license", "max_line_length": 28, "num_lines": 5, "path": "/content/routes/lincoln-service-extra.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nroute: Lincoln Service Extra\nslug: lincoln-service-extra\ntrains: [308,309]\n---\n" }, { "alpha_fraction": 0.31578946113586426, "alphanum_fraction": 0.42105263471603394, "avg_line_length": 5.333333492279053, "blob_id": "4091877f36cc63f3a600623726f519173446979a", "content_id": "3a1582d375db1f0c9ea0ffa97637bb874172d009", "detected_licenses": [], "is_generated": 
false, "is_vendor": false, "language": "Markdown", "length_bytes": 19, "license_type": "no_license", "max_line_length": 10, "num_lines": 3, "path": "/content/trains/94.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 94\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "3d6bdd8a3d90a827e5de1c6c4a37ac00dd61e0f7", "content_id": "4d08577ca59717762d6b7bb627dc65e1e52b19b0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/123.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 123\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "1bd58ba7a13d0e9b809b278bae5f5c3e88e2fffe", "content_id": "861870530f9afc7ffe61f636d52b1a72b59bcca9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/160.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 160\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "359d0a7e8e7c94ef2323f40a2962b736a587ee57", "content_id": "2aabda607f7e9136780caff6b0b3c1e32d8d34d5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/494.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 494\n---\n" }, { "alpha_fraction": 
0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "90a5519077b277ae8f1f355ce2d5ed324ecab72e", "content_id": "d8df0f0c7485fa4e11275dddb34b2dc9d6fea856", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/712.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 712\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "f871d7d62e42de5547877da9636d6b07cba83740", "content_id": "12745b4010dff837f2cf77ec4962507ea80233b1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/645.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 645\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "6acc95f53d098c1260ae76bf040d4c4e97e1268d", "content_id": "9a91fb29cd6b8e090d13f7e77a54003b1fcb0f93", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/493.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 493\n---\n" }, { "alpha_fraction": 0.4343434274196625, "alphanum_fraction": 0.7070707082748413, "avg_line_length": 18.799999237060547, "blob_id": "5dc52b70d4888ba53c29c443db8096b440034a59", "content_id": "bf2d91995795b07e436aaeaf0c899b68dab2cb24", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", 
"length_bytes": 99, "license_type": "no_license", "max_line_length": 45, "num_lines": 5, "path": "/content/routes/lincoln-service.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nroute: Lincoln Service\nslug: lincoln-service\ntrains: [302,303,304,305,942,306,307,300,301]\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "0f8f78504ede0f580362f76aed251d7f0e7fbf13", "content_id": "23c97590f1f688639dedd6a5e8d429b7f5a4720c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/182.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 182\n---\n" }, { "alpha_fraction": 0.2857142984867096, "alphanum_fraction": 0.4761904776096344, "avg_line_length": 6, "blob_id": "4b594c42daae4b6f3304dd6c2f68420b98ec4e9d", "content_id": "978f0e0a6cc27f7b1f329d192daed392cb780978", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 21, "license_type": "no_license", "max_line_length": 12, "num_lines": 3, "path": "/content/trains/1564.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 1564\n---\n" }, { "alpha_fraction": 0.5932203531265259, "alphanum_fraction": 0.6610169410705566, "avg_line_length": 10.800000190734863, "blob_id": "059465caeee3602c6d3218b82beefffe597b8731", "content_id": "ccc3a204a181cd0a8eb5b1660b15b12a62b5f982", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 59, "license_type": "no_license", "max_line_length": 17, "num_lines": 5, "path": "/content/routes/adirondack.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nroute: Adirondack\nslug: 
adirondack\ntrains: [69,68]\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "f6683013ef8992687648570b1dcad39a87339714", "content_id": "1c8950669063a95a3b6cad6895e68d15a898e27b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/785.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 785\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "7ff52f00d0fd01aebf07e999eb7d442c52ee64f8", "content_id": "5a13cab4d756d3dc233a1125023e31b6b22610a2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/778.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 778\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "e53745b37be6e7bf7c42cb5dcbc6f639ceaa9a7d", "content_id": "08f23e8e20dd0d53a3be2a59ecd2a015ee6f6e7a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/515.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 515\n---\n" }, { "alpha_fraction": 0.2857142984867096, "alphanum_fraction": 0.4761904776096344, "avg_line_length": 6, "blob_id": "cded39fcce091bb4da057b745a5218b1282ced81", "content_id": "339f983298e0bace869e1edff0a72b5097becb5c", "detected_licenses": [], "is_generated": false, "is_vendor": 
false, "language": "Markdown", "length_bytes": 21, "license_type": "no_license", "max_line_length": 12, "num_lines": 3, "path": "/content/trains/3577.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 3577\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "5d562e4f47f8c3f3b2d37ae3a4aeb5855e2c9536", "content_id": "97457fde5ffe3ed0cafb3ff007695b89955fb3b5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/474.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 474\n---\n" }, { "alpha_fraction": 0.2857142984867096, "alphanum_fraction": 0.4761904776096344, "avg_line_length": 6, "blob_id": "97df60fcecc12d3f307455fd194323f4e97c4efe", "content_id": "d90293a1c00d01bdcd54e8d002f945bc6a929083", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 21, "license_type": "no_license", "max_line_length": 12, "num_lines": 3, "path": "/content/trains/2163.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 2163\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "2dcecaab855dea959c1b7773393bc74d24e12f16", "content_id": "91e7c1957e70b4af184ef632c7f4820488c8e104", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/566.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 566\n---\n" }, { "alpha_fraction": 0.2857142984867096, "alphanum_fraction": 
0.4761904776096344, "avg_line_length": 6, "blob_id": "5f058971a4a5acebbd88b4d1d35995bde12d063c", "content_id": "ab333925d47c42f67581401e06455ef058469938", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 21, "license_type": "no_license", "max_line_length": 12, "num_lines": 3, "path": "/content/trains/1006.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 1006\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "9596c02ab9e9655daac61fa36dc05836339a951f", "content_id": "a1b535feecb2c448e98ee148ddaec5d2012fb40a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/196.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 196\n---\n" }, { "alpha_fraction": 0.3093525171279907, "alphanum_fraction": 0.7194244861602783, "avg_line_length": 26.799999237060547, "blob_id": "fccfaf79f0a7464d0b0007bb607e7ae5ec688c66", "content_id": "ed9e5fdf19a1aca6a3ebde6bb07fa07d8509a98d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 139, "license_type": "no_license", "max_line_length": 85, "num_lines": 5, "path": "/content/routes/amtrak-cascades.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nroute: Amtrak Cascades\nslug: amtrak-cascades\ntrains: [504,508,509,505,513,518,514,502,503,515,519,500,516,517,501,506,510,511,507]\n---\n" }, { "alpha_fraction": 0.31578946113586426, "alphanum_fraction": 0.42105263471603394, "avg_line_length": 5.333333492279053, "blob_id": "4be01b37cab337222a69c2d1d0a82f5bd8c840b2", "content_id": "ea5588f5d452aefc3d7f88d65ecc1f622ae64d0b", "detected_licenses": [], 
"is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 19, "license_type": "no_license", "max_line_length": 10, "num_lines": 3, "path": "/content/trains/54.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 54\n---\n" }, { "alpha_fraction": 0.2857142984867096, "alphanum_fraction": 0.4761904776096344, "avg_line_length": 6, "blob_id": "14ed0b4889c855b211e3e80f7527af93cab178c9", "content_id": "90be52368c22144605f43c1771a68ea8f11c3960", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 21, "license_type": "no_license", "max_line_length": 12, "num_lines": 3, "path": "/content/trains/2121.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 2121\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "2a2b571b9b5b5030bd7c01f857c36ab67d1dc453", "content_id": "68b860d5c28cfd206da02a2229fe48c7ebf85e55", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/670.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 670\n---\n" }, { "alpha_fraction": 0.2857142984867096, "alphanum_fraction": 0.4761904776096344, "avg_line_length": 6, "blob_id": "3d2f48282b59c7b61f86f4b3a335ae4e7c143ae3", "content_id": "124efd623df51a7d372c7d2e2bd1b5764e89e3de", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 21, "license_type": "no_license", "max_line_length": 12, "num_lines": 3, "path": "/content/trains/1171.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 1171\n---\n" }, { "alpha_fraction": 0.2857142984867096, 
"alphanum_fraction": 0.4761904776096344, "avg_line_length": 6, "blob_id": "64d19113bdf8481308f6d34e46a7dfc26d8b08e1", "content_id": "e5033c746594d9f23c3d234101e53aaf17f78a17", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 21, "license_type": "no_license", "max_line_length": 12, "num_lines": 3, "path": "/content/trains/2228.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 2228\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "830118c141b89de747cb4c8a202eb2b6582deff9", "content_id": "d35dd64ce210e02c82a2230a4f8338268be4b362", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/719.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 719\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "bf51ec583c2b3d84504969a9f4557373d568ece1", "content_id": "d98f2c9a5958893f63787c7a0ec4511f3d342b82", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/744.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 744\n---\n" }, { "alpha_fraction": 0.2857142984867096, "alphanum_fraction": 0.4761904776096344, "avg_line_length": 6, "blob_id": "020aa2517fe3ff396807421e8c171540e5d7e736", "content_id": "f82d262200b5e592d642b2519adfdd64e138f0d5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 21, "license_type": "no_license", 
"max_line_length": 12, "num_lines": 3, "path": "/content/trains/2104.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 2104\n---\n" }, { "alpha_fraction": 0.2857142984867096, "alphanum_fraction": 0.4761904776096344, "avg_line_length": 6, "blob_id": "6892156e9e7ecf4324694fde208da21685abe0f6", "content_id": "12a6b1542e07259a689055fbae243e54e9207686", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 21, "license_type": "no_license", "max_line_length": 12, "num_lines": 3, "path": "/content/trains/2255.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 2255\n---\n" }, { "alpha_fraction": 0.31578946113586426, "alphanum_fraction": 0.42105263471603394, "avg_line_length": 5.333333492279053, "blob_id": "475a94def9da9944a1deb2319a71e132fae85854", "content_id": "f3feacf5911eca4a5bd15e067b755b8ae91b7591", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 19, "license_type": "no_license", "max_line_length": 10, "num_lines": 3, "path": "/content/trains/84.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 84\n---\n" }, { "alpha_fraction": 0.2857142984867096, "alphanum_fraction": 0.4761904776096344, "avg_line_length": 6, "blob_id": "a25e5bef220f59749c7e3fbc66c3a7a5b125f4e4", "content_id": "33bce9fc26f634848d8761cd398b202a3644186a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 21, "license_type": "no_license", "max_line_length": 12, "num_lines": 3, "path": "/content/trains/1701.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 1701\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": 
"09495d08a6e11ecb9a0eddf76a00bc3465a44c6d", "content_id": "2b12e68cb89537be110f457cda74e035fc9357d4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/311.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 311\n---\n" }, { "alpha_fraction": 0.31578946113586426, "alphanum_fraction": 0.42105263471603394, "avg_line_length": 5.333333492279053, "blob_id": "3b327de8d467667f102182208f0a1e5347b3641d", "content_id": "3eba4cee51f7f4a68a2071f7ca69ba48b6ea945d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 19, "license_type": "no_license", "max_line_length": 10, "num_lines": 3, "path": "/content/trains/82.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 82\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "0f6677c8c9715350f381f07bbddb85a665b59990", "content_id": "293e0ffee3bd4d9454d69d6ab5fbd331ae76479a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/704.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 704\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "db4879915a38e73f07592536170421b6c724d3cf", "content_id": "041b878dc9a64bea4d86ee452b0b40c79a26ddde", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/546.md", 
"repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 546\n---\n" }, { "alpha_fraction": 0.2857142984867096, "alphanum_fraction": 0.4761904776096344, "avg_line_length": 6, "blob_id": "2b206e76e2407194585ce00d545887a21f8cae45", "content_id": "6f4653bb997d9715d86ae2cdb33123f7a098c5a0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 21, "license_type": "no_license", "max_line_length": 12, "num_lines": 3, "path": "/content/trains/2245.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 2245\n---\n" }, { "alpha_fraction": 0.2857142984867096, "alphanum_fraction": 0.4761904776096344, "avg_line_length": 6, "blob_id": "c9f3a935c8d13359f38a655c6a316e50a12972be", "content_id": "ccf1f36180e92a423666df0517b69eda8f125760", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 21, "license_type": "no_license", "max_line_length": 12, "num_lines": 3, "path": "/content/trains/1164.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 1164\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "1023a25bd3465eb153d5b4e75813dae7e4e1b5d9", "content_id": "b6e957f0c91494a101b76bbb7db39a052905e305", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/569.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 569\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "8612e328aae6a2ea5bf5617ccb9c3923016b5e4c", "content_id": "ce7ba66ddca3a9477e30ef9ba7447fcd1b34c2eb", 
"detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/239.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 239\n---\n" }, { "alpha_fraction": 0.2857142984867096, "alphanum_fraction": 0.4761904776096344, "avg_line_length": 6, "blob_id": "d965a938d82d32aeee0955f5b07e05f00d3714d9", "content_id": "95d9fcdae6bf453527fd497dd5dbebe32b0a81fa", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 21, "license_type": "no_license", "max_line_length": 12, "num_lines": 3, "path": "/content/trains/1167.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 1167\n---\n" }, { "alpha_fraction": 0.5358361601829529, "alphanum_fraction": 0.5460751056671143, "avg_line_length": 47.83333206176758, "blob_id": "d64bded5577b5e12d1ecc1314dd87a10f923fe15", "content_id": "21fcbfd7c36c618145cc2ef7c5919041678c71df", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "HTML", "length_bytes": 293, "license_type": "no_license", "max_line_length": 135, "num_lines": 6, "path": "/layouts/partials/time/relative.html", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "{{ $minutes := int . 
}}\n{{ $positive := ge $minutes 0 }}\n{{ $zero := eq $minutes 0 }}\n{{ $abs := cond $positive ($minutes) (mul $minutes -1) }}\n{{ $label := cond $positive ( cond $zero ( printf \"on time\" ) ( printf \"%d minutes late\" $abs ) ) ( printf \"%d minutes early\" $abs ) }}\n{{ $label }}\n" }, { "alpha_fraction": 0.2857142984867096, "alphanum_fraction": 0.4761904776096344, "avg_line_length": 6, "blob_id": "3d3f2b4d57e9d358a0c4f5188b0ff5acc6066edf", "content_id": "54fd5ed479a7036515541d5f2f3dbe6d221a44c8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 21, "license_type": "no_license", "max_line_length": 12, "num_lines": 3, "path": "/content/trains/1195.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 1195\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "a0b4b24d54e48cee2e64c7038e0deb2abe01d53e", "content_id": "bce23164acbfc7cb46150500574301572ee0fc27", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/263.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 263\n---\n" }, { "alpha_fraction": 0.2857142984867096, "alphanum_fraction": 0.4761904776096344, "avg_line_length": 6, "blob_id": "560cefd2d3d220885e6f9899dfd587f8016c400b", "content_id": "3c1cf72327ae5433df65cf4ae9d3f4a0814e7e93", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 21, "license_type": "no_license", "max_line_length": 12, "num_lines": 3, "path": "/content/trains/2174.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 2174\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 
0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "723cbe87b6e24ca50e91048e3c457d57f6b0a5bd", "content_id": "eb60db256c5e280ce3afac2f4e2f2772fa59f20c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/717.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 717\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "5b75dd025e72989ddb534b95b73f0a9bf4ffda6c", "content_id": "c9dd626569befb0c2bbf9379ad93375da2591aab", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/149.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 149\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "489bd40a188d5b672d1ad0a04438d4d485944761", "content_id": "cf8d4840452c5d558ad1607303e3948774e1e18d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/165.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 165\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "115e61e528ac03013596118d57381304bf78e9e2", "content_id": "a8b42599d226256211a848da39c5abf8ca6c80d5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", 
"max_line_length": 11, "num_lines": 3, "path": "/content/trains/169.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 169\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "609130549e5320825cfe88e7eb0a771b4a620563", "content_id": "57e820c731ac6b0e4ec594a9bf4c4a509157c97c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/232.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 232\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "62c5051c2894ec13ac62dddd111faf165fabdfd4", "content_id": "a4f216f66e460113302db22f4e4db13f1ec6aef6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/542.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 542\n---\n" }, { "alpha_fraction": 0.2857142984867096, "alphanum_fraction": 0.4761904776096344, "avg_line_length": 6, "blob_id": "b8f08efc4ae67268cf4fa0c06b0b7e7457d43e4a", "content_id": "738182e38c44d47289bacbc44be4552fae3c419d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 21, "license_type": "no_license", "max_line_length": 12, "num_lines": 3, "path": "/content/trains/2263.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 2263\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": 
"853081b87f912531062c45d889ed4a7a01070331", "content_id": "a2dd5b726d30ac489921826b6a53f3b86ac24c1b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/338.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 338\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "4e206a975256a19b336426ae62bc43d5916c8f9f", "content_id": "1cc2365c7b6b35efc60dbde4f0f5334613552665", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/190.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 190\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "630f928e733a29a96b6e414434bebb7f81f866fa", "content_id": "e5f26883e18dee558c41719c3620c7d528c8da59", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/732.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 732\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "dd8acb01d4b130317895c861f597d86a4a61d965", "content_id": "e6193b387b916417f50a037a0f6c595ef5672e9b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/723.md", 
"repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 723\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "748b0964472b4ba29a8b45d9834f1a9defcd7b0c", "content_id": "f55e9c54d8aaa28ab500050fe13bc14f7dc35e45", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/171.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 171\n---\n" }, { "alpha_fraction": 0.2857142984867096, "alphanum_fraction": 0.4761904776096344, "avg_line_length": 6, "blob_id": "fa40732da7ea799e1602b378048331304c50d16f", "content_id": "44abad7b9c005a8611cb02b0df99273b69b7cc16", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 21, "license_type": "no_license", "max_line_length": 12, "num_lines": 3, "path": "/content/trains/2155.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 2155\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "934bb89f3eada5e5a4728db2e7f7b14003580209", "content_id": "2b1d1fb0ef6146b6cda1d87e8649c2ccab4e16be", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/510.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 510\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "c6e559998bfdee04d402395a2005dea9809aaf92", "content_id": 
"1da17ce4f425391140ace438bab67921c5753a3a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/637.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 637\n---\n" }, { "alpha_fraction": 0.2857142984867096, "alphanum_fraction": 0.4761904776096344, "avg_line_length": 6, "blob_id": "d04117e63ab6881a6c87979fd3e3d26b1aa2e6c4", "content_id": "942af8b2ceed3f91916c2ec39b98cd772229a59b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 21, "license_type": "no_license", "max_line_length": 12, "num_lines": 3, "path": "/content/trains/1569.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 1569\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "a94974293f7c5bb3c32ecca3f78102f6a7c6677f", "content_id": "98b7f7afc6bd689083e1c756efef7a33a9f43180", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/736.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 736\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "bfa574916bb25063314f6efaf593f1311cd0f6aa", "content_id": "bdaa724d760b9227f83771098e34c3d3a6451ef6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/702.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", 
"text": "---\nnumber: 702\n---\n" }, { "alpha_fraction": 0.2857142984867096, "alphanum_fraction": 0.4761904776096344, "avg_line_length": 6, "blob_id": "57eec18732ea6f6914d2c0cc44345c909606c6c4", "content_id": "2a3daffc43c0e86f96df2b5ee90f7db0f68c34a8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 21, "license_type": "no_license", "max_line_length": 12, "num_lines": 3, "path": "/content/trains/2117.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 2117\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "0280d6814c0c966171dc983e374fbd3e60570885", "content_id": "74c50a3e4f04be15bf3e1e1841083d89be338789", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/769.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 769\n---\n" }, { "alpha_fraction": 0.31578946113586426, "alphanum_fraction": 0.42105263471603394, "avg_line_length": 5.333333492279053, "blob_id": "2248a96399950972ee0902227efd13f86d180035", "content_id": "92ca0064d90cf45b036b0e6754cb9016aef52276", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 19, "license_type": "no_license", "max_line_length": 10, "num_lines": 3, "path": "/content/trains/53.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 53\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "dbfe5cdbe99323a0947a0bd59233e8743f9262f8", "content_id": "1885accf9a4795a2031c50e020febd1620032870", "detected_licenses": [], "is_generated": false, "is_vendor": 
false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/172.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 172\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "43a186bff199da3f34adf71d1d4a7fbb6a405ae0", "content_id": "514e8505aa4ecb2b296ccd4083e9f08e5edc87f5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/680.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 680\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "b9345deb52e933bc03fc81e9ca68c389281c8323", "content_id": "9b386bd9ab7e05aff7aa3c24fd8391ff50e23a7e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/174.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 174\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "52d5b2623f8f58d6523936cbd1bee85cc168209a", "content_id": "2f2cdea2a6e17449fc1a84301235b11c14911ce4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/610.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 610\n---\n" }, { "alpha_fraction": 0.2857142984867096, 
"alphanum_fraction": 0.4761904776096344, "avg_line_length": 6, "blob_id": "d5490deb8af0103139705155b73c37bac3895f62", "content_id": "d3d23a40152c31b2e39a746a1da8bc5e2ced095f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 21, "license_type": "no_license", "max_line_length": 12, "num_lines": 3, "path": "/content/trains/2175.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 2175\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "0b42e571ca180a1951b92ad45841d3ab75d95fea", "content_id": "f9bf36a5b231278da9e0428e87b66043b5a46ac5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/508.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 508\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "4cffb7da839bf1d4da7588d2a69b717b947b51f0", "content_id": "b46d381eb5d9d3d36c29137705e9fe871f4b0f23", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/728.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 728\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "4791ea8e18d11664ff138c3d26314f008b1577e3", "content_id": "1380d382bb6b4feb301f97028fc61faf0dc48042", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", 
"max_line_length": 11, "num_lines": 3, "path": "/content/trains/260.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 260\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "747f5bdb0736a2ff1be4d80d5a7a19db10b53e3d", "content_id": "4780aaa3a307ca070e78bc55c5036ac67dd31dff", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/550.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 550\n---\n" }, { "alpha_fraction": 0.5094339847564697, "alphanum_fraction": 0.6226415038108826, "avg_line_length": 9.600000381469727, "blob_id": "6f9f5fc5223739466d4f73acdacb2b79404da774", "content_id": "a58bc7f5f85a3ece62d78bff5a6cdd6fc15b41ec", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 53, "license_type": "no_license", "max_line_length": 17, "num_lines": 5, "path": "/content/routes/saluki.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nroute: Saluki\nslug: saluki\ntrains: [390,391]\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "f620494a7e9f0e8c4d146cc58d75d8d53883e0af", "content_id": "306ee1672fdf5936b4a9b7b231c208160b4ef432", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/650.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 650\n---\n" }, { "alpha_fraction": 0.31578946113586426, "alphanum_fraction": 0.42105263471603394, 
"avg_line_length": 5.333333492279053, "blob_id": "9566a66b00d29a3771cf175229c311477881b566", "content_id": "713c4ea73c488b71e2c955500a2e7f0819dfde99", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 19, "license_type": "no_license", "max_line_length": 10, "num_lines": 3, "path": "/content/trains/29.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 29\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "67be38cf6ed6e331a68fd56a0b892ffe6d678f7b", "content_id": "a1f6cfa769cb298d7bc6a9179e7cd5106b230afb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/667.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 667\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "7ad02209edd3bb8b975d892a234bd36485c46717", "content_id": "72628959831c5e7065038cbbd2b886eadf2c461a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/511.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 511\n---\n" }, { "alpha_fraction": 0.2857142984867096, "alphanum_fraction": 0.4761904776096344, "avg_line_length": 6, "blob_id": "6392a2e0ab11bc4568a7eb0ba7fbfc52951e6cda", "content_id": "1c197e78a4d2a36fa79edaa48bc99136950151b4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 21, "license_type": "no_license", "max_line_length": 12, "num_lines": 3, "path": 
"/content/trains/1196.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 1196\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "2304699e91ff5c85440263549fb0881be17c3721", "content_id": "25cb1e672192bbf1fc4ba5e59f745398219e74aa", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/505.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 505\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "5b64c5aa038f5156e5c022140fc8e0eb56ab07e2", "content_id": "a5252b3b94e77337b0663620d6f6c38cd6ffe86a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/541.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 541\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "fc60e4f004eed0d2c8062b782b5f681c2bbe8b6e", "content_id": "01be9a61b24c8a5c5cde410fee64b241a26f66e7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/686.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 686\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "a0b02ea88de653bf1c6f73ea7cace7d5adbb02db", 
"content_id": "cf6d583227fcea4d6932c25735ce15adb0c35556", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/189.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 189\n---\n" }, { "alpha_fraction": 0.31578946113586426, "alphanum_fraction": 0.42105263471603394, "avg_line_length": 5.333333492279053, "blob_id": "7cd1fa9d9e0057c7a0277c4fe420fa6f9c242f70", "content_id": "12e50bd1b5faa0a9700a73c5a94c56433793d943", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 19, "license_type": "no_license", "max_line_length": 10, "num_lines": 3, "path": "/content/trains/28.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 28\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "dc4a7854bcc493652950260c6813eeacea011bfd", "content_id": "f1d5167856b6f688e8a19e2a2c6bf56e3f8c83d7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/372.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 372\n---\n" }, { "alpha_fraction": 0.2857142984867096, "alphanum_fraction": 0.4761904776096344, "avg_line_length": 6, "blob_id": "b4901d78ad660164e480a09cf96ad4945f9698e5", "content_id": "e5a0367f4870139e587a4907d4d9eeeb0500ee00", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 21, "license_type": "no_license", "max_line_length": 12, "num_lines": 3, "path": "/content/trains/2221.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": 
"UTF-8", "text": "---\nnumber: 2221\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "daa33b83c49cd34e58082e05712d0454398a8cf4", "content_id": "73637c4ca1b46b70ff8a3a931bb56fe54d840889", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/309.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 309\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "1bcfc730cc30ca66d9a2ace74f21f52bf3d48831", "content_id": "c5fe3b2d7994ad19851af9dfd2b8142d82ccddfa", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/652.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 652\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "258450439973c4b2b9d5ac191c2b5c4c1bfd3a5f", "content_id": "c685bf61892232f0fc353090f2eedfbb43db9092", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/548.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 548\n---\n" }, { "alpha_fraction": 0.2857142984867096, "alphanum_fraction": 0.4761904776096344, "avg_line_length": 6, "blob_id": "b61185f68ca2a53fb752dc5da859ed721fafa306", "content_id": "9e8a2e7fac49a1b4437ce46e138f32d317ba74b1", "detected_licenses": [], "is_generated": false, 
"is_vendor": false, "language": "Markdown", "length_bytes": 21, "license_type": "no_license", "max_line_length": 12, "num_lines": 3, "path": "/content/trains/2293.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 2293\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "b62dddd7e7c8816149279e4c818237341311d4b2", "content_id": "4ea748733d97c4de85eefb34dd83d0027766fa9a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/693.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 693\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "2993eeafd06e831e82e700a0bc88fbd4bfd657c6", "content_id": "eb6a71513df92ed4f1e54a0067f462461a1c46e4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/737.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 737\n---\n" }, { "alpha_fraction": 0.31578946113586426, "alphanum_fraction": 0.42105263471603394, "avg_line_length": 5.333333492279053, "blob_id": "9a5f3167e22396292f5f633ca164ac7041491a9e", "content_id": "a9c25eb0f1301753f6652d786095efb28c448538", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 19, "license_type": "no_license", "max_line_length": 10, "num_lines": 3, "path": "/content/trains/55.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 55\n---\n" }, { "alpha_fraction": 0.30000001192092896, 
"alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "f8daabf8fb9b1e77835ccfe963efa0e9ca9b5366", "content_id": "7eaabb342b20b51266e41b8a1ea54f5abe513da2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/619.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 619\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "795c9b0ba16cbc3efc7832a21894ff364df4a77d", "content_id": "8ca10217d20791885297d0fa7852bc09ddf736aa", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/288.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 288\n---\n" }, { "alpha_fraction": 0.31578946113586426, "alphanum_fraction": 0.42105263471603394, "avg_line_length": 5.333333492279053, "blob_id": "040327ec404f778d5e60953dbb411c444abc59b2", "content_id": "eeaef199b7858ea3f28e331651c40e479fa0553d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 19, "license_type": "no_license", "max_line_length": 10, "num_lines": 3, "path": "/content/trains/92.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 92\n---\n" }, { "alpha_fraction": 0.6231883764266968, "alphanum_fraction": 0.6811594367027283, "avg_line_length": 12.800000190734863, "blob_id": "eb86f112ac8ace782a81eea668c21855fc74d5f9", "content_id": "1b439df192e34ce9d2ec779f45502f92d9e16902", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 69, "license_type": 
"no_license", "max_line_length": 22, "num_lines": 5, "path": "/content/routes/coast-starlight.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nroute: Coast Starlight\nslug: coast-starlight\ntrains: [11,14]\n---\n" }, { "alpha_fraction": 0.5409836173057556, "alphanum_fraction": 0.6393442749977112, "avg_line_length": 11.199999809265137, "blob_id": "2cefe879d39572ab731e65470396878af13b3690", "content_id": "888b6616888258c94f2a2eef15a5fe569cda8e45", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 61, "license_type": "no_license", "max_line_length": 17, "num_lines": 5, "path": "/content/routes/blue-water.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nroute: Blue Water\nslug: blue-water\ntrains: [365,364]\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "819b9144d33d1d27fa06a92d65a2919db6d451a1", "content_id": "d867c626d88c4618b9fc23396ba6db47df34d3f2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/467.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 467\n---\n" }, { "alpha_fraction": 0.6202531456947327, "alphanum_fraction": 0.6962025165557861, "avg_line_length": 14.800000190734863, "blob_id": "6dc3d58a2b6c8fd8dd750c540a8afb041e2190f4", "content_id": "c7aa73c1b1aa6497870219a458c681d7116ca92d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 79, "license_type": "no_license", "max_line_length": 26, "num_lines": 5, "path": "/content/routes/carl-sandburg-extra.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nroute: Carl 
Sandburg Extra\nslug: carl-sandburg-extra\ntrains: [384,385]\n---\n" }, { "alpha_fraction": 0.2857142984867096, "alphanum_fraction": 0.4761904776096344, "avg_line_length": 6, "blob_id": "656acded1566f3f9700ef6fd2d6295470468b6cb", "content_id": "3e19c42a55a5f505ce843a879be1e88bc1546fde", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 21, "license_type": "no_license", "max_line_length": 12, "num_lines": 3, "path": "/content/trains/2165.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 2165\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "7e88eecf1fe405c8b5c48f575341a123a4fa15fa", "content_id": "5a8f4aee2aeefd42dc5d2886c111c1947d6fe54d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/564.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 564\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "0350aa73ffcbb67677ce532ecf5155231d99774f", "content_id": "06f71705a5badf363bb3af3d446625fc51058cf8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/851.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 851\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "d34b52db1d5e972e306dcfc8c3e3fc97ffbb172d", "content_id": "03805f1cc9ce4416153e399b58c0237f30f0617f", "detected_licenses": [], 
"is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/691.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 691\n---\n" }, { "alpha_fraction": 0.2857142984867096, "alphanum_fraction": 0.4761904776096344, "avg_line_length": 6, "blob_id": "3666fa1ba963b842f68016afed65270b68f2562c", "content_id": "59ee79cb8316ad6b7eabacd48b1da8b81087c91d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 21, "license_type": "no_license", "max_line_length": 12, "num_lines": 3, "path": "/content/trains/2219.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 2219\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "b5352d1b8784155c44d2e56608168589e7fe26c4", "content_id": "445833738724bfca65fa3350b3474c3e6479743d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/523.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 523\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "4caf548068b0cff87e2c745f887d6680e50a7b1d", "content_id": "b07ca654477aa1d6bfc5d3479303c8e5830bae20", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/747.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 747\n---\n" }, { "alpha_fraction": 
0.2857142984867096, "alphanum_fraction": 0.4761904776096344, "avg_line_length": 6, "blob_id": "6e15001276e72dbbbb068cdc8b9139319e0301bc", "content_id": "8edd14be14f7070b11436c357131e529d9e0c3e7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 21, "license_type": "no_license", "max_line_length": 12, "num_lines": 3, "path": "/content/trains/1566.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 1566\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "fd41c689904b9854cecd1e482dff20ed3d7a35d7", "content_id": "9bbda3a6506c232473fb06d70ca66993f2f758e2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/152.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 152\n---\n" }, { "alpha_fraction": 0.2857142984867096, "alphanum_fraction": 0.4761904776096344, "avg_line_length": 6, "blob_id": "771ccb0d5735370a255b710c82e79b1b1ca438ac", "content_id": "2f67490a1707780a43c057c90c05e1a819d8decb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 21, "license_type": "no_license", "max_line_length": 12, "num_lines": 3, "path": "/content/trains/2291.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 2291\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "dee54b79cac154996ea639b907cc22971ba28d82", "content_id": "89feb778d20bc815e1d7acd8378e693d660a39fd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": 
"no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/763.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 763\n---\n" }, { "alpha_fraction": 0.31578946113586426, "alphanum_fraction": 0.42105263471603394, "avg_line_length": 5.333333492279053, "blob_id": "475eacbfd137fa06952bcfbf5cdb5de5ffd15036", "content_id": "27af7c5898d2b5d7e788553a3cf52d9a9cb44153", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 19, "license_type": "no_license", "max_line_length": 10, "num_lines": 3, "path": "/content/trains/73.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 73\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "cfd87b01434b2a8c08c8dc866bc0d915f0e6b8ca", "content_id": "0116d0acb28b8ea630edcf20289fccdbd15d3087", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/140.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 140\n---\n" }, { "alpha_fraction": 0.3333333432674408, "alphanum_fraction": 0.3888888955116272, "avg_line_length": 5, "blob_id": "b537a39e21f0a1538b4ed3bf0627861226086ab4", "content_id": "7ac6ceeafd53c6f294f2e911c9eaf75844c8235c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 18, "license_type": "no_license", "max_line_length": 9, "num_lines": 3, "path": "/content/trains/4.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 4\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": 
"b9060a54ba0e17b473613a1a5ffccce492c71140", "content_id": "1041a7c87891caf05a13ca083e50b692a0fa0503", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/690.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 690\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "283d24b0bbabeabda79f3ed8a34526d963ccc502", "content_id": "a3db09e0661c40353744bd6a21bc9868959077c6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/618.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 618\n---\n" }, { "alpha_fraction": 0.2857142984867096, "alphanum_fraction": 0.4761904776096344, "avg_line_length": 6, "blob_id": "7276617049b60072ae6a0e31bf841c8c8bdc872b", "content_id": "21c77b4996da0b38f9e5fe20747786e150a6835c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 21, "license_type": "no_license", "max_line_length": 12, "num_lines": 3, "path": "/content/trains/2172.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 2172\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "6e34e2f5f3ad6810e7c259129a5d9ccfdabc4fb3", "content_id": "f10bc95ddfe410ef6a8eeeef6a1ded041743fc80", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/375.md", "repo_name": 
"jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 375\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "edb635e5a13f066bd6152674a6fd9d25be93c453", "content_id": "1df54efd1e1d4b441d31bd1fa94cc98ad34fd2bb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/647.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 647\n---\n" }, { "alpha_fraction": 0.6886792182922363, "alphanum_fraction": 0.7264150977134705, "avg_line_length": 20.200000762939453, "blob_id": "0c1b094a343d936c1a3dec6fafeff010006eacec", "content_id": "c0fdca95e2f8751c8697c250371b5a219115cf60", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 106, "license_type": "no_license", "max_line_length": 42, "num_lines": 5, "path": "/content/routes/lake-shore-limited-boston-section.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nroute: Lake Shore Limited - Boston Section\nslug: lake-shore-limited-boston-section\ntrains: [1448]\n---\n" }, { "alpha_fraction": 0.31578946113586426, "alphanum_fraction": 0.42105263471603394, "avg_line_length": 5.333333492279053, "blob_id": "4bdd18dd80fe59a6f3a7c55d769c7a8249095559", "content_id": "636974d85e5e802a61f3dceee13b0f7a36b64d5e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 19, "license_type": "no_license", "max_line_length": 10, "num_lines": 3, "path": "/content/trains/99.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 99\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 
5.666666507720947, "blob_id": "ed4044fa03fdc39ece2e12e932ce6ea566520ebf", "content_id": "f88b67d2a40ac1a005ffd10cfc1e7718f407b533", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/538.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 538\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "3fabaabb0dae6a06366af70a081a1c9eddd3be39", "content_id": "7f3540e5457b66d2be0a41bb08f5bb2cd294b0cc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/163.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 163\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "b31966dfa0fb140a1e6bc5c02d2b8223f3573803", "content_id": "c9bd9ecb707b71fb8ff18f45cd6492ea1fd1f7b5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/524.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 524\n---\n" }, { "alpha_fraction": 0.2857142984867096, "alphanum_fraction": 0.4761904776096344, "avg_line_length": 6, "blob_id": "81fadf9063f1d0d3d5214454fe786f612117e831", "content_id": "80522a74e8d8cb3687385c37bda558675e13bcd8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 21, "license_type": "no_license", "max_line_length": 12, "num_lines": 3, "path": 
"/content/trains/2234.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 2234\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "9da6baf775265aaf6a04e8d14e99711afa0f9665", "content_id": "678d27da9ef20018e160b8c1740c44771979bac3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/537.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 537\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "d4298f17a92f422ae6acfcc9efed07ae3f26b672", "content_id": "02bd9b0cfcacfe80b8ae482b1f5ead58584555a0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/241.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 241\n---\n" }, { "alpha_fraction": 0.2857142984867096, "alphanum_fraction": 0.4761904776096344, "avg_line_length": 6, "blob_id": "15fc1fe16652faeadbe24af42743121342247aac", "content_id": "00431c0222be25a00577f41880ac84c9ee8e774d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 21, "license_type": "no_license", "max_line_length": 12, "num_lines": 3, "path": "/content/trains/1129.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 1129\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "47ca3926b548497507ae0849b42b605bec5057cf", "content_id": 
"dc6d2decfa61a6c4f0712f1182bb1c80b46c7456", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/245.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 245\n---\n" }, { "alpha_fraction": 0.31578946113586426, "alphanum_fraction": 0.42105263471603394, "avg_line_length": 5.333333492279053, "blob_id": "301f03165acb6741a1ee4b819384498ac5f1034e", "content_id": "559cd72a1664fe428ee2a308126c89f2d7a38d07", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 19, "license_type": "no_license", "max_line_length": 10, "num_lines": 3, "path": "/content/trains/74.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 74\n---\n" }, { "alpha_fraction": 0.6307692527770996, "alphanum_fraction": 0.692307710647583, "avg_line_length": 12, "blob_id": "35de78c696d1da845520c69f6774138aa5ed55f0", "content_id": "84cb627176e0f14c8c48e67cd7aacb1b5fbe8307", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 65, "license_type": "no_license", "max_line_length": 20, "num_lines": 5, "path": "/content/routes/pennsylvanian.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nroute: Pennsylvanian\nslug: pennsylvanian\ntrains: [43,42]\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "4732f4552294e0570217664daea12a0c0df91bf5", "content_id": "b3e9b63f390fe678dc9b019cd240a7ac745c9823", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/478.md", "repo_name": 
"jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 478\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "7a4b70a2866a61ae3524edc6016bf232f36e73f2", "content_id": "4d0f815af2f772890f1102c6c7eefd307540f95d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/796.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 796\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "3cca41acbd4eab68132a1b117a0fb48f23cc4c23", "content_id": "10fca44f580ae3adc4ee6851de5cf018638c6c1f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/777.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 777\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "dcb18f7a341112e797d884bd28d2e6e58dcf77a0", "content_id": "e7d7223bf96db559942fcfca8616728d674a6729", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/183.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 183\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "2e4ed15ae3ba934c40c996535f2bc25d25ec7279", "content_id": 
"4943629c9ee404a349e234e6a788c6323ee3707d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/765.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 765\n---\n" }, { "alpha_fraction": 0.31578946113586426, "alphanum_fraction": 0.42105263471603394, "avg_line_length": 5.333333492279053, "blob_id": "2d037ba694abb685b86e75f6d40dcc61fa5b9f2c", "content_id": "5d34142471074f948c9dfd9399afa57359d0ff51", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 19, "license_type": "no_license", "max_line_length": 10, "num_lines": 3, "path": "/content/trains/57.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 57\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "5a563f2895f741d941a8038a0c24dc7bf787292a", "content_id": "aca63ec5e209437bd0f266a7201e4d0ba08a8cd6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/506.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 506\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "89aff200c8ff0f615b0165ae1812101984accc1e", "content_id": "545697f18c35816c6b5075bfae4696f5eeebfe02", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/156.md", "repo_name": "jonathanhunsucker/amtrak-on-time", 
"src_encoding": "UTF-8", "text": "---\nnumber: 156\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "da9648fe8fd3783584a3090ab33596325150c741", "content_id": "ccdf2b1c8d61b663120c39e9fe3ac908b5111c8d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/648.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 648\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "e2a66ae5e7ad4aa35e9a501a3c89454dcbc2ff35", "content_id": "310c0f1cedfcfa1ab0883fe2ce2de51224e270c9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/383.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 383\n---\n" }, { "alpha_fraction": 0.2857142984867096, "alphanum_fraction": 0.4761904776096344, "avg_line_length": 6, "blob_id": "95c25f891acce0d570f68ae8d9ca773d28f509f8", "content_id": "5f486f93eb02793e41a5a64e832dcfecfe3298a2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 21, "license_type": "no_license", "max_line_length": 12, "num_lines": 3, "path": "/content/trains/1579.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 1579\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "81b0f902a70189c4f9376e8197ebedd5e24549e6", "content_id": "d10f59dd82b0073722ee1170838a649728559bac", "detected_licenses": [], 
"is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/304.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 304\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "8436e38cb79234cc189fdb228786c2612408ef8e", "content_id": "c9e0cfe120e381848bf4c06a18f20763a9048880", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/922.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 922\n---\n" }, { "alpha_fraction": 0.6445783376693726, "alphanum_fraction": 0.6586345434188843, "avg_line_length": 29.15151596069336, "blob_id": "66bc7f3800cea3c9e12523fd1ad982b88716caed", "content_id": "d2c7166cca5e0c3d7cb8902f6d76e784d98af229", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 996, "license_type": "no_license", "max_line_length": 122, "num_lines": 33, "path": "/script/build", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "#!/usr/local/bin/python3\n\nimport argparse\nimport os\nimport subprocess\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--train')\nargs = parser.parse_args()\n\ndef load(train):\n info_path = 'info/2018/%s' % train\n print('Converting %s to json...' % info_path, end='')\n completed_process = subprocess.run(['php', 'src/toJson.php', info_path], capture_output=True)\n print('DONE')\n\n data_path = 'data/train/%s.json' % train\n print('Writing to %s...' 
% data_path, end='')\n data = open(data_path, 'w')\n data.write(completed_process.stdout.decode('UTF-8'))\n print('DONE')\n\n content_path = 'content/trains/%s.md' % train\n print('Writing to %s...' % content_path, end='')\n content = open(content_path, 'w')\n content.write(\"---\\nnumber: %s\\n---\\n\" % train)\n print('DONE')\n\nif (args.train):\n load(args.train)\nelse:\n trains = filter(lambda path: os.path.isdir(os.path.join('info/2018', path)), [path for path in os.listdir('info/2018')])\n list(map(lambda train: load(train), trains))\n\n" }, { "alpha_fraction": 0.5972222089767456, "alphanum_fraction": 0.6805555820465088, "avg_line_length": 13.399999618530273, "blob_id": "d52b7c0145e75db7a375890945c0448b32d2a4bc", "content_id": "4300ccc138ae002056f688e89722ff1a9e53d515", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 72, "license_type": "no_license", "max_line_length": 22, "num_lines": 5, "path": "/content/routes/southwest-chief.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nroute: Southwest Chief\nslug: southwest-chief\ntrains: [1003,3,4]\n---\n" }, { "alpha_fraction": 0.31578946113586426, "alphanum_fraction": 0.42105263471603394, "avg_line_length": 5.333333492279053, "blob_id": "ff231ffacf2078ffa94b38de550e9c3a1d26cbb3", "content_id": "3a93ce3f62ce5a490c236ef1618ed2bb9b7619fc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 19, "license_type": "no_license", "max_line_length": 10, "num_lines": 3, "path": "/content/trains/89.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 89\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "b1b40bf443e50ae76da88f35803226efd9c5c896", "content_id": "e0da9d699403bdefeb50a8e426c267174fe0b841", "detected_licenses": [], 
"is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/353.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 353\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "f388d2334c0ad893fb01e7bb75d585e7cde7f46d", "content_id": "741ed31e631be2c458779fb9d9273f3e50519db4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/516.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 516\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "a3b9def4d8fe2cceb8818d03bbd397ab17890155", "content_id": "12bee1a4d1b5d0c31e4d05eaa82f545b40ca9e90", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/193.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 193\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "cfa027c90f97674c63e390cc1f6143c91e495f4d", "content_id": "1e6abc35dfa2a7f3b1061c837b27893f90896194", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/111.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 111\n---\n" }, { 
"alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "ea207d6d2bf7261a761d0e0de05e2386313af493", "content_id": "397b15818ebb1f2a66f6aa10d5047ffa826338ba", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/148.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 148\n---\n" }, { "alpha_fraction": 0.2857142984867096, "alphanum_fraction": 0.4761904776096344, "avg_line_length": 6, "blob_id": "84656f7cfd02f4e2ece7e0064d13d609ec2bcfa7", "content_id": "ab21659b73c62c52244804313ac7f7a22a119793", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 21, "license_type": "no_license", "max_line_length": 12, "num_lines": 3, "path": "/content/trains/2241.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 2241\n---\n" }, { "alpha_fraction": 0.31578946113586426, "alphanum_fraction": 0.42105263471603394, "avg_line_length": 5.333333492279053, "blob_id": "46be3c6604503776c2fa8a81793b03ad8acbffde", "content_id": "907298987c7c7873550cd71f0d3db258aee476e4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 19, "license_type": "no_license", "max_line_length": 10, "num_lines": 3, "path": "/content/trains/21.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 21\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "eb402bcf7ac127a0429d620a8b73efa9828d57be", "content_id": "2615eeb5edba77e207d3a6570819f523d3ae2dbc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", 
"length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/743.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 743\n---\n" }, { "alpha_fraction": 0.5820895433425903, "alphanum_fraction": 0.6716417670249939, "avg_line_length": 12.399999618530273, "blob_id": "14ba9f5525bbd117269e17c72fabc4d27a42fec8", "content_id": "179c4ae6e3c713edf47b5969003b51b03911f1ca", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 67, "license_type": "no_license", "max_line_length": 20, "num_lines": 5, "path": "/content/routes/carl-sandburg.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nroute: Carl Sandburg\nslug: carl-sandburg\ntrains: [381,382]\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "78bbe65f4c827d660246a715a5c4c14c6a4fb27c", "content_id": "de6cb891c84b115ef8200e2776e123e5dc3256a4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/674.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 674\n---\n" }, { "alpha_fraction": 0.31578946113586426, "alphanum_fraction": 0.42105263471603394, "avg_line_length": 5.333333492279053, "blob_id": "e3da5f7f93500d1394cf6792f553c245c5acbb9b", "content_id": "f1922a47b3a566b943501a5705da7632847fd555", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 19, "license_type": "no_license", "max_line_length": 10, "num_lines": 3, "path": "/content/trains/95.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 95\n---\n" }, { "alpha_fraction": 
0.31578946113586426, "alphanum_fraction": 0.42105263471603394, "avg_line_length": 5.333333492279053, "blob_id": "db5b9af2bf61acb495beb391aff3be9146ec2052", "content_id": "f698cd3ff35cea93a6100a614185d539864eecc3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 19, "license_type": "no_license", "max_line_length": 10, "num_lines": 3, "path": "/content/trains/63.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 63\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "def6b09ce9c10e520c5758f87b50e9f1caf0aa5c", "content_id": "167d2a2b14aa06c56954e87f9f18cc2decf75788", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/514.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 514\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "8c4f8268469648fdaa434bb09f1ecec81f71ddc6", "content_id": "e61ddbaf607ee5ec668196ae13fd1ee6d80c0ce3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/409.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 409\n---\n" }, { "alpha_fraction": 0.2857142984867096, "alphanum_fraction": 0.4761904776096344, "avg_line_length": 6, "blob_id": "5dc3782d81c612b9032b3499fe6624d387f2b031", "content_id": "b99a14c8d7c8d9c041a7ff36891a35c5596cd847", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 21, 
"license_type": "no_license", "max_line_length": 12, "num_lines": 3, "path": "/content/trains/1981.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 1981\n---\n" }, { "alpha_fraction": 0.2857142984867096, "alphanum_fraction": 0.4761904776096344, "avg_line_length": 6, "blob_id": "f62a660d9137e0d279470486d9ba4bc354f2ef83", "content_id": "dd91e1ccadfdc9f0cce517a59f40514eac258801", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 21, "license_type": "no_license", "max_line_length": 12, "num_lines": 3, "path": "/content/trains/2269.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 2269\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "1312ca9c623aa6e7417dc2490b9413aa825fdbdb", "content_id": "a29df232b7ea812d619969032e8a164bf855bf9a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/497.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 497\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "7ccfdb8e45193bc747e1a33c7e8ec70775a977d1", "content_id": "a5d768d0eff51630fe7604621f04a9829a950c9f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/568.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 568\n---\n" }, { "alpha_fraction": 0.31578946113586426, "alphanum_fraction": 0.42105263471603394, "avg_line_length": 
5.333333492279053, "blob_id": "9d56d3a056df4e66d6883916a07095c4549630ed", "content_id": "0b096820762c6ef04e73e2ef30a26ab6161cd5bb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 19, "license_type": "no_license", "max_line_length": 10, "num_lines": 3, "path": "/content/trains/96.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 96\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "978c176c8c77f05ee8fe770efb04db994cd1965b", "content_id": "221b7fe726bea8ea2e3dfb5b08dd3e80c8c08319", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/314.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 314\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "777bfae8e78411aecf493c69d1d82335be8475b6", "content_id": "c4e599f2a0e094fbf882b6cb00f5ba244daceea3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/738.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 738\n---\n" }, { "alpha_fraction": 0.2857142984867096, "alphanum_fraction": 0.4761904776096344, "avg_line_length": 6, "blob_id": "cc26bff641f299f9595fcfc4beaff0361414d74e", "content_id": "e4ae512e916ce6a1a1d3ea8122d9d7b313f9b6f7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 21, "license_type": "no_license", "max_line_length": 12, "num_lines": 3, "path": 
"/content/trains/2216.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 2216\n---\n" }, { "alpha_fraction": 0.5571847558021545, "alphanum_fraction": 0.5953079462051392, "avg_line_length": 21.733333587646484, "blob_id": "52916c213a338559e05c78add419b12988b064c1", "content_id": "ccacc2039abc20432076cbc538ca3e63f5e3fb9f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "PHP", "length_bytes": 341, "license_type": "no_license", "max_line_length": 56, "num_lines": 15, "path": "/src/toDelay.php", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "<?php\n\nfunction toMinutes($hours_minutes)\n{\n $hour = intval(substr($hours_minutes, 0, 1), 10);\n $minutes = intval(substr($hours_minutes, 1, 3), 10);\n\n $arrival_time = $hour * 60 + $minutes;\n return $arrival_time;\n}\n\nwhile ($line = trim(fgets(STDIN))) {\n $delay = toMinutes($line) - toMinutes('852');\n echo $delay . \"\\n\";\n}\n" }, { "alpha_fraction": 0.2857142984867096, "alphanum_fraction": 0.4761904776096344, "avg_line_length": 6, "blob_id": "e44a489c1b3fceecd610daf7587e2b3597cfb89b", "content_id": "af6a71b29e4f230bf386adc3397e574f19e53b29", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 21, "license_type": "no_license", "max_line_length": 12, "num_lines": 3, "path": "/content/trains/1689.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 1689\n---\n" }, { "alpha_fraction": 0.2857142984867096, "alphanum_fraction": 0.4761904776096344, "avg_line_length": 6, "blob_id": "324158132a421e69efac65f66d882cc51dac2cc3", "content_id": "f71346190b862a7a33930eef2570f8ac679110a4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 21, "license_type": "no_license", "max_line_length": 12, "num_lines": 3, "path": "/content/trains/2100.md", "repo_name": 
"jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 2100\n---\n" }, { "alpha_fraction": 0.2857142984867096, "alphanum_fraction": 0.4761904776096344, "avg_line_length": 6, "blob_id": "f4914d154fcabf83d85871ef89b5a8f7425daa5c", "content_id": "b1fa0c546dbc1d15207d7da0d0441ea1a9138f24", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 21, "license_type": "no_license", "max_line_length": 12, "num_lines": 3, "path": "/content/trains/2222.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 2222\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "d645a2352df28fd3913cd100706db38d161b3ff0", "content_id": "d7d5d2d48383b5b7d9352dafdc4dda638222cfdc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/545.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 545\n---\n" }, { "alpha_fraction": 0.2857142984867096, "alphanum_fraction": 0.4761904776096344, "avg_line_length": 6, "blob_id": "b64f9726da82d95c29e1e196ec797c5a2629d886", "content_id": "1b8521e91ad49919ff3385c2816bde74c16314af", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 21, "license_type": "no_license", "max_line_length": 12, "num_lines": 3, "path": "/content/trains/1139.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 1139\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "b2474887bb69d1741c2756abc77572a7855ad209", "content_id": "760d1b4c311f7bc6e0bc41ced920f8a471b3b8ca", 
"detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/490.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 490\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "e8c40011508a29bd6000b7c67e5eeebbe414cd03", "content_id": "d105047a0266083a62af33cbbedf04ab2b0e173a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/699.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 699\n---\n" }, { "alpha_fraction": 0.6847826242446899, "alphanum_fraction": 0.72826087474823, "avg_line_length": 17.399999618530273, "blob_id": "8862fa4c66c40c2b792b002e125c656404c6df16", "content_id": "3ec2b865a4e7eb9d806776cb8c7ff7220d459f1f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 92, "license_type": "no_license", "max_line_length": 34, "num_lines": 5, "path": "/content/routes/acela-express-holiday-extra.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nroute: Acela Express Holiday Extra\nslug: acela-express-holiday-extra\ntrains: [2245]\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "87189a63a168a4b60341873fc17bc6e38718afcf", "content_id": "b238263544c31fea49bed6ea31e38bc3b888b575", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/669.md", 
"repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 669\n---\n" }, { "alpha_fraction": 0.5214105844497681, "alphanum_fraction": 0.5768262147903442, "avg_line_length": 19.894737243652344, "blob_id": "b644fa45a148849c01d440d8c6789408b6a55dc7", "content_id": "7d8874676e74247ad12d7f1d2ee6815bdbaf4397", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 397, "license_type": "no_license", "max_line_length": 90, "num_lines": 19, "path": "/static/js/core.js", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "class Defaults {\n static get blue() {\n return 'rgba(0, 0, 255, 0.8)';\n }\n static get translucentBlue() {\n return 'rgba(0, 0, 255, 0.1)';\n }\n static get red() {\n return 'rgba(255, 0, 0, 0.8)';\n }\n}\n\nclass Functional {\n static get unique() {\n return (accumulation, item) => {\n return accumulation.indexOf(item) === -1 ? accumulation.concat(item) : accumulation;\n };\n }\n}\n" }, { "alpha_fraction": 0.31578946113586426, "alphanum_fraction": 0.42105263471603394, "avg_line_length": 5.333333492279053, "blob_id": "33ec29caea01f94f195c934100b10d3d9720b992", "content_id": "ea6fd4b96775ce815ac9dab17cdf81088c77b040", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 19, "license_type": "no_license", "max_line_length": 10, "num_lines": 3, "path": "/content/trains/11.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 11\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "909edd53d401bca1c1845b628e0a349aef4e5377", "content_id": "47043bbb18b474abadbc639300f3ec976c2e8bc8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, 
"num_lines": 3, "path": "/content/trains/139.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 139\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "69fa9cf38f0392731e94bbffdeea44e88c1fa800", "content_id": "9fa50ed41ec40af0358d8c2cb61f3d2bdc048419", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/476.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 476\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "639129d3a4037392448133d6524711bfb438b484", "content_id": "4719b4e8a7acfa54d69616609c76079a093f888b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/303.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 303\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "cf9e7fc1bfa24581790569bd50679909b80f5f86", "content_id": "5f13d240488046e284a3dd3841aa8a9b98489564", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/682.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 682\n---\n" }, { "alpha_fraction": 0.5737704634666443, "alphanum_fraction": 0.6393442749977112, "avg_line_length": 11.199999809265137, "blob_id": 
"860e323cbb3344091504001c1d0eb43e6b89e07a", "content_id": "e499cf556cefd6419687a290b80d14bcf07e2cbd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 61, "license_type": "no_license", "max_line_length": 18, "num_lines": 5, "path": "/content/routes/texas-eagle.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nroute: Texas Eagle\nslug: texas-eagle\ntrains: [21,22]\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "a4380dd743dca65d77f08b8ff104b37e59eb75c9", "content_id": "659ed062fa086e8dc3ad125bad9aae8b7ce6dcb9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/724.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 724\n---\n" }, { "alpha_fraction": 0.2857142984867096, "alphanum_fraction": 0.4761904776096344, "avg_line_length": 6, "blob_id": "53537025a444f44010f7e3ad80d2dec45a58b33e", "content_id": "193e0d0fdc12adffe430db2457bd7c5458d349e2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 21, "license_type": "no_license", "max_line_length": 12, "num_lines": 3, "path": "/content/trains/2251.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 2251\n---\n" }, { "alpha_fraction": 0.31578946113586426, "alphanum_fraction": 0.42105263471603394, "avg_line_length": 5.333333492279053, "blob_id": "6999b8c8c2fdcdfaaffdecd0cb7cc94d54d4447f", "content_id": "8e415e22d5735bcbe4db6fde22f1d37a22df6e93", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 19, "license_type": "no_license", "max_line_length": 10, "num_lines": 3, 
"path": "/content/trains/79.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 79\n---\n" }, { "alpha_fraction": 0.31578946113586426, "alphanum_fraction": 0.42105263471603394, "avg_line_length": 5.333333492279053, "blob_id": "ab164453bd8d5eadbb63a8e8f59804d8c800e507", "content_id": "3d914f3f23cb594d62afa2f57485e0667dba2d28", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 19, "license_type": "no_license", "max_line_length": 10, "num_lines": 3, "path": "/content/trains/83.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 83\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "08433b02b06bfa49c1c5f74f9d20ec5a284ec602", "content_id": "f012f77dbc56be54d31f1d4e6b4131354982574c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/126.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 126\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "7b0365396854e60d89a009707372e8909223963f", "content_id": "a05d4c75e9e6872d778211a0897f8f52b25c3288", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/143.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 143\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "6e466cb53715798163e4dd12bea47156960a9144", 
"content_id": "1751afed5c73fd823a782caaf9bce8d281cc1be7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/188.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 188\n---\n" }, { "alpha_fraction": 0.2857142984867096, "alphanum_fraction": 0.4761904776096344, "avg_line_length": 6, "blob_id": "591302f03642334d905fe466517d5e52a83a8b03", "content_id": "15bfd6e5ba402025f7c21a09d08b3023c8bf0b1c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 21, "license_type": "no_license", "max_line_length": 12, "num_lines": 3, "path": "/content/trains/2203.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 2203\n---\n" }, { "alpha_fraction": 0.2857142984867096, "alphanum_fraction": 0.4761904776096344, "avg_line_length": 6, "blob_id": "f9d7222fa4e0842c2c09e9acaaabca2d9ac44a20", "content_id": "c55f37de3a2dbca3ef005dd342c8aaa0ba581924", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 21, "license_type": "no_license", "max_line_length": 12, "num_lines": 3, "path": "/content/trains/2161.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 2161\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "0922c924c90955554204bb4d348602f670fd9937", "content_id": "4ca4968eeb4dc9591efce65262f4b7c4b93a1292", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/488.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", 
"text": "---\nnumber: 488\n---\n" }, { "alpha_fraction": 0.2857142984867096, "alphanum_fraction": 0.4761904776096344, "avg_line_length": 6, "blob_id": "7df7cb9991f9366c0f32ff1ecb088f42ad3090f3", "content_id": "0f8dea35b3b98a4dd42d2e4d110c9db41a256a6d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 21, "license_type": "no_license", "max_line_length": 12, "num_lines": 3, "path": "/content/trains/2192.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 2192\n---\n" }, { "alpha_fraction": 0.31578946113586426, "alphanum_fraction": 0.42105263471603394, "avg_line_length": 5.333333492279053, "blob_id": "57c940ec3016262f7f4b0cab9dd0b046c089a00d", "content_id": "e0c30be6ccb0efd44a9048880310f4fb406948a3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 19, "license_type": "no_license", "max_line_length": 10, "num_lines": 3, "path": "/content/trains/85.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 85\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "8231d215cd8d29566f2e991f8c0960061d26a268", "content_id": "94ae5718aca0bb12f8e85585ae51c44aa3370d12", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/283.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 283\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "95e45077a4f752528188e4fd848ca31498d5c590", "content_id": "a145aac1636beb5574bbc4775122be13690359b4", "detected_licenses": [], "is_generated": false, "is_vendor": 
false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/354.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 354\n---\n" }, { "alpha_fraction": 0.2857142984867096, "alphanum_fraction": 0.4761904776096344, "avg_line_length": 6, "blob_id": "5f9cdea87f13a7b8e2bb012a49622f6edf47f764", "content_id": "e2f993a20f5d143ddb963cdeb3f5d8a06083563f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 21, "license_type": "no_license", "max_line_length": 12, "num_lines": 3, "path": "/content/trains/2167.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 2167\n---\n" }, { "alpha_fraction": 0.2857142984867096, "alphanum_fraction": 0.4761904776096344, "avg_line_length": 6, "blob_id": "2613fe8a7367f855fa3b4dbc0c3bdb1f077f75da", "content_id": "0c89475e32be1e4228b18356fb92f1628bdef45f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 21, "license_type": "no_license", "max_line_length": 12, "num_lines": 3, "path": "/content/trains/1158.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 1158\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "7aee2cfdef4ede958d7d42336e07589359634a66", "content_id": "1dc45c78019f406c07650c2fb29a8f7ea6abed48", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/261.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 261\n---\n" }, { "alpha_fraction": 0.2857142984867096, "alphanum_fraction": 0.4761904776096344, 
"avg_line_length": 6, "blob_id": "337d325bf20fb5c05c1f20f5584c299d22813e97", "content_id": "10f4d9cd478144d2baf2faded260e2aa25c5b726", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 21, "license_type": "no_license", "max_line_length": 12, "num_lines": 3, "path": "/content/trains/2159.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 2159\n---\n" }, { "alpha_fraction": 0.2857142984867096, "alphanum_fraction": 0.4761904776096344, "avg_line_length": 6, "blob_id": "e497cfebf9bacfee4ac83a5cb9e88dada3ac202a", "content_id": "045bf4678b63c5a6be2f166aca87b9d68a5ddfb1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 21, "license_type": "no_license", "max_line_length": 12, "num_lines": 3, "path": "/content/trains/1174.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 1174\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "636d2ce8156fe09b842214cfe14bcdaa7d9dc325", "content_id": "4df422cf9aec060b498eb506be0db8e2136ed04b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/518.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 518\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "b90c85bfaaa4858b0189a46a7cccdb72e319e452", "content_id": "ccd2003e4e4aa2fe3aff6b883275120a352d75df", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": 
"/content/trains/335.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 335\n---\n" }, { "alpha_fraction": 0.31578946113586426, "alphanum_fraction": 0.42105263471603394, "avg_line_length": 5.333333492279053, "blob_id": "884b15dea6a199a04682a636cbafa082e8d017e6", "content_id": "895ad93d7a6869975ffe26005d7a9cee49d16539", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 19, "license_type": "no_license", "max_line_length": 10, "num_lines": 3, "path": "/content/trains/50.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 50\n---\n" }, { "alpha_fraction": 0.2857142984867096, "alphanum_fraction": 0.4761904776096344, "avg_line_length": 6, "blob_id": "a2eb01078b7c4715fd04484f6233313797bb4077", "content_id": "3e8f412ee0b11cf0816a2bde18404e7977cb0632", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 21, "license_type": "no_license", "max_line_length": 12, "num_lines": 3, "path": "/content/trains/2247.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 2247\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "b475ad239e28a3f904bf9ee115db55be7acecf7d", "content_id": "85703f3d4f5274e4014e829c3a2b559f9aae8e5c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/641.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 641\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "bc38c21ddee2df33329dcf2f4eeb55833fe454ef", "content_id": 
"3662f65271da9959d44d8f402aee09840366245d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/640.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 640\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "7b0190d30f284768f8e0cd1a414ec04d5bead7df", "content_id": "06ed3bc3465433edfd154562096a14b934a4afa0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/500.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 500\n---\n" }, { "alpha_fraction": 0.2857142984867096, "alphanum_fraction": 0.4761904776096344, "avg_line_length": 6, "blob_id": "2a8ae3d5fcb393e447dbbdb4a7562631ac136922", "content_id": "a98a8c010a5ee1787486aaa0e8efeed971f86261", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 21, "license_type": "no_license", "max_line_length": 12, "num_lines": 3, "path": "/content/trains/2248.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 2248\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "847f726f925eb447f9a56c35ec05d2661cae2b6e", "content_id": "e64575d5b041959f90375895cfaf3af93eac9a41", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/407.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", 
"text": "---\nnumber: 407\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "760dd4afa904b34422c7420541b3de4aef0b1848", "content_id": "709b74469202d694856e14b477b6664dd56e5d66", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/507.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 507\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "dc662b9d47e7a51995f3d484ee3172bf4c6b2c17", "content_id": "3cbb17ddab75bbad67577cbdfab1bc3d390ea468", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/302.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 302\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "1abb43de74033d878ffd3e9fa5ec8ba2906609ac", "content_id": "3508218e67a1a7cc04c41298bf041e8599d7e857", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/192.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 192\n---\n" }, { "alpha_fraction": 0.2857142984867096, "alphanum_fraction": 0.4761904776096344, "avg_line_length": 6, "blob_id": "52afa1d2631c146e4731d533a782c5940779df0d", "content_id": "7ea3db8ddb2755e0516e2c2d9d8a5a406dc3f8b7", "detected_licenses": [], "is_generated": false, "is_vendor": 
false, "language": "Markdown", "length_bytes": 21, "license_type": "no_license", "max_line_length": 12, "num_lines": 3, "path": "/content/trains/2210.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 2210\n---\n" }, { "alpha_fraction": 0.2857142984867096, "alphanum_fraction": 0.4761904776096344, "avg_line_length": 6, "blob_id": "2baa96ee40f3c2c857e5c5b739ddf0bfd4aab10f", "content_id": "d826b7329fd28e9dbba74204ba6d06ee2970f61d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 21, "license_type": "no_license", "max_line_length": 12, "num_lines": 3, "path": "/content/trains/1179.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 1179\n---\n" }, { "alpha_fraction": 0.2857142984867096, "alphanum_fraction": 0.4761904776096344, "avg_line_length": 6, "blob_id": "cbd3877e611d82b0ca563ac87f72672015a93fd4", "content_id": "0f9b10abdf1413828e2fe793c580609bf9833837", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 21, "license_type": "no_license", "max_line_length": 12, "num_lines": 3, "path": "/content/trains/2231.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 2231\n---\n" }, { "alpha_fraction": 0.31578946113586426, "alphanum_fraction": 0.42105263471603394, "avg_line_length": 5.333333492279053, "blob_id": "ff0d6ac256efb11d0d0684467f67064ee3fb1c88", "content_id": "4fdbf443df3c765f1bf6eace16464f6f5490cec1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 19, "license_type": "no_license", "max_line_length": 10, "num_lines": 3, "path": "/content/trains/59.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 59\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, 
"avg_line_length": 5.666666507720947, "blob_id": "cb8f90ba6a5b73286ef5e9cd20f65e6d7e63b467", "content_id": "db5c98de0ac94618e76c1095b3cbe2a47981eec2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/573.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 573\n---\n" }, { "alpha_fraction": 0.2857142984867096, "alphanum_fraction": 0.4761904776096344, "avg_line_length": 6, "blob_id": "9c26e3d934b98f2fd05fd0762a3b19cdd78c0d26", "content_id": "8c89c4a368c2e70563a529449ab8e0ed5ef22b55", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 21, "license_type": "no_license", "max_line_length": 12, "num_lines": 3, "path": "/content/trains/1988.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 1988\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "1cf180d6bd107c946aefd7be4e7817dd880a2ce4", "content_id": "49b4df9bc5b9bec35df5adfa9a7f21f93e905b04", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/449.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 449\n---\n" }, { "alpha_fraction": 0.46268656849861145, "alphanum_fraction": 0.641791045665741, "avg_line_length": 12.399999618530273, "blob_id": "cb230de3a99099eea2a7461df76ed19155058014", "content_id": "11a83aa35f4c7ba18986bab820b2807504fa0a17", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 67, "license_type": "no_license", "max_line_length": 27, "num_lines": 5, 
"path": "/content/routes/piedmont.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nroute: Piedmont\nslug: piedmont\ntrains: [77,76,73,75,78,74]\n---\n" }, { "alpha_fraction": 0.2857142984867096, "alphanum_fraction": 0.4761904776096344, "avg_line_length": 6, "blob_id": "24f9ec8db5df771c2d12096cd77f7c5b88792267", "content_id": "b92c4dfecf9800a24e5c745239b2a206743c1bcc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 21, "license_type": "no_license", "max_line_length": 12, "num_lines": 3, "path": "/content/trains/1590.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 1590\n---\n" }, { "alpha_fraction": 0.2857142984867096, "alphanum_fraction": 0.4761904776096344, "avg_line_length": 6, "blob_id": "b566326a645b8972c278e71d4ff48b8402975db4", "content_id": "bd1bd70f7448b1a48f4cb10560570a225ca4363f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 21, "license_type": "no_license", "max_line_length": 12, "num_lines": 3, "path": "/content/trains/1767.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 1767\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "c75658ccc7021101f8f5f3af4200c3f26a90e707", "content_id": "a5b5d1bebf25819c0da9ce9df0e09d994af37b53", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/687.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 687\n---\n" }, { "alpha_fraction": 0.2857142984867096, "alphanum_fraction": 0.4761904776096344, "avg_line_length": 6, "blob_id": 
"5010e72f7bf806b5e1362be0e8c0e32ad1436f46", "content_id": "066a87f6cbfab9281ee6ad008e91e9aeb8255484", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 21, "license_type": "no_license", "max_line_length": 12, "num_lines": 3, "path": "/content/trains/2153.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 2153\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "14e1b998865b601361a06e9cb97d864399b27391", "content_id": "03fd239de3de37384c08c32cfd13072aff7854c5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/664.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 664\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "49d6d78f8d117c6ac7772a415bc2cfc4be6c5319", "content_id": "3120ecbac9d51a2c09a9050520ac6cfb401a6969", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/365.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 365\n---\n" }, { "alpha_fraction": 0.2857142984867096, "alphanum_fraction": 0.4761904776096344, "avg_line_length": 6, "blob_id": "dafe981903d0b5f8e7c2b262b4cd9c318bd423ed", "content_id": "cc07ad5f70ef6f3494c41f29ed927609d937501a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 21, "license_type": "no_license", "max_line_length": 12, "num_lines": 3, "path": "/content/trains/1003.md", "repo_name": 
"jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 1003\n---\n" }, { "alpha_fraction": 0.2857142984867096, "alphanum_fraction": 0.4761904776096344, "avg_line_length": 6, "blob_id": "508a8fc9148915caec22952073949c9988503eb2", "content_id": "8a76f6ee7730b2a1a24aa3070d0b6f5071b8d7e0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 21, "license_type": "no_license", "max_line_length": 12, "num_lines": 3, "path": "/content/trains/2166.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 2166\n---\n" }, { "alpha_fraction": 0.6296296119689941, "alphanum_fraction": 0.7037037014961243, "avg_line_length": 15.199999809265137, "blob_id": "00f1b3d0cde9c6de06f0495a1aa5a0aeeaae2cec", "content_id": "23bd311c840c02b9132060705ca4187bf045ad89", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 81, "license_type": "no_license", "max_line_length": 27, "num_lines": 5, "path": "/content/routes/pere-marquette-extra.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nroute: Pere Marquette Extra\nslug: pere-marquette-extra\ntrains: [373,372]\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "b27477f70a0f201a664316cdc8691728b7c0b62f", "content_id": "a8633e5cc0ea7b12cf93eee6560fa9a1d75a7293", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/316.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 316\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": 
"a2b5857b8dd4f98f78caba2671d4b894c8f7db9b", "content_id": "33ebacf172bc30ed7941305417e33f5087c743dd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/681.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 681\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "074f678323531604841b824f218adeedd70bc914", "content_id": "a0b724c0004be6d33a72179cf8d2a92a54006177", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/549.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 549\n---\n" }, { "alpha_fraction": 0.5824176073074341, "alphanum_fraction": 0.7142857313156128, "avg_line_length": 17.200000762939453, "blob_id": "f4223271dbb8ec9eee999a1cb50971b6fe9e1796", "content_id": "ba601c2d41cac5d9a9f19b31a93037f722988cf3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 91, "license_type": "no_license", "max_line_length": 28, "num_lines": 5, "path": "/content/routes/missouri-river-runner.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nroute: Missouri River Runner\nslug: missouri-river-runner\ntrains: [314,313,311,316]\n---\n" }, { "alpha_fraction": 0.2857142984867096, "alphanum_fraction": 0.4761904776096344, "avg_line_length": 6, "blob_id": "d6190c93fbc938d244cb90f624a08b242d0c7b46", "content_id": "244de492af9c49c79193e2b50394c45a1fa00ea0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 21, "license_type": "no_license", 
"max_line_length": 12, "num_lines": 3, "path": "/content/trains/1198.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 1198\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "6cfabeed4f723273b6797cbbd4f57b1c4dda8842", "content_id": "ef109bd493d27cd1dc78e06cbe5e1c96076b3345", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/741.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 741\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "689eb4c43eddcb0ea3e1c99087c09e0c0a047fd8", "content_id": "ba778bf8a59d1de316f64b606f92ffa9dd461207", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/234.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 234\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "ab4a7ca098cc69943e40a0897d492e1ac15addee", "content_id": "fede8518f64bf326c1395ae878f44873caa47311", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/942.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 942\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": 
"5cb1389a92832588e6b2bf0cc5a2ca4e5c1e00ea", "content_id": "5f84676b1a8a9b368e708111c818006888c29b8a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/684.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 684\n---\n" }, { "alpha_fraction": 0.7259519100189209, "alphanum_fraction": 0.7565129995346069, "avg_line_length": 54.44444274902344, "blob_id": "7aae0122981423e21bae9db8407c47d72487d81e", "content_id": "203c237796800b79b3cfe109d40c1734058f5717", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1996, "license_type": "no_license", "max_line_length": 316, "num_lines": 36, "path": "/content/_index.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\n---\n\nThis site catalogues and analyzes historical Amtrak train data, including arrival time variance beyond Amtrak's public-facing on-time performance metric.\n\n## Find train arrival time insight\n- [By route](/routes)\n- [By train number](/trains)\n\n## Understanding the on-time performance metric\nAmtrak considers their trains on-time if they arrive within a threshold of time [^bts]. The threshold varies by the route's distance. Longer routes can arrive later and still be \"on-time\". \n\n| Trip length (miles) | Threshold (minutes) |\n|------|-----|\n| 0 to 250 | 10 or less |\n| 251 to 350 | 15 or less |\n| 351 to 450 | 20 or less |\n| 451 to 550 | 25 or less |\n| Over 551 | 30 or less |\n\n[^bts]: [Amtrak On-Time Performance Trends and Hours of Delay by Cause](https://www.bts.gov/content/amtrak-time-performance-trends-and-hours-delay-cause), Bureau of Transportation Statistics\n\nThis metric is a [percentile rank](https://en.wikipedia.org/wiki/Percentile_rank), whose score depends on the trip length. 
Percentile ranks are useful when the score is relevant to your needs. For example, when \"10 minutes or less\" is the difference between making or missing a connection, an important meeting, etc.\n\nIf you need stronger guarantees about arrival times, then a single percentile rank won't cut it.\n\n## Comparison with this site's visualizations\nThe natural extension of a single percentile rank, is multiple ranks. Each train's page includes the median and 90th percentile of arrival delay. Take Acela Express' Train 2203, which departs New York Penn Station at 8:00a every weekday:\n\n<blockquote>{{< train/timeliness 2203 >}}</blockquote>\n\nThe next extension of percentile ranks, is a probability distribution. Each train's page also includes a bar graph displaying the distribution of arrival times. Take Train 2110 for example. It's scheduled to arrive at 1:50p:\n\n<blockquote>{{< train/arrival-delay-distribution 2110 >}}</blockquote>\n\nCurious about your own route? <a href=\"/routes\">Dive in</a>.\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "9664624e3ee01120a94fa208aeed8ed14a77493b", "content_id": "770dddc4cc8284891782c5797e29bd446638ae6f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/133.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 133\n---\n" }, { "alpha_fraction": 0.2857142984867096, "alphanum_fraction": 0.4761904776096344, "avg_line_length": 6, "blob_id": "594e3695b95175264cb64f996858a3893f0434e3", "content_id": "8253a83eaedb0d97cf9ff75a82bb3cdc35179e68", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 21, "license_type": "no_license", "max_line_length": 12, "num_lines": 3, "path": 
"/content/trains/3561.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 3561\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "dde73005f4cee90acaad0c59f4e13cc9d3ffa4ea", "content_id": "f95001d152b012b0aeece4b89e06270140e5b2f7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/655.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 655\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "1a026441c6302d9d47a3f63dccfafea745cc62ec", "content_id": "ae1e359dbc7b2b81c0254d2e13f50e96d1be14eb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/170.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 170\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "f5b78e84db81a6b9c55a26e88d0b0b53d2d830ec", "content_id": "4becce61925d497933364b8f87e0ba2489d47851", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/562.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 562\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "fd84ab0ba4d6cb99bca42965e255747f49d7d81b", 
"content_id": "748998bd29cf2af09260b5d09fb3dcb57ac82f06", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/713.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 713\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "95d7ff4544c6308a77ef7395d485383a0d2c4ce5", "content_id": "c74f7d88025e652ec1b1bba081b779fc03cbd1ba", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/450.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 450\n---\n" }, { "alpha_fraction": 0.2857142984867096, "alphanum_fraction": 0.4761904776096344, "avg_line_length": 6, "blob_id": "ee33b13bfb6777f99a6cef423292812432ddb22e", "content_id": "dd64dc0960e40895cfd7d901b30291ec2af1bd73", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 21, "license_type": "no_license", "max_line_length": 12, "num_lines": 3, "path": "/content/trains/2209.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 2209\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "a15fdea0bbdadc4fd2acf7ff48b7a2a31f4e7a25", "content_id": "53c68d371648a9572d716b1e75f98a5039f179b1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/251.md", "repo_name": "jonathanhunsucker/amtrak-on-time", 
"src_encoding": "UTF-8", "text": "---\nnumber: 251\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "9904a44ffa67cbc579533e515b20b6019aba1cd8", "content_id": "02a5e78f36395638e819e0220fc858bcddd426bf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/417.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 417\n---\n" }, { "alpha_fraction": 0.2857142984867096, "alphanum_fraction": 0.4761904776096344, "avg_line_length": 6, "blob_id": "0611b1d08c8008f44821ecea54b6e974e34268b2", "content_id": "714304d6a9b1157182efceeaf73f831c6cb7594e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 21, "license_type": "no_license", "max_line_length": 12, "num_lines": 3, "path": "/content/trains/2237.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 2237\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "8e40680c3158cab8776df9b73710ee74c8405862", "content_id": "45acd1f15282ef83a2af1803dcc79b36f6077986", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/186.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 186\n---\n" }, { "alpha_fraction": 0.2857142984867096, "alphanum_fraction": 0.4761904776096344, "avg_line_length": 6, "blob_id": "77f72012b7da109698127888216a4f8e87e2b97e", "content_id": "555a5c9b16e13724f56b4772e35f14e30a4a6430", "detected_licenses": [], "is_generated": false, 
"is_vendor": false, "language": "Markdown", "length_bytes": 21, "license_type": "no_license", "max_line_length": 12, "num_lines": 3, "path": "/content/trains/3582.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 3582\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "6dfcda71766ea6d781b1337fefdfa56fd975ab8f", "content_id": "80d8d4dce302dc96a79b11c143cac8dae1e62276", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/671.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 671\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "316bbcf5af4b71217a674f94ebe51a5e99d6dba4", "content_id": "ba3d2622dec0c2f4f3f2a4375c12ecf6815c1185", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/642.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 642\n---\n" }, { "alpha_fraction": 0.2857142984867096, "alphanum_fraction": 0.4761904776096344, "avg_line_length": 6, "blob_id": "1d3ee2939fc327989666708e39565d02698a2b5f", "content_id": "c711ea490b18799b304afc194afe5c4f6af1194d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 21, "license_type": "no_license", "max_line_length": 12, "num_lines": 3, "path": "/content/trains/1159.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 1159\n---\n" }, { "alpha_fraction": 0.6231883764266968, 
"alphanum_fraction": 0.6811594367027283, "avg_line_length": 12.800000190734863, "blob_id": "44e17940d41eb75e8aec0f3667d7557033339c0f", "content_id": "2d42fc81c43d2fdcf84ab0457e96bfe7694851e0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 69, "license_type": "no_license", "max_line_length": 22, "num_lines": 5, "path": "/content/routes/capitol-limited.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nroute: Capitol Limited\nslug: capitol-limited\ntrains: [30,29]\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "66f0749d811f8d7fc2357b1ac81596a4a0547baf", "content_id": "4bf724a60bdb2907bc37d423cdfb49b58159250b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/412.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 412\n---\n" }, { "alpha_fraction": 0.2857142984867096, "alphanum_fraction": 0.4761904776096344, "avg_line_length": 6, "blob_id": "dd09a391aab1bd6b5519da2f52922b5e3c16839a", "content_id": "b0932e1ce2823335a078997b22199db394256385", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 21, "license_type": "no_license", "max_line_length": 12, "num_lines": 3, "path": "/content/trains/2154.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 2154\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "2666edea843a7e11d12fd913a7d9792ddea76f95", "content_id": "35e249990f03ba7d72f7ad829898d846079579ab", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": 
"Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/607.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 607\n---\n" }, { "alpha_fraction": 0.5820895433425903, "alphanum_fraction": 0.6716417670249939, "avg_line_length": 12.399999618530273, "blob_id": "193d629e52cf1bb1a632b43ab7a302782de8530c", "content_id": "dea66b9dd57990062b9aa453d8e19680ccf920ba", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 67, "license_type": "no_license", "max_line_length": 20, "num_lines": 5, "path": "/content/routes/hoosier-state.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nroute: Hoosier State\nslug: hoosier-state\ntrains: [851,850]\n---\n" }, { "alpha_fraction": 0.2857142984867096, "alphanum_fraction": 0.4761904776096344, "avg_line_length": 6, "blob_id": "8f12e3d2e6a981eea50eb40d69356968b29af5db", "content_id": "efa9cb62f1cf338149fc619aa5a9c6375a76d49c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 21, "license_type": "no_license", "max_line_length": 12, "num_lines": 3, "path": "/content/trains/1005.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 1005\n---\n" }, { "alpha_fraction": 0.31578946113586426, "alphanum_fraction": 0.42105263471603394, "avg_line_length": 5.333333492279053, "blob_id": "63282883db92bafe5b12e7223644e199b9679adc", "content_id": "3b685120c9c1ffcc11b6ee2202c9a9f31607c1f9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 19, "license_type": "no_license", "max_line_length": 10, "num_lines": 3, "path": "/content/trains/67.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 67\n---\n" }, { "alpha_fraction": 
0.2857142984867096, "alphanum_fraction": 0.4761904776096344, "avg_line_length": 6, "blob_id": "e32cbc9ecd194f4f434910dfc9baa10e0337e0c8", "content_id": "401a581dda62a72e7b2df708e07a4f8b0e66787f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 21, "license_type": "no_license", "max_line_length": 12, "num_lines": 3, "path": "/content/trains/2224.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 2224\n---\n" }, { "alpha_fraction": 0.2857142984867096, "alphanum_fraction": 0.4761904776096344, "avg_line_length": 6, "blob_id": "6756a9bd676fcfe1ca4c6644438efdc5d60cc469", "content_id": "9b744c50c8f80d54c355636c59e9e612762d70a4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 21, "license_type": "no_license", "max_line_length": 12, "num_lines": 3, "path": "/content/trains/1184.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 1184\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "ec1dca2c62fab1a16bce739213855e4d7d6ccd36", "content_id": "928509b26e4deb99be19d9a3ce678e3b81f3e7ad", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/774.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 774\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "bd9b5be13a7a4cc7b67b5f92d9db59adce7d68b9", "content_id": "4fae8828b4a1161148cb9ed9e2c6c66fa4326c5f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": 
"no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/405.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 405\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "e21355029051adf44f6798dd6c556518c8a21f09", "content_id": "5eb332069d58b06315b3ea342bb515f19e07d7ad", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/350.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 350\n---\n" }, { "alpha_fraction": 0.2857142984867096, "alphanum_fraction": 0.4761904776096344, "avg_line_length": 6, "blob_id": "48a1110c02bb30e815110e81a7c265315b12849c", "content_id": "6f819fd84191cdbb95dbd6c18e4ebbb1f32e068e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 21, "license_type": "no_license", "max_line_length": 12, "num_lines": 3, "path": "/content/trains/2232.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 2232\n---\n" }, { "alpha_fraction": 0.2857142984867096, "alphanum_fraction": 0.4761904776096344, "avg_line_length": 6, "blob_id": "9fdd84143f323dbe677bbdc969edba10767a739e", "content_id": "70f415757764cd5b822e044656b010b948f54063", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 21, "license_type": "no_license", "max_line_length": 12, "num_lines": 3, "path": "/content/trains/1135.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 1135\n---\n" }, { "alpha_fraction": 0.28925618529319763, "alphanum_fraction": 0.702479362487793, "avg_line_length": 23.200000762939453, "blob_id": 
"38f1bb0f0d756468ebd6651112d129c83f22f7bb", "content_id": "067bfa6a1ab284c13088bdc88465f8683f9e9989", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 121, "license_type": "no_license", "max_line_length": 75, "num_lines": 5, "path": "/content/routes/san-joaquin.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nroute: San Joaquin\nslug: san-joaquin\ntrains: [713,704,712,719,715,703,702,714,718,1701,701,717,1718,716,711,710]\n---\n" }, { "alpha_fraction": 0.2857142984867096, "alphanum_fraction": 0.4761904776096344, "avg_line_length": 6, "blob_id": "6c578247792dde5bec4ac6cd93ebe258619ea6ce", "content_id": "fa0f7e9f7974e822bb15ac4758d8ffeb8e6863fa", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 21, "license_type": "no_license", "max_line_length": 12, "num_lines": 3, "path": "/content/trains/1448.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 1448\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "f56b07d63c621892ab578c888453265b4f1d7b3d", "content_id": "75054a136268f565033468badf69cc0d02c17499", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/177.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 177\n---\n" }, { "alpha_fraction": 0.2857142984867096, "alphanum_fraction": 0.4761904776096344, "avg_line_length": 6, "blob_id": "55cbd4456858894e14279fcc5a34971ee593bff3", "content_id": "8b0ed0e7b860c1eefb684d29fd393ae0cd2e2995", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 21, "license_type": 
"no_license", "max_line_length": 12, "num_lines": 3, "path": "/content/trains/3563.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 3563\n---\n" }, { "alpha_fraction": 0.2857142984867096, "alphanum_fraction": 0.4761904776096344, "avg_line_length": 6, "blob_id": "5eba41140af18e62be23306f95baad438f21e154", "content_id": "485a563aea5f3237552bb50a1e8d195bdeb18a7d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 21, "license_type": "no_license", "max_line_length": 12, "num_lines": 3, "path": "/content/trains/2119.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 2119\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "53cfe595d30c52985255b8166dd971d28861aff2", "content_id": "42f836ec8775b6f4c24de77fb24993480ab5014e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/656.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 656\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "ba5b2ab913becc4c8e8456ca1130a5c74e4ac5b3", "content_id": "f2b7f6b0dddc93ff7bfdd64adcafc113afd86a0b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/609.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 609\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": 
"874844557342e750864fb65393bcd45efe40ac4d", "content_id": "e3b954cbd1198d98b3f48d7f354f6f8a946ec1ad", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/168.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 168\n---\n" }, { "alpha_fraction": 0.6619718074798584, "alphanum_fraction": 0.6901408433914185, "avg_line_length": 13.199999809265137, "blob_id": "30d00f82ec95c8bcd1e5877ba1ae54f6281ef4df", "content_id": "52f9c0f9523094cf811d0d1c2e4d2b2de38b9552", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 71, "license_type": "no_license", "max_line_length": 24, "num_lines": 5, "path": "/content/routes/california-zephyr.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nroute: California Zephyr\nslug: california-zephyr\ntrains: [6,5]\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "636a5255e9e7f77d348c2c95dbec186f84fb8b78", "content_id": "8528227325d8edec44b5932812c7750525c7bdc7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/259.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 259\n---\n" }, { "alpha_fraction": 0.2857142984867096, "alphanum_fraction": 0.4761904776096344, "avg_line_length": 6, "blob_id": "6eee8e652672b2ff1b1ca48f9dd87d158d026bea", "content_id": "d01256b68ed2069eb4adaab454e8e63521716102", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 21, "license_type": "no_license", "max_line_length": 12, 
"num_lines": 3, "path": "/content/trains/1136.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 1136\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "f1c6e2229194af23906d8ab967efbe018040d6e0", "content_id": "13dc1c93fdf223c13acf89210a4925b62b5020f2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/520.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 520\n---\n" }, { "alpha_fraction": 0.2857142984867096, "alphanum_fraction": 0.4761904776096344, "avg_line_length": 6, "blob_id": "b81cde0a5b17608abddc29cffe2b67f99e67398a", "content_id": "4027f8541d554ca6c5263970a1f61ee562063bb7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 21, "license_type": "no_license", "max_line_length": 12, "num_lines": 3, "path": "/content/trains/1982.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 1982\n---\n" }, { "alpha_fraction": 0.2857142984867096, "alphanum_fraction": 0.4761904776096344, "avg_line_length": 6, "blob_id": "a012985bb85fc59d3adab4f481612d63533abf96", "content_id": "e74560f574fe3800d0a5091892b914d6fa302ac5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 21, "license_type": "no_license", "max_line_length": 12, "num_lines": 3, "path": "/content/trains/2168.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 2168\n---\n" }, { "alpha_fraction": 0.2857142984867096, "alphanum_fraction": 0.4761904776096344, "avg_line_length": 6, "blob_id": "dcb3eed6b06e895df5758fc8eabb600412939d14", "content_id": 
"70b8fdf6b1f5052824147bc910bb0b33514bd611", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 21, "license_type": "no_license", "max_line_length": 12, "num_lines": 3, "path": "/content/trains/2160.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 2160\n---\n" }, { "alpha_fraction": 0.2857142984867096, "alphanum_fraction": 0.4761904776096344, "avg_line_length": 6, "blob_id": "2f94d2cc00937c8cff68bc8b22bff5b795164acc", "content_id": "4d4867a0b09873bdcd9f9700cb24ee5d0b6aeffa", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 21, "license_type": "no_license", "max_line_length": 12, "num_lines": 3, "path": "/content/trains/2238.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 2238\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "c0de61c8043ac06c92ca71d0f136efe7b8727279", "content_id": "a4de955a558ce27e80315c7c038852a7b5c4eaee", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/517.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 517\n---\n" }, { "alpha_fraction": 0.2857142984867096, "alphanum_fraction": 0.4761904776096344, "avg_line_length": 6, "blob_id": "f1f50e1fc0b6c12071849f3a98febbb522c373b8", "content_id": "3abaad38f16873586f25f5f217007f199ab32085", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 21, "license_type": "no_license", "max_line_length": 12, "num_lines": 3, "path": "/content/trains/3585.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": 
"---\nnumber: 3585\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "3b59215a7de86c0566f21d909a978d3111b38f6a", "content_id": "67632616200dad9ceea85d4d5f2059e10464820d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/162.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 162\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "4ede303d0b7ae8be480ffb317c5484438a3534df", "content_id": "0c22281df3078daeead0d2440391faa96907b0bb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/391.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 391\n---\n" }, { "alpha_fraction": 0.31578946113586426, "alphanum_fraction": 0.42105263471603394, "avg_line_length": 5.333333492279053, "blob_id": "a8c654be0c2af3513f5b89898a584babdb2a81c1", "content_id": "c0c9cfde02d70f0affa79b866b17e03e20fd9e21", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 19, "license_type": "no_license", "max_line_length": 10, "num_lines": 3, "path": "/content/trains/97.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 97\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "1f4b1470d84c37c0ab22df66114eeae2fced1238", "content_id": "da28d912fb948b36b86e35f1a028fd6076192b76", "detected_licenses": [], "is_generated": false, 
"is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/701.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 701\n---\n" }, { "alpha_fraction": 0.2857142984867096, "alphanum_fraction": 0.4761904776096344, "avg_line_length": 6, "blob_id": "81597cb6875156a6066b815215960d77c8803fc7", "content_id": "7d3ba0987cf1ad4a31042d9d5c256ada4fa1f453", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 21, "license_type": "no_license", "max_line_length": 12, "num_lines": 3, "path": "/content/trains/2250.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 2250\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "572dcd4b52e4c7331877dd7138d3ae8a75f57b81", "content_id": "6441d1627fd273b05d3d5d1c0400434047b22c8a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/295.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 295\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "4ce4f156b840df1e31784deeb95d18a820fc094a", "content_id": "015207faab7deea08a484f5bf59d02029464dba7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/235.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 235\n---\n" }, { "alpha_fraction": 0.2857142984867096, 
"alphanum_fraction": 0.4761904776096344, "avg_line_length": 6, "blob_id": "87ec4c1b58220f337bd3db4b4644aa5800b3b7d7", "content_id": "cb8ded9610c9dfc2332a75de83bc89553567e830", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 21, "license_type": "no_license", "max_line_length": 12, "num_lines": 3, "path": "/content/trains/2290.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 2290\n---\n" }, { "alpha_fraction": 0.41558441519737244, "alphanum_fraction": 0.649350643157959, "avg_line_length": 14.399999618530273, "blob_id": "7b4285b574210cd83868ff27dc77a688bbbab2a5", "content_id": "c0e9d5d96fbfbda0be8ee8d3dfecbc82139cdee4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 77, "license_type": "no_license", "max_line_length": 33, "num_lines": 5, "path": "/content/routes/pere-marquette.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nroute: Pere Marquette\nslug: pere-marquette\ntrains: [375,374,378,371,370,377]\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "ee5205c81f5fd889517e4d3b0775b831b7d56a15", "content_id": "8ceaae392546b695bd49bfcb984129641b16b205", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/233.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 233\n---\n" }, { "alpha_fraction": 0.31578946113586426, "alphanum_fraction": 0.42105263471603394, "avg_line_length": 5.333333492279053, "blob_id": "fb4fe5f484124559ddd987b7eb3b4cd7c3458d28", "content_id": "a92d0e8b1399f5e31165fddff3c853859d2f0147", "detected_licenses": [], "is_generated": false, "is_vendor": 
false, "language": "Markdown", "length_bytes": 19, "license_type": "no_license", "max_line_length": 10, "num_lines": 3, "path": "/content/trains/48.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 48\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "2e56b503a360ef940eb91c8bd67e3c811fb5f2d0", "content_id": "f0bc12d11d73d135d6f0f5d70aa20c06bc8e16e9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/336.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 336\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "da9b222859c3e2f062a9eb3256e084135e904fb4", "content_id": "1e92be8330b458048e8560e37447ab554282b2d6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/649.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 649\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "e99e95bb12b0902b4484018a36fd92ac1ca91036", "content_id": "d945ff3dcc3a81f433ca88fd3e3653d012952d29", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/683.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 683\n---\n" }, { "alpha_fraction": 0.30000001192092896, 
"alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "133606d44d64935df60f405d512151ceab775778", "content_id": "5b67280d917462e780eba49fa1852dbe899f7467", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/342.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 342\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "f8cd18f0a02842d2757a097bbbd6f8910953a0dd", "content_id": "7bd713023cd75bc7877e9682978a0511065d5a26", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/154.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 154\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "4e8a8b06e898e4adbfdf4e6c543a9ba2df8927d0", "content_id": "abbcf6f527b105486bea626321dcdef56b3d94fb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/124.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 124\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "11391eda6f3834855053cca274fde60d66471a7f", "content_id": "7179ff2fe724fe0eeb00316cabd4a560978dde0e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, 
"license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/580.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 580\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "ded2365535742605ab5fe3dc2eeebc7380d6787f", "content_id": "e74dc35fdc08a79a81dbbce0ee084a69d50a7c3a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/280.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 280\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "6898570d819145e51c461fd178556b5a893b2023", "content_id": "d8e6fa1dbda0caaf05a5131b6fdc4185843d25d9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/180.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 180\n---\n" }, { "alpha_fraction": 0.2857142984867096, "alphanum_fraction": 0.4761904776096344, "avg_line_length": 6, "blob_id": "dfad0e8a70d849112074855c8bc32fd0f23c7a9c", "content_id": "fa342b25bff5ed2163f4022dd7babd12ea70a6ac", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 21, "license_type": "no_license", "max_line_length": 12, "num_lines": 3, "path": "/content/trains/2110.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 2110\n---\n" }, { "alpha_fraction": 0.2857142984867096, "alphanum_fraction": 0.4761904776096344, "avg_line_length": 6, "blob_id": 
"bd6bf00d725f250cf102eaf3ae50c4a27d945058", "content_id": "2b191d162cff3da7dec39389459d8618cdcdaf38", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 21, "license_type": "no_license", "max_line_length": 12, "num_lines": 3, "path": "/content/trains/1054.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 1054\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "9d47badd5650bac6e36097ed2cf8228691728b81", "content_id": "cc7290c0a74c0da4f259f1910309c361427cbfe6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/352.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 352\n---\n" }, { "alpha_fraction": 0.2857142984867096, "alphanum_fraction": 0.4761904776096344, "avg_line_length": 6, "blob_id": "a9d9e8993d797aa933e4a1d98fbea28a0ae8e8f6", "content_id": "936a665575c71e59caf423ecd5c4def01064d1e6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 21, "license_type": "no_license", "max_line_length": 12, "num_lines": 3, "path": "/content/trains/2256.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 2256\n---\n" }, { "alpha_fraction": 0.5303030014038086, "alphanum_fraction": 0.6515151262283325, "avg_line_length": 12.199999809265137, "blob_id": "b337889f769e320bfc9cb32942b74584fe1c7ad5", "content_id": "9f43a4918af5b6fd1f327dad1d3fe5808417809f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 66, "license_type": "no_license", "max_line_length": 20, "num_lines": 5, "path": "/content/routes/silver-star.md", 
"repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nroute: Silver Star\nslug: silver-star\ntrains: [92,91,1092]\n---\n" }, { "alpha_fraction": 0.2857142984867096, "alphanum_fraction": 0.4761904776096344, "avg_line_length": 6, "blob_id": "7a6ea546044da030eb2cf1a3129985019541499f", "content_id": "d6d62656499966bfc09a5e08c6f3beae9cb93e91", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 21, "license_type": "no_license", "max_line_length": 12, "num_lines": 3, "path": "/content/trains/2126.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 2126\n---\n" }, { "alpha_fraction": 0.2857142984867096, "alphanum_fraction": 0.4761904776096344, "avg_line_length": 6, "blob_id": "5658fe3c09c0694fe8dfcb2ffd27cc90e0d84910", "content_id": "038ec43b60f14146ce556b37d1de9b37f73fdf4a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 21, "license_type": "no_license", "max_line_length": 12, "num_lines": 3, "path": "/content/trains/1186.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 1186\n---\n" }, { "alpha_fraction": 0.2857142984867096, "alphanum_fraction": 0.4761904776096344, "avg_line_length": 6, "blob_id": "06b6a220845e9ae48e7bf1645bc776887337f810", "content_id": "64add170e5d82155b351fdb5e9f9b1ee82d23c41", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 21, "license_type": "no_license", "max_line_length": 12, "num_lines": 3, "path": "/content/trains/3796.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 3796\n---\n" }, { "alpha_fraction": 0.27272728085517883, "alphanum_fraction": 0.7379679083824158, "avg_line_length": 36.400001525878906, "blob_id": "85322e7b1f4d85b41694878a9ec063f63b45d488", "content_id": 
"3c145d0ec085bf1bcc443636565d016862589778", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 187, "license_type": "no_license", "max_line_length": 125, "num_lines": 5, "path": "/content/routes/springfield-shuttle.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nroute: Springfield Shuttle\nslug: springfield-shuttle\ntrains: [416,494,461,476,460,495,417,401,488,471,451,493,450,407,470,473,432,465,490,412,405,409,464,479,475,463,474,497,478]\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "d76c28b93344c60d3ea54a0254a233225ab20bca", "content_id": "c89a766bf598213377df9410dfe83c52a60fb33c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/157.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 157\n---\n" }, { "alpha_fraction": 0.2857142984867096, "alphanum_fraction": 0.4761904776096344, "avg_line_length": 6, "blob_id": "258f008e3216e9eb4694351129cb47d65ce87c12", "content_id": "5a0516a105b401e59167cb98a0a897bd2dc01f20", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 21, "license_type": "no_license", "max_line_length": 12, "num_lines": 3, "path": "/content/trains/2265.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 2265\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "45ee85eb427127c32a2070485beda9691bffb29d", "content_id": "5ec3456be6e75b2a2f7f0f47a2c3dc6c24b7ded6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", 
"length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/519.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 519\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "8b05af9fd9c1a0af4325b1a2b5f61f77b2c5235b", "content_id": "939a9e62dfed5f5b0407302e19b3854c143e068d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/695.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 695\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "ceeed70ea06a884dedcfc15ab20b5de8ca33ff53", "content_id": "5140a7b2705369509f5bc7f22c51b0de31edd1f6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/475.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 475\n---\n" }, { "alpha_fraction": 0.523809552192688, "alphanum_fraction": 0.6666666865348816, "avg_line_length": 11.600000381469727, "blob_id": "464649c7589f3ec3d6b69d98f50a2d63fb7c9e2f", "content_id": "bfffdeb7f69ec80de393bc0458f501932ad5c53c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 63, "license_type": "no_license", "max_line_length": 21, "num_lines": 5, "path": "/content/routes/wolverine.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nroute: Wolverine\nslug: wolverine\ntrains: [352,351,350]\n---\n" }, { "alpha_fraction": 
0.31578946113586426, "alphanum_fraction": 0.42105263471603394, "avg_line_length": 5.333333492279053, "blob_id": "5f5ba55c477a778ff97bc7b94a6e750a52cfab6b", "content_id": "f97506dc6f3ca9743b64c2faedf89bf981f0f252", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 19, "license_type": "no_license", "max_line_length": 10, "num_lines": 3, "path": "/content/trains/22.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 22\n---\n" }, { "alpha_fraction": 0.3333333432674408, "alphanum_fraction": 0.3888888955116272, "avg_line_length": 5, "blob_id": "9e47ed8b08e6839bede00355d4fe15b272ed91a3", "content_id": "f00ed0e0ae673ac7c2864cef1e40d570afb8b5b0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 18, "license_type": "no_license", "max_line_length": 9, "num_lines": 3, "path": "/content/trains/7.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 7\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "f88365c09e038fe2219d5598cd62e8d7e22b7bf5", "content_id": "e9e56fd7fdec374ff350266d38024ed226d860af", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/694.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 694\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "e791f02f390ffa387c468d0d2266b11b63e1b3cc", "content_id": "85d2aaf71d9b5e8b4510f0aa88fc8f1400cb32ca", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": 
"no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/601.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 601\n---\n" }, { "alpha_fraction": 0.2857142984867096, "alphanum_fraction": 0.4761904776096344, "avg_line_length": 6, "blob_id": "8888ed17f36d0a3f993b44250e1957182dccbd0d", "content_id": "295dd259ee96805d24f44b1ed1c800dd9d03df09", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 21, "license_type": "no_license", "max_line_length": 12, "num_lines": 3, "path": "/content/trains/2158.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 2158\n---\n" }, { "alpha_fraction": 0.6056337952613831, "alphanum_fraction": 0.6901408433914185, "avg_line_length": 13.199999809265137, "blob_id": "9ab909776b658b08c0b57e0b34ac905697a7b9ab", "content_id": "3f0cba6be6d9ff5e12f17369fc0096f8ac6e55f6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 71, "license_type": "no_license", "max_line_length": 22, "num_lines": 5, "path": "/content/routes/wolverine-extra.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nroute: Wolverine Extra\nslug: wolverine-extra\ntrains: [359,356]\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "2602de331a7df21e51dbe4eb5a9df35df89a7e67", "content_id": "47f87e408849e13950202f7e1ec27ec5d0fdffa5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/792.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 792\n---\n" }, { "alpha_fraction": 0.2857142984867096, "alphanum_fraction": 
0.4761904776096344, "avg_line_length": 6, "blob_id": "90038d0068e86d9d4830f4ee6e902a4295dbce3b", "content_id": "0f0bb2fabbf64058687652225fd70b91c3b7dd27", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 21, "license_type": "no_license", "max_line_length": 12, "num_lines": 3, "path": "/content/trains/2173.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 2173\n---\n" }, { "alpha_fraction": 0.5966386795043945, "alphanum_fraction": 0.7478991746902466, "avg_line_length": 22.799999237060547, "blob_id": "d50ca66451b22d09bea97954ed4940ada307d20e", "content_id": "8c181bc049e6beb1d5d0fc41b388d065a3290e92", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 119, "license_type": "no_license", "max_line_length": 39, "num_lines": 5, "path": "/content/routes/northeast-regional-springfield.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nroute: Northeast Regional - Springfield\nslug: northeast-regional-springfield\ntrains: [143,148,146,140,136,141]\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "060a7dd3613073b131961c2e314c9bf97956e787", "content_id": "b44138c89be7cec30be60bca271983812cb1da78", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/416.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 416\n---\n" }, { "alpha_fraction": 0.2857142984867096, "alphanum_fraction": 0.4761904776096344, "avg_line_length": 6, "blob_id": "e7404f4315d30d3327283b82e08c57ea81f8a4c7", "content_id": "eeefc3a096ae38ee0c455d3501390aaaa97d3b35", "detected_licenses": [], "is_generated": false, 
"is_vendor": false, "language": "Markdown", "length_bytes": 21, "license_type": "no_license", "max_line_length": 12, "num_lines": 3, "path": "/content/trains/2124.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 2124\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "3fba50685be564c5cfeff053aa65fea8222b02b3", "content_id": "ef30e8b86d178401080386aeafedf5f11a89bc7b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/131.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 131\n---\n" }, { "alpha_fraction": 0.2857142984867096, "alphanum_fraction": 0.4761904776096344, "avg_line_length": 6, "blob_id": "a1a58679b1083ab3a5428693c8197458bca4acb3", "content_id": "c810a9a8ba5ad348040fe1b8f045b9836e3a13b6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 21, "license_type": "no_license", "max_line_length": 12, "num_lines": 3, "path": "/content/trains/2243.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 2243\n---\n" }, { "alpha_fraction": 0.31578946113586426, "alphanum_fraction": 0.42105263471603394, "avg_line_length": 5.333333492279053, "blob_id": "a89b8d7a6302d6b6e3a363c6625ad89f14d99495", "content_id": "e00fef0d683d9adb29b91d9ea459615ce9dcc4f7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 19, "license_type": "no_license", "max_line_length": 10, "num_lines": 3, "path": "/content/trains/88.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 88\n---\n" }, { "alpha_fraction": 0.30000001192092896, 
"alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "364a2848ac99acba9c0c8a2b873d6cea99fd63e3", "content_id": "4b8218e74044e49286b3c080d268f8ed40d04167", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/329.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 329\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "fca5d5f3d160ad6afcd2e1d7b4ab384b2e07bf7a", "content_id": "3da4e6c26bacf70872122878d313a165439f9bcf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/662.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 662\n---\n" }, { "alpha_fraction": 0.2857142984867096, "alphanum_fraction": 0.4761904776096344, "avg_line_length": 6, "blob_id": "4b5a41968a848a172c70f4840d9ad02051a027f8", "content_id": "9d2259e263ef492c8db7fcece16f37bf11cfcb79", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 21, "license_type": "no_license", "max_line_length": 12, "num_lines": 3, "path": "/content/trains/2190.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 2190\n---\n" }, { "alpha_fraction": 0.38235294818878174, "alphanum_fraction": 0.7176470756530762, "avg_line_length": 33, "blob_id": "41048a8593963b57ad855012dec678f9e63cbe52", "content_id": "0dfda121c91240da354633d3979e1f9e215fe90b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 170, "license_type": "no_license", 
"max_line_length": 90, "num_lines": 5, "path": "/content/routes/northeast-regional-virginia.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nroute: Northeast Regional - Virginia\nslug: northeast-regional-virginia\ntrains: [174,94,82,95,99,125,84,164,71,88,67,66,85,93,86,65,171,87,195,156,96,176,157,194]\n---\n" }, { "alpha_fraction": 0.2857142984867096, "alphanum_fraction": 0.4761904776096344, "avg_line_length": 6, "blob_id": "d611bf41a52b8831625249e1771ee61907d3e566", "content_id": "1fe02a285fb281995c4e637a106c0229328bd725", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 21, "license_type": "no_license", "max_line_length": 12, "num_lines": 3, "path": "/content/trains/2233.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 2233\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "b94a44890611858ccca48c82abf9b532cecc7207", "content_id": "8008887b11b0c51cc02682174813eb7128c78078", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/535.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 535\n---\n" }, { "alpha_fraction": 0.2857142984867096, "alphanum_fraction": 0.4761904776096344, "avg_line_length": 6, "blob_id": "d4f1ca8a44f167a51dab076b06998b1f4d2130f9", "content_id": "343d3e5ccf6fd04e02f9ae06812c97490896536f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 21, "license_type": "no_license", "max_line_length": 12, "num_lines": 3, "path": "/content/trains/1140.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 
1140\n---\n" }, { "alpha_fraction": 0.2857142984867096, "alphanum_fraction": 0.4761904776096344, "avg_line_length": 6, "blob_id": "d0a10205fb22e15cf03ebb47bdf8112ad3a453f8", "content_id": "bacc81cafebb7f7825415531dc885016a173a9f2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 21, "license_type": "no_license", "max_line_length": 12, "num_lines": 3, "path": "/content/trains/2244.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 2244\n---\n" }, { "alpha_fraction": 0.31578946113586426, "alphanum_fraction": 0.42105263471603394, "avg_line_length": 5.333333492279053, "blob_id": "c0bfa113974c6efade8c58d05ccb3a2645549d6b", "content_id": "c333eb4a597c54f268292606c12b9fab2d5d2f7d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 19, "license_type": "no_license", "max_line_length": 10, "num_lines": 3, "path": "/content/trains/64.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 64\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "329bbd1ed777c5e6763d12dff99453c708f92cab", "content_id": "f27a3054777af81b00ff92953e69021d2b3f94f9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/230.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 230\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "66c7d191e26f74d2d1e9d2e6bab346229051c5f1", "content_id": "81d039b823fab5cb5626e3267f2b06c7ba340986", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": 
"Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/195.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 195\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "2b26fd406f38e3c6265834094f1adb736cbe5bfd", "content_id": "ed61766560be3836f16081a3fcbb1714ab3d0cfb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/384.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 384\n---\n" }, { "alpha_fraction": 0.2857142984867096, "alphanum_fraction": 0.4761904776096344, "avg_line_length": 6, "blob_id": "9599c31317e1865cc612ef93c1566fea3bf7bee4", "content_id": "0e6f2b2d00f9e444ebda3190ed7336030752cfff", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 21, "license_type": "no_license", "max_line_length": 12, "num_lines": 3, "path": "/content/trains/2259.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 2259\n---\n" }, { "alpha_fraction": 0.2857142984867096, "alphanum_fraction": 0.4761904776096344, "avg_line_length": 6, "blob_id": "0f0298c6231265a99df39d6fd9332b39cc828b46", "content_id": "659127205e4dece749d07ca444df78dcb955b9ed", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 21, "license_type": "no_license", "max_line_length": 12, "num_lines": 3, "path": "/content/trains/1092.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 1092\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 
5.666666507720947, "blob_id": "93989ffe650eaf98bed74d2f9b9383f5f5e28b22", "content_id": "2a1c693c52cd227bf2714c72de521581c8e6387e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/565.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 565\n---\n" }, { "alpha_fraction": 0.2857142984867096, "alphanum_fraction": 0.4761904776096344, "avg_line_length": 6, "blob_id": "452b5520d42e4785da2009e23d99adceaf88aa5d", "content_id": "a0e9f887d4ce30433361c235d88004fe4fcd4d7b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 21, "license_type": "no_license", "max_line_length": 12, "num_lines": 3, "path": "/content/trains/3774.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 3774\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "0dd796c3309028bcde9d9a211d490c3376d75b57", "content_id": "8d2c722bb9bec7a7806a75a6182e45605e16c0f8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/761.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 761\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "f52aa3ef9c1f266f24e7113a0eca1cec4b381d26", "content_id": "59687525549020241b3c6654297e441b310a432a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": 
"/content/trains/135.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 135\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "e507e4584ed6efb90e93978c5e69913b912e87bd", "content_id": "6d7262373f5c01210f1902ab59529477eb8cc57f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/464.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 464\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "6f74fb799fdcc06aa69baf5664f7d63b318eff41", "content_id": "9df65c47bcd25976e4c8e586cfb5c66c0977be0f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/698.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 698\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "9acc6f306291a5f584848e8f7728632ccb16d1ef", "content_id": "3238dc14b7379d3d9e9a5207743f2e2d735db9df", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/393.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 393\n---\n" }, { "alpha_fraction": 0.2857142984867096, "alphanum_fraction": 0.4761904776096344, "avg_line_length": 6, "blob_id": "ac0ba98dfd7836b81615f93df3f301dc4b7ee901", "content_id": 
"9e36515eb67a5a3fdd8ec01de8426852f8693a30", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 21, "license_type": "no_license", "max_line_length": 12, "num_lines": 3, "path": "/content/trains/1572.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 1572\n---\n" }, { "alpha_fraction": 0.2857142984867096, "alphanum_fraction": 0.4761904776096344, "avg_line_length": 6, "blob_id": "8bb030c4fc7b20f6011621e43de1f9172f52694e", "content_id": "61a6f9dfc0aa1c0de4321a4003886e8b19f0a50a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 21, "license_type": "no_license", "max_line_length": 12, "num_lines": 3, "path": "/content/trains/1173.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 1173\n---\n" }, { "alpha_fraction": 0.2857142984867096, "alphanum_fraction": 0.4761904776096344, "avg_line_length": 6, "blob_id": "96cd9358057b98b3a9ff3df4f028f4bd489d4190", "content_id": "8378d6d2e90aba679748a14f9b1c86324a575fc2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 21, "license_type": "no_license", "max_line_length": 12, "num_lines": 3, "path": "/content/trains/2207.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 2207\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "369c945a00a4bb9c2e366b25cdc1ba6792d39167", "content_id": "0a3fa593d297ec7780082fb61f425fd118b05cbe", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/622.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": 
"---\nnumber: 622\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "bfedd31622725931a8b170965757fed028018a2c", "content_id": "38c4bd5e53e24468bc94876bc16f78e9f1c0608c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/337.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 337\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "ef71d9ba1e9f39e535b58c154ad4d913e3a6c3bb", "content_id": "055b5d5c9506cbbe5d18b3ffdb30e2b7e59368f0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/356.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 356\n---\n" }, { "alpha_fraction": 0.2857142984867096, "alphanum_fraction": 0.4761904776096344, "avg_line_length": 6, "blob_id": "44a4dcdf5df5a218d60dfbbaacfc58ac8fc2929d", "content_id": "3ab26e027907ee0a83cbf7e48b137feee56353c5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 21, "license_type": "no_license", "max_line_length": 12, "num_lines": 3, "path": "/content/trains/1718.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 1718\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "5a64e4d6dd40a14bcf79d6c558525138430c5e5b", "content_id": "fab9615cecddcc214aacdd969c0fd94ead9a81a3", "detected_licenses": [], "is_generated": false, "is_vendor": false, 
"language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/660.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 660\n---\n" }, { "alpha_fraction": 0.5593220591545105, "alphanum_fraction": 0.6271186470985413, "avg_line_length": 10.800000190734863, "blob_id": "517dadd85aefdac84524778223a84a8e5f2e8bb1", "content_id": "b0afd2f74df4fe81269397d1f21be398bbb04dd4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 59, "license_type": "no_license", "max_line_length": 17, "num_lines": 5, "path": "/content/routes/maple-leaf.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nroute: Maple Leaf\nslug: maple-leaf\ntrains: [64,63]\n---\n" }, { "alpha_fraction": 0.3333333432674408, "alphanum_fraction": 0.3888888955116272, "avg_line_length": 5, "blob_id": "15c14d7a36e5ac6f23d1dc842c4a5f3f66327a38", "content_id": "2fa0eeda1556ae50b6cfc6a11680d15c93399fdd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 18, "license_type": "no_license", "max_line_length": 9, "num_lines": 3, "path": "/content/trains/6.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 6\n---\n" }, { "alpha_fraction": 0.2857142984867096, "alphanum_fraction": 0.4761904776096344, "avg_line_length": 6, "blob_id": "2fa94756a5305ec1971f930e84c1f0c6839929a3", "content_id": "7f2a86a99c4dd176351dfd568f63a1e80e9267fe", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 21, "license_type": "no_license", "max_line_length": 12, "num_lines": 3, "path": "/content/trains/2164.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 2164\n---\n" }, { "alpha_fraction": 0.30000001192092896, 
"alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "71f977ad446482112ff7e0732b329d6e09f17cf7", "content_id": "0d4acc5586e4ccff852f041fe36e7a5a71d6cd08", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/176.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 176\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "93d79fb216050522c8078240a3cc4806240efd37", "content_id": "08242cd3d1cb3fd50e6db204b1cc39909e796f48", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/729.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 729\n---\n" }, { "alpha_fraction": 0.7083333134651184, "alphanum_fraction": 0.7083333134651184, "avg_line_length": 15, "blob_id": "6d81516c2eda98ae5f062d3927486851021a761d", "content_id": "5f9b697767f43adf08c5671c5df7c95b16faeb85", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 96, "license_type": "no_license", "max_line_length": 29, "num_lines": 6, "path": "/script/deploy/build", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "#!/bin/bash -e\n\n# build the site into public/\nHUGO_ENV=production hugo\n\ncp -r .circleci public/\n" }, { "alpha_fraction": 0.5961538553237915, "alphanum_fraction": 0.6346153616905212, "avg_line_length": 9.399999618530273, "blob_id": "eb36a338cbbb5c4ac6429dda1c48ab8313c046fe", "content_id": "5fee3d51afa62932a3d7d0b300d5fd8f13ec4f6d", "detected_licenses": [], "is_generated": false, "is_vendor": false, 
"language": "Markdown", "length_bytes": 52, "license_type": "no_license", "max_line_length": 15, "num_lines": 5, "path": "/content/routes/cardinal.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nroute: Cardinal\nslug: cardinal\ntrains: [51]\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "dd4c4c77d0a4cd39c55a2cf4e0c3121aaea63e21", "content_id": "6b2de75440a4955175a2e3293f4564a70a4f8ea6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/178.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 178\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "c8ea8b5bb3770b62bc3e17711925eeedd01a38c2", "content_id": "6c8e92046e82abbac73aaf0cc131da0a2eb74ddc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/751.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 751\n---\n" }, { "alpha_fraction": 0.31578946113586426, "alphanum_fraction": 0.42105263471603394, "avg_line_length": 5.333333492279053, "blob_id": "0b213459b397810821a4aa37f81ac19ac5312135", "content_id": "a7c2c65e9e62d45f44fb949f0c35c7ac61b0f627", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 19, "license_type": "no_license", "max_line_length": 10, "num_lines": 3, "path": "/content/trains/42.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 42\n---\n" }, { "alpha_fraction": 
0.31578946113586426, "alphanum_fraction": 0.42105263471603394, "avg_line_length": 5.333333492279053, "blob_id": "24fabe3cebb4af8cd03c296f3f5ccd2912b78271", "content_id": "8b3aae47f2be902bf12672a4b1dd5f1fdd7ccf02", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 19, "license_type": "no_license", "max_line_length": 10, "num_lines": 3, "path": "/content/trains/75.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 75\n---\n" }, { "alpha_fraction": 0.31578946113586426, "alphanum_fraction": 0.42105263471603394, "avg_line_length": 5.333333492279053, "blob_id": "c64a336213126a6d7e2a9a42a5c25df01a902cec", "content_id": "03d9e9f2bac59bdd67fd6bbcb7436365dd8b871f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 19, "license_type": "no_license", "max_line_length": 10, "num_lines": 3, "path": "/content/trains/86.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 86\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "bb2d68a849ad959f29557c7d0787d438c5bade97", "content_id": "a161b0df06d5cf12d2c78a60dd896147f7818e9f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/595.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 595\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "27fcdb284c27386c97ad3b2afdbf147533f0c448", "content_id": "067d1a8e76d701cadc76f0f2646114d22e59b484", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 
20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/672.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 672\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "b8473ee61a9459db560ca6a2d426345bef5ccd2f", "content_id": "6bac96993332788482475769833942e81b0df4fb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/381.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 381\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "2f3c28c7722bd3c6344c98d531ad9bb85f739b7e", "content_id": "1e8a282ff702514e2c77ec4ab607d5ce05bb2911", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/146.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 146\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "d23f06289284db2c37f30b484a3d1e24f8aff7e6", "content_id": "3dfc1181ebc3e8971e040d0a6898d7a9bef7db94", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/716.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 716\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 
5.666666507720947, "blob_id": "74d736ca7d5106103a68807c0acde2b8117a3bba", "content_id": "c0e63466414a963e764216e480ea321787985aee", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/374.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 374\n---\n" }, { "alpha_fraction": 0.2857142984867096, "alphanum_fraction": 0.4761904776096344, "avg_line_length": 6, "blob_id": "8f8b8d44fdafbd8452506c852e8b8f8633992c25", "content_id": "0092496ded989f1692076011c77b75c6edf16146", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 21, "license_type": "no_license", "max_line_length": 12, "num_lines": 3, "path": "/content/trains/2240.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 2240\n---\n" }, { "alpha_fraction": 0.2857142984867096, "alphanum_fraction": 0.4761904776096344, "avg_line_length": 6, "blob_id": "303a911e2b9b1e11da097b6e21099f577f78c02e", "content_id": "06fb5db6ed916f04f53c978d0ca9f0dd51ab911e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 21, "license_type": "no_license", "max_line_length": 12, "num_lines": 3, "path": "/content/trains/2201.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 2201\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "787e6c4b5fe1199fc23f2ab705470dfa43d79f12", "content_id": "0541f9e85a93e4d87e24fc03473e70fb99335a26", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/132.md", 
"repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 132\n---\n" }, { "alpha_fraction": 0.2857142984867096, "alphanum_fraction": 0.4761904776096344, "avg_line_length": 6, "blob_id": "3fdfaef401d2a2e977c169a8aea209675876b063", "content_id": "dfa87acee4a379f0ec2c138f4062c628825b9be3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 21, "license_type": "no_license", "max_line_length": 12, "num_lines": 3, "path": "/content/trains/2275.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 2275\n---\n" }, { "alpha_fraction": 0.2857142984867096, "alphanum_fraction": 0.4761904776096344, "avg_line_length": 6, "blob_id": "5cec352662c0b49d53967d6100596ec69d25209f", "content_id": "425fc6cead44cfed81b307c45af6e50e3a7f07ca", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 21, "license_type": "no_license", "max_line_length": 12, "num_lines": 3, "path": "/content/trains/1175.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 1175\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "e162faf99794612e90cef794ed7ed1e619c8f069", "content_id": "c9d58ccde5173b62af8bc14851ccbe58cc8970ab", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/305.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 305\n---\n" }, { "alpha_fraction": 0.2857142984867096, "alphanum_fraction": 0.4761904776096344, "avg_line_length": 6, "blob_id": "4441f6c2be262479028353e01e0bd786b5307449", "content_id": "5d1537fc901bce7da1fd9519e7466933cc179451", "detected_licenses": [], 
"is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 21, "license_type": "no_license", "max_line_length": 12, "num_lines": 3, "path": "/content/trains/2260.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 2260\n---\n" }, { "alpha_fraction": 0.31578946113586426, "alphanum_fraction": 0.42105263471603394, "avg_line_length": 5.333333492279053, "blob_id": "4e097560a64bd29fb1546337b4b57ea9577f4a95", "content_id": "c8eac09403d5136576c58821f17e830d8b8ab557", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 19, "license_type": "no_license", "max_line_length": 10, "num_lines": 3, "path": "/content/trains/90.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 90\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "99acfa7f88bd4e0fdb1d69464e2dc0710340b72b", "content_id": "9581779ebff7ad21b9a0eef6f52dc2b9c72210f6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/130.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 130\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "d89e7637ee5806a80c77073310e72c129007ba04", "content_id": "374d35a02aa4927720017f0a7e21bd169e670457", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/553.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 553\n---\n" }, { 
"alpha_fraction": 0.5636363625526428, "alphanum_fraction": 0.6363636255264282, "avg_line_length": 10, "blob_id": "28502e8fd134e22b04e7cc23e935ae1d9dfdf923", "content_id": "f035f93270bfcad233392b10559f7272169359d2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 55, "license_type": "no_license", "max_line_length": 15, "num_lines": 5, "path": "/content/routes/palmetto.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nroute: Palmetto\nslug: palmetto\ntrains: [89,90]\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "84280287faa5b4fc0f51b3c4195113d440772697", "content_id": "dea83fc5d914e85fc775a7af41325e488c679b99", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/129.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 129\n---\n" }, { "alpha_fraction": 0.2857142984867096, "alphanum_fraction": 0.4761904776096344, "avg_line_length": 6, "blob_id": "c8030066c0eea360700134d899080ce9f23c3abe", "content_id": "586bcf8e91bf3b679be43cae2c812419f785316d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 21, "license_type": "no_license", "max_line_length": 12, "num_lines": 3, "path": "/content/trains/2292.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 2292\n---\n" }, { "alpha_fraction": 0.2857142984867096, "alphanum_fraction": 0.4761904776096344, "avg_line_length": 6, "blob_id": "2a31154459562c6c40ba2a7e9e5239d40c3382e5", "content_id": "331096c7c4a0bba65b31826923d53bad5970d83e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", 
"length_bytes": 21, "license_type": "no_license", "max_line_length": 12, "num_lines": 3, "path": "/content/trains/1584.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 1584\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "ca3ec2ea7b278995f4af4a9e5a1074231cac6ee6", "content_id": "4dcc38ed96d00e6ab60074bbfddf6e3bb8939bf7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/134.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 134\n---\n" }, { "alpha_fraction": 0.2857142984867096, "alphanum_fraction": 0.4761904776096344, "avg_line_length": 6, "blob_id": "b344829da20b68bf0f5cdd29b1b36e97483fc9c9", "content_id": "f4084a263f8e64d557df439a0e689596690e5b54", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 21, "license_type": "no_license", "max_line_length": 12, "num_lines": 3, "path": "/content/trains/1121.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 1121\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "81ce1b3002e63aa1a92a2456944256310c7a0e55", "content_id": "6669186f6f2fd2547f21b12cfd00390154ab5fa7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/749.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 749\n---\n" }, { "alpha_fraction": 0.31578946113586426, "alphanum_fraction": 0.42105263471603394, 
"avg_line_length": 5.333333492279053, "blob_id": "619df725a7bce9d3204d82c1b6f0815269e5e371", "content_id": "5e038fbe3a3d0345906655ebda0e2f4b676edc67", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 19, "license_type": "no_license", "max_line_length": 10, "num_lines": 3, "path": "/content/trains/27.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 27\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "d43435eaa3dec4de779a8a9eda05b2970afdae6f", "content_id": "454240a42f97a5f56614a26c429705c712d11331", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/296.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 296\n---\n" }, { "alpha_fraction": 0.2857142984867096, "alphanum_fraction": 0.4761904776096344, "avg_line_length": 6, "blob_id": "35df68d4039f0085d3f9709b893f132a09f61831", "content_id": "2df9a17b35d97c6be6ba99bc818997563d02fcf8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 21, "license_type": "no_license", "max_line_length": 12, "num_lines": 3, "path": "/content/trains/2213.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 2213\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "a114505f9731590ec19c51769bd0cd2d4fe71d83", "content_id": "4b07a5f168a58a071a34741c91b5fe89e71768f3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, 
"path": "/content/trains/584.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 584\n---\n" }, { "alpha_fraction": 0.2857142984867096, "alphanum_fraction": 0.4761904776096344, "avg_line_length": 6, "blob_id": "e1e27fd70e64f908df827498e5495e1e76f84b6d", "content_id": "a999c05c7ed25df34edca0b4544f203517048136", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 21, "license_type": "no_license", "max_line_length": 12, "num_lines": 3, "path": "/content/trains/1056.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 1056\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "f0b3c5df2dd42d6f9ceb016e96a87897b5737a26", "content_id": "ad69dda6f050749e3a041fd4fee0106a38644743", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/340.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 340\n---\n" }, { "alpha_fraction": 0.31578946113586426, "alphanum_fraction": 0.42105263471603394, "avg_line_length": 5.333333492279053, "blob_id": "6f456a1b85ee4151ba19e1f26283e0b311f30aa9", "content_id": "1eea7b8cc6d1a49df627e35944bfd95e5c9c7042", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 19, "license_type": "no_license", "max_line_length": 10, "num_lines": 3, "path": "/content/trains/93.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 93\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "36d56fb43569e7be558b50648b2e9cb76fe3b666", "content_id": 
"b6cab20b77feab499831ad2fa7efed23b59c8dd4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/600.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 600\n---\n" }, { "alpha_fraction": 0.31578946113586426, "alphanum_fraction": 0.42105263471603394, "avg_line_length": 5.333333492279053, "blob_id": "76b01a7ced29afd050fa2f9a6799336bad7d8836", "content_id": "a31494cb2d72100f623ab2158095b34efc9b75e1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 19, "license_type": "no_license", "max_line_length": 10, "num_lines": 3, "path": "/content/trains/87.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 87\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "8a9748a4e76b0a691fe091cd6bb0cc2e23b7b126", "content_id": "bc21ca112533f2e3744c0442fa6ea4714ff756f9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/502.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 502\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "d5f3daf0b6ac5c9962affd31b787c58ef6b43991", "content_id": "c93f420ad4fc9a3c12cff705f1d818f6ec253924", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/611.md", "repo_name": "jonathanhunsucker/amtrak-on-time", 
"src_encoding": "UTF-8", "text": "---\nnumber: 611\n---\n" }, { "alpha_fraction": 0.6056337952613831, "alphanum_fraction": 0.6901408433914185, "avg_line_length": 13.199999809265137, "blob_id": "54db348201efc5f2dd37ac82ca11897ec646e704", "content_id": "4a09e0438e43dcabbf138d0a2a72bfb2a9dba7c7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 71, "license_type": "no_license", "max_line_length": 22, "num_lines": 5, "path": "/content/routes/illinois-zephyr.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nroute: Illinois Zephyr\nslug: illinois-zephyr\ntrains: [380,383]\n---\n" }, { "alpha_fraction": 0.2857142984867096, "alphanum_fraction": 0.4761904776096344, "avg_line_length": 6, "blob_id": "30105345f964b16d752b3ec7455f044c5678102a", "content_id": "fb05267bb862272ed4725f0cbc44fe03ccea7ad5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 21, "license_type": "no_license", "max_line_length": 12, "num_lines": 3, "path": "/content/trains/2218.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 2218\n---\n" }, { "alpha_fraction": 0.2857142984867096, "alphanum_fraction": 0.4761904776096344, "avg_line_length": 6, "blob_id": "a17f09488dce4b217fe28ab15af6cc0f9a308f25", "content_id": "d761034343e5fca4cac454a8061d9ef03dd29a0a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 21, "license_type": "no_license", "max_line_length": 12, "num_lines": 3, "path": "/content/trains/1567.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 1567\n---\n" }, { "alpha_fraction": 0.2857142984867096, "alphanum_fraction": 0.4761904776096344, "avg_line_length": 6, "blob_id": "cb6b6e45afce0feee1990b878396e324fcc5b4b6", "content_id": "47022d58497c45bc38fe31cf27f4787f27ed867f", 
"detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 21, "license_type": "no_license", "max_line_length": 12, "num_lines": 3, "path": "/content/trains/2236.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 2236\n---\n" }, { "alpha_fraction": 0.31578946113586426, "alphanum_fraction": 0.42105263471603394, "avg_line_length": 5.333333492279053, "blob_id": "a5b3997825b9b19771926b643bc5475482709aa3", "content_id": "5205b93f49f584fd53ac266e6fb393780c621f49", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 19, "license_type": "no_license", "max_line_length": 10, "num_lines": 3, "path": "/content/trains/98.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 98\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "1ce07e94339771a20ace7f348c99734bacc0584e", "content_id": "e09feac7e2dbb79cd73066058470b527df4177c0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/333.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 333\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "fcc8ca06883fe5abb0ef7de016634fd997f666f9", "content_id": "8369ceb0a97bffbe247085dd953f3cf556520946", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/504.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 
504\n---\n" }, { "alpha_fraction": 0.31578946113586426, "alphanum_fraction": 0.42105263471603394, "avg_line_length": 5.333333492279053, "blob_id": "e66e69e2cbb138d28905cffb6ffbf23ff5bdd975", "content_id": "42bf6cd2f6d4afe287643ac69cac31070056dbc4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 19, "license_type": "no_license", "max_line_length": 10, "num_lines": 3, "path": "/content/trains/66.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 66\n---\n" }, { "alpha_fraction": 0.31578946113586426, "alphanum_fraction": 0.42105263471603394, "avg_line_length": 5.333333492279053, "blob_id": "e2d6563e17145a5f4e85689524626f170ef03f40", "content_id": "406a21d7ea6b5349ebabb7b9e3379d1633a48ec7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 19, "license_type": "no_license", "max_line_length": 10, "num_lines": 3, "path": "/content/trains/43.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 43\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "a2b298b74c1cffc29f955491080a7829bea058f3", "content_id": "59c03582d1bca0a71ef334a22388cb84204a7df8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/401.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 401\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "25af992ea54fcd9f3136ded7ba06b877b539ddc4", "content_id": "98998eac6a70187ae13e26220e0c610844e5836f", "detected_licenses": [], "is_generated": false, "is_vendor": false, 
"language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/341.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 341\n---\n" }, { "alpha_fraction": 0.31578946113586426, "alphanum_fraction": 0.42105263471603394, "avg_line_length": 5.333333492279053, "blob_id": "e44825fd4d1371ebbeb689d6c0c29fea8a8fa5e7", "content_id": "1f5a8a6b5c7163baaf6e86fe82716eca1c70c43a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 19, "license_type": "no_license", "max_line_length": 10, "num_lines": 3, "path": "/content/trains/80.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 80\n---\n" }, { "alpha_fraction": 0.31578946113586426, "alphanum_fraction": 0.42105263471603394, "avg_line_length": 5.333333492279053, "blob_id": "47ec670dfd9c5a6989fed131b11d1c65a4d5c624", "content_id": "fa468fa9a26231d01a730c6eddec4ea72a501de6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 19, "license_type": "no_license", "max_line_length": 10, "num_lines": 3, "path": "/content/trains/52.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 52\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "4fdfc83ecb0d8d80c238719523e5b54e407ab27e", "content_id": "dccf2629b512fcccdd883c08f9bc22deae7e41c5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/194.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 194\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 
0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "27aa85949d2c2751a0df3fd538ba576c14cb58d0", "content_id": "e837adbee12863aad90515406086b8874d693fca", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/544.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 544\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "25ea4e6e1667dae336599180139223f5fb60adca", "content_id": "b074d2a9f4e36d0ec04f70d0db610c4a43a89042", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/534.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 534\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "e86d992f6e89960d22221b6a32c26909e94efbb3", "content_id": "5ec7ac6de1fc591248da9854d45f0043379cfafd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/242.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 242\n---\n" }, { "alpha_fraction": 0.2857142984867096, "alphanum_fraction": 0.4761904776096344, "avg_line_length": 6, "blob_id": "9a6362b04d6e5fcd1c387e3fd4d7c75429968a2a", "content_id": "364090516fc48e5e8582acd2c9e2eb50bf0da913", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 21, "license_type": "no_license", "max_line_length": 12, 
"num_lines": 3, "path": "/content/trains/1134.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 1134\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "0b73ef9c0df26b21e1378fd5f6d9c0f2dc65d6a7", "content_id": "11d50ae744d185ba17240b5e181e70cb4b4cdb07", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/597.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 597\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "89a3c7e1dee4697429dde3666aadfc1ad5217d22", "content_id": "b8f493d81502fed1d7a717a2faf68b7118a27366", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/689.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 689\n---\n" }, { "alpha_fraction": 0.2857142984867096, "alphanum_fraction": 0.4761904776096344, "avg_line_length": 6, "blob_id": "f224f6cd384c44961ee540121fa71d990813be5f", "content_id": "0ed937ca9d2a1881f245352996ebe5a532606efa", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 21, "license_type": "no_license", "max_line_length": 12, "num_lines": 3, "path": "/content/trains/2162.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 2162\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": 
"df65153f0f1736cd45a47efdf8a2e645d06189ae", "content_id": "6e68405268dd04a7b986f560d27a9b0dbad8994f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/654.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 654\n---\n" }, { "alpha_fraction": 0.31578946113586426, "alphanum_fraction": 0.42105263471603394, "avg_line_length": 5.333333492279053, "blob_id": "c831ea203b8c4cd4b3a085ffe339b466e446cd86", "content_id": "98b4583e7b99652f48b076968e76568fef19f121", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 19, "license_type": "no_license", "max_line_length": 10, "num_lines": 3, "path": "/content/trains/68.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 68\n---\n" }, { "alpha_fraction": 0.2857142984867096, "alphanum_fraction": 0.4761904776096344, "avg_line_length": 6, "blob_id": "08521df188c44679d9da8d6a2722010f766b0a70", "content_id": "783d7877fbc26409394cfd18ea563faac36d82af", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 21, "license_type": "no_license", "max_line_length": 12, "num_lines": 3, "path": "/content/trains/2109.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 2109\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "885a1f3fa13cdc05653d50927c686dd74d646646", "content_id": "6fca6eb5cc858c95c2d2830bf3d631d81f9122d3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/370.md", "repo_name": 
"jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 370\n---\n" }, { "alpha_fraction": 0.5197804570198059, "alphanum_fraction": 0.5281690359115601, "avg_line_length": 22.551219940185547, "blob_id": "ff7ba1e3cff33b6f711bc570a6019107de1d3ce1", "content_id": "1ff2807478bca17921a40701ab443ba53a2e038c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "PHP", "length_bytes": 9656, "license_type": "no_license", "max_line_length": 111, "num_lines": 410, "path": "/src/toJson.php", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "<?php\n\n$source_directory = $argv[1];\n\nfunction toMinutes($hours_minutes, $days)\n{\n $hour = intval(substr($hours_minutes, 0, -3), 10);\n $hour %= 12;\n $hour -= 1;\n\n $minutes = intval(substr($hours_minutes, strlen($hours_minutes) - 3, strlen($hours_minutes) - 1), 10);\n\n $meridiem = strtolower($hours_minutes[strlen($hours_minutes) - 1]);\n if ($meridiem === 'p') {\n $hour += 12;\n }\n\n $arrival_time = $hour * 60 + $minutes;\n $arrival_time = $arrival_time + 24 * 60 * $days;\n return $arrival_time;\n}\n\nfunction prefix($prefix, $haystack)\n{\n return substr($haystack, 0, strlen($prefix)) === $prefix;\n}\n\nfunction keyFromPath($path)\n{\n list($number, $date) = explode('_', $path);\n $year = substr($date, 0, 4);\n $month = substr($date, 4, 2);\n $day = substr($date, 6, 2);\n return \"$year-$month-$day\";\n}\n\nfunction median($list)\n{\n if (count($list) === 0) {\n throw new Exception('cannot take median on emty list');\n }\n\n asort($list);\n $list = array_values($list);\n $middle = count($list) / 2;\n\n if (count($list) % 2 === 1) {\n // odd, can take middle\n $median = $list[(count($list) - 1) / 2];\n } else {\n // even, average the left and right\n $left = $list[count($list) / 2 - 1];\n $right = $list[count($list) / 2];\n $median = average([$left, $right]);\n }\n\n return $median;\n}\n\nfunction percentile($list, $percentile)\n{\n 
asort($list);\n $list = array_values($list);\n\n $index = count($list) * $percentile;\n if ($index === round($index)) {\n $p = $list[$index];\n } else {\n // interpolate\n $p = $list[floor($index)];\n // TODO linear interpolation\n }\n\n return $p;\n}\n\nfunction average($list)\n{\n return array_sum($list) / count($list);\n}\n\nfunction recordsFromDirectory($directory)\n{\n $records = [];\n foreach (scandir($directory) as $path) {\n if (in_array($path, ['.', '..'])) {\n continue;\n }\n\n $key = keyFromPath($path);\n $records[$key] = file_get_contents($directory . '/' . $path);\n }\n\n return $records;\n}\n\n$records = recordsFromDirectory($source_directory);\n\nclass Line\n{\n public function __construct($line)\n {\n $this->raw = $line;\n }\n\n public function stop()\n {\n return $this->read(2, 3);\n }\n\n public function delay()\n {\n $destination_scheduled_arrival = $this->scheduledArrivalTime();\n $destination_actual_arrival = $this->actualArrival();\n\n if ($destination_scheduled_arrival === null || $destination_actual_arrival === null) {\n return null;\n }\n\n // time in minutes\n $scheduled = toMinutes($destination_scheduled_arrival, $this->scheduledArrivalDay());\n $actual = toMinutes($destination_actual_arrival, $this->actualArrivalDay());\n\n $delay = $actual - $scheduled;\n\n return $delay;\n }\n\n public function scheduledDepartureDay()\n {\n $day = $this->read(16, 3);\n\n if ($day !== null) {\n $day = intval($day, 10);\n }\n\n return $day;\n }\n\n public function scheduledDepartureTime()\n {\n return $this->read(19, 5);\n }\n\n public function scheduledArrivalDay()\n {\n $day = $this->read(7, 3);\n\n if ($day !== null) {\n $day = intval($day, 10);\n }\n\n return $day;\n }\n\n public function scheduledArrivalTime()\n {\n return $this->read(10, 5);\n }\n\n public function actualDepartureDay()\n {\n $day = $this->scheduledDepartureDay();\n\n if ($day !== null && $this->rolledOverDay($this->scheduledDepartureTime(), $this->actualDeparture())) {\n // day 
rolled over\n $day += 1;\n }\n\n return $day;\n }\n\n public function actualDeparture()\n {\n return $this->read(31, 5);\n }\n\n public function actualArrivalDay()\n {\n $day = $this->scheduledArrivalDay();\n\n if ($day !== null && $this->rolledOverDay($this->scheduledArrivalTime(), $this->actualArrival())) {\n // day rolled over\n $day += 1;\n }\n\n return $day;\n }\n\n private function rolledOverDay($scheduled, $actual)\n {\n if ($scheduled === null || $actual === null) {\n return false;\n }\n\n $difference = toMinutes($scheduled, 1) - toMinutes($actual, 1);\n\n return $difference > 450;\n }\n\n public function actualArrival()\n {\n return $this->read(25, 5);\n }\n\n public function comments()\n {\n return $this->read(37, 0);\n }\n\n public function read($start, $length)\n {\n $value = trim(substr($this->raw, $start, $length));\n if ($value === '' || $value === '*') {\n return null;\n }\n return $value;\n }\n\n public function missingData()\n {\n return $this->actualArrival() === null || $this->actualDeparture() === null;\n }\n\n public function getData()\n {\n $data = [\n 'stop' => $this->stop(),\n //'raw' => $this->raw,\n ];\n\n $data['missing_data'] = $this->missingData();\n\n $data['scheduled_arrival'] = $this->scheduledArrivalTime();\n $data['scheduled_arrival_day'] = $this->scheduledArrivalDay();\n $data['actual_arrival'] = $this->actualArrival();\n $data['actual_arrival_day'] = $this->actualArrivalDay();\n $data['scheduled_departure'] = $this->scheduledDepartureTime();\n $data['scheduled_departure_day'] = $this->scheduledDepartureDay();\n $data['actual_departure'] = $this->actualDeparture();\n $data['actual_departure_day'] = $this->actualDepartureDay();\n\n return $data;\n }\n}\n\nclass Record\n{\n private $raw;\n\n private $lines;\n\n public function __construct($record)\n {\n $this->raw = $record;\n $this->lines = explode(\"\\r\\n\", trim($this->raw));\n }\n\n public function origin()\n {\n $line = $this->lines()[0];\n return [\n 'stop' => 
$line->stop(),\n 'time' => $line->scheduledDepartureTime(),\n ];\n }\n\n public function lines()\n {\n $data_begin = null;\n foreach ($this->lines as $index => $line) {\n if (prefix('* V ', $line)) {\n $data_begin = $index;\n break;\n }\n }\n\n $data_lines = array_slice($this->lines, $data_begin + 1);\n\n $objects = array_map(function ($line) {\n return new Line($line);\n }, $data_lines);\n\n $contiguously_keyed = array_values($objects);\n\n return $contiguously_keyed;\n }\n\n public function destination()\n {\n $line = $this->lastLine();\n return [\n 'stop' => $line->stop(),\n 'time' => $line->scheduledArrivalTime(),\n ];\n }\n\n public function route()\n {\n return trim(substr($this->lines[0], 2));\n }\n\n public function delay()\n {\n return $this->lastLine()->delay();\n }\n\n private function actualArrival()\n {\n return $this->lastLine()->actualArrival();\n }\n\n public function lastStop()\n {\n return $this->lastLine()->stop();\n }\n\n public function wasCancelled()\n {\n return $this->lastLine()->comments() === \"Station Stop Canceled\";\n }\n\n public function experiencedServiceDisruption()\n {\n return $this->lines[0] === '* THIS TRAIN EXPERIENCED A SERVICE DISRUPTION.';\n }\n\n public function missingData()\n {\n return $this->lastLine()->missingData();\n }\n\n public function getData()\n {\n return [\n 'cancelled' => $this->wasCancelled(),\n 'experienced_service_disruption' => $this->experiencedServiceDisruption(),\n 'missing_data' => $this->missingData(),\n 'delay' => $this->delay(),\n 'actual_arrival' => $this->actualArrival(),\n 'lines' => array_map(function ($line) {\n return $line->getData();\n }, $this->lines()),\n //'raw' => $this->raw,\n ];\n }\n\n private function lastLine()\n {\n $line = $this->lines[count($this->lines) - 1];\n return new Line($line);\n }\n}\n\nclass RecordSet\n{\n private $records;\n\n public function __construct($records)\n {\n $this->records = $records;\n }\n\n public function stats()\n {\n $valid_records = 
$this->records;\n\n $delays = array_values(array_map(function ($record) {\n return $record->delay();\n }, $valid_records));\n\n $stats = [];\n\n $stats['cancellations'] = count(array_filter($this->records, function ($record) {\n return $record->wasCancelled();\n }));\n\n $stats['service_disruptions'] = count(array_filter($this->records, function ($record) {\n return $record->experiencedServiceDisruption();\n }));\n\n $stats['valid_records'] = count($valid_records);\n\n if ($delays) {\n $stats['median'] = median($delays);\n $stats['p90'] = percentile($delays, .90);\n }\n\n return $stats;\n }\n}\n\n$data = [\n 'name' => 'Train ' . basename($source_directory),\n 'records' => array_map(function ($record) {\n return (new Record($record))->getData();\n }, $records),\n];\n\nif ($records) {\n $record = new Record($records[array_keys($records)[0]]);\n $data['origin'] = $record->origin();\n $data['destination'] = $record->destination();\n $data['route'] = $record->route();\n\n $set = new RecordSet(array_values(array_map(function ($data) {return new Record($data);}, $records)));\n $data['stats'] = $set->stats();\n}\n\necho json_encode($data);\n" }, { "alpha_fraction": 0.31578946113586426, "alphanum_fraction": 0.42105263471603394, "avg_line_length": 5.333333492279053, "blob_id": "dccb837154956f3dfc654d96ed35dee920a9c3e6", "content_id": "badfcf9b7c363a417e1fbb1cee4226601c24cb5f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 19, "license_type": "no_license", "max_line_length": 10, "num_lines": 3, "path": "/content/trains/76.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 76\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "ad61024906273f058e3161f66363bb38fa624790", "content_id": "f609f045f66842784cd38c74dc78bc2d0e705d98", "detected_licenses": [], "is_generated": false, 
"is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/551.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 551\n---\n" }, { "alpha_fraction": 0.2857142984867096, "alphanum_fraction": 0.4761904776096344, "avg_line_length": 6, "blob_id": "0a6c75f9a0e4071a41541b7ae251132559d70b46", "content_id": "fc73a7e7b9336fae658637449fb1e5d7e0f723ff", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 21, "license_type": "no_license", "max_line_length": 12, "num_lines": 3, "path": "/content/trains/1588.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 1588\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "bfcd6c4751e7d7933d2d417216eb806d0df3ef08", "content_id": "83fd9e33dad04435a18ea3816aa6c7dedb52a050", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/359.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 359\n---\n" }, { "alpha_fraction": 0.2857142984867096, "alphanum_fraction": 0.4761904776096344, "avg_line_length": 6, "blob_id": "ea8355b73409554f22174b7a508c139fa5e48a35", "content_id": "b080add082442b0a63518e1a13f04ffe03d9e1d2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 21, "license_type": "no_license", "max_line_length": 12, "num_lines": 3, "path": "/content/trains/1985.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 1985\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 
0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "72fa7f8f2812f1aa463fd1e304d57b6f86163c43", "content_id": "1822aaf04982f0bbc8f2d5e68df2f901d8840581", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/147.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 147\n---\n" }, { "alpha_fraction": 0.707317054271698, "alphanum_fraction": 0.7560975551605225, "avg_line_length": 23.600000381469727, "blob_id": "781872ddc01a742b006ca5ffd640613560ebc67f", "content_id": "2360a2f7b77922746e2e5a93d78a0edb591566e2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 123, "license_type": "no_license", "max_line_length": 49, "num_lines": 5, "path": "/content/routes/northeast-regional-springfield,-virginia.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nroute: Northeast Regional - Springfield, Virginia\nslug: northeast-regional-springfield,-virginia\ntrains: [145,147]\n---\n" }, { "alpha_fraction": 0.2857142984867096, "alphanum_fraction": 0.4761904776096344, "avg_line_length": 6, "blob_id": "71f33726f9fbad59bca00cc8094c39f5dddc8d5d", "content_id": "627cef8c7770376da64e2f203e87012dcbf54296", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 21, "license_type": "no_license", "max_line_length": 12, "num_lines": 3, "path": "/content/trains/2215.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 2215\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "cbb374c38f09ed289eac931dd02aa27d975d0ab0", "content_id": "24c6c2ba979a8d02ea3ae7fc9dc31e1adf3c7fec", "detected_licenses": 
[], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/711.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 711\n---\n" }, { "alpha_fraction": 0.2857142984867096, "alphanum_fraction": 0.4761904776096344, "avg_line_length": 6, "blob_id": "fdef1db3712f5035d2356962e3f92ab79c885f5b", "content_id": "45003db3ad71bd65f6aa719b14a6089095d3934a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 21, "license_type": "no_license", "max_line_length": 12, "num_lines": 3, "path": "/content/trains/1194.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 1194\n---\n" }, { "alpha_fraction": 0.2857142984867096, "alphanum_fraction": 0.4761904776096344, "avg_line_length": 6, "blob_id": "f9e0fec8128584ad427fc648155bff09165c5fd6", "content_id": "2aed56c28c6d253ba96ab40631b33a84010b5227", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 21, "license_type": "no_license", "max_line_length": 12, "num_lines": 3, "path": "/content/trains/2242.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 2242\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "7a7e2c8e426b466f1972970797bd2d3338da974b", "content_id": "739d22fad9d0bcc57f47aaca0ceb37d806d8a05a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/461.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 461\n---\n" }, { "alpha_fraction": 0.30000001192092896, 
"alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "204368d49da23525f93b0ef73cf14106f525244c", "content_id": "72c05c69ea13dc461c8133379a69c67b2ac5a722", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/583.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 583\n---\n" }, { "alpha_fraction": 0.31578946113586426, "alphanum_fraction": 0.42105263471603394, "avg_line_length": 5.333333492279053, "blob_id": "ac4eed81634f63848d60102f6c71e7effe1e8087", "content_id": "08fc13d6d0dd1aed27749618706ef03af16349e3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 19, "license_type": "no_license", "max_line_length": 10, "num_lines": 3, "path": "/content/trains/77.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 77\n---\n" }, { "alpha_fraction": 0.2857142984867096, "alphanum_fraction": 0.4761904776096344, "avg_line_length": 6, "blob_id": "9957b9b31f8bb59765ed60d4c12c297ee11f7aba", "content_id": "099ee012b341799be272e82606d435250dafaa05", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 21, "license_type": "no_license", "max_line_length": 12, "num_lines": 3, "path": "/content/trains/2226.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 2226\n---\n" }, { "alpha_fraction": 0.31578946113586426, "alphanum_fraction": 0.42105263471603394, "avg_line_length": 5.333333492279053, "blob_id": "91081c0cb59459d6c3824dc384def71678137877", "content_id": "2577403d377bc5c49bac21a1f5743ae07ab200e5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 19, "license_type": "no_license", 
"max_line_length": 10, "num_lines": 3, "path": "/content/trains/91.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 91\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "d234ff271318d9babfdb8b1848ef02d8bceeeb37", "content_id": "56f38538b7ecf2edebee457019f449065269d24e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/734.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 734\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "4f8b4d7fa876c9edce677d660e5117d956399e91", "content_id": "2956e78292d410166247dcaa2229a41d784524b6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/151.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 151\n---\n" }, { "alpha_fraction": 0.2857142984867096, "alphanum_fraction": 0.4761904776096344, "avg_line_length": 6, "blob_id": "9a98b0c15cc69c5c133a3f31edcdfdbed440fa81", "content_id": "d9a3ea20d773c42671d6830ac80a018cc10346e6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 21, "license_type": "no_license", "max_line_length": 12, "num_lines": 3, "path": "/content/trains/2107.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 2107\n---\n" }, { "alpha_fraction": 0.5593220591545105, "alphanum_fraction": 0.6271186470985413, "avg_line_length": 10.800000190734863, "blob_id": 
"26406cc75847a9f602a2fe98985312d7d7cddfed", "content_id": "7ab74798df91d8853f6be12230b00fc2c2839707", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 59, "license_type": "no_license", "max_line_length": 17, "num_lines": 5, "path": "/content/routes/auto-train.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nroute: Auto Train\nslug: auto-train\ntrains: [53,52]\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "36e2036350ab61e2d782b2a5d683946097663a95", "content_id": "1ebda9ccaa0685b62d1cff600d0d0a8a69807e37", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/572.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 572\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "d307ebf22abc0f7872601d4138410d91fb8b5035", "content_id": "08a9863a664a6e6862b92c89c37dbd6894696a46", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/237.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 237\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "02cde20c1af05596a7de973730f73f95c2336bdb", "content_id": "8e0d86a831a6441c028a16bd86fe1138b0702d8d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, 
"num_lines": 3, "path": "/content/trains/153.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 153\n---\n" }, { "alpha_fraction": 0.31578946113586426, "alphanum_fraction": 0.42105263471603394, "avg_line_length": 5.333333492279053, "blob_id": "333d59056bbff97f83b14197d5c577dc005edf02", "content_id": "27ddbc29a37b8d7b99d5fb0ea9cfa7fc527eb816", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 19, "license_type": "no_license", "max_line_length": 10, "num_lines": 3, "path": "/content/trains/14.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 14\n---\n" }, { "alpha_fraction": 0.2857142984867096, "alphanum_fraction": 0.4761904776096344, "avg_line_length": 6, "blob_id": "bc11873869e8d795aebb278ed3673226ea387e3b", "content_id": "87a4ff255b423b1afba859a34f30fce43e8f2246", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 21, "license_type": "no_license", "max_line_length": 12, "num_lines": 3, "path": "/content/trains/2214.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 2214\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "2b16c31c14161de3bf553079a51747c099609ba6", "content_id": "4c9ae4fbd6b5ba45f0712cd669f4529fd593ef64", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/110.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 110\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "8684fcef6e109b994a0739009f7a5b0c9821d4a9", 
"content_id": "8ca4dc079a563e5dc5f04d0a74cb47bbf6f967a9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/561.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 561\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "6e573b7f7cbb6b11e4e1d6d00423c6bcdf3686ba", "content_id": "aa12aa313a1d371bcd9b89a2cbbd085acf876221", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/377.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 377\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "ef2d7d6692a6bee6f329ec3c6ee70b1aeb835394", "content_id": "e9cc109a85dd8833354ab239688e102148e7969a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/432.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 432\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "fac3f02a97f0c9f7e17f4e1fec75e84e00eec2e3", "content_id": "881dadec996e135911ce7e75d54055098fbbdcfc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/159.md", "repo_name": 
"jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 159\n---\n" }, { "alpha_fraction": 0.2857142984867096, "alphanum_fraction": 0.4761904776096344, "avg_line_length": 6, "blob_id": "f8ca4155cac7b80fd8ad21a91fe174abd136f324", "content_id": "67e5045b11eb2d2d1ef614d274cad0eb7376988b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 21, "license_type": "no_license", "max_line_length": 12, "num_lines": 3, "path": "/content/trains/1141.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 1141\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "4954be4b261f55a059fee5b7650b15938d07d287", "content_id": "1bcfe234f4bdd4708abb6eab135b299654e3ef49", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/742.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 742\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "4eb091d820916eecb1edcd9dd4923403e6086d12", "content_id": "a85395cbec7cb731717ea669f3486ab5ba271398", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/733.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 733\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "bd85cda2480ecc5c85648e084affa4d8a9bb6f72", "content_id": "6af6220330ac03b10df3bb22ee40530c56a6f9f9", 
"detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/255.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 255\n---\n" }, { "alpha_fraction": 0.2857142984867096, "alphanum_fraction": 0.4761904776096344, "avg_line_length": 6, "blob_id": "6f0a5149943815c9315af7861328735927872c4c", "content_id": "a17657628da4272c960306dd5856c73046e26e7e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 21, "license_type": "no_license", "max_line_length": 12, "num_lines": 3, "path": "/content/trains/2230.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 2230\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "150a07388964e92bf84abedc7edcc93e239439f5", "content_id": "7d593627e5ffb1a4b7c85ce951211ccf5cf62df4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/308.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 308\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "13b601f9f962e461badbf5e46e225f36eaab4324", "content_id": "6e72360611ed65321e02b0a990e72c585959b522", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/666.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 666\n---\n" }, { 
"alpha_fraction": 0.31578946113586426, "alphanum_fraction": 0.42105263471603394, "avg_line_length": 5.333333492279053, "blob_id": "11921dced7b431eddf5356a533aca84f9b1f32f2", "content_id": "24aa1a1b29be9c717651365029e1e65f225cbed9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 19, "license_type": "no_license", "max_line_length": 10, "num_lines": 3, "path": "/content/trains/65.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 65\n---\n" }, { "alpha_fraction": 0.31578946113586426, "alphanum_fraction": 0.42105263471603394, "avg_line_length": 5.333333492279053, "blob_id": "29fd9004b3bb798d016d140487ebaec362d38428", "content_id": "94ee03439b75afa51e56ed38c2146c21c1f3cd86", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 19, "license_type": "no_license", "max_line_length": 10, "num_lines": 3, "path": "/content/trains/51.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 51\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "ee6ccfa96950ec18965257e82551eed4f6b08f93", "content_id": "83e4e467c60d88b5dd2c532447a4333e0478db41", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/663.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 663\n---\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 5.666666507720947, "blob_id": "88ef3204caa6af505ea5163ca8de50c0633d57dc", "content_id": "73ed610cab607c3f3c020aa38493dab06b2379ad", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", 
"length_bytes": 20, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/content/trains/463.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nnumber: 463\n---\n" }, { "alpha_fraction": 0.5662650465965271, "alphanum_fraction": 0.6867470145225525, "avg_line_length": 15.600000381469727, "blob_id": "2a9f1f53ac031b86f801b58941697a1715e0916c", "content_id": "647ef4de8ebf607795dad4c2001fc3422cb1b716", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 83, "license_type": "no_license", "max_line_length": 25, "num_lines": 5, "path": "/content/routes/lake-shore-limited.md", "repo_name": "jonathanhunsucker/amtrak-on-time", "src_encoding": "UTF-8", "text": "---\nroute: Lake Shore Limited\nslug: lake-shore-limited\ntrains: [449,49,48,448]\n---\n" } ]
676
ShaneRandell/Lab_08
https://github.com/ShaneRandell/Lab_08
696673de094065e1abf71992b10b754f002b0b5e
3aa9f755473ebe0bf517f57a89a9f21b60afbbcc
e61e835e95e3e82011a302d566a6b335dbeed834
refs/heads/main
2023-03-08T16:57:29.173289
2021-02-21T20:37:49
2021-02-21T20:37:49
339,158,686
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6170886158943176, "alphanum_fraction": 0.6635020971298218, "avg_line_length": 29.700000762939453, "blob_id": "3bf603661f262a349d6edc6586fe93e063bc9b02", "content_id": "03d8f58c04e4261221641147c563ffda3e84c921", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 948, "license_type": "no_license", "max_line_length": 131, "num_lines": 30, "path": "/Lab_8_part1.py", "repo_name": "ShaneRandell/Lab_08", "src_encoding": "UTF-8", "text": "from guizero import App, Slider,TextBox\nfrom gpiozero import AngularServo\nimport time \n\nmaxPW = 2/1000\nminPW = 1/1000\n\n\ndef slider_read(slider_value):\n textbox.value = slider_value\n print(slider_value)\n servo1 = AngularServo(20, min_pulse_width = minPW, max_pulse_width = maxPW, initial_angle = 0, min_angle = -90, max_angle = 90)\n servo1.angle = int(slider_value) \n time.sleep(0.5)\n \ndef slider2_read(slider_value):\n textbox.value = slider_value\n print(slider_value)\n servo2 = AngularServo(21, min_pulse_width = minPW, max_pulse_width = maxPW, initial_angle = 0, min_angle = -90, max_angle = 90)\n servo2.angle = int(slider_value) \n time.sleep(0.5) \n\napp = App()\nslider1 = Slider(app, start=-90, end=90, width=\"fill\",command = slider_read)\nslider2 = Slider(app, start=-90, end=90, width=\"fill\",command = slider2_read)\ntextbox = TextBox(app)\n\nprint(textbox.value)\n\napp.display()\n\n\n\n \n" }, { "alpha_fraction": 0.6206896305084229, "alphanum_fraction": 0.7241379022598267, "avg_line_length": 13.5, "blob_id": "4a8d0ae72c5452544d8792f48bf0d52235ddfb18", "content_id": "94414c5a5a817c14bdd4c4e5ed874672923a2182", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 29, "license_type": "no_license", "max_line_length": 19, "num_lines": 2, "path": "/README.md", "repo_name": "ShaneRandell/Lab_08", "src_encoding": "UTF-8", "text": "# Lab_08\nLab 8 Servo control\n" } ]
2
Shivendra2308/second-mid-solution
https://github.com/Shivendra2308/second-mid-solution
f2692a9d7070afaa5feba0806f680d7a1e7df041
6e4840dd899f92b045c1106938309ba9fac6b402
6c86b0d13a8ccbc1d2c83ca0bf586745a7d20cdd
refs/heads/master
2020-05-04T16:11:24.736534
2019-06-21T17:34:36
2019-06-21T17:34:36
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6666666865348816, "alphanum_fraction": 0.6666666865348816, "avg_line_length": 11, "blob_id": "5e08baddd80d6deed013e476797baa230df5be6d", "content_id": "36300dfe5636e27330a2f007b510ffb7e0d2f193", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 84, "license_type": "no_license", "max_line_length": 16, "num_lines": 7, "path": "/stingmethod.py", "repo_name": "Shivendra2308/second-mid-solution", "src_encoding": "UTF-8", "text": "a=input()\nb=a.capitalize()\nprint(b)\nc=a.istitle()\nprint(c)\nd=a.isnumeric()\nprint(d)\n" } ]
1
the-pedropaulo/Repositorio-Pedro
https://github.com/the-pedropaulo/Repositorio-Pedro
d7de0a5afae1f8bb21b259a41c3e003f4ad90e04
e79fa6b41704574e0e01c42a5bd4af04ae6b67f0
5770fcf040b41ce88f8f8018ce7ca8c7326695f4
refs/heads/main
2023-05-31T13:51:28.440900
2021-06-20T23:03:33
2021-06-20T23:03:33
352,402,125
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7916666865348816, "alphanum_fraction": 0.7916666865348816, "avg_line_length": 23, "blob_id": "bad2baa612a262e3b0ed324513dede8e05a3470d", "content_id": "c08ac0b99b4fad59bd3ddd78c2daf0e969f8ed53", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 125, "license_type": "permissive", "max_line_length": 42, "num_lines": 5, "path": "/README.md", "repo_name": "the-pedropaulo/Repositorio-Pedro", "src_encoding": "UTF-8", "text": "# Repositório Pedro ALteração!\n Meu primeiro repositório\n\n Modificação do Repositório\n Essa nova linha eu alterei no site GITHUB\n" }, { "alpha_fraction": 0.5696558356285095, "alphanum_fraction": 0.5790213346481323, "avg_line_length": 28.455171585083008, "blob_id": "7f886e730029194cd64e7dd5353528aa78118549", "content_id": "420b7db821fcc224dd22a241c452463c2326e5fa", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4308, "license_type": "permissive", "max_line_length": 78, "num_lines": 145, "path": "/programatrabalho.py", "repo_name": "the-pedropaulo/Repositorio-Pedro", "src_encoding": "UTF-8", "text": "# BIBLIOTECA PARA LIMPAR A TELA\nimport os\n\n# MENU PRINCIPAL - INVOCADO NA ULTIMA LINHA PELO CÓDIGO -> menu()\n\n\ndef menu():\n\n print(\"### Calcular a quantidade necessária de metros de fio - 1\")\n print(\"### Calcular a quantidade necessária de tinta - 2\")\n print(\"### Sair - 3\")\n\n opcao = int(input(\"Digite a opção: \"))\n\n if opcao == 1:\n menuSecundarioFio()\n elif opcao == 2:\n menuSecundarioTinta()\n elif opcao == 3:\n sair()\n else:\n menu()\n\n\ndef menuSecundarioFio():\n print(\" ### Único - 1\")\n print(\" ### Orçamento - 2\")\n\n opcao = int(input(\"Digite a opção: \"))\n\n if opcao == 1:\n # RECEBER OS VALORES DO USUÁRIO\n metroQuadrado = float(input(\"Digite o metro quadrado do imóvel: \"))\n preco = float(input(\"Digite o preço do metro do fio: \"))\n\n # FAZER OS 
CALCULOS\n calculoFio = 3 * metroQuadrado\n calculoValorFio = preco * calculoFio\n\n # IMPRIMIR A RESPOSTA NA TELA\n print(\"\"\"Você vai precisar de {} metros de fio \\n\n Vai gastar {} reais\n \"\"\".format(calculoFio, calculoValorFio))\n\n # PERGUNTAR SE QUER CONTINUAR OU SAIR DO PROGRAMA\n continuar = input(\"Deseja voltar ao menu? (s/n) \")\n if continuar == 's':\n menu()\n elif continuar == 'n':\n os.close\n else:\n menuSecundarioFio()\n\n elif opcao == 2:\n # RECEBER OS VALORES DO USUÁRIO\n metroQuadrado = float(input(\"Digite o metro quadrado do imóvel: \"))\n preco1 = float(input(\"Digite o preço do metro do fio na loja 1: \"))\n preco2 = float(input(\"Digite o preço do metro do fio na loja 2: \"))\n\n # FAZER OS CALCULOS\n calculoFio = 3 * metroQuadrado\n calculoPreco1 = preco1 * calculoFio\n calculoPreco2 = preco2 * calculoFio\n\n # IMPRIMIR A RESPOSTA NA TELA\n if calculoPreco1 > calculoPreco2:\n print(\"A loja 2 é mais barata\")\n else:\n print(\"A loja 1 é mais barata\")\n\n # PERGUNTAR SE QUER CONTINUAR OU SAIR DO PROGRAMA\n continuar = input(\"Deseja voltar ao menu? (s/n) \")\n if continuar == 's':\n menu()\n elif continuar == 'n':\n os.close\n\n else:\n # EM CASO DE ERRO, RETORNAR PARA O MENU SECUNDARIO\n menuSecundarioFio()\n\n\ndef menuSecundarioTinta():\n print(\" ### Único - 1\")\n print(\" ### Orçamento - 2\")\n\n opcao = int(input(\"Digite a opção: \"))\n\n if opcao == 1:\n # RECEBER OS VALORES DO USUÁRIO\n metroQuadrado = float(input(\"Digite o metro quadrado do imóvel: \"))\n preco = float(input(\"Digite o preço do litro da tinta: \"))\n\n # FAZER OS CALCULOS\n calculoLitroTinta = metroQuadrado / 2\n calculoValorTinta = preco * calculoLitroTinta\n\n # IMPRIMIR A RESPOSTA NA TELA\n print(\"\"\"Você vai precisar de {} litros de tinta \\n\n Vai gastar {} reais\n \"\"\".format(calculoLitroTinta, calculoValorTinta))\n\n # PERGUNTAR SE QUER CONTINUAR OU SAIR DO PROGRAMA\n continuar = input(\"Deseja voltar ao menu? 
(s/n) \")\n if continuar == 's':\n menu()\n elif continuar == 'n':\n os.close\n\n elif opcao == 2:\n # RECEBER OS VALORES DO USUÁRIO\n metroQuadrado = float(input(\"Digite o metro quadrado do imóvel: \"))\n preco1 = float(input(\"Digite o preço do metro da tinta na loja um: \"))\n preco2 = float(\n input(\"Digite o preço do litro da tinta na loja dois: \"))\n\n # FAZER OS CALCULOS\n calculoLitroTinta = metroQuadrado / 2\n calculoValorTinta1 = preco1 * calculoLitroTinta\n calculoValorTinta2 = preco2 * calculoLitroTinta\n\n # IMPRIMIR A RESPOSTA NA TELA\n if calculoValorTinta1 > calculoValorTinta2:\n print(\"A loja 2 é mais barata\")\n else:\n print(\"A loja 1 é mais barata\")\n\n # PERGUNTAR SE QUER CONTINUAR OU SAIR DO PROGRAMA\n continuar = input(\"Deseja voltar ao menu? (s/n) \")\n if continuar == 's':\n menu()\n elif continuar == 'n':\n os.close\n\n else:\n # EM CASO DE ERRO, RETORNAR PARA O MENU SECUNDARIO\n menuSecundarioTinta()\n\n\ndef sair():\n os.close\n\n\n# INVOCAÇÃO DA FUNÇÃO\nmenu()\n" } ]
2
lennyvarghese/hrtf_audio
https://github.com/lennyvarghese/hrtf_audio
aa97e969e23932d25788d227ebd5c45a07baeeca
9943e513b7f81016c2c4ec9a10f4efece5ab6426
ee021a324995b323457795733bdc084bf09a1fb7
refs/heads/master
2021-05-25T11:16:18.843706
2015-04-29T03:15:45
2015-04-29T03:15:45
24,000,729
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5599708557128906, "alphanum_fraction": 0.5803864598274231, "avg_line_length": 31.39763832092285, "blob_id": "05a26dc892441d066ae827b11a3910fa0c6e2759", "content_id": "26ef55bad10026c75e016e453413779db39d0e0c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8229, "license_type": "no_license", "max_line_length": 114, "num_lines": 254, "path": "/make_binaural.py", "repo_name": "lennyvarghese/hrtf_audio", "src_encoding": "UTF-8", "text": "\"\"\"\nHacky way to get csound to programmatically generate sounds with HRTFs;\ngenerates a temporary .csd file with some string mashing then uses a system\ncall to call csound\n\nDependencise:\n numpy (tested using 1.9.2 via pip)\n scikits.audiolab (tested using 0.11.0 via pip)\n libsndfile (tested using 1.0.25-5 via Debian Wheezy repo; needed for\n scikits.audiolab)\n csound (tested using 5.17 on Debian Wheezy, installed via repo)\n audio_tools\n\nusage:\n python make_binaural.py SAMPLERATE TIMBRE DURATION [f0] [headrad] [wav]\n\n SAMPLERATE: must be 44100, 48000, 96000\n TIMBRE: impulse, click, clicktrain, puretone, sawtooth, or custom. If\n custom, the last argument must be specified and point to the wav file\n to be interpolated.\n DURATION: duration in seconds. For clicks and impulses, this defines the\n total length of the wav file and not the clicks themselves. The clicks\n are always 80 microseconds in duration. When \"impulse\" is selected, a\n single sample is set to 1 (i.e., 1/Fs in duration). Set to 0 if using\n \"custom\", as this will be ignored.\n f0, required for sawtooth, clicktrain, puretone; the fundamental\n frequency for all timbres except impulse/click. Specify as 0 for click\n and impulse if the next argument is necessary, otherwise omit.\n headrad, optional, specify head radius in cm for interpolation (defaults to\n 9 cm, same as csound)\n wav: path to a wav file to be processed. 
Only required when timbre=custom.\n\nnote:\nthe .dat files are copies of the hrtf set included with csound and were copied\nfrom /usr/share/csound/hrtf/hrtf-*.dat. The distance on these files is always\n1.4m.\n\nFor details on the origin of the hrtf files, see:\n http://alumni.media.mit.edu/~kdm/hrtfdoc/section3_2.html\n http://sound.media.mit.edu/resources/KEMAR.html\n\nAdditional references (should be cited when this is used):\n http://sound.media.mit.edu/resources/KEMAR/hrtfdoc.ps\n http://www.csounds.com/manual/html/hrtfmove.html\n\nLast updated: LAV 2015-04-28\n\"\"\"\n\nfrom __future__ import print_function\nfrom scikits import audiolab\nimport audio_tools\nimport numpy as np\nimport os\nimport errno\nimport sys\n\nwavFileList = []\n\n\n''''''''''''''''''''''''''''''''''''''''''''''''''''''\n' CREATE MONO AUDIO '\n''''''''''''''''''''''''''''''''''''''''''''''''''''''\n\nprint(\"Generating Sound Files . . . \\n\\n\")\nFs = int(sys.argv[1])\ntheTimbre = str(sys.argv[2]).lower()\ndur = float(sys.argv[3])\ntry:\n f0 = float(sys.argv[4])\nexcept IndexError:\n f0 = 0.0\n\ntry:\n headRad = float(sys.argv[5])\nexcept IndexError:\n headRad = 9.0\n\nif not (Fs == 48000 or Fs == 44100 or Fs == 96000):\n raise ValueError('Fs must be 44100, 48000, or 96000')\n\nif (theTimbre in ['sawtooth', 'puretone', 'clicktrain']) and f0 == 0.0:\n raise ValueError('F0 must be specified for clicktrain, puretone, sawtooth')\n\n# Get timbre\nif theTimbre == 'sawtooth':\n audioOut = audio_tools.sawtooth(dur, f0, Fs)\nelif theTimbre == 'puretone':\n audioOut = audio_tools.puretone(dur, f0, Fs)\nelif theTimbre == 'clicktrain':\n audioOut = audio_tools.clicktrain(dur, f0, Fs)\nelif theTimbre == 'impulse':\n audioOut = audio_tools.impulse(dur, Fs)\nelif theTimbre == 'click':\n audioOut = audio_tools.click(dur, Fs)\nelif theTimbre == 'custom':\n try:\n audioOut, thisFs, enc = audiolab.wavread(str(sys.argv[6]))\n if Fs != thisFs:\n raise AssertionError('sample rate on supplied wav file does 
not' +\n 'match sample rate specified')\n if len(audioOut.shape) > 1:\n raise AssertionError('custom wav file must be monaural')\n\n dur = len(audioOut) / float(thisFs)\n\n print(dur)\n\n except IndexError:\n raise(IndexError, 'when specifying timbre=custom, last argument must' +\n 'be path to the wav file.')\n\naudioOut = audioOut / max(abs(audioOut))\n\nradStr = '{:.2f}'.format(headRad).replace('.', 'p')\nf0Str = '{:.2f}'.format(f0).replace('.', 'p')\ndurStr = '{:.2f}'.format(1000*dur).replace('.', 'p')\n\n# wav file directory to save everything to\nif f0:\n wavDir = 'output/{:d}_{:s}_dur{:s}ms_rad{:s}cm_f0{:s}'.format(Fs,\n theTimbre,\n durStr,\n radStr,\n f0Str)\nelse:\n wavDir = 'output/{:d}_{:s}_dur{:s}ms_rad{:s}cm'.format(Fs,\n theTimbre,\n durStr,\n radStr)\n\n# Output Sound file\ntry:\n os.mkdir('output')\nexcept OSError, e:\n if e.errno == errno.EEXIST:\n pass\n\ntry:\n os.mkdir(wavDir)\nexcept OSError, e:\n if e.errno == errno.EEXIST:\n pass\n\nmonoFilename = 'original.wav'\n\n# save as 32-bit float wav file\naudiolab.wavwrite(audioOut,\n os.path.join(wavDir, monoFilename),\n Fs,\n enc='float32')\n\n\n''''''''''''''''''''''''''''''''''''''''''''''''''''''\n' CSOUND HRTFSTAT '\n''''''''''''''''''''''''''''''''''''''''''''''''''''''\n# span 0 to 180 in 0.1 degree increments\nazimuthVals = np.linspace(0.0, 180, 1801, endpoint=True)\n\nprint(\"Calling cSound...\\n\\n\")\n\nCSoundCode = '''\n<CsoundSynthesizer>\n <CsOptions>\n -o {0:s} -W -f\n </CsOptions>\n\n <CsInstruments>\n sr = {1:d}\n ksmps = 10\n nchnls = 2\n instr 1\n ain soundin \"{2:s}\"\n aleft,aright hrtfstat ain, {3:f}, 0, \"hrtf/hrtf-{1:d}-left.dat\", \"hrtf/hrtf-{1:d}-right.dat\", {4:f}, {1:d}\n outs aleft, aright\n endin\n </CsInstruments>\n\n <CsScore>\n i1 0 {5:f}\n </CsScore>\n</CsoundSynthesizer>\n'''\n\n# Get sound input file\nsoundIn = os.path.join(wavDir, monoFilename)\n\nfor azimuth in azimuthVals:\n\n # Open temp file\n tempFile = open('.tempCSD.csd', 'w')\n\n if azimuth > 0 and 
azimuth < 180:\n aStr = 'pos' + '{:.1f}'.format(azimuth).replace('.', 'p')\n elif azimuth == 0:\n aStr = '0'\n elif azimuth == 180:\n aStr = '180'\n\n # Get Filepath & append to 'wavFileList'\n filepath = os.path.join(wavDir, aStr + '.wav')\n wavFileList.append(filepath)\n\n # Write to CSound Synthesizer\n inputCode = CSoundCode.format(filepath, Fs, soundIn,\n azimuth, headRad, dur)\n tempFile.write(inputCode)\n tempFile.close()\n os.system('csound .tempCSD.csd')\n\nos.remove('.tempCSD.csd')\n\n\n''''''''''''''''''''''''''''''''''''''''''''''''''''''\n' STIMULUS RESCALING '\n''''''''''''''''''''''''''''''''''''''''''''''''''''''\n\n# open everyhting just generated, and obtain the maximum value across all\n# tokens and channels\nmaxVals = []\nfor x in wavFileList:\n audioData, _, _ = audiolab.wavread(x)\n maxVals.append(np.max(np.abs(audioData)))\n\nscaler = max(maxVals) + 1E-16\n\n# scale everything so that the max value across the entire set of tokens is ~1.\n# Also estimate the ITD and ILD from the stimuli and write these to a csv file.\noutputInfo = open(os.path.join(wavDir, 'info.csv'), 'w')\n\nprintStr = '{},{},{},{}\\n'\noutputInfo.write(printStr.format('filename', 'azimuth (deg)',\n 'itd (s)', 'ild (dB)'))\nprintStr = '{:s},{:.1f},{:E},{:E}\\n'\n\nfor x in range(len(wavFileList)):\n\n audioData, fs, enc = audiolab.wavread(wavFileList[x])\n\n azimuth = azimuthVals[x]\n itdVal = audio_tools.get_itd(audioData, fs)\n ildVal = audio_tools.get_ild(audioData)\n\n audiolab.wavwrite(audioData/scaler, wavFileList[x], fs, enc)\n\n # reverse to get left side; see Gardner/Martin technical note for why this\n # makes sense\n if 'pos' in wavFileList[x]:\n xsp = wavFileList[x].split('pos')\n leftFilename = xsp[0] + 'neg' + xsp[1]\n audiolab.wavwrite(np.fliplr(audioData/scaler), leftFilename, fs, enc)\n\n outputInfo.write(printStr.format(wavFileList[x], azimuth, itdVal, ildVal))\n\noutputInfo.close()\n" }, { "alpha_fraction": 0.7061704397201538, "alphanum_fraction": 
0.7272282242774963, "avg_line_length": 31.41269874572754, "blob_id": "ea8e54232e87bd768adfc68e2fe280c8a691b0f2", "content_id": "db9b06e4f97cee824cf2b19ab1864b1fbf81e5ce", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2042, "license_type": "no_license", "max_line_length": 78, "num_lines": 63, "path": "/README.md", "repo_name": "lennyvarghese/hrtf_audio", "src_encoding": "UTF-8", "text": "hrtf_audio\n==========\n\nHacky way to get csound to programmatically generate sounds with HRTFs;\ngenerates a temporary .csd file with some string mashing then uses a system\ncall to call csound\n\nDependencies\n-------------\n * numpy (tested using 1.9.2 via pip)\n\n * scikits.audiolab (tested using 0.11.0 via pip)\n\n * libsndfile (tested using 1.0.25-5 via Debian Wheezy repo; needed for\n scikits.audiolab)\n\n * csound (tested using 5.17 on Debian Wheezy, installed via repo)\n\n * audio_tools\n\nUsage\n-------------\n```python make_binaural.py SAMPLERATE TIMBRE DURATION [f0] [headrad] [wav]```\n\nSAMPLERATE: must be 44100, 48000, 96000\n\nTIMBRE: impulse, click, clicktrain, puretone, sawtooth, or custom. If\n custom, the last argument must be specified and point to the wav file\n to be interpolated.\n\nDURATION: duration in seconds. For clicks and impulses, this defines the\n total length of the wav file and not the clicks themselves. The clicks\n are always 80 microseconds in duration. When \"impulse\" is selected, a\n single sample is set to 1 (i.e., 1/Fs in duration). Set to 0 if using\n \"custom\", as this will be ignored.\n\nf0, required for sawtooth, clicktrain, puretone; the fundamental\n frequency for all timbres except impulse/click. Specify as 0 for click\n and impulse if the next argument is necessary, otherwise omit.\n\nheadrad, optional, specify head radius in cm for interpolation (defaults to\n 9 cm, same as csound)\n\nwav, required if timbre=custom. 
Path to a wav file to be interpolated.\n\nNotes:\n----------\n\nthe .dat files are copies of the hrtf set included with csound and were copied\nfrom /usr/share/csound/hrtf/hrtf-*.dat. The distance on these files is always\n1.4m.\n\nFor details on the origin of the hrtf files, see:\n\n * http://alumni.media.mit.edu/~kdm/hrtfdoc/section3_2.html\n\n * http://sound.media.mit.edu/resources/KEMAR.html\n\nAdditional references (should be cited when this is used):\n\n * http://sound.media.mit.edu/resources/KEMAR/hrtfdoc.ps\n\n * http://www.csounds.com/manual/html/hrtfmove.html\n" }, { "alpha_fraction": 0.589248776435852, "alphanum_fraction": 0.6202619075775146, "avg_line_length": 25.78769302368164, "blob_id": "89fe11ec36fe6d274c6d92bec904c74c7f8f1ee0", "content_id": "05003a7316c67756b7a152ba726260ce515be05a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8706, "license_type": "no_license", "max_line_length": 79, "num_lines": 325, "path": "/audio_tools.py", "repo_name": "lennyvarghese/hrtf_audio", "src_encoding": "UTF-8", "text": "import numpy as np\nfrom scipy import signal\n\nSAMPLERATE = 44100\nDURATION = 1.000\nF0 = 107.\n\n\ndef clicktrain(duration=DURATION, f0=F0, sampleRate=SAMPLERATE):\n '''\n Generate an alternatinv polarity click train.\n\n duration: length in seconds (default: 1 s)\n\n f0: fundamental frequency (determines impulse spacing; default: 107 Hz)\n\n sampleRate: the desired samplerate (default: 44100)\n '''\n\n t = np.linspace(0.0, duration, duration*sampleRate, endpoint=False)\n\n audioOut = np.zeros(t.shape, dtype=np.float)\n\n clickSpacingSamples = int(sampleRate / f0)\n\n clickSamples = np.arange(0, t.shape[0], clickSpacingSamples)\n polarities = np.zeros(clickSamples.shape, dtype=float)\n polarities[0::2] = 1.0\n polarities[1::2] = -1.0\n\n audioOut[clickSamples] = polarities\n\n nSamplesHigh = int(np.round(80E-6*sampleRate))\n for y in range(nSamplesHigh):\n audioOut[clickSamples+y] = 
audioOut[clickSamples]\n\n return scale_rms(audioOut)\n\n\ndef impulse(duration=DURATION, sampleRate=SAMPLERATE):\n '''\n Generate a single impulse (x = 1) at t = 0, with zeros afterwards specified\n by duration\n '''\n\n x = np.zeros(duration*sampleRate, dtype=float)\n\n x[0] = 1.0\n\n return x\n\n\ndef click(duration=DURATION, sampleRate=SAMPLERATE):\n '''\n Generate a single 80 us click (x = 1) at t = 0\n '''\n\n x = np.zeros(duration*sampleRate, dtype=float)\n\n nSamplesHigh = int(np.round(80E-6*sampleRate))\n for y in range(nSamplesHigh):\n x[y] = 1.0\n\n return x\n\n\ndef sawtooth(duration=DURATION, f0=F0, sampleRate=SAMPLERATE, N=64):\n '''\n Generate a sawtooth wave.\n\n duration: length in seconds (default: 1 s)\n\n f0: fundamental frequency (determines impulse spacing; default: 107 Hz)\n\n sampleRate: the desired samplerate (default: 44100)\n\n N: number of sinusoids to sum (default: 64)\n '''\n\n t = np.linspace(0.0, duration, duration*sampleRate, endpoint=False)\n\n audioOut = np.zeros(t.shape, dtype=np.float)\n\n for k in range(1, N+1):\n audioOut += (np.sin(2*np.pi*k*f0*t) / k)\n\n return scale_rms(audioOut)\n\n\ndef puretone(duration=DURATION, f0=F0, sampleRate=SAMPLERATE):\n '''\n Generate a pure tone.\n\n duration: length in seconds (default: 1 s)\n\n f0: fundamental frequency (determines impulse spacing; default: 107 Hz)\n\n sampleRate: the desired samplerate (default: 44100)\n '''\n\n t = np.linspace(0.0, duration, duration*sampleRate, endpoint=False)\n\n audioOut = np.sin(2*np.pi*f0*t)\n\n return scale_rms(audioOut)\n\n\ndef scale_rms(audio):\n \"\"\"\n scales audio samples so that entire sample RMS = 1\n\n you don't really want to do this if nchan > 1\n\n \"\"\"\n return audio / get_rms(audio)\n\n\ndef get_rms(audio):\n \"\"\"\n computes rms down the first axis\n \"\"\"\n\n return np.sqrt((audio ** 2.0).mean(axis=0))\n\n\n# binaural functions\n\n\ndef get_itd(audioInput, sampleRate, normalize=False):\n\n '''\n computes itd by cross-correlating 
left and right channels.\n negative lag indicates left-ear leads\n '''\n\n assert audioInput.shape[1] == 2\n\n leftSignal = audioInput[:, 0]\n rightSignal = audioInput[:, 1]\n\n if normalize:\n leftSignal = scale_rms(leftSignal)\n rightSignal = scale_rms(rightSignal)\n\n q = fftcorrelate(rightSignal, leftSignal)\n\n lagSamples = len(q) - len(rightSignal) - np.argmax(q)\n\n return lagSamples / float(sampleRate)\n\n\ndef get_ild(audioInput):\n '''\n computes ild\n returns ild in db (right signal relative to left signal)\n '''\n assert audioInput.shape[1] == 2\n\n leftSignal = audioInput[:, 0]\n rightSignal = audioInput[:, 1]\n\n leftSignalRMS = get_rms(leftSignal)\n rightSignalRMS = get_rms(rightSignal)\n\n return 20*np.log10(rightSignalRMS/leftSignalRMS)\n\n\ndef nextpow2(value):\n '''\n Computes X >= input, such that log2(X) is an integer.\n '''\n return int(2**np.ceil(np.log2(value)))\n\n\ndef fftconvolve(x, b, nfft=None):\n '''\n 1D convolution using FFT\n '''\n if nfft is None:\n nfft = nextpow2(len(x) + len(b) - 1)\n\n result = np.fft.irfft(np.fft.rfft(x, nfft) * np.fft.rfft(b, nfft), nfft)\n\n return result[0:(x+b)]\n\n\ndef fftcorrelate(x1, x2=None, meanSubtract=False):\n '''\n 1D auto/cross correlation using FFT\n '''\n if not isinstance(x1, np.ndarray):\n x1 = np.array(x1)\n\n if meanSubtract:\n x1 = x1 - x1.mean()\n\n if np.any(x2):\n if not isinstance(x2, np.ndarray):\n x2 = np.array(x2)\n\n if meanSubtract:\n x2 = x2 - x2.mean()\n\n nSamps = x1.shape[0] + x2.shape[0] - 1\n nfft = nextpow2(nSamps)\n X1 = np.fft.rfft(x1, nfft)\n X2 = np.fft.rfft(x2, nfft)\n toRoll = x2.shape[0] - 1\n else:\n nSamps = 2*x1.shape[0] - 1\n nfft = nextpow2(nSamps)\n X1 = np.fft.rfft(x1, nfft)\n X2 = X1\n toRoll = x1.shape[0] - 1\n\n result = np.fft.irfft(X1 * np.conjugate(X2), nfft)\n\n return np.roll(result, toRoll)[0:nSamps]\n\n\ndef vocoder(inputData, f0=F0, sampleRate=SAMPLERATE, nBands=64):\n '''\n Performs click-train vocoding on an input stimulus. 
Input signal is\n full-wave rectified and band-pass filtered to obtain the local envelope,\n then each envelope is multiplied by a click train. The result is passed\n back through the bandpass filter, and the results across bands summed to\n get the output.\n\n Each click is 80 uSeconds. First filter corner frequency for high-pass is\n 500 Hz. Corner frequency for low-pass on last filter is 200 Hz below\n Nyquist.\n\n inputData: monaural stimulus to be vocoded\n\n f0: fundamental frequency (determines impulse spacing; default: 107 Hz)\n\n sampleRate: the desired samplerate (default: 44100)\n\n nBands: number of vocoding bands (bandpass filters) to use (default: 64)\n '''\n\n if not isinstance(inputData, np.ndarray):\n inputData = np.ndarray(inputData)\n\n if len(inputData.shape) > 1:\n raise ValueError('only 1 channel audio is supported.')\n\n nyquist = sampleRate / 2.0\n\n # generate the click kernel\n clickSamples = np.ceil(sampleRate * 0.00008)\n clickSpacing = np.round(1.0/f0 * float(sampleRate))\n\n # envelope filter\n nTapsEnv = 0\n transition = 50. / nyquist\n while nTapsEnv % 2 != 1:\n nTapsEnv, betaEnv = signal.kaiserord(100.0, transition)\n transition = transition * 1.01\n envFilter = signal.firwin(nTapsEnv, 20. 
/ nyquist,\n window=('kaiser', betaEnv),\n pass_zero=True)\n\n # vocoding filters\n maxLen = envFilter.shape[0]\n\n fc = (np.logspace(np.log10(500),\n np.log10(sampleRate/2.0 - 200),\n nBands+1) / nyquist)\n b = []\n for z in range(len(fc)-1):\n nTaps = 0\n transition = (fc[z+1] - fc[z])\n while nTaps % 2 != 1:\n nTaps, beta = signal.kaiserord(60.0, transition)\n transition = transition * 1.01\n\n b.append(signal.firwin(nTaps, [fc[z], fc[z+1]],\n window=('kaiser', beta),\n pass_zero=False))\n if b[-1].shape[0] > maxLen:\n maxLen = b[-1].shape[0]\n\n nfft = nextpow2(inputData.shape[0] + 2*maxLen - 1)\n\n output = []\n portionLengths = []\n # time for some action\n for f in range(len(b)):\n # filter the samples with appropriate bandpass\n grpDelay1 = (len(b[f]) - 1) / 2\n rectResult = fftconvolve(inputData, b[f], nfft)[grpDelay1:]**2.0\n\n # filter the samples with envelope filter\n grpDelay2 = (len(envFilter) - 1) / 2\n envelope = np.abs(fftconvolve(rectResult, envFilter, nfft)[grpDelay2:])\n\n clickTrain = np.zeros(envelope.shape)\n clickTrainStarts = np.arange(0, clickTrain.shape[0], int(clickSpacing))\n clickTrainEnds = clickTrainStarts + clickSamples\n\n # generate the alternating polarity click train\n for n in range(len(clickTrainStarts)):\n if n % 2 == 1:\n clickTrain[clickTrainStarts[n]:clickTrainEnds[n]] = 1.0\n else:\n clickTrain[clickTrainStarts[n]:clickTrainEnds[n]] = -1.0\n\n # pass result back through bandpass filter\n finalResult = fftconvolve(envelope*clickTrain, b[f], nfft)[grpDelay1:]\n\n output.append(finalResult)\n portionLengths.append(output[-1].shape[0])\n\n maxPortionLength = max(portionLengths)\n for y in range(len(output)):\n output[y] = np.pad(output[y],\n ((0, maxPortionLength - output[y].shape[0])),\n 'constant')\n\n output = np.sum(np.array(output), axis=0)\n\n output /= np.max(output)\n\n return output\n" } ]
3
christophens/playground
https://github.com/christophens/playground
efd716b80d8ca8e6acca6c24d317e194c1be4e0e
67f80b72f1867ea8a7908f045cf7a508fe1ac9ef
f9cd7e6d1930f85084efa18eee448b6a841a2b78
refs/heads/master
2022-12-12T20:57:20.445132
2020-09-08T08:31:49
2020-09-08T08:31:49
293,298,523
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.559269905090332, "alphanum_fraction": 0.5681911110877991, "avg_line_length": 34.58394241333008, "blob_id": "91d6705ca332c15bdda6f34d4af9e9b965e5d9cc", "content_id": "c0499937e00d9d937116438a4c4907ed678ba830", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9752, "license_type": "no_license", "max_line_length": 111, "num_lines": 274, "path": "/calculator.py", "repo_name": "christophens/playground", "src_encoding": "UTF-8", "text": "import math\nimport ast\nimport re\nimport random\nimport string\n\n# Revert to original.\n\ndef get_user_input(svar_dict: dict) -> (str, str):\n \"\"\"\n Get user input from terminal.\n \"\"\"\n svar_dict_keys = svar_dict.keys()\n\n while True:\n print('Input: ')\n user_input = input()\n\n if user_input == 'workspace':\n for entry in svar_dict:\n print(entry + ': ' + str(svar_dict[entry]))\n \n elif 'clear' in user_input:\n clear_regex = r'clear\\s*'\n var_to_clear = re.sub(clear_regex, '', user_input)\n if var_to_clear:\n del svar_dict[var_to_clear]\n else:\n svar_dict = {'ans' :svar_dict['ans']}\n else:\n eq_sign = user_input.find('=')\n var = None\n if eq_sign != -1:\n var = user_input[0:eq_sign]\n var = var.replace(' ','')\n text = user_input[eq_sign + 1:]\n else:\n text = user_input\n return var, text\n\ndef get_next_operation(text: str) -> (list, str, str):\n \"\"\"\n Find the next expression to evaluate. \n Input:\n - String with mathematical operation. Example: '4 * 3 * (3 + 4 * (5 / 7))'\n \n \n Return objects:\n - List with two integers that indicate the position of the next expression\n within the innput string.\n - String with the expression.\n - String that contains a mathematical function keyword such as sin, cos, ...\n Example: 4 * 3 * (3 + 4 * (5 / 7)) -> 5 / 7\n \"\"\"\n # Define regex to extract the innermost complete pair of parantheses. 
\n # Use re.search method to obtain the first complete set of parantheses.\n # If at least one set of parantheses exist it returns a re.match object.\n # The match includes the paranthesis, i.e. '(5 / 7)'\n paranthesis_regex = r'\\([^\\(\\)]+\\)'\n result = re.search(paranthesis_regex, text)\n\n # Define a set of accepted function keywords that preceed the parantheses.\n # Set the default return value that contains the function keyword to None\n func_set = {'sin', 'cos', 'exp', 'abs', 'log', 'sind', 'cosd', 'sqrt'}\n func = None\n # If no parantheses are found, return the entire string and set the list \n # with the position of the extracted string to \n # [-1, length of the the string - 1]. \n if result is None:\n string = text\n indices = [-1, len(text) - 1]\n # If a match object is found, slice and return the resulting string without the parantheses. \n else:\n indices = [result.start(), result.end()]\n string = result.group(0)[1:-1]\n # If a valid function keyword preceeds the parantheses, change the\n # position of the extracted string to include the function keyword \n # and return the keyword.\n for i in range(3,5): \n if text[result.start() - i: result.start()] in func_set:\n indices = [result.start() - i, result.end()]\n func = text[result.start() - i: result.start()]\n break\n\n\n \n return indices, string, func\n\ndef get_numbers_operators(text: str, var_dict: dict, svar_dict:dict) -> (list, list, dict):\n \"\"\"\n Returns a list with variables and operators in order of their appearance in a given expression.\n A dict maps values to each variable.\n\n Input:\n - text: string with an operation to evaluate. Must not contain parantheses. \\n \\\n Example: '5 - ans * az'\n - var_dict: Dict that keeps intermediate results. \\n \\\n Example: {'az': -4.36}\n - svar_dict: Dict that keeps stored variables from previous evaluations. \\n \\\n Example: {'ans': 3.0}\n \n Output objects:\n - var_list: list with floats in the order of their apperance. 
\\n \\\n Example: 5 - ans * az returns [5.0, 3.0, -4.36] \\n\n\n - var_operators: list with arithmetic operators in order of their appearance. \\n \\\n Example: 5 - ans * az returns ['-', '*'] \\n\n \"\"\"\n\n\n # Define regex to extract all numbers in a string, as well as placeholders for intermediate results.\n # These placeholders start with a character, followed by a sequence of characters and numbers.\n # Use re.findall method to get a list of all numbers from the string.\n variables_regex = r\"((?<=[\\+\\-\\*\\/\\^\\,])|^)\\s*[\\+\\-]?\\s*(\\d+\\.?\\d*(e-?\\d+)?|[A-Za-z]+[A-Za-z0-9]*)\"\n var_list = re.findall(variables_regex, text)\n var_list = [i[1] for i in var_list]\n\n # Create dynamic view objects of the keys in var_dict and svar_dict.\n var_dict_keys = var_dict.keys() # returns DYNAMIC view object\n svar_dict_keys = svar_dict.keys()\n\n # Loop over var_list to assign variables to numbers and to copy saved variables from svar_dict to var_dict.\n for idx, entry in enumerate(var_list):\n # Do nothing if an entry is already stored in var_dict\n if not entry in var_dict_keys:\n # Check if entry is contained in svar_dict\n if not entry in svar_dict_keys:\n var_list[idx] = float(entry)\n else:\n var_list[idx] = svar_dict[entry]\n else:\n var_list[idx] = var_dict.pop(entry)\n\n \n operator_string = re.sub(variables_regex, '', text)\n operator_list = [i for i in operator_string if i !=' ']\n\n # Return both lists and the dictionairy.\n return var_list, operator_list, var_dict\n\ndef evaluate_expression(var_list: list, operator_list: list, func: str) -> float:\n \"\"\"\n Evaluate all operations based on the established order of operations.\n \"\"\"\n \n mul_diff_exp_list = ['^', '*', '/']\n for operation in mul_diff_exp_list:\n while operation in operator_list:\n operator_index = operator_list.index(operation)\n a = var_list[operator_index]\n b = var_list.pop(operator_index + 1)\n var_list[operator_index] = arithmetic_operations(a, b, 
operator_list.pop(operator_index))\n \n index = 0\n while operator_list:\n if operator_list[0] != ',':\n a = var_list[index]\n b = var_list.pop(index + 1)\n var_list[index] = arithmetic_operations(a, b, operator_list.pop(index))\n else:\n index = 1\n del operator_list[0]\n \n if index == 1:\n var_list[0] = evaluate_func(func, var_list[0], var_list.pop(1))\n elif func:\n var_list[0] = evaluate_func(func, var_list[0])\n\n return var_list[0] #var_list has 1 entry, so this function returns a float, not a list\n\ndef arithmetic_operations(a: float, b:float, operand:str) -> (float):\n \"\"\"\n Evaluate arithmetic operations.\n \"\"\"\n\n significant_digits = get_significant_decimals(a, b, operand)\n switcher = {\n '/' : lambda a, b : a / b,\n '*' : lambda a, b : a * b,\n '+' : lambda a, b : a + b,\n '-' : lambda a, b : a - b,\n '^' : lambda a, b : pow(a, b)\n }\n\n result = switcher.get(operand)(a, b)\n #result = round(result, significant_digits)\n \n return result\n\ndef get_significant_decimals(a: float, b: float, operator: str) -> int:\n \"\"\"\n Get the number of significant decimals\n \"\"\"\n a = str(a)\n b = str(b) \n decimal_regex = r'(?<=\\.)[0]*[1-9]+'\n a_dec = re.search(decimal_regex, a)\n b_dec = re.search(decimal_regex, b)\n\n a_dec = a_dec.end()- a_dec.start() if a_dec else 0\n b_dec = b_dec.end()- b_dec.start() if b_dec else 0\n\n \n if operator in ['+', '-']:\n return max(a_dec, b_dec)\n elif operator == '*':\n return a_dec + b_dec\n else:\n return 20\n\ndef evaluate_func(func: str, *args: float) -> dict:\n \"\"\"\n Evaluate mathematical functions.\n \"\"\"\n switcher = {\n 'sin' : lambda a: math.sin(a[0]), # sin in rad\n 'sind': lambda a: math.sin(math.radians(a[0])), # sin in deg\n 'cos' : lambda a: math.cos(a[0]), # cos in rad\n 'cosd': lambda a: math.cos(math.radians(a[0])), # cos in deg\n 'tan' : lambda a: math.tan(a[0]), # tan in rad\n 'tand': lambda a: math.tan(math.radians(a[0])), # tan in deg\n 'exp' : lambda a: math.exp(a[0]), # e ^ x\n 'abs' : 
lambda a: abs(a[0]), # |x|\n 'log' : lambda a: math.log(a[0], a[1]), #log(a,b) = n <-> b ^ n = a\n 'sqrt': lambda a: math.sqrt(a[0])\n }\n \n\n return switcher.get(func)(args)\n\ndef create_new_var(var_list: list) -> str:\n \"\"\"\n Create new variable.\n \"\"\"\n if not var_list:\n key = ''.join(random.choices(string.ascii_letters, k=2))\n else:\n key = var_list[0] \n while key in var_list:\n key = ''.join(random.choices(string.ascii_letters, k=2))\n return key\n\ndef main_c(*args):\n svar_dict ={}\n svar_dict_keys = svar_dict.keys()\n if args:\n text = args[0]\n else:\n var, text = get_user_input(svar_dict)\n while text !='quit':\n var_dict = {}\n var_dict_keys = var_dict.keys()\n repeat = True\n while repeat:\n indices, string, func = get_next_operation(text)\n\n var_list, op_list, var_dict = get_numbers_operators(string, var_dict, svar_dict)\n intermediate_result = evaluate_expression(var_list, op_list, func)\n\n new_key = create_new_var(list(svar_dict_keys) + list(var_dict_keys))\n var_dict[new_key] = intermediate_result\n text = text.replace(text[max(indices[0], 0) : indices[1]], new_key)\n\n if indices[0] == -1:\n repeat = False\n svar_dict['ans'] = intermediate_result\n if var:\n svar_dict[var] = intermediate_result\n print(str(intermediate_result) + '\\n')\n var, text = get_user_input(svar_dict)\n \n return (intermediate_result)\n\nmain_c()\n\n\n" }, { "alpha_fraction": 0.4509316682815552, "alphanum_fraction": 0.5478261113166809, "avg_line_length": 31.239999771118164, "blob_id": "3379496d88eaaff79f2def10bcf44c231b0dfa94", "content_id": "26a754ddb960545325c0706e5c4ff21a4116b84a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 805, "license_type": "no_license", "max_line_length": 70, "num_lines": 25, "path": "/testing.py", "repo_name": "christophens/playground", "src_encoding": "UTF-8", "text": "import unittest\nfrom calculator import main_c\n\nclass BasicArithmeticTests(unittest.TestCase):\n\n def 
test_addition(self):\n self.assertEqual(main_c('3 + 4.7'), 7.7)\n self.assertEqual(main_c(' +3 + 4.7'), 7.7)\n self.assertEqual(main_c('3 + +4.7'), 7.7)\n self.assertEqual(main_c('3 + (4.7 )'), 7.7)\n self.assertEqual(main_c('3 + (+ 4.7)'), 7.7)\n self.assertEqual(main_c('7.999999999999+0.000000000001'), 8.0)\n \n def test_subtraction(self):\n self.assertEqual(main_c('3 - 4.2'), -1.2)\n self.assertEqual(main_c(' +3 - 4.7'), -1.7)\n self.assertEqual(main_c('3 - +4.7'), -1.7)\n self.assertEqual(main_c('3 - (4.7 )'), -1.7)\n self.assertEqual(main_c('3 - (+ 4.7)'), -1.7)\n \n \n\n\nif __name__ == '__main__':\n unittest.main()" } ]
2
scmagi/sizzler
https://github.com/scmagi/sizzler
ab84ef43e84712af3dbdf9e58ac55dc31ba30d67
6f1b0afb0786e56488012c7e8e16b97194f275cd
22824663e94e2e4441f94f9002fed626ae99be6d
refs/heads/master
2021-05-02T09:14:38.145214
2018-02-24T01:53:59
2018-02-24T01:53:59
120,819,321
13
3
null
null
null
null
null
[ { "alpha_fraction": 0.5170320272445679, "alphanum_fraction": 0.5220910906791687, "avg_line_length": 26.453702926635742, "blob_id": "a17f1f0cc047c7a169477c65b573522972bab8dc", "content_id": "2220cd9d4a4cfc360b6788e01aa675dd3594f9e1", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2965, "license_type": "permissive", "max_line_length": 78, "num_lines": 108, "path": "/sizzler/__main__.py", "repo_name": "scmagi/sizzler", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n\n\"\"\"\n------------------------------------------------------------------------------\nCheck running environment:\n * ensure running under Python 3.5+;\n * ensure 3rd party packages installed.\n\"\"\"\n\nimport sys\nif sys.version_info < (3, 5):\n print(\"Error: you have to run Sizzler with Python 3.5 or higher version.\")\n exit(1)\n\ntry:\n import websockets\n import nacl\n import yaml\nexcept:\n print(\"Error: one or more 3rd party package(s) not installed.\")\n print(\"To fix this, run:\\n sudo pip3 install -r requirements.txt\")\n exit(1)\n\nimport os\nimport asyncio\nimport logging\n\nfrom .util.root import RootPriviledgeManager\nfrom .util.cmdline import parseCommandLineArguments\nfrom .config.parser import loadConfigFile\nfrom .tun import SizzlerVirtualNetworkInterface\nfrom .transport.wsserver import WebsocketServer\nfrom .transport.wsclient import WebsocketClient\n\ndef main():\n\n \"\"\"\n --------------------------------------------------------------------------\n Parse command line arguments.\n \"\"\"\n\n argv = parseCommandLineArguments(sys.argv[1:])\n\n ROLE = \"server\" if argv.server else \"client\"\n CONFIG = loadConfigFile(argv.server if ROLE == \"server\" else argv.client)\n logging.basicConfig(level=argv.loglevel.upper())\n\n \"\"\"\n --------------------------------------------------------------------------\n We need root priviledge.\n \"\"\"\n\n priviledgeManager = RootPriviledgeManager()\n if not 
priviledgeManager.isRoot():\n print(\"Error: you need to run sizzler with root priviledge.\")\n exit(1)\n\n \"\"\"\n --------------------------------------------------------------------------\n With root priviledge, we have to set up TUN device as soon as possible.\n \"\"\"\n\n tun = SizzlerVirtualNetworkInterface(\n ip=CONFIG[\"ip\"][\"client\" if ROLE == \"client\" else \"server\"],\n dstip=CONFIG[\"ip\"][\"server\" if ROLE == \"client\" else \"client\"]\n )\n\n \"\"\"\n --------------------------------------------------------------------------\n Now root is no longer required.\n \"\"\"\n\n try:\n priviledgeManager.dropRoot()\n assert not priviledgeManager.isRoot()\n except Exception as e:\n print(\"Error: failed dropping root priviledge.\")\n print(e)\n exit(1)\n\n \"\"\"\n --------------------------------------------------------------------------\n Start the server or client.\n \"\"\"\n\n if ROLE == \"client\":\n transport = WebsocketClient(uris=CONFIG[\"client\"], key=CONFIG[\"key\"])\n\n else:\n transport = WebsocketServer(\n host=CONFIG[\"server\"][\"host\"],\n port=CONFIG[\"server\"][\"port\"],\n key=CONFIG[\"key\"]\n )\n\n tun.connect(transport)\n\n \"\"\"\n --------------------------------------------------------------------------\n Start event loop.\n \"\"\"\n\n loop = asyncio.get_event_loop()\n loop.run_until_complete(asyncio.gather(tun, transport))\n\n\nif __name__ == \"__main__\":\n main()\n" }, { "alpha_fraction": 0.5396113395690918, "alphanum_fraction": 0.5485799908638, "avg_line_length": 19.90625, "blob_id": "9083668b7ee3451bb4ef56339404db71c87a2550", "content_id": "ea55f9db7c0886eb1d59511109829c7fd9c3944b", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 669, "license_type": "permissive", "max_line_length": 74, "num_lines": 32, "path": "/sizzler/util/root.py", "repo_name": "scmagi/sizzler", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n\nimport os\nimport pwd\nimport 
grp\n\n\nclass RootPriviledgeManager:\n\n def isRoot(self):\n return os.geteuid() == 0\n\n def dropRoot(self):\n if not self.isRoot(): return True\n\n try:\n user, group = (\"nobody\", \"nogroup\")\n\n uid = pwd.getpwnam(user).pw_uid\n gid = grp.getgrnam(group).gr_gid\n\n os.setgroups([]) # Remove group privileges\n\n os.setgid(gid)\n os.setuid(uid)\n\n old_umask = os.umask(0o077)\n except:\n if self.isRoot():\n raise Exception(\"Failed dropping root to nobody:nogroup.\")\n\n return not self.isRoot()\n" }, { "alpha_fraction": 0.6375839114189148, "alphanum_fraction": 0.6510066986083984, "avg_line_length": 21.923076629638672, "blob_id": "504b740f4f04ab6a039c6a3e3d96bda689bb03a5", "content_id": "28e229578ba995fbf4a4b8d29e4928d252926495", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 298, "license_type": "permissive", "max_line_length": 53, "num_lines": 13, "path": "/sizzler/transport/_transport.py", "repo_name": "scmagi/sizzler", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n\nclass SizzlerTransport:\n\n def __init__(self):\n self.connections = 0\n self.toWSQueue, self.fromWSQueue = None, None\n\n def increaseConnectionsCount(self):\n self.connections += 1\n\n def decreaseConnectionsCount(self):\n self.connections -= 1\n" }, { "alpha_fraction": 0.607587456703186, "alphanum_fraction": 0.618553638458252, "avg_line_length": 30.157407760620117, "blob_id": "20d7004b677ba3c6ea21421bd307bf7e5381b81d", "content_id": "5774b3bbd4ccef4ce1b9f75dcdc3f585afe2f0d7", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3374, "license_type": "permissive", "max_line_length": 77, "num_lines": 108, "path": "/sizzler/crypto/padding.py", "repo_name": "scmagi/sizzler", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n\nimport asyncio\nimport os\nimport random\nimport struct\nimport time\n\n\n# tell calculation how many bytes will be added after 
encryption with respect\n# to input before padding\n\nPADDING_FORMAT_OVERHEAD = 2 + 8\nENCRYPTION_OVERHEAD = 40\n\nPADDING_TOTAL_OVERHEAD = ENCRYPTION_OVERHEAD + PADDING_FORMAT_OVERHEAD\n\n# How many nonces may be (theoretically) issued per second, limits the max.\n# network speed!\nNONCES_RESOLUTION = 1e6 # < if packet size = 4kB, limits to 4GB/s(???)\nDELETE_NONCES_BEFORE = 300 * NONCES_RESOLUTION\n\n\nclass NonceManagement:\n\n def __init__(self):\n self.nonces = []\n\n def new(self):\n return int(time.time() * NONCES_RESOLUTION)\n\n def verify(self, nonce):\n if not self.nonces:\n self.nonces.append(nonce)\n self.oldest = nonce - DELETE_NONCES_BEFORE\n return True\n if nonce < self.oldest or nonce in self.nonces:\n print(\"Nonce failure: Replay attack or unexpected bug!\")\n return False\n self.nonces.append(nonce)\n return True\n\n def __await__(self):\n while True:\n # recalculate acceptable nonce time\n if self.nonces:\n self.oldest = max(self.nonces) - DELETE_NONCES_BEFORE\n # clear nonces cache\n self.nonces = [e for e in self.nonces if e >= self.oldest]\n yield from asyncio.sleep(30)\n\n\nclass RandomPadding:\n \n def __init__(self, targetSize=4096):\n assert targetSize > PADDING_TOTAL_OVERHEAD\n self.maxAfterPaddingLength = targetSize - PADDING_TOTAL_OVERHEAD\n self.paddingTemplate = os.urandom(65536)\n self.nonces = NonceManagement()\n\n def __packHead(self, dataLength):\n # put `dataLength` and nonce(timestamp-based) into a header\n return struct.pack(\"<HQ\", dataLength, self.nonces.new())\n\n def __unpackHead(self, data):\n # unpack header, extract nonce and dataLength.\n dataLength, nonce = struct.unpack(\"<HQ\", data)\n # verify nonce, if invalid, drop it internally\n if self.nonces.verify(nonce):\n return dataLength\n else:\n return None\n\n def pad(self, data):\n dataLength = len(data)\n if dataLength >= self.maxAfterPaddingLength:\n return self.__packHead(dataLength) + data\n else:\n targetLength = random.randint(\n dataLength, 
self.maxAfterPaddingLength\n )\n paddingLength = targetLength - dataLength\n padding = self.paddingTemplate[:paddingLength]\n return self.__packHead(dataLength) + data + padding\n\n def unpad(self, data):\n dataLength = self.__unpackHead(data[:PADDING_FORMAT_OVERHEAD])\n if not dataLength: return None\n if dataLength > len(data) - PADDING_FORMAT_OVERHEAD: return None\n return data[PADDING_FORMAT_OVERHEAD:][:dataLength]\n\n def __await__(self):\n async def job1():\n while True:\n self.paddingTemplate = os.urandom(65536)\n await asyncio.sleep(5) # change random padding every 5 sec\n yield from asyncio.gather(job1(), self.nonces)\n\n\nif __name__ == \"__main__\":\n async def main():\n p = RandomPadding(100)\n print(p.pad(b\"aaa\"*10))\n\n \n\n loop = asyncio.get_event_loop()\n loop.run_until_complete(main())\n \n" }, { "alpha_fraction": 0.5432307124137878, "alphanum_fraction": 0.5489245057106018, "avg_line_length": 31.93055534362793, "blob_id": "cad0b8975b0c50ee96e2a076320226a59ed6b5c8", "content_id": "48a3d398b80d71a6cd5f0f5082fbe55a0f5c69ea", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4742, "license_type": "permissive", "max_line_length": 77, "num_lines": 144, "path": "/sizzler/transport/_wssession.py", "repo_name": "scmagi/sizzler", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n\nimport time\nimport asyncio\nimport hashlib\nfrom logging import info, debug, critical, exception\n\nfrom ..crypto.crypto import getCrypto\nfrom ..crypto.padding import RandomPadding\n\n\nwsid = 0\n\nTIMEDIFF_TOLERANCE = 300\nCONNECTION_TIMEOUT = 30\nPADDING_MAX = 2048\n\nclass WebsocketSession:\n\n wsid = 0\n\n def __init__(\n self,\n websocket,\n path,\n key,\n fromWSQueue,\n toWSQueue\n ):\n global wsid\n wsid += 1\n self.wsid = wsid\n self.websocket = websocket\n self.fromWSQueue = fromWSQueue\n self.toWSQueue = toWSQueue\n self.encryptor, self.decryptor = getCrypto(key)\n self.padder = 
RandomPadding(PADDING_MAX) \n\n # get path, which is the unique ID for this connection\n try:\n f = path.find(\"?\")\n assert f >= 0\n self.uniqueID = hashlib.sha512(\n path[f:].encode(\"ascii\")\n ).hexdigest()\n except:\n raise Exception(\"Connection %d without valid ID.\" % self.wsid)\n\n # parameters for heartbeating\n self.peerAuthenticated = False\n self.lastHeartbeat = time.time()\n\n\n def __beforeSend(self, data=None, heartbeat=None):\n # Pack plaintext with headers etc. Returns packed data if they are\n # ok for outgoing traffic, or None.\n ret = None\n if data:\n ret = b\"d-\" + data\n if heartbeat:\n ret = (\"h-%s-%s\" % (self.uniqueID, time.time())).encode('ascii')\n return self.padder.pad(ret)\n\n def __afterReceive(self, raw):\n # unpack decrypted PLAINTEXT and extract headers etc.\n # returns data needed to be written to TUN if any, otherwise None.\n raw = self.padder.unpad(raw)\n if not raw: return None\n if raw.startswith(b\"d-\"):\n return raw[2:]\n if raw.startswith(b\"h-\"):\n self.__heartbeatReceived(raw)\n return None\n\n # ---- Heartbeat to remote, and evaluation of remote sent heartbeats.\n\n def __heartbeatReceived(self, raw):\n # If a remote heartbeat received, record its timestamp.\n try:\n heartbeatSlices = raw.decode('ascii').split('-')\n assert heartbeatSlices[0] == \"h\"\n assert heartbeatSlices[1] == self.uniqueID\n timestamp = float(heartbeatSlices[2])\n nowtime = time.time()\n if timestamp <= nowtime + TIMEDIFF_TOLERANCE:\n self.lastHeartbeat = max(self.lastHeartbeat, timestamp)\n self.peerAuthenticated = True\n except:\n warning(\"Warning: invalid heartbeat!\")\n\n async def __sendLocalHeartbeat(self):\n # Try to send local heartbeats.\n while True:\n d = self.__beforeSend(heartbeat=True)\n e = await self.encryptor(d)\n await self.websocket.send(e)\n await asyncio.sleep(5)\n\n async def __checkRemoteHeartbeat(self):\n # See if remote to us is still alive. 
If not, raise Exception and\n # terminate the connections.\n while True:\n await asyncio.sleep(5)\n if time.time() - self.lastHeartbeat > CONNECTION_TIMEOUT:\n raise Exception(\"Connection %d timed out.\" % self.wsid)\n\n\n # ---- Data transfer\n\n async def __receiveToQueue(self):\n while True:\n e = await self.websocket.recv() # data received\n raw = await self.decryptor(e)\n if not raw: continue # decryption must success\n d = self.__afterReceive(raw)\n if not d: continue # if any data writable to TUN\n if self.peerAuthenticated: # if peer authenticated\n await self.fromWSQueue.put(d)\n debug(\" --|%3d|%s Local %5d bytes\" % (\n self.wsid,\n \"--> \" if self.peerAuthenticated else \"-//-\",\n len(e)\n ))\n\n async def __sendFromQueue(self):\n while True:\n d = await self.toWSQueue.get() # data to be sent ready\n s = self.__beforeSend(data=d) # pack the data\n if not s: continue # if packer refuses, drop it\n e = await self.encryptor(s) # encrypt packed data\n await self.websocket.send(e) # send it\n debug(\" Internet <--|%3d|-- %5d bytes\" % (\n self.wsid,\n len(s)\n ))\n\n def __await__(self):\n yield from asyncio.gather(\n self.__receiveToQueue(),\n self.__sendFromQueue(),\n self.__sendLocalHeartbeat(),\n self.__checkRemoteHeartbeat(),\n self.padder\n )\n" }, { "alpha_fraction": 0.5926773548126221, "alphanum_fraction": 0.5949656963348389, "avg_line_length": 22, "blob_id": "8249a51bd2214e53ed49b007fc0f07ddc657de49", "content_id": "9ab8b732e0f397df01ae508f29a461abf5a03371", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 437, "license_type": "permissive", "max_line_length": 57, "num_lines": 19, "path": "/sizzler/config/parser.py", "repo_name": "scmagi/sizzler", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n\nimport yaml\n\ndef loadConfigFile(filename):\n try:\n config = yaml.load(open(filename, \"r\").read())\n except:\n raise Exception(\"Cannot read given config file.\")\n\n try:\n 
assert type(config[\"key\"]) == str\n assert type(config[\"ip\"][\"server\"]) == str\n assert type(config[\"ip\"][\"client\"]) == str\n\n except:\n raise Exception(\"Malformed config file.\")\n\n return config\n" }, { "alpha_fraction": 0.5590214133262634, "alphanum_fraction": 0.5746177434921265, "avg_line_length": 31.700000762939453, "blob_id": "53eceb87b39d4bd7c2f1b5007da58c8879a11382", "content_id": "6f5490b29e05b10f28a61d7912cd213deda3522d", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3270, "license_type": "permissive", "max_line_length": 77, "num_lines": 100, "path": "/sizzler/tun.py", "repo_name": "scmagi/sizzler", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n\nimport os\nimport fcntl\nimport struct\nimport asyncio\nfrom logging import info, debug, critical, exception\n\nfrom .transport._transport import SizzlerTransport\n\nTUNSETIFF = 0x400454ca \nIFF_TUN = 0x0001 # Set up TUN device\nIFF_TAP = 0x0002 # Set up TAP device\nIFF_NO_PI = 0x1000 # Without this flag, received frame will have 4 bytes\n # for flags and protocol(each 2 bytes)\n\ndef _getTUNDeviceLocation():\n if os.path.exists(\"/dev/net/tun\"): return \"/dev/net/tun\"\n if os.path.exists(\"/dev/tun\"): return \"/dev/tun\"\n critical(\"TUN/TAP device not found on this OS!\")\n raise Exception(\"No TUN/TAP device available.\")\n\ndef _getReader(tun):\n loop = asyncio.get_event_loop()\n async def read():\n future = loop.run_in_executor(None, os.read, tun, 65536)\n return await future\n return read\n\ndef _getWriter(tun):\n loop = asyncio.get_event_loop()\n async def write(data):\n future = loop.run_in_executor(\n None,\n os.write,\n tun,\n data\n )\n await future\n return write\n\n\nclass SizzlerVirtualNetworkInterface:\n\n def __init__(self, ip, dstip, mtu=1500, netmask=\"255.255.255.0\"):\n self.ip = ip\n self.dstip = dstip\n self.mtu = mtu\n self.netmask = netmask\n self.__tunR, self.__tunW = self.__setup()\n 
self.toWSQueue = asyncio.Queue()\n self.fromWSQueue = asyncio.Queue()\n self.transports = []\n\n def __setup(self):\n try:\n self.tun = os.open(_getTUNDeviceLocation(), os.O_RDWR)\n ret = fcntl.ioctl(\\\n self.tun,\n TUNSETIFF,\n struct.pack(\"16sH\", b\"sizzler-%d\", IFF_TUN)\n )\n tunName = ret[:16].decode(\"ascii\").strip(\"\\x00\")\n info(\"Virtual network interface [%s] created.\" % tunName)\n\n os.system(\"ifconfig %s inet %s netmask %s pointopoint %s\" %\n (tunName, self.ip, self.netmask, self.dstip)\n )\n os.system(\"ifconfig %s mtu %d up\" % (tunName, self.mtu))\n info(\n \"\"\"%s: mtu %d addr %s netmask %s dstaddr %s\"\"\" %\n (tunName, self.mtu, self.ip, self.netmask, self.dstip)\n )\n\n return _getReader(self.tun), _getWriter(self.tun)\n except Exception as e:\n exception(e)\n raise Exception(\"Cannot set TUN/TAP device.\")\n\n def connect(self, transport):\n assert isinstance(transport, SizzlerTransport)\n self.transports.append(transport)\n transport.fromWSQueue = self.fromWSQueue\n transport.toWSQueue = self.toWSQueue\n\n def __countAvailableTransports(self):\n count = sum([each.connections for each in self.transports])\n return count\n\n def __await__(self):\n async def proxyQueueToTUN():\n while True:\n s = await self.fromWSQueue.get()\n await self.__tunW(s)\n async def proxyTUNToQueue():\n while True:\n s = await self.__tunR()\n if self.__countAvailableTransports() < 1: continue\n await self.toWSQueue.put(s)\n yield from asyncio.gather(proxyQueueToTUN(), proxyTUNToQueue())\n" }, { "alpha_fraction": 0.6036248803138733, "alphanum_fraction": 0.609929084777832, "avg_line_length": 23.230770111083984, "blob_id": "5a8a9114111e2d73aa49856f39b1fed6fc67a1d6", "content_id": "080ed0cf3f52ddbf274e7f13465619bd421f19b5", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1269, "license_type": "permissive", "max_line_length": 63, "num_lines": 52, "path": "/sizzler/crypto/crypto.py", "repo_name": 
"scmagi/sizzler", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n\nimport asyncio\nimport hashlib\nimport nacl.secret\n\ndef __getEncryptor(box):\n loop = asyncio.get_event_loop()\n async def encrypt(data):\n future = loop.run_in_executor(None, box.encrypt, data)\n return await future\n return encrypt\n\ndef __getDecryptor(box):\n loop = asyncio.get_event_loop()\n async def decrypt(data):\n def _wrapDecrypt(data):\n try:\n return box.decrypt(data)\n except:\n return None\n future = loop.run_in_executor(None, _wrapDecrypt, data)\n return await future\n return decrypt\n\n\n\ndef getCrypto(key):\n if type(key) == str: key = key.encode('utf-8')\n assert type(key) == bytes\n \n encryptKey = hashlib.sha512(key).digest()\n authkey = hashlib.sha512(encryptKey).digest()\n\n encryptKey = encryptKey[:nacl.secret.SecretBox.KEY_SIZE]\n\n box = nacl.secret.SecretBox(encryptKey)\n\n return __getEncryptor(box), __getDecryptor(box)\n\n\n\nif __name__ == \"__main__\":\n async def main():\n encryptor, decryptor = getCrypto(\"test\")\n d = await encryptor(b\"plaintext\")\n print(d)\n d = await decryptor(d)\n print(d)\n\n loop = asyncio.get_event_loop()\n loop.run_until_complete(main())\n \n" }, { "alpha_fraction": 0.5747126340866089, "alphanum_fraction": 0.5862069129943848, "avg_line_length": 13.5, "blob_id": "a323aabb31277ba001d6de00aa45df72eac18d7a", "content_id": "e4df91dac4abc491e4bfd24924cae354d1a61bda", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 87, "license_type": "permissive", "max_line_length": 28, "num_lines": 6, "path": "/sizzler/transport/router.py", "repo_name": "scmagi/sizzler", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n\nclass PacketRouter:\n\n def __init__(self, tun):\n pass\n" }, { "alpha_fraction": 0.6128226518630981, "alphanum_fraction": 0.6136552691459656, "avg_line_length": 29.024999618530273, "blob_id": "fccbb86d28a332bad49e3453915c6a72f0a2bbf6", "content_id": 
"76c2133fc3522cb4af16fb94766f60405f16d744", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1201, "license_type": "permissive", "max_line_length": 75, "num_lines": 40, "path": "/sizzler/transport/wsserver.py", "repo_name": "scmagi/sizzler", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n\nimport asyncio\nimport websockets\nimport time\nimport sys\nfrom logging import info, debug, critical, exception\n\nfrom ._wssession import WebsocketSession\nfrom ._transport import SizzlerTransport\n\n\nclass WebsocketServer(SizzlerTransport):\n\n def __init__(self, host=None, port=None, key=None):\n SizzlerTransport.__init__(self)\n self.host = host\n self.port = port\n self.key = key\n\n async def __wsHandler(self, websocket, path):\n info(\"New connection: %s\" % path)\n try:\n self.increaseConnectionsCount()\n await WebsocketSession(\n websocket=websocket,\n path=path,\n key=self.key,\n fromWSQueue=self.fromWSQueue,\n toWSQueue=self.toWSQueue\n )\n except Exception as e:\n debug(\"Server connection break, reason: %s\" % e)\n finally:\n self.decreaseConnectionsCount()\n info(\"Current alive connections: %d\" % self.connections)\n\n def __await__(self):\n assert self.toWSQueue != None and self.fromWSQueue != None\n yield from websockets.serve(self.__wsHandler, self.host, self.port)\n" }, { "alpha_fraction": 0.7540322542190552, "alphanum_fraction": 0.7620967626571655, "avg_line_length": 34.42856979370117, "blob_id": "67893aa1a2353c39cde86fe932df6b4df497f85a", "content_id": "057a42df0bda6eeadfe8ebc088773853b6925afa", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Makefile", "length_bytes": 248, "license_type": "permissive", "max_line_length": 77, "num_lines": 7, "path": "/Makefile", "repo_name": "scmagi/sizzler", "src_encoding": "UTF-8", "text": "upload-test: setup.py\n\tpython3 setup.py sdist\n\ttwine upload -r pypitest --skip-existing --config-file sizzler.pypirc 
dist/*\n\nupload-release: setup.py\n\tpython3 setup.py sdist\n\ttwine upload -r pypi --skip-existing --config-file sizzler.pypirc dist/*\n" }, { "alpha_fraction": 0.6455976366996765, "alphanum_fraction": 0.6569109559059143, "avg_line_length": 31.26984214782715, "blob_id": "2a65707168179a2335848c9e114bce20db72d15e", "content_id": "bdabc90bfefa8c4c8c0a65eba6a873c98939debc", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4066, "license_type": "permissive", "max_line_length": 79, "num_lines": 126, "path": "/sizzler/util/cmdline.py", "repo_name": "scmagi/sizzler", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n\nimport argparse\n\nLICENSE = \"\"\"\nCopyright (c) 2018 Sizzler\n\nPermission is hereby granted, free of charge, to any person obtaining a copy of\nthis software and associated documentation files (the \"Software\"), to deal in\nthe Software without restriction, including without limitation the rights to\nuse, copy, modify, merge, publish, distribute, sublicense, and/or sell copies\nof the Software, and to permit persons to whom the Software is furnished to do\nso, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n\"\"\"\n\n#----------------------------------------------------------------------------#\n\nEXAMPLE_CONFIG = \"\"\"\n# An example config file for Sizzler\n# ----------------------------------\n# Please edit this file according to instructions. Lines beginning with # are\n# comments and will be ignored.\n#\n# Save this file as something like `config.yaml`, and tell Sizzler to use it\n# upon starting:\n# sizzler -c config.yaml # for starting Sizzler as a client\n# sizzler -s config.yaml # for starting Sizzler as a server \n\n\n# This is the key for authorized access to your virtual network.\n# Must be kept secret.\n\nkey: example-key\n\n# These are IP addresses allocated in virtual network for both server and\n# client.\n\nip:\n server: 10.1.0.1\n client: 10.1.0.2\n\n# The server will listen on the address and port as follow.\n\nserver:\n host: localhost\n port: 8765\n\n# The client will attempt accessing the server via following URI. This may\n# differ from above server settings, especially when you desire to use e.g.\n# reverse proxies.\n#\n# Listing multiple URIs will make client also use multiple connections.\n\nclient:\n - ws://123.1.1.1:8765 # suppose this is the server's Internet IP\n - ws://example.com/foo # if you can redirect this to 123.1.1.1:8765\n - wss://example.org/bar # you may also use wss:// protocol\n\"\"\"\n\n#----------------------------------------------------------------------------#\n\ndef parseCommandLineArguments(args):\n global EXAMPLE_CONFIG\n\n parser = argparse.ArgumentParser(\n prog=\"sizzler\",\n description=\"\"\"Sizzler is a Linux tool for setting up virtually\n connected network interfaces on 2 different computers. 
The network\n traffic between both interfaces will be encrypted and transmitted via\n WebSocket. To enable this over Internet, one computer must behave like\n a normal HTTP/HTTPS server, which listens for incoming WebSocket\n connections, while the other works like a normal web client.\"\"\",\n epilog=\"\"\"For documentation, bug and discussions, visit\n <https://github.com/scmagi/sizzler>.\"\"\"\n )\n\n parser.add_argument(\n \"-l\",\n \"--loglevel\",\n choices=[\"debug\", \"warning\", \"error\", \"critical\", \"info\"],\n default=\"info\"\n )\n\n job = parser.add_mutually_exclusive_group(required=True)\n \n job.add_argument(\n \"-s\",\n \"--server\",\n metavar=\"CONFIG_FILE\",\n type=str,\n help=\"\"\"Run as a server using given config file.\"\"\"\n )\n\n job.add_argument(\n \"-c\",\n \"--client\",\n metavar=\"CONFIG_FILE\",\n type=str,\n help=\"\"\"Run as a client using given config file.\"\"\"\n )\n\n job.add_argument(\n \"-e\",\n \"--example\",\n action=\"store_true\",\n help=\"\"\"Print an example config file and exit.\"\"\"\n )\n\n results = parser.parse_args(args)\n\n if results.example:\n print(EXAMPLE_CONFIG)\n exit()\n \n return results\n" }, { "alpha_fraction": 0.760937511920929, "alphanum_fraction": 0.76171875, "avg_line_length": 33.5945930480957, "blob_id": "e2bfa8882b5bd458704587312fd2df86f57e9e2b", "content_id": "069851d506c897d51428f40c064fbfe9a114f76c", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1280, "license_type": "permissive", "max_line_length": 79, "num_lines": 37, "path": "/README.md", "repo_name": "scmagi/sizzler", "src_encoding": "UTF-8", "text": "Sizzler: VPN over WebSocket\n===========================\n\nSizzler is a Linux tool, which sets up a virtual network interface on a\ncomputer, and transmit the data sent/received from it to another computer\nrunning the same program.\n\nThe transmission utilizes WebSocket, a common technology used in modern\nwebsites. 
Therefore all other technologies for optimizing WebSocket connections\napply also for Sizzler: firewalls allowing WebSockets will also allow Sizzler\nconnections; reverse proxies for accelerating accesses may also work.\n\nThe network interface set up by Sizzler behaves like a normal network\ninterface. Since transmitted are IP packets, not only TCP but also UDP and ICMP\nare supported.\n\nSizzler is MIT licensed.\n\n# Install\n\nUse PyPI to install:\n\n sudo pip3 install sizzler\n\n# Usage\n\n`sizzler` can be run in command line:\n\n* `sizzler -h` for help\n* `sudo sizzler -c CONFIG_FILE`, supply a config file in [YAML format][YAML]\n and start the program in client mode. **Sizzler requires root priviledge!**\n But it will drop that right after virtual network interface is set up and\n run.\n* `sudo sizzler -s CONFIG_FILE`, just like above, but in server mode.\n* `sizzler -e` will print an example config file to standard output.\n\n[YAML]: https://en.wikipedia.org/wiki/YAML\n" }, { "alpha_fraction": 0.5470704436302185, "alphanum_fraction": 0.5503620505332947, "avg_line_length": 30.64583396911621, "blob_id": "84116ce4d8bd5769b89bad0c390db92c64e32d3d", "content_id": "59ae4a1818f3d8b1fa18b106770c2cd75337f219", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1519, "license_type": "permissive", "max_line_length": 76, "num_lines": 48, "path": "/sizzler/transport/wsclient.py", "repo_name": "scmagi/sizzler", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n\nimport asyncio\nimport websockets\nimport os\nimport sys\nimport time\nfrom logging import info, debug, critical, exception\n\nimport yaml\n\nfrom ._wssession import WebsocketSession\nfrom ._transport import SizzlerTransport\n\n\nclass WebsocketClient(SizzlerTransport):\n\n def __init__(self, uris=None, key=None):\n SizzlerTransport.__init__(self)\n self.uris = uris\n self.key = key\n\n async def __connect(self, baseURI):\n while True:\n try:\n 
uri = baseURI\n if not uri.endswith(\"/\"): uri += \"/\"\n uri += \"?_=%s\" % os.urandom(32).hex()\n async with websockets.connect(uri) as websocket:\n self.increaseConnectionsCount()\n await WebsocketSession(\n websocket=websocket,\n path=uri,\n key=self.key,\n fromWSQueue=self.fromWSQueue,\n toWSQueue=self.toWSQueue\n )\n except Exception as e:\n debug(\"Client connection break, reason: %s\" % e)\n finally:\n self.decreaseConnectionsCount()\n info(\"Connection failed or broken. Try again in 5 seconds.\")\n await asyncio.sleep(5)\n\n def __await__(self):\n assert self.toWSQueue != None and self.fromWSQueue != None\n services = [self.__connect(uri) for uri in self.uris]\n yield from asyncio.gather(*services)\n" } ]
14
peetahzee/telepresence
https://github.com/peetahzee/telepresence
a56f91efcf5ffac4c4addd404c6bdc0b1844ffb0
cbbab3c2cfc3731ac5bb293877f376f03bf38f78
9203b902c07940401f640f899694ccf10daf999e
refs/heads/master
2016-09-05T12:57:07.860696
2013-03-30T01:36:19
2013-03-30T01:36:19
7,827,199
0
1
null
null
null
null
null
[ { "alpha_fraction": 0.6663700938224792, "alphanum_fraction": 0.6752669215202332, "avg_line_length": 28.986665725708008, "blob_id": "482370b877e0a9aa22c20516344b48eac7837a07", "content_id": "0940ed3a28975b52a488a5c57269956aede63860", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 2248, "license_type": "no_license", "max_line_length": 107, "num_lines": 75, "path": "/usc_mrp/src/mrp.cpp", "repo_name": "peetahzee/telepresence", "src_encoding": "UTF-8", "text": "#include \"ros/ros.h\"\n#include \"geometry_msgs/Twist.h\"\n#include \"usc_mrp/SetParam.h\"\n\ngeometry_msgs::Twist userCommand;\ngeometry_msgs::Twist controlCommand;\nbool allowUserInput = false;\nbool allowControlInput = false;\n\nvoid userCommandCallback(const geometry_msgs::Twist::ConstPtr& cmd) {\n\t// ROS_INFO(\"Received user command: [%f] / [%f] / [%f]\", cmd->linear.x, cmd->linear.y, cmd->angular.z);\n\tuserCommand = *cmd;\n}\n\nvoid controlCommandCallback(const geometry_msgs::Twist::ConstPtr& cmd) {\n\t// ROS_INFO(\"Received control command: [%f] / [%f] / [%f]\", cmd->linear.x, cmd->linear.y, cmd->angular.z);\n\tcontrolCommand = *cmd;\n}\n\nbool isCommandEmpty(const geometry_msgs::Twist& cmd) {\n\treturn cmd.linear.x == 0.0 && cmd.linear .y == 0.0 && cmd.angular.z == 0.0;\n}\n\nbool setParam(usc_mrp::SetParam::Request &req, usc_mrp::SetParam::Response &res) {\n\t// ros::param::set(req.topicName, req.value);\n\tif(req.topicName == \"allow_user_input\") {\n\t\tallowUserInput = req.value;\n\t} else if (req.topicName == \"allow_control_input\") {\n\t\tallowControlInput = req.value;\n\t}\n\tROS_INFO(\"Set [%s] to [%s]\", req.topicName.c_str(), req.value?\"true\":\"false\");\n\treturn true;\n}\n\nint main (int argc, char **argv) {\n\n\tros::init(argc, argv, \"usc_mrp\");\t\n\tros::NodeHandle n;\n\n\tros::Publisher cmdVel_pub = n.advertise<geometry_msgs::Twist>(\"/base_controller/command\", 1000);\n\tros::Subscriber cmdVel_userSub = 
n.subscribe(\"/usc_mrp/user_command\", 1000, userCommandCallback);\n\tros::Subscriber cmdVel_controlSub = n.subscribe(\"/usc_mrp/control_command\", 1000, controlCommandCallback);\n\tros::ServiceServer service = n.advertiseService(\"/usc_mrp/setParam\", setParam);\n\n\tros::Rate loop_rate(20);\n\n\n\tROS_INFO(\"Starting Server...\");\n\n\twhile(ros::ok()) {\n\t\tgeometry_msgs::Twist msg;\n\n\t\tif(isCommandEmpty(controlCommand)) {\n\t\t\tif(!isCommandEmpty(userCommand)) {\n\t\t\t\tif(allowUserInput) {\n\t\t\t\t\tmsg = userCommand;\n\t\t\t\t\tROS_INFO(\"Publishing user command\");\n\t\t\t\t} else {\n\t\t\t\t\tROS_INFO(\"User input received, but not allowed.\");\n\t\t\t\t}\n\t\t\t}\n\t\t} else {\n\t\t\tif(allowControlInput) {\n\t\t\t\tmsg = controlCommand;\n\t\t\t\tROS_INFO(\"Publishing control command\");\n\t\t\t} else {\n\t\t\t\tROS_INFO(\"Control input received, but not allowed.\");\n\t\t\t}\n\t\t}\n\t\tros::spinOnce();\n\t\tloop_rate.sleep();\n\t\tcmdVel_pub.publish(msg);\n\t}\n\n}" }, { "alpha_fraction": 0.47999998927116394, "alphanum_fraction": 0.47999998927116394, "avg_line_length": 12, "blob_id": "32af13b02f246cbdb79dabec5c5ca2e3be60cb19", "content_id": "c9dcc2080cea57af0553766539a577b1dc61a683", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 25, "license_type": "no_license", "max_line_length": 12, "num_lines": 2, "path": "/README.md", "repo_name": "peetahzee/telepresence", "src_encoding": "UTF-8", "text": "telepresence\n============" }, { "alpha_fraction": 0.6146838068962097, "alphanum_fraction": 0.6162915229797363, "avg_line_length": 34.903846740722656, "blob_id": "8059ee49e6e50832a6d613a38ee80e0d2fdb3916", "content_id": "46fad3cd8c915fe4c4453a76e43860bf3ad20aaa", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 1866, "license_type": "no_license", "max_line_length": 120, "num_lines": 52, "path": "/Web/js/feed.js", "repo_name": 
"peetahzee/telepresence", "src_encoding": "UTF-8", "text": "(function($) {\n\tvar methods = {\n\t\tinit: function(options) {\n\t\t\treturn this.each(function() {\n\t\t\t\t$(this).ros_widget('setContent', '<img src=\"images/dummy.jpg\" /><div class=\"feed_title\"></div>');\n\t\t\t\t$(this).ros_widget('addEdit', '<input type=\"text\" name=\"title\" placeholder=\"Title\" value=\"' + options.title + '\">');\n\t\t\t\t$(this).ros_widget('addEdit', '<input type=\"text\" name=\"topic\" placeholder=\"Topic\" value=\"' + options.topic + '\">');\n\t\t\t\t$(this).on(\"editSubmit\", function(e, form) {\n\t\t\t\t\tmethods.setTitle.apply(this, [$(form).find(\"input[name=title]\").val()]);\n\t\t\t\t\tmethods.changeTopic.apply(this, [$(form).find(\"input[name=topic]\").val()]);\n\t\t\t\t});\n\t\t\t\t\n\t\t\t\tmethods.setTitle.apply(this, [options.title]);\n\t\t\t\tmethods.setTopic.apply(this, [options.topic]);\n\t\t\t});\n\t\t},\n\t\tparseImage: function(data) {\n\t\t\tif(data != undefined) {\n\t\t\t\t$(this).find(\".widget_content img\").attr(\"src\", \"data:image/jpeg;base64,\" + data);\n\t\t\t}\n\t\t}, \n\t\tsetTitle: function(title) {\n\t\t\t$(this).find(\".feed_title\").html(title);\n\t\t},\n\t\tchangeTopic: function(topic) {\n\t\t\tthis.imageTopic.unsubscribe();\n\t\t\tmethods.setTopic.apply(this, [topic]);\n\t\t},\n\t\tsetTopic: function(topic) {\n\t\t\tvar base = $(this);\n\t\t\tthis.imageTopic = new $.ros.Topic({\n\t\t\t \tname: '/usc_mrp/camera/' + topic + '/compressed',\n\t\t\t\tmessageType: 'sensor_msgs/CompressedImage'\n\t\t\t});\n\t\t\tthis.imageTopic.subscribe(function(message) {\n\t\t\t\tmethods.parseImage.apply(base, [message.data]);\n\t\t\t});\n\t\t\t$(this).attr(\"data-viewName\", topic);\n\t\t}\n\t}\n\n\t$.fn.ros_feed = function( method ) {\n\t\t// Method calling logic\n\t\tif ( methods[method] ) {\n\t\t\treturn methods[ method ].apply( this, Array.prototype.slice.call( arguments, 1 ));\n\t\t} else if ( typeof method === 'object' || ! 
method ) {\n\t\t\treturn methods.init.apply( this, arguments );\n\t\t} else {\n\t\t\t$.error( 'Method ' + method + ' does not exist on jQuery.rosfeed' );\n\t\t}\n\t};\n}) (jQuery);" }, { "alpha_fraction": 0.6082742214202881, "alphanum_fraction": 0.6293144226074219, "avg_line_length": 21.505319595336914, "blob_id": "806a4fcdc505de8512c8e569d965dacaed499fae", "content_id": "203c1a941975730ce941d8269a81c0edcd6b1d06", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 4230, "license_type": "no_license", "max_line_length": 107, "num_lines": 188, "path": "/Web/js/main.js", "repo_name": "peetahzee/telepresence", "src_encoding": "UTF-8", "text": "var ros;\nvar ROS_ADDRESS = \"127.0.0.1\";\nvar ROS_PORT = \"9090\";\n\nvar currentLinearX = 0.0;\nvar currentLinearY = 0.0;\nvar currentAngular = 0.0;\n\nvar topicCmdVel;\n/* var paramAllowUserInput;\nvar paramAllowControlInput; */\nvar paramService;\nvar userViewService;\nvar publishTimer;\n\nfunction initialize() {\n\tros = new ROS(\"ws://\" + ROS_ADDRESS + \":\" + ROS_PORT);\n\t$.ros = ros; // attach to jQuery so that it is available to plug ins\n\t$(\"#error\").html(\"\");\n\n\tif(ros != undefined) {\n\t\tros.socket.binaryType = 'arraybuffer';\n\t\t\n\t\tconsole.log(\"[INFO]: Connected to ROS host.\");\n\t\tros.on('error', function(error) {\n\t\t\tconsole.log(\"[ERROR]: \" + error);\n\t\t});\n\t\tros.on('close', function() {\n\t\t\tconsole.log(\"[INFO]: Connection to ROS host lost.\");\n\t\t\tros = undefined;\n\t\t\t$(\"#error\").html(\"Connection to ROS host lost.\");\n\t\t});\n\t}\n}\n\nfunction setCmdVelTopic(topicName) {\n\ttopicCmdVel = new $.ros.Topic({\n\t\tname: topicName,\n\t\t/* DEFAULT name: \"/base_controller/command\", */\n\t\tmessageType: \"geometry_msgs/Twist\" \n\t});\n\treturn topicCmdVel;\n}\n\nfunction setImageTopic(elem, topicName, title, noEdit) {\n\telem.ros_widget({\n\t\tnoEdit: noEdit,\n\t\tonClick: function() 
{\n\t\t\tuserViewService.callService(new $.ros.ServiceRequest({\n\t\t\t\tviewName : topicName\n\t\t\t}), function() { });\n\t\t}\n\t});\n\telem.ros_feed({\n\t\ttitle: title, \n\t\ttopic: topicName\n\t});\n}\n\nfunction setParamService() {\n\tparamService = new ros.Service({ name : '/usc_mrp/setParam', serviceType : '/usc_mrp/SetParam'});\n}\nfunction setUserViewService() {\n\tuserViewService = new ros.Service({ name : '/usc_mrp/setUserView', serviceType : '/usc_mrp/SetUserView'});\n}\n\nfunction setUpDirectionButtons(directionButtonDiv) {\n\tdirectionButtonDiv.mousedown(function() {\n\t\t$(this).addClass(\"active\");\n\t\t\n\t\tswitch($(this).attr('id')) {\n\t\t\tcase \"up\":\n\t\t\t\tcurrentLinearX = 1.0;\n\t\t\t\tbreak;\n\t\t\tcase \"down\":\n\t\t\t\tcurrentLinearX = -1.0;\n\t\t\t\tbreak;\n\t\t\tcase \"left\":\n\t\t\t\tcurrentLinearY = 1.0;\n\t\t\t\tbreak;\n\t\t\tcase \"right\":\n\t\t\t\tcurrentLinearY = -1.0;\n\t\t\t\tbreak;\n\t\t\tcase \"cw\":\n\t\t\t\tcurrentAngular = -1.0;\n\t\t\t\tbreak;\n\t\t\tcase \"ccw\":\n\t\t\t\tcurrentAngular = 1.0;\n\t\t\t\tbreak;\n\t\t}\n\t\tpublishCmdVel();\n\t});\n\t\n\tdirectionButtonDiv.mouseup(function() {\n\t\t$(this).removeClass(\"active\");\n\t\tswitch($(this).attr('id')) {\n\t\t\tcase \"up\":\n\t\t\tcase \"down\":\n\t\t\t\tcurrentLinearX = 0.0;\n\t\t\t\tbreak;\n\t\t\tcase \"left\":\n\t\t\tcase \"right\":\n\t\t\t\tcurrentLinearY = 0.0;\n\t\t\t\tbreak;\n\t\t\tcase \"cw\":\n\t\t\tcase \"ccw\":\n\t\t\t\tcurrentAngular = 0.0;\n\t\t\t\tbreak;\n\t\t}\n\t\tpublishCmdVel();\n\t});\n\t\n\t$(\"body\").keydown(function(event){\n\t\tswitch(event.keyCode) {\n\t\t\tcase 38:\n\t\t\t\t$(\"#direction_buttons #up\").mousedown();\n\t\t\t\tbreak;\n\t\t\tcase 40:\n\t\t\t\t$(\"#direction_buttons #down\").mousedown();\n\t\t\t\tbreak;\n\t\t\tcase 37:\n\t\t\t\t$(\"#direction_buttons #left\").mousedown();\n\t\t\t\tbreak;\n\t\t\tcase 39:\n\t\t\t\t$(\"#direction_buttons 
#right\").mousedown();\n\t\t\t\tbreak;\n\t\t}\n\t});\n\t\n\t$(\"body\").keyup(function(event){\n\t\tswitch(event.keyCode) {\n\t\t\tcase 38:\n\t\t\t\t$(\"#direction_buttons #up\").mouseup();\n\t\t\t\tbreak;\n\t\t\tcase 40:\n\t\t\t\t$(\"#direction_buttons #down\").mouseup();\n\t\t\t\tbreak;\n\t\t\tcase 37:\n\t\t\t\t$(\"#direction_buttons #left\").mouseup();\n\t\t\t\tbreak;\n\t\t\tcase 39:\n\t\t\t\t$(\"#direction_buttons #right\").mouseup();\n\t\t\t\tbreak;\n\t\t}\n\t});\n}\n\nfunction setUpJoysticks(joystickDiv) {\n\tjoystickDiv.children(\"div\").joystick();\n\tjoystickDiv.find(\"#left_joystick\").on(\"joystickMove\", function(e, deltaX, deltaY) {\n\t\t// 27 = stickOffset\n\t\tcurrentLinearX = -1 * deltaY / 27.0;\n\t\tif(deltaY > 0) {\n\t\t\tcurrentAngular = deltaX / 27.0\n\t\t} else {\n\t\t\tcurrentAngular = -1 * deltaX / 27.0;\n\t\t}\n\t\tpublishCmdVel();\n\t});\n\n\tjoystickDiv.find(\"#right_joystick\").on(\"joystickMove\", function(e, deltaX, deltaY) {\n\t\t// 27 = stickOffset\n\t\tcurrentLinearX = -1 * deltaY / 27.0;\n\t\tcurrentLinearY = -1 * deltaX / 27.0;\n\t\tpublishCmdVel();\n\t});\n\n\tjoystickDiv.children(\"div\").on(\"joystickRelease\", function(e) {\n\t\tcurrentLinearX = 0;\n\t\tcurrentLinearY = 0;\n\t\tcurrentAngular = 0;\n\t\tpublishCmdVel();\n\t});\n}\n\nfunction publishCmdVel() {\n\tclearTimeout(publishTimer);\n\n\ttopicCmdVel.publish({\n\t\tlinear: {x: currentLinearX, y: currentLinearY, z: 0},\n\t\tangular: {x: 0, y: 0, z: currentAngular}\n\t});\n\t\n\t// if there is a velocity, keep on publishing\n\tif(currentLinearX != 0.0 || currentLinearY != 0.0 || currentAngular != 0.0) {\n\t\tpublishTimer = setTimeout('publishCmdVel()', 200);\n\t}\n}" }, { "alpha_fraction": 0.6606971621513367, "alphanum_fraction": 0.6795650720596313, "avg_line_length": 30.918367385864258, "blob_id": "66b846e5e5cb1292479eca186b63ddabe2ccf48a", "content_id": "e4025d2eb4479f90fbc331d16e07604e52a24420", "detected_licenses": [], "is_generated": false, "is_vendor": false, 
"language": "C++", "length_bytes": 3127, "license_type": "no_license", "max_line_length": 121, "num_lines": 98, "path": "/usc_mrp/src/image_republisher.cpp", "repo_name": "peetahzee/telepresence", "src_encoding": "UTF-8", "text": "#include <ros/ros.h>\n#include <image_transport/image_transport.h>\n#include <opencv/cv.h>\n#include <opencv/highgui.h>\n#include <cv_bridge/CvBridge.h>\n#include <cv_bridge/cv_bridge.h>\n#include \"usc_mrp/SetUserView.h\"\n#include <string>\n\nimage_transport::Publisher pub_user;\nimage_transport::Publisher pub_original;\nimage_transport::Publisher pub_crop1;\nimage_transport::Publisher pub_crop2;\nimage_transport::Publisher pub_quality1;\n\nros::NodeHandle *n;\n\nstd::string userView = \"original\";\n\nint cropLevels[2] = { 50, 100 };\n\nbool setUserView(usc_mrp::SetUserView::Request &req, usc_mrp::SetUserView::Response &res) {\n // ros::param::set(req.topicName, req.value);\n userView = req.viewName.c_str();\n ROS_INFO(\"User view set to [%s].\", req.viewName.c_str());\n}\n\nvoid publishCvMat(image_transport::Publisher pub, cv_bridge::CvImagePtr origPtr, cv::Mat mat) {\n cv_bridge::CvImage out_msg;\n \n out_msg.header = origPtr->header;\n out_msg.encoding = \"bgr8\";\n out_msg.image = mat;\n\n pub.publish(out_msg.toImageMsg());\n}\n\nvoid imageCallback(const sensor_msgs::ImageConstPtr& msg) {\n cv_bridge::CvImagePtr cv_ptr;\n try {\n cv_ptr = cv_bridge::toCvCopy(msg, \"bgr8\");\n } catch (cv_bridge::Exception& e) {\n ROS_ERROR(\"cv_bridge exception: %s\", e.what());\n return;\n }\n\n pub_original.publish(cv_ptr->toImageMsg());\n if(userView == \"original\") { pub_user.publish(cv_ptr->toImageMsg()); }\n //cv::imshow(\"view\", cv_ptr->image);\n\n pub_quality1.publish(cv_ptr->toImageMsg());\n n->setParam(\"/usc_mrp/camera/quality1/compressed/jpeg_quality\", 10);\n\n if(userView == \"original\") { pub_user.publish(cv_ptr->toImageMsg()); }\n\n cv::Mat image(cv_ptr->image);\n cv::Mat croppedImage = image(cv::Rect(cropLevels[0], 
cropLevels[0], 640 - cropLevels[0] * 2, 480 - cropLevels[0] * 2));\n publishCvMat(pub_crop1, cv_ptr, croppedImage);\n if(userView == \"crop1\") { publishCvMat(pub_user, cv_ptr, croppedImage); }\n //cv::imshow(\"new view\", croppedImage);\n\n\n croppedImage = image(cv::Rect(cropLevels[1], cropLevels[1], 640 - cropLevels[1] * 2, 480 - cropLevels[1] * 2));\n publishCvMat(pub_crop2, cv_ptr, croppedImage);\n if(userView == \"crop2\") { publishCvMat(pub_user, cv_ptr, croppedImage); }\n //cv::imshow(\"new view 2\", croppedImage); \n \n //cv::waitKey(3);\n\n}\n\nint main(int argc, char **argv) {\n ros::init(argc, argv, \"image_listener\");\n ros::NodeHandle nh;\n n = &nh;\n \n ros::ServiceServer service = nh.advertiseService(\"/usc_mrp/setUserView\", setUserView);\n image_transport::ImageTransport it(nh);\n image_transport::Subscriber sub = it.subscribe(\"/camera/rgb/image_color\", 1, imageCallback);\n\n pub_user = it.advertise(\"usc_mrp/camera/user\", 1);\n pub_original = it.advertise(\"usc_mrp/camera/original\", 1);\n pub_crop1 = it.advertise(\"usc_mrp/camera/crop1\", 1);\n pub_crop2 = it.advertise(\"usc_mrp/camera/crop2\", 1);\n pub_quality1 = it.advertise(\"usc_mrp/camera/quality1\", 1);\n\n ros::spin();\n \n /*\n cv::namedWindow(\"view\");\n cv::namedWindow(\"new view\");\n cv::namedWindow(\"new view 2\");\n cv::startWindowThread();\n cv::destroyWindow(\"view\");\n cv::destroyWindow(\"new view\");\n cv::destroyWindow(\"new view 2\");\n */\n}" }, { "alpha_fraction": 0.5866538882255554, "alphanum_fraction": 0.5976956486701965, "avg_line_length": 27.16216278076172, "blob_id": "384f45a0c9126cd2d161adf836ee3092216473da", "content_id": "1ca136c39b3e7ba9ddda622127ee6c66065d4f9d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 2083, "license_type": "no_license", "max_line_length": 139, "num_lines": 74, "path": "/Web/js/widget.js", "repo_name": "peetahzee/telepresence", "src_encoding": "UTF-8", "text": 
"(function($) {\n\tvar methods = {\n\t\tinit: function(options) {\n\t\t\treturn this.each(function() {\n\t\t\t\tvar isEditMode = false;\n\t\t\t\tvar base = $(this);\n\t\t\t\tvar widgetContent = $('<div class=\"widget_content\"></div>');\n\t\t\t\tvar widgetEdit = $('<div class=\"widget_edit\"><form onsubmit=\"return false;\"><input type=\"submit\" value=\"Submit &gt;\" /></form></div>');\n\t\t\t\tvar widgetEditButton = $('<div class=\"widget_edit_button\">e</div>');\n\n\t\t\t\t$(this).append(widgetContent);\n\t\t\t\tif(!options.noEdit) {\n\t\t\t\t\t$(this).append(widgetEdit);\n\t\t\t\t\t$(this).append(widgetEditButton);\n\t\t\t\t}\n\n\t\t\t\t$(this).mouseenter(function() {\n\t\t\t\t\tif(!isEditMode) {\n\t\t\t\t\t\twidgetEditButton.fadeIn(300);\n\t\t\t\t\t}\n\t\t\t\t});\n\t\t\t\t$(this).mouseleave(function() {\n\t\t\t\t\tif(!isEditMode) {\n\t\t\t\t\t\twidgetEditButton.fadeOut(300);\n\t\t\t\t\t}\n\t\t\t\t});\n\t\t\t\t$(this).click(function() {\n\t\t\t\t\tif(!isEditMode && options.onClick) {\n\t\t\t\t\t\toptions.onClick.apply(this);\n\t\t\t\t\t}\n\t\t\t\t});\n\t\t\t\twidgetEditButton.click(function() {\n\t\t\t\t\twidgetContent.hide(300);\n\t\t\t\t\twidgetEdit.show(300);\n\t\t\t\t\twidgetEditButton.fadeOut(300);\n\t\t\t\t\twidgetEdit.find(\"input\")[0].focus();\n\t\t\t\t\tisEditMode = true;\n\t\t\t\t\treturn false;\n\t\t\t\t});\n\n\t\t\t\twidgetEdit.find(\"form\").submit(function(e) {\n\t\t\t\t\tisEditMode = false;\n\t\t\t\t\twidgetEdit.hide(300);\n\t\t\t\t\twidgetContent.show(300);\n\t\t\t\t\tbase.trigger(\"editSubmit\", $(this));\n\t\t\t\t\treturn false;\n\t\t\t\t});\n\t\t\t});\n\t\t},\n\t\taddEdit : function(elem) { \n\t\t\tvar form = $(this).find(\".widget_edit form\");\n\t\t\tform.find(\"input[type=submit]\").before(elem);\n\t\t},\n\t\tsetContent: function(elem) {\n\t\t\tvar content = $(this).find(\".widget_content\");\n\t\t\tcontent.html(elem);\n\t\t},\n\t\tgetContent: function() {\n\t\t\treturn $(this).find(\".widget_content\");\n\t\t}\n\t};\n\n\t$.fn.ros_widget 
= function( method ) {\n\t\t// Method calling logic\n\t\tif ( typeof method === 'object' || ! method || method == undefined) {\n\t\t\treturn methods.init.apply( this, arguments );\n\t\t} else if ( methods[method] ) {\n\t\t\treturn methods[ method ].apply( this, Array.prototype.slice.call( arguments, 1 ));\n\t\t} else {\n\t\t\t$.error( 'Method ' + method + ' does not exist on jQuery.ros_widget' );\n\t\t}\n\t};\n\n}) (jQuery);" }, { "alpha_fraction": 0.7358490824699402, "alphanum_fraction": 0.7358490824699402, "avg_line_length": 25.5, "blob_id": "952d0abe5f07bcb2b738f958cb6a3bdc39166b6f", "content_id": "d7dcec29bbdf1fa04b43add021fc26cd86ab262c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 53, "license_type": "no_license", "max_line_length": 27, "num_lines": 2, "path": "/usc_mrp/src/usc_mrp/srv/__init__.py", "repo_name": "peetahzee/telepresence", "src_encoding": "UTF-8", "text": "from ._SetUserView import *\nfrom ._SetParam import *\n" } ]
7
QuantumJack/AutoMusicGeneration
https://github.com/QuantumJack/AutoMusicGeneration
d3c1c7da9f0f2f5dc0aa38fb1ae9c565e8c7170d
88d975a1059eaac1b89f171cab3894ab60451365
07287ccf284ac9fd77afee879e8eb2665580df71
refs/heads/master
2022-04-25T14:21:47.002651
2020-04-29T00:20:08
2020-04-29T00:20:08
259,781,715
0
0
null
2020-04-29T00:13:18
2020-04-15T10:34:41
2020-01-20T00:35:05
null
[ { "alpha_fraction": 0.730648934841156, "alphanum_fraction": 0.7388584613800049, "avg_line_length": 33.5405387878418, "blob_id": "36f8be58b72fe4bebc067787596a234a9d3c8b75", "content_id": "82d4d73ca0aa62be8ff4f9d3433d2d61e5bfbd40", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2558, "license_type": "no_license", "max_line_length": 125, "num_lines": 74, "path": "/generate.py", "repo_name": "QuantumJack/AutoMusicGeneration", "src_encoding": "UTF-8", "text": "\n'''\n\tThis file loads a trained Seq2Seq LSTM model and generate music\n'''\n\nimport sys\nimport time\nimport random\nimport glob\nimport numpy as np\n\nfrom tensorflow.keras.models import model_from_json\nfrom tensorflow.keras.optimizers import Adam, RMSprop\n\nfrom midi_parser import *\n\n# GLOBAL PARAMETERS\nx_length = 100 # sample sequence length.\ny_length = 10 # output sequence legth. \t\tNeeds to be consistent with the value in train.py\niteration = 50 # number of iteration to generate new sequence. 
\t\tFinal result length: y_length * itertaion\n\nsaved_model = \"./saved_params/LSTM_model.json\"\nsaved_weights = \"./saved_params/LSTM_weights.hdf5\"\nsample_folder = \"./samples\"\noutput_folder = \"./output\"\n\n\ndef generate(input_data, tempo, resolution):\n\t'''\n\t\tgenerate new music and save to a midi file\n\n\t\tparams:\n\t\t\t\tinput_data: seed music pianoroll for music generation\n\t\t\t\ttempo: tempo value parsed from the seed music\n\t\t\t\tresolution: resolution value parsed from the seed music\n\t'''\n\n\toutput_path = os.path.join(output_folder, \"generated_%s.midi\"%(time.strftime(\"%Y%m%d_%H_%M\")))\n\n\t# randomly select a sequence from the seed music\n\tstart = np.random.randint(0, input_data.shape[0]-1-x_length-iteration)\n\tpattern = np.array(input_data[start:start+x_length])\n\n\tprediction_output = []\n\n\t# concatenate all generated sequence\n\tfor i in range(iteration):\n\t\tprediction = model.predict(pattern.reshape(1,pattern.shape[0],-1).astype(float)).reshape(y_length,-1) # generate sequence\n\t\tprediction_output.append(prediction)\n\t\tpattern = np.append(pattern[y_length:,], prediction, axis=0) # shift sliding window on input data\n\n\tprint(\"output shape: \", np.array(prediction_output).shape)\n\n\t# convert sequence back to piano roll\n\tpianoroll = outputPianoRoll(np.array(prediction_output), note_threshold=0.1)\n\tprint(\"pianoroll shape: \", pianoroll.shape)\n\t\n\t# convert piano roll back to midi\n\toutputMidi(output_path, pianoroll, tempo, resolution, scale=int(y_length)) # scale: seqch output sequence has y_length ticks\n\n\nif __name__ == '__main__':\n\n\t# load trained model\n\tmodel = model_from_json(open(saved_model).read())\n\tmodel.load_weights(saved_weights)\n\tmodel.summary()\n\tmodel.compile(loss='categorical_crossentropy', optimizer = RMSprop())\n\n\t# randomly select a file from sample folder\n\tmidi_files = [file for file in os.listdir(sample_folder) if file.endswith(\".midi\") or 
file.endswith(\".mid\")]\n\tinput_data, tempo, resolution = parseMidi( os.path.join(sample_folder, midi_files[random.randint(0,len(midi_files)-1)]) )\n\n\t# generate new music\n\tgenerate(input_data, tempo, resolution)\n\t" }, { "alpha_fraction": 0.7502634525299072, "alphanum_fraction": 0.7523708939552307, "avg_line_length": 30.11475372314453, "blob_id": "3cefe08cbc0ad338f7977556c3a68f29a0ece3d2", "content_id": "fc7c80903fb135a81390db130798b6d124d00b57", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1898, "license_type": "no_license", "max_line_length": 447, "num_lines": 61, "path": "/README.md", "repo_name": "QuantumJack/AutoMusicGeneration", "src_encoding": "UTF-8", "text": "# Music Generation Using Seq2Seq LSTM Network\n\n\n## Overview\n\nMusic composition is a difficult task for many people since it requires a decent understanding of music theory as well as chord progression. In this project we try to build a neutral network to generate new music. We train a Sequence-to-Sequence Recurrent Neural Network with long short-term memory (LSTM) that can learn chord progressions from music in the training data and generate brand new music sequences using a short music sample as prior.\n\n## Requirement:\n\n**Python version:** python 3.x\n\n**Required Packages:**\n\n pip install tensorflow\n pip install keras\n pip install mido\n pip install sklearn\n pip install numpy\n pip install ipykernel\n pip install matplotlib\n\n**(Optional) Training on GPU**\n\nWe use Keras (TensorfFlow backend), which automatically enables training on **GPU** if available*.\n\n 1. Install GPU version of TensorFlow:\n \n pip install tensorflow-gpu\n \n 2. Install CUDA:\n \n follow the instruction here: https://www.tensorflow.org/install/gpu\n \n\n## Dataset:\n\nWe use MIDI as our input/output file type. 
\n\nThe dataset (MAESTRO) used for the experiments is available at: https://magenta.tensorflow.org/datasets/maestro\n\n## Train the Network: \n\nTraining data: manually select midi files from the dataset and put in *./midi_songs*\n\n python train.py\n\nModel will automatically save the best weights during training. To load saved weights, set the global parameter *load_weights=True* in *train.py*\n\n## Generate Music: \n\n*This repo contains a pre-trained model in ./saved_params and will be used to generate new music. Re-training the model is optional.*\n\nThe generated MIDI file will be saved to *./output*\n\n python generate.py\n\n## Listen to The Music:\n\nOn macOS: Recommend using GarageBand to open and play midi files\n\nThere are also many online tools for playing midi files\n" }, { "alpha_fraction": 0.7299532294273376, "alphanum_fraction": 0.7439770102500916, "avg_line_length": 27.95833396911621, "blob_id": "dc9708e1b6d7ef56d5c64ede61330ac0c9d9eac6", "content_id": "a41ff4024ddce7742972ca74eb61620ef288ecf2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2781, "license_type": "no_license", "max_line_length": 141, "num_lines": 96, "path": "/train.py", "repo_name": "QuantumJack/AutoMusicGeneration", "src_encoding": "UTF-8", "text": "\n'''\n\tThis file trains a Seq2Seq LSTM model to learn to play music\n'''\n\nimport sys\nimport os\nimport time\nimport ipykernel\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import LSTM, Dense, BatchNormalization, Dropout, TimeDistributed, RepeatVector\nfrom tensorflow.keras.callbacks import EarlyStopping, History\nfrom tensorflow.keras.models import model_from_json\nfrom tensorflow.keras.optimizers import RMSprop\nfrom tensorflow.keras.callbacks import ModelCheckpoint\n\nfrom midi_parser import getData, createTrainData\n\n# GLOBAL PARAMETERS\nhighest_note = 81 # A_6 \tNeeds to be 
consistent with the value in midi_parser.py\nlowest_note = 33 # A_2\t\tNeeds to be consistent with the value in midi_parser.py\npitch_dimension = highest_note - lowest_note + 1\n\n# Model parameters\nnum_hidden = 512\nx_length = 100\ny_length = 10\nbatch_size = 64\nnum_epochs = 100\n\nload_weights = True\ndata_path = \"./midi_songs\"\nweight_path = \"./saved_params/LSTM_weights.hdf5\"\nmodel_path = \"./saved_params/LSTM_model.json\"\n\n\n\ndef buildModel():\n\t'''Build a Seq2Seq LSTM model'''\n\n\t#encoder\n\tmodel = Sequential()\n\tmodel.add(LSTM(num_hidden, input_dim = pitch_dimension, return_sequences = True ))\n\tmodel.add(BatchNormalization())\n\tmodel.add(Dropout(0.2))\n\n\tmodel.add(LSTM(num_hidden))\n\tmodel.add(RepeatVector(y_length))\n\t\n\t#decoder\n\tmodel.add(LSTM(num_hidden, return_sequences = True))\n\tmodel.add(Dropout(0.2))\n\n\tmodel.add(LSTM(num_hidden, return_sequences = True))\n\tmodel.add(Dropout(0.2))\n\n\tmodel.add(LSTM(num_hidden, return_sequences = True))\n\tmodel.add(Dropout(0.2))\n\t\n\tmodel.add(TimeDistributed(Dense(pitch_dimension, activation= 'softmax')))\n\tmodel.add(TimeDistributed(Dense(pitch_dimension, activation= 'softmax')))\n\n\treturn model\n\n\n\nif __name__ == '__main__':\n\n\t# prepare data for training\n\tpianoroll = getData(data_path)\n\tX,Y = createTrainData(pianoroll, x_length, y_length)\n\n\t# build model\n\tmodel = buildModel()\n\tmodel.summary()\n\tif load_weights:\n\t\tmodel.load_weights(weight_path)\n\tmodel.compile(loss='categorical_crossentropy', optimizer = RMSprop())\n\n\t# model callbacks\n\tcheckpoint = ModelCheckpoint(weight_path, monitor='loss', verbose=0, save_best_only=True, mode='auto') # save weights\n\tearlystop = EarlyStopping(monitor='loss', patience= 10, verbose=0, mode= 'auto') # terminate training\n\thistory = History() # plot training loss\n\n\t# train the model\n\thist = model.fit(X.astype(np.bool), Y.astype(np.bool), batch_size=batch_size, epochs=num_epochs, callbacks=[earlystop, history, 
checkpoint])\n\n\t# save trained model structure\n\topen(model_path, 'w').write(model.to_json())\n\n\t# plot training loss\n\timg = plt.figure(figsize=(6,5), dpi=75)\n\tplt.plot(hist.history['loss'])\n\timg.savefig(\"TrainingLoss.png\", bbox_inches='tight')\n" }, { "alpha_fraction": 0.8037382960319519, "alphanum_fraction": 0.8037382960319519, "avg_line_length": 106, "blob_id": "fe1a27d003f8c85c091fd96ea8dcfda09cdd72c7", "content_id": "4cc2537c41e8422801f5493033aee541976b67e9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 107, "license_type": "no_license", "max_line_length": 106, "num_lines": 1, "path": "/samples/README.md", "repo_name": "QuantumJack/AutoMusicGeneration", "src_encoding": "UTF-8", "text": "The generative model will randomly select one song from this dataset as the seed music (prior) for output.\n" }, { "alpha_fraction": 0.7932203412055969, "alphanum_fraction": 0.7932203412055969, "avg_line_length": 41.14285659790039, "blob_id": "9b2a7d5f9f08382f745776d9e420885a3cbe121f", "content_id": "662220fd69b4d0afb1ff80fa92b37c44fad80271", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 295, "license_type": "no_license", "max_line_length": 103, "num_lines": 7, "path": "/midi_songs/README.md", "repo_name": "QuantumJack/AutoMusicGeneration", "src_encoding": "UTF-8", "text": "## This folder contains training Data\n\nThe files here are randomly selected from the dataset as an example training data. \n\nIt is not the full training data used for the pre-trained model. \n\nYou download the full dataset and select your own data: https://magenta.tensorflow.org/datasets/maestro\n" } ]
5
nickbuker/PygameBall
https://github.com/nickbuker/PygameBall
b190f65f343fe5064a3b01535d71d401d1ceecc2
d174b2d440f5835df7335c580c2e050693a0464a
f99a99818f52e9f29e4b147d8619931d8de0fd7a
refs/heads/master
2020-05-15T00:18:11.304716
2019-06-06T02:47:08
2019-06-06T02:47:08
182,009,565
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5007598996162415, "alphanum_fraction": 0.5227963328361511, "avg_line_length": 28.244443893432617, "blob_id": "1424c0aa1fe5d19957a338d50dea730ee4209b7c", "content_id": "cd65f65b84be9ad5e72f7f005b265c1b7babd70b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2632, "license_type": "no_license", "max_line_length": 63, "num_lines": 90, "path": "/src/play_ball.py", "repo_name": "nickbuker/PygameBall", "src_encoding": "UTF-8", "text": "import pygame\n\nfrom src.ball import Ball\nfrom src.key_states import KeyStates\n\n\npygame.init()\n\n\n# constants\nSCREEN_SIZE = (1500, 600)\nBLACK = (0, 0, 0)\nBALL_EDGE_WIDTH = 0\n\n\n# soundz\npygame.mixer.pre_init(22050, -16, 2, 1024)\npygame.init()\npygame.mixer.quit()\npygame.mixer.init(22050, -16, 2, 1024) # not janky at all :)\nsplat_sound = pygame.mixer.Sound('../assets/splat.ogg')\n\n\ndef main():\n screen = pygame.display.set_mode(SCREEN_SIZE)\n clock = pygame.time.Clock()\n ball = Ball(\n x=int(SCREEN_SIZE[0] / 2) - 20,\n y=40,\n speed_x=0,\n speed_y=0\n )\n key_states = KeyStates()\n MIN_X = 0 + ball.width\n MAX_X = SCREEN_SIZE[0] - ball.width\n MAX_Y = SCREEN_SIZE[1] - ball.height\n active = True\n while active:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n active = False\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_RIGHT:\n key_states.key_states['R'] = True\n if event.key == pygame.K_LEFT:\n key_states.key_states['L'] = True\n if event.type == pygame.KEYUP:\n if event.key == pygame.K_RIGHT:\n key_states.key_states['R'] = False\n if event.key == pygame.K_LEFT:\n key_states.key_states['L'] = False\n\n screen.fill(BLACK)\n if key_states.key_states['R']:\n ball.update_speed_x(speed_x=ball.speed_x + 1)\n if key_states.key_states['L']:\n ball.update_speed_x(speed_x=ball.speed_x - 1)\n if not key_states.key_states['R'] and ball.speed_x > 0:\n ball.update_speed_x(speed_x=ball.speed_x - 1)\n if not 
key_states.key_states['L'] and ball.speed_x < 0:\n ball.update_speed_x(speed_x=ball.speed_x + 1)\n if ball.x < MIN_X:\n ball.x = MIN_X\n ball.speed_x *= -1\n splat_sound.play()\n ball.set_color_state()\n if ball.x > MAX_X:\n ball.x = MAX_X\n ball.speed_x *= -1\n splat_sound.play()\n ball.set_color_state()\n if ball.y > MAX_Y:\n ball.y = MAX_Y\n ball.speed_y *= -1\n ball.set_color_state()\n ball.update_speed_y(ball.speed_y + 1)\n ball.update_position()\n pygame.draw.ellipse(\n screen,\n ball.colors[ball.color_state],\n [ball.x, ball.y, ball.width, ball.height],\n BALL_EDGE_WIDTH\n )\n pygame.display.flip()\n clock.tick(60)\n pygame.quit()\n\n\nif __name__ == '__main__':\n main()\n" }, { "alpha_fraction": 0.33636364340782166, "alphanum_fraction": 0.3713286817073822, "avg_line_length": 17.07594871520996, "blob_id": "867a4af588c7bf4a6fb178e50122a9de17da56d3", "content_id": "b62ec08b9c74a2599d3c3981e64ca45252f45b63", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1430, "license_type": "no_license", "max_line_length": 68, "num_lines": 79, "path": "/src/ball.py", "repo_name": "nickbuker/PygameBall", "src_encoding": "UTF-8", "text": "class Ball:\n\n def __init__(self, x, y, speed_x, speed_y, width=40, height=40):\n \"\"\"\n\n Parameters\n ----------\n x\n y\n speed_x\n speed_y\n \"\"\"\n self.x = x\n self.y = y\n self.speed_x = speed_x\n self.speed_y = speed_y\n self.width = width\n self.height = height\n self.color_state = 0\n self.colors = [\n (255, 0, 0),\n (255, 87, 51),\n (255, 255, 0),\n (50, 205, 50),\n (20, 144, 255),\n (138, 43, 226)\n ]\n\n def update_speed_x(self, speed_x):\n \"\"\"\n\n Parameters\n ----------\n speed_x\n\n Returns\n -------\n None\n \"\"\"\n self.speed_x = speed_x\n return\n\n def update_speed_y(self, speed_y):\n \"\"\"\n\n Parameters\n ----------\n speed_y\n\n Returns\n -------\n None\n \"\"\"\n self.speed_y = speed_y\n return\n\n def update_position(self):\n \"\"\"\n\n Returns\n 
-------\n None\n \"\"\"\n self.x += self.speed_x\n self.y += self.speed_y\n return\n\n def set_color_state(self):\n \"\"\"\n\n Returns\n -------\n None\n \"\"\"\n if self.color_state < len(self.colors) - 1:\n self.color_state += 1\n else:\n self.color_state = 0\n return\n\n\n" }, { "alpha_fraction": 0.3937007784843445, "alphanum_fraction": 0.3937007784843445, "avg_line_length": 17.14285659790039, "blob_id": "130449f445742920e51080436aa86127f6ce2c2e", "content_id": "7221d3bb3bc7f1a72ea981cfd372911880a29c3b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 127, "license_type": "no_license", "max_line_length": 27, "num_lines": 7, "path": "/src/key_states.py", "repo_name": "nickbuker/PygameBall", "src_encoding": "UTF-8", "text": "class KeyStates:\n\n def __init__(self):\n self.key_states = {\n 'L': False,\n 'R': False\n }\n" } ]
3
sylviawanjiku/her_african_duka
https://github.com/sylviawanjiku/her_african_duka
ed40684d9073991063cb6db5504faca66186f405
90326ee43057834115d76df7ab221fb809183c56
036dd899de6edc08da2a0878ec6255a4259932db
refs/heads/develop
2020-04-27T12:45:53.321813
2019-03-07T13:07:53
2019-10-15T09:18:07
174,343,405
0
0
null
2019-03-07T12:51:35
2019-10-15T09:18:19
2020-06-05T22:24:39
Python
[ { "alpha_fraction": 0.5268292427062988, "alphanum_fraction": 0.6024390459060669, "avg_line_length": 21.77777862548828, "blob_id": "c202c8957c50bf4bd1d7a32b94c80d613430f2d2", "content_id": "93675ac142ed47d83b3e068aee1d4ed07fa82cfd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 410, "license_type": "no_license", "max_line_length": 73, "num_lines": 18, "path": "/myduka/apps/authentication/migrations/0003_auto_20190919_1447.py", "repo_name": "sylviawanjiku/her_african_duka", "src_encoding": "UTF-8", "text": "# Generated by Django 2.2.5 on 2019-09-19 14:47\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('authentication', '0002_auto_20190919_1053'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='user',\n name='id',\n field=models.IntegerField(primary_key=True, serialize=False),\n ),\n ]\n" }, { "alpha_fraction": 0.7089040875434875, "alphanum_fraction": 0.7089040875434875, "avg_line_length": 23.33333396911621, "blob_id": "0468634dfac8809490cdc6de60bce2e80be6f7d3", "content_id": "920d9c25f887d8e2e8563f5d2a2627c20aa11971", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 292, "license_type": "no_license", "max_line_length": 75, "num_lines": 12, "path": "/myduka/apps/authentication/views.py", "repo_name": "sylviawanjiku/her_african_duka", "src_encoding": "UTF-8", "text": "from django.shortcuts import render\nfrom django.views import View\n\nfrom .models import User\n\n# Create your views here.\n\n\nclass RegistrationApiView(View):\n def get(self, request):\n user = User.objects.all()\n return render(request, \"authentication/home.html\", {'user': user })\n" }, { "alpha_fraction": 0.6476151347160339, "alphanum_fraction": 0.6517269611358643, "avg_line_length": 30.584415435791016, "blob_id": "96046a82f26dbde8da21823a7de1e00038961b6f", "content_id": 
"484cee9c7e9e2b624aaf1ee3ba0c81b98c50158c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2432, "license_type": "no_license", "max_line_length": 79, "num_lines": 77, "path": "/myduka/apps/authentication/models.py", "repo_name": "sylviawanjiku/her_african_duka", "src_encoding": "UTF-8", "text": "from django.db import models\nfrom django.contrib.auth.base_user import AbstractBaseUser, BaseUserManager\nfrom django.contrib.auth.models import PermissionsMixin\n\n\n# Create your models here.\n\n\nclass UserManager(BaseUserManager):\n\n def create_user(self, username, email, password=None):\n \"\"\"Create and return a `User` with an email, username and password.\"\"\"\n if username is None:\n raise TypeError('Users must have a username.')\n\n if email is None:\n raise TypeError('Users must have an email address.')\n\n user = self.model(username=username, email=self.normalize_email(email))\n user.set_password(password)\n user.save()\n\n return user\n\n def create_superuser(self, username, email, password):\n \"\"\"\n Create and return a `User` with superuser powers.\n Superuser powers means that this use is an admin that can do anything\n they want.\n \"\"\"\n if password is None:\n raise TypeError('Superusers must have a password.')\n\n user = self.create_user(username, email, password)\n user.is_superuser = True\n user.is_staff = True\n user.save()\n\n return user\n\n\nclass User(AbstractBaseUser, PermissionsMixin):\n username = models.CharField(db_index=True, max_length=255, unique=True)\n email = models.EmailField(max_length=200, unique=True)\n is_active = models.BooleanField(default=True)\n is_staff = models.BooleanField(default=False)\n deleted = models.BooleanField(\n default=False,\n help_text='Toogle to prevent actual deletes'\n )\n mobile_number = models.IntegerField(default='+254')\n GENDER = (('F', 'Female'),\n ('M', 'Male'))\n gender = models.CharField(max_length=1, choices=GENDER, default='F')\n 
created_at = models.DateTimeField(auto_now_add=True)\n updated_at = models.DateTimeField(auto_now=True)\n\n USERNAME_FIELD = 'username'\n REQUIRED_FIELDS = ['email']\n\n # Tells Django that the UserManager class defined above should manage\n # objects of this type.\n objects = UserManager()\n\n def __str__(self):\n \"\"\"\n Returns a string representation of this `User`.\n This string is used when a `User` is printed in the console.\n \"\"\"\n return self.username\n\n @property\n def get_full_name(self):\n return self.username\n\n def get_short_name(self):\n return self.username\n" }, { "alpha_fraction": 0.5126903653144836, "alphanum_fraction": 0.5516074299812317, "avg_line_length": 24.69565200805664, "blob_id": "2d07f1f662a255051b04dddb195afd13f1567e98", "content_id": "b1b6ab11de51c3eae3432bc7438a46808871ac78", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 591, "license_type": "no_license", "max_line_length": 104, "num_lines": 23, "path": "/myduka/apps/authentication/migrations/0002_auto_20190919_1053.py", "repo_name": "sylviawanjiku/her_african_duka", "src_encoding": "UTF-8", "text": "# Generated by Django 2.2.5 on 2019-09-19 10:53\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('authentication', '0001_initial'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='user',\n name='gender',\n field=models.CharField(choices=[('F', 'Female'), ('M', 'Male')], default='F', max_length=1),\n ),\n migrations.AddField(\n model_name='user',\n name='mobile_number',\n field=models.IntegerField(default='+254')\n ),\n ]\n" }, { "alpha_fraction": 0.5520833134651184, "alphanum_fraction": 0.71875, "avg_line_length": 18.200000762939453, "blob_id": "355235ba7982e8adc95765e2ed183489f8808bcc", "content_id": "f21e3be8dfd07392f0a7e99bcb0d6efea598e4e9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 96, 
"license_type": "no_license", "max_line_length": 25, "num_lines": 5, "path": "/requirements.txt", "repo_name": "sylviawanjiku/her_african_duka", "src_encoding": "UTF-8", "text": "Django==2.2.6\ndjango-debug-toolbar==2.0\npsycopg2-binary==2.8.3\npylint==2.4.1\nvirtualenv==16.7.5\n" } ]
5
ShowDown53/HomepageExtendsJinja
https://github.com/ShowDown53/HomepageExtendsJinja
6b958d0829a3199e10a0c209f365a19e00a89400
52e1af39d0725fdf5bbd50360404f1679a75aa4b
21a1c6e620baebac00fad4134327d41769985ab6
refs/heads/master
2021-08-15T08:49:01.879027
2017-11-17T16:41:30
2017-11-17T16:41:30
111,126,754
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7966101765632629, "alphanum_fraction": 0.7966101765632629, "avg_line_length": 18.66666603088379, "blob_id": "012631e3222b0b5a82978dc2fd0108c9b3f26c78", "content_id": "feb37c39a883582c38c022bb6e595732c21d6c77", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 59, "license_type": "no_license", "max_line_length": 49, "num_lines": 3, "path": "/README.md", "repo_name": "ShowDown53/HomepageExtendsJinja", "src_encoding": "UTF-8", "text": "# About\n\nPersonal homepage made easier with Jinja Extends.\n" }, { "alpha_fraction": 0.6474432945251465, "alphanum_fraction": 0.6570549607276917, "avg_line_length": 31.92405128479004, "blob_id": "877f5b893d13465dee78cf4cd1dda8c1892b9dce", "content_id": "5e6e65602f726b198657e696f946c417047b80d3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2601, "license_type": "no_license", "max_line_length": 180, "num_lines": 79, "path": "/main.py", "repo_name": "ShowDown53/HomepageExtendsJinja", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\nimport os\nimport jinja2\nimport webapp2\nimport random\n\ntemplate_dir = os.path.join(os.path.dirname(__file__), \"templates\")\njinja_env = jinja2.Environment(loader=jinja2.FileSystemLoader(template_dir), autoescape=False)\n\n\nclass BaseHandler(webapp2.RequestHandler):\n\n def write(self, *a, **kw):\n return self.response.out.write(*a, **kw)\n\n def render_str(self, template, **params):\n t = jinja_env.get_template(template)\n return t.render(params)\n\n def render(self, template, **kw):\n return self.write(self.render_str(template, **kw))\n\n def render_template(self, view_filename, params=None):\n if params is None:\n params = {}\n template = jinja_env.get_template(view_filename)\n return self.response.out.write(template.render(params))\n\nclass MainHandler(BaseHandler):\n def get(self):\n\n Omeni = \"To sem jaz.\"\n params = {\"Omeni\": Omeni}\n\n return 
self.render_template(\"omeni.html\", params=params)\n\nclass ProjektiHandler(BaseHandler):\n def get(self):\n\n projekti = \"Moj najvecji projekt je, da se zjutraj zbudim.\"\n params = {\"projekti\": projekti}\n\n return self.render_template(\"projekti.html\", params=params)\n\nclass BlogHandler(BaseHandler):\n def get(self):\n\n blog_posts = [{\"title\":\"First post\", \"text\":\"A wonderful serenity has taken possession of my entire soul, like these sweet mornings of spring which I enjoy with Burek\"},\n {\"title\":\"Second post\", \"text\":\"One morning, when Gregor Samsa woke from troubled dreams, he found himself transformed in his bed into a horrible vermin.\"},\n {\"title\":\"Third post\", \"text\":\"The European languages are members of the same family. Their separate existence is a myth. For science, music, sport, etc,\"}]\n\n params = {\"blogs\": blog_posts}\n\n return self.render_template(\"blog.html\", params=params)\n\nclass KontaktHandler(BaseHandler):\n def get(self):\n\n phone = \"123-456-789\"\n email = \"derp@herp.si\"\n\n params = {\"phone\": phone, \"email\": email}\n return self.render_template(\"kontakt.html\", params=params)\n\nclass RandomHandler(BaseHandler):\n def get(self):\n\n params = {\"random\": random.randint(1, 1000)}\n\n return self.render_template(\"random.html\", params=params)\n\napp = webapp2.WSGIApplication([\n webapp2.Route('/', MainHandler),\n webapp2.Route('/projects', ProjektiHandler),\n webapp2.Route('/blog', BlogHandler),\n webapp2.Route('/kontakt', KontaktHandler),\n webapp2.Route('/random', RandomHandler),\n\n], debug=True)\n" } ]
2
MohammedAbuibaid/LatticeBasedCrypto
https://github.com/MohammedAbuibaid/LatticeBasedCrypto
1a6271e112a82aaa591f35204d4127acd41bba2b
fd0e87a5f5cc07fff2e5a6ae528a04f15a7502fe
22ef46b1857c1fa314cfe654518384b461210b06
refs/heads/master
2023-03-19T06:27:57.213803
2021-03-04T20:54:33
2021-03-04T20:54:33
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.43795621395111084, "alphanum_fraction": 0.4733296036720276, "avg_line_length": 21.54430389404297, "blob_id": "105943ace707884dc30c401138f44078c56968bb", "content_id": "3cb6bbdd9c81d89af465a32ba23b4cef5e14c8e8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1781, "license_type": "no_license", "max_line_length": 131, "num_lines": 79, "path": "/cryptanalysis/experiment.py", "repo_name": "MohammedAbuibaid/LatticeBasedCrypto", "src_encoding": "UTF-8", "text": "import sys\nimport logging\nfrom scipy.io import savemat\nfrom cryptanalysis import *\n\"\"\" expiremet 1 : test collision time \nN, p, q = 17, 3, 64\n# N, p, q = 5, 3, 8\nfor i in range(20):\n L = make_L(N, p, q)\n if count_hit(L) > 0:\n print(i)\n# print(L)\nprint(count_hit(L))\nprint('end')\n\nN, p, q = 23, 3, 64\nL = make_L(N, p, q)\nL_L = LLL(L)\nL_G, _ = GAME(L,3,10)\nprint(H(L_L), H(L_G))\n\"\"\"\n\n\n\n\n\"\"\" Fix n, change r (rounds of BIROT) \"\"\"\nlogging.basicConfig(filename='test.txt')\n\ndef fix_n(N, p, q, r, t, id_):\n H_O = 0\n H_L = 0\n res = np.zeros(r+1)\n max_v = np.full(r+1, -np.inf)\n min_v = np.full(r+1, np.inf)\n t_hit = []\n\n for test in range(t):\n # print(test)\n L = make_L(N, p, q)\n logging.info(L)\n H_O += H(L)\n L = LLL(L)\n H_L += H(L)\n\n _, test_res = GAME(L, 1, r)\n logging.info('original test_res')\n logging.info(test_res)\n t_hit.append(r)\n for i in range(r+1-len(test_res)):\n test_res.append(test_res[-1])\n res += test_res\n max_v = np.maximum(max_v, test_res)\n min_v = np.minimum(min_v, test_res)\n\n H_O /= t\n H_L /= t\n res /= t\n savemat(f'fix_n_N{N}_p{p}_q{q}_r{r}_t{t}_id{id_}.mat', {\n 'H_G': res, 'H_O': H_O, 'H_L': H_L, 'N': N, 'p': p, 'q': q, 'r': r, 't': t, 'min_v': min_v, 'max_v': max_v, 't_hit': t_hit,\n })\n\n print(H_O)\n print(H_L)\n print(max_v)\n print(min_v)\n print(res)\n \nif __name__ == '__main__':\n # fix_n(11, 3, 32, 20, 20)\n N = int(sys.argv[1])\n p = 
int(sys.argv[2])\n q = int(sys.argv[3])\n r = int(sys.argv[4])\n t = int(sys.argv[5])\n id_ = int(sys.argv[6])\n fix_n(N, p, q, r, t, id_)\n # fix_n(23, 3, 128, 20, 20)\n # fix_n(4)\n # fix_n(23, 3, 64, 20, 20)\n" }, { "alpha_fraction": 0.4290839433670044, "alphanum_fraction": 0.44630470871925354, "avg_line_length": 23.73964500427246, "blob_id": "153f6ff4cbfe4327b542b981ab740c3533c96d5f", "content_id": "ae4d8e601d41f02bfdf7f082f516c13d04a823f9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4181, "license_type": "no_license", "max_line_length": 107, "num_lines": 169, "path": "/cryptanalysis/cryptanalysis.py", "repo_name": "MohammedAbuibaid/LatticeBasedCrypto", "src_encoding": "UTF-8", "text": "from ntru.ntrucipher import NtruCipher\nimport numpy as np\nfrom numpy.linalg import det, slogdet, norm\nfrom scipy.linalg import circulant, qr\nimport logging\nlogging.basicConfig(filename='log.txt', level=logging.INFO)\n\ndef generate(N, p, q):\n ntru = NtruCipher(N, p, q)\n ntru.generate_random_keys()\n attrs = ['f_poly', 'f_p_poly', 'f_q_poly', 'g_poly', 'h_poly']\n return tuple(np.array(getattr(ntru, attr_name).all_coeffs()[::-1], dtype=float) for attr_name in attrs)\n\ndef birotate(v, k):\n assert len(v) % 2 == 0\n N = int(len(v) / 2)\n return np.append(np.roll(v[:N], k), np.roll(v[N:], k))\n\ndef BIROT(M, m, n):\n # if norm(M[m]) == norm(M[n]):\n # return M, False\n N = int(len(M)/2)\n M = np.copy(M)\n a = abs(det(M))\n for i in range(1, N):\n back_track = np.copy(M[n])\n M[n] = birotate(M[m], i)\n b = abs(det(M))\n # logging.INFO(f'a={a}, b={b}')\n if a == b:\n comp = M[n] == back_track\n if not np.all(comp):\n logging.info('old vec')\n logging.info(back_track)\n logging.info('new vec')\n logging.info(M[n])\n return M, True\n M[n] = back_track\n return M, False\n\ndef make_L(N,p,q):\n while True:\n try:\n f, Fp, Fq, g, h = generate(N,p,q)\n L = np.zeros((2*N,2*N), dtype=float)\n L[:N,:N] = np.eye(N)\n 
L[N:,N:] = q * np.eye(N)\n # print('h = ')\n # print(h)\n L[:N,N:] = circulant(h)\n break\n except ValueError:\n pass\n except Exception as e:\n raise e\n\n return L\n \n\ndef weight(L):\n return np.prod(norm(L,axis=1))\n\ndef mu(vi, vj):\n # a=vi@vj\n # b=vj@vj\n # print(a,b)\n # print(vi@vj/(vj@vj))\n return vi@vj/(vj@vj)\n\ndef proj(vi, vj):\n return mu(vi, vj) * vj\n\ndef gs(L):\n n = len(L)\n L2 = np.copy(L)\n # print(L2)\n for i in range(1, n):\n L2[i] -= sum(proj(L[i], L2[j]) for j in range(i))\n return L2\n\ndef LLL(L, delta=3/4):\n n = len(L)\n L = np.copy(L)\n L2 = gs(L)\n k = 1\n while k < n:\n for j in range(k-1, -1, -1):\n mu_kj = mu(L[k], L2[j])\n if abs(mu_kj) > 0.5:\n L[k] -= L[j] * round(mu_kj)\n L2 = gs(L)\n\n if L2[k]@L2[k] >= (delta - mu(L[k], L2[k-1])**2) * (L2[k-1]@L2[k-1]):\n k += 1\n else:\n L[[k, k-1]] = L[[k-1, k]]\n L2 = gs(L)\n k = max(k-1, 1)\n return L\n\n\ndef sorted_mat(L):\n idx = np.argsort(norm(L,axis=1))\n return L[idx]\n\n\ndef count_hit(L):\n L = sorted_mat(L)\n n = len(L)\n cnt = 0\n for i in range(1,n):\n for j in range(i):\n _, check = BIROT(L, j, i)\n if check:\n cnt += 1\n return cnt\n\ndef GAME(L, p, r, delta=3/4):\n L = np.copy(L)\n n = len(L)\n res = [H(LLL(L))]\n for round_ in range(r):\n L2 = L\n L = LLL(L, delta)\n for i in range(p-1):\n np.random.shuffle(L2)\n L3 = LLL(L2, delta)\n if weight(L3) < weight(L):\n L = L3\n \n L = sorted_mat(L)\n # print(norm(L,axis=1,keepdims=1))\n improved = False\n j = 1 if norm(L[0]) != (n/2)**0.5 else 2\n for i in range(n-1, j, -1):\n L2, imporved = BIROT(L, j, i)\n if imporved: \n # print('old L = ')\n # print(L)\n # print('new L = ')\n # print(L2)\n L = L2\n res.append(H(L))\n logging.info('IMPROVED by BIROT')\n logging.info(f'j = {j}, i = {i}')\n break\n\n if not imporved:\n logging.warn(f'GAME ends earlier because there is no hit when r = {round_}!')\n # t_hit.append(round_)\n break\n\n return L, res\n\ndef H(L):\n n = len(L)\n return (abs(det(L))/weight(L))**(1/n)\n\ndef 
compare(N,p,q):\n L = make_L(N,p,q)\n L_L = LLL(L)\n L_G = GAME(L, 3, 10)[0]\n print(L_L)\n print(L_G)\n print('LLL: ',H(L_L))\n print('GAME:',H(L_G))\n\nif __name__ == '__main__':\n compare(N,p,q)\n" }, { "alpha_fraction": 0.5973559617996216, "alphanum_fraction": 0.646319568157196, "avg_line_length": 91.81818389892578, "blob_id": "e943291c74462afe69d0b021ca85e6b3c25b5a21", "content_id": "b95b1cd5812580c5210abf19ab4c0d1af4204c4c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 6132, "license_type": "no_license", "max_line_length": 427, "num_lines": 66, "path": "/README.md", "repo_name": "MohammedAbuibaid/LatticeBasedCrypto", "src_encoding": "UTF-8", "text": "# LatticeBasedCryto\n\n## 1. Information:\n- A short 6-week research project under the [SHARP special programme](https://www.sutd.edu.sg/SHARP) in SUTD supervised by lecturer [Wong Wei Pin](https://esd.sutd.edu.sg/people/faculty/wong-wei-pin)\n- Lattice-Based Cryptography (LBC) has faster encryption/decryption and more importantly, it's quantum resistance. \n- We learned about the basics of lattices and hard lattice problems like Shortest Vectors Problem or Closet Vectors Problem. \n- We then proceed to implement the NTRU Public Key Cryptosystem using Matlab and try to perform cryptanalysis (recover `private keys` given `public keys` and `public parameters`) on this system using Python.\n\n## 2. Implementation:\n### a. [Research Poster](Complementing_LLL_Lattice_Reduction_Algorithm_with_BIROT_to_find_SVPs.pdf)\n### b. 
Cryptosystem\n- Based on [NTRUEncrypt protocol](https://en.wikipedia.org/wiki/NTRUEncrypt) and its detailed formulation in the textbook [An Introduction to Mathematical Cryptography](https://www.springer.com/gp/book/9781441926746), we were able to write a Matlab program (making used of Matlab's vectorization for faster encryption and decryption)that can simulate the full process of encoding, encrypting, decrypting and decoding a message:\n - **Message**: `m = \"I love you!\"`\n - **Public parameters**: `[N, p, q, d] = [47, 3, 128, 7]`\n some other possible parameters choices according to NTRU protocol to ensure 1-1 encrypt-decrypt are:\n \n | N | p | q | d |\n | :---: | :---: | :---: | :---: |\n | 5 | 3 | 64 | 1 |\n | 7 | 3 | 64 | 2 |\n | 11 | 3 | 64 | 3 |\n | 23 | 3 | 128 | 7 |\n | 47 | 3 | 128 | 7 |\n | 59 | 3 | 128 | 7 |\n | 83 | 3 | 128 | 7 |\n - **Key Generation**: Generate `public_key` and `private_key` from the public parameters to be used for encryption and decryption. `public_key` will be used by Bob to encrypt the message while `private_key` will be used by Alice to decrypt the message (a form of *assymetric cryptography*)\n - **Encoding** (there can be more efficient protocol, we just find one that work for us so we can quickly try out the full encryption system): \n - first, we turn a string into its ASCII numeric values. Then, we add padding so all ASCII values are 3-letter long. If the very fist digit in the message is `0`, this information will be lost when we convert base 10 to base 3 number (encyprting purposes). In this case, we convert the very first `0` into `9` (No ASCII value will reach 900 so this can be easily converted back during decoding).\n \n | Letter | ASCII (DEC) | Final (3 digits) |\n | :----: | :---------: | :--------------: |\n | I | 73 | 973 |\n | <space>| 32 | 032 |\n | l | 108 | 108 |\n | o | 111 | 111 |\n | v | 118 | 118 |\n | e | 101 | 101 |\n | <space>| 032 | 032 |\n | u | 117 | 117 |\n | ! 
| 33 | 033 |\n - then we will split the full sequence `973032108111118101032117033` into blocks of 15 digits each (Matlab's precision is 16 digits by default). This is equivalent to 5 character per block. There will be zero-padding at the end of the last block if there is less than 15 digits:\n `973032108111118` && `101032117033000`\n - each of this block will then be converted to base 3 (ternary) number. This is equivalent to a block size <br>`C = 32` ($3^{31} \\approx 10e^{15}$). If less then 32 bits are required, there will be zero-padding at the front:\n `11201121012221221121210211222201` && `00111020201120112221121021110111`\n - the result is an `R x 32` ternary array where `R` is the total number of blocks required to encode `m`.\n - Encrypting:\n - using NTRUEncrypt protocol, we convert this array into completely random coefficients. Since we chose <br>`N = 47` and NTRU is based on Polynomial Rings, the encrypted message will be in the form of a `n x 47` array (block size of 47). Below we show 2 completely random encryption after 2 run:\n - Run 1:<br>\n ![](https://i.imgur.com/6vuwBzy.png)\n - Run 2:<br>\n ![](https://i.imgur.com/9gS6cWT.png)\n - **Decrypting and Decoding**: If the encrypted message is passed back for decryption, we can easily convert back to the original post-encoded message. Then with the relevant block number information, we can reshape this back for decoding. \n - **Example**: If you have Matlab installed, simply clone this repository and use codes inside the [cryptosystem](cryptosystem) folder to try out the program. 
A screenshot will be shown below to show you the expected output of our program:\n - Choosing *messages*, *parameters* and generate `public_key` and `private_key`:\n ![](https://i.imgur.com/XiFUrsl.png)\n - Run 2: Complementing_LLL_Lattice_Reduction_Algorithm_with_BIROT_to_find_SVPs.pdf\n - Encoding and Encrypting:<br>\n ![](https://i.imgur.com/AjBQuJF.png)\n - Decrypting and Decoding:<br>\n ![](https://i.imgur.com/WR1YTQG.png)\n \n### c. Cryptanalysis:\n- Due to time constraint, we could not write a full Matlab script for cryptanalysis. Hence, we refered to a previously done python script for NTRUEncrypt by [jkrauze](https://github.com/jkrauze/ntru) in Python and build our attack using Python from there (codes in [cryptanalysis](cryptanalysis) folder).\n- We use a Latice Reduction Method called [Lenstra–Lenstra–Lovász](https://en.wikipedia.org/wiki/Lenstra%E2%80%93Lenstra%E2%80%93Lov%C3%A1sz_lattice_basis_reduction_algorithm) (LLL) in combination with the idea in this master thesis on [Parallel Symmetric Attack](Mater_Thesis.pdf\n) by Daniel Socek.\n- The main results are shown in our [research poster](Complementing_LLL_Lattice_Reduction_Algorithm_with_BIROT_to_find_SVPs.pdf). \n" } ]
3
victorleworker/PythonTraining
https://github.com/victorleworker/PythonTraining
1b0fd0235447447d09123441d37c771386095558
2ac13576cde520727d8d7df2fbfdef324bbeb43b
c7faf28948e74ebabdad57e3bb112ad1b9c9616c
refs/heads/master
2020-03-28T03:23:39.107472
2018-09-25T08:21:02
2018-09-25T08:21:02
147,641,813
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6399999856948853, "alphanum_fraction": 0.7200000286102295, "avg_line_length": 24, "blob_id": "4080c66fc70cec4fd0f3480be61cc0621e8fb433", "content_id": "07b13cfa0b29598129c1cd8ff1dad6d9a95666a9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 75, "license_type": "no_license", "max_line_length": 45, "num_lines": 3, "path": "/blockchain/utility/__init__.py", "repo_name": "victorleworker/PythonTraining", "src_encoding": "UTF-8", "text": "from utility.hash_util import hash_string_256\n\n__all__=['hash_string_256']\n" }, { "alpha_fraction": 0.7075471878051758, "alphanum_fraction": 0.7232704162597656, "avg_line_length": 20.200000762939453, "blob_id": "28f2754f418c7f8b62f1ea91fa589eb2f35d91a5", "content_id": "f8a2fc6f5ea1aae101aa9310f4a3035e32263b00", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 318, "license_type": "no_license", "max_line_length": 61, "num_lines": 15, "path": "/blockchain.py", "repo_name": "victorleworker/PythonTraining", "src_encoding": "UTF-8", "text": "blockchain = [[1]]\n\n\ndef get_last_blockchain_value():\n return blockchain[-1]\n\n\ndef add_value(transaction_amount, last_transaction):\n blockchain.append([last_transaction, transaction_amount])\n print(blockchain)\n\n\nadd_value(3, get_last_blockchain_value())\nadd_value(5)\nadd_value(6, get_last_blockchain_value())\n" } ]
2
KumarSanskar/Python-Programs
https://github.com/KumarSanskar/Python-Programs
3341e47f43f7c96ac7ef9cab5e9baedb4d40a684
a33cde03b19b36b1c447f78aa2f30f2593d38708
2c2c8d82d3a5ee78a4e4bd4ed501fc31203204ab
refs/heads/master
2021-01-04T13:31:39.509503
2020-11-18T04:25:39
2020-11-18T04:25:39
240,574,672
3
2
null
2020-02-14T18:33:32
2020-10-18T10:06:57
2020-10-18T10:09:54
Python
[ { "alpha_fraction": 0.6485355496406555, "alphanum_fraction": 0.6652719378471375, "avg_line_length": 46.20000076293945, "blob_id": "c97966aedf92301a43af7fd20ab1718f9a5e4195", "content_id": "6021137c1fac3b6c3a11b0580b5a9be88ab555d5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 239, "license_type": "no_license", "max_line_length": 62, "num_lines": 5, "path": "/Cubes _in _range.py", "repo_name": "KumarSanskar/Python-Programs", "src_encoding": "UTF-8", "text": "# program to print cubes in an input range:\r\nnum1 = int(input(\"Enter from where you want cube of number:\"))\r\nnum2 = int(input(\"Enter till where you want cube of number:\"))\r\nfor i in range(num1,num2):\r\n print(\"Cube of\",i,\"is\",i*i*i,\".\")" }, { "alpha_fraction": 0.6688311696052551, "alphanum_fraction": 0.6688311696052551, "avg_line_length": 28.799999237060547, "blob_id": "062c299cba6d19754b841eec64ccdb9fc0e74d52", "content_id": "ea88bebdf665996a8efe29ac47ecd67c20d6ca16", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 154, "license_type": "no_license", "max_line_length": 49, "num_lines": 5, "path": "/catenation.py", "repo_name": "KumarSanskar/Python-Programs", "src_encoding": "UTF-8", "text": "# program that shows (catenation) of two strings\r\nfirst_name = \"Thor\"\r\nlast_name = \"Odison\"\r\nfull_name = first_name + \" \" + last_name\r\nprint(full_name)\r\n" }, { "alpha_fraction": 0.6073619723320007, "alphanum_fraction": 0.6319018602371216, "avg_line_length": 31, "blob_id": "86376c4aa99298897a094d5d965ee59113c0d51c", "content_id": "5d4f8185b7c6e01cabb4070a202cb20f77845822", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 163, "license_type": "no_license", "max_line_length": 58, "num_lines": 5, "path": "/Using_pass.py", "repo_name": "KumarSanskar/Python-Programs", "src_encoding": "UTF-8", "text": "# Program to 
understand working of pass statement:\r\nfor i in range(20):\r\n if i == 10:\r\n pass # it simply has no effect on working of loop\r\n print(i)" }, { "alpha_fraction": 0.6613756418228149, "alphanum_fraction": 0.6613756418228149, "avg_line_length": 32.3636360168457, "blob_id": "84e9d0b869c9b47ae05f7b7b29b416b67681bec8", "content_id": "4c45d0c4357c536989e2abb5f53e0da703d920a1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 378, "license_type": "no_license", "max_line_length": 58, "num_lines": 11, "path": "/check_equal.py", "repo_name": "KumarSanskar/Python-Programs", "src_encoding": "UTF-8", "text": "# Programs to check two no. are equal or not\r\nn = int(input(\"Enter first no \"))\r\np = int(input(\"Enter second no \"))\r\nif n == p: # using equality operator and if else\r\n print(\"They are equal\")\r\nelse:\r\n print(\"They are unequal\")\r\nprint()\r\n# Using ternary if else structure\r\n# This will be printed at second time\r\nprint(\" They are equal\" if n == p else \"They are unequal\")\r\n" }, { "alpha_fraction": 0.6103448271751404, "alphanum_fraction": 0.6448276042938232, "avg_line_length": 34.5, "blob_id": "8a237dd47767eee1ad11c576eec832eac5a47c24", "content_id": "2e487f938a5376560fb5804b9ceb9ced717bc7ae", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 290, "license_type": "no_license", "max_line_length": 74, "num_lines": 8, "path": "/nested_if_example.py", "repo_name": "KumarSanskar/Python-Programs", "src_encoding": "UTF-8", "text": "# Nested if example(using parameter of number should be greater than 30):\r\nnum = int(input(\"Enter a number greater than 30\"))\r\nif num >= 30:\r\n print(\"Number accepted\")\r\n if num % 2 == 0:\r\n print(\"Number is even\")\r\nelse:\r\n print(\"Number is smaller than 30, please re-enter\")" }, { "alpha_fraction": 0.6724137663841248, "alphanum_fraction": 0.6724137663841248, "avg_line_length": 27.5, 
"blob_id": "d5404c82cab2295a5bbecda44d020106916d185b", "content_id": "467c0d40c1ab42f2c6f60e438ddec7cd5a00814a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 116, "license_type": "no_license", "max_line_length": 53, "num_lines": 4, "path": "/Extract_characters.py", "repo_name": "KumarSanskar/Python-Programs", "src_encoding": "UTF-8", "text": "# program to extract characters from an input string\r\nb = eval(input(\"Enter a string:\"))\r\nfor i in b:\r\n print(i)" }, { "alpha_fraction": 0.5277777910232544, "alphanum_fraction": 0.5833333134651184, "avg_line_length": 28.33333396911621, "blob_id": "56a6c1c6a6fa9a43b43beeef57bff6795b801a9f", "content_id": "3b5f7e671d37279efbf0d160d1074ed65f7d997e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 180, "license_type": "no_license", "max_line_length": 61, "num_lines": 6, "path": "/break_statement.py", "repo_name": "KumarSanskar/Python-Programs", "src_encoding": "UTF-8", "text": "# Program to show use of break statement :\r\nlst = [1,2,3,4,5,6,7,8,9]\r\nfor i in lst:\r\n if (i == 6):\r\n break # break simply moves execution out of the loop\r\n print(i)" }, { "alpha_fraction": 0.6270626783370972, "alphanum_fraction": 0.6468647122383118, "avg_line_length": 35.875, "blob_id": "c15d3e18a94ac437934644a0280f9f37effee2b6", "content_id": "506b7ab43a7a981226fae1731f08a00055819e93", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 303, "license_type": "no_license", "max_line_length": 83, "num_lines": 8, "path": "/sum_of_digits.py", "repo_name": "KumarSanskar/Python-Programs", "src_encoding": "UTF-8", "text": "# This program calculates the sum of digits of an input number\r\nnum = int((input(\"Enter the number: \"))) # takes input from user in integer format\r\nsum = 0\r\nwhile num > 0: # iterates till value of num becomes zero\r\n x = num%10\r\n sum =sum + x\r\n 
num = num//10\r\nprint(\"sum of digits is \",sum)\r\n" }, { "alpha_fraction": 0.7198067903518677, "alphanum_fraction": 0.739130437374115, "avg_line_length": 49.75, "blob_id": "c3d73928bd5c38bfc00a879041a478642ce637b4", "content_id": "3e45bc181821cf400749759428b52d69193186ec", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 207, "license_type": "no_license", "max_line_length": 71, "num_lines": 4, "path": "/temprature_converter.py", "repo_name": "KumarSanskar/Python-Programs", "src_encoding": "UTF-8", "text": "# takes temperature in degree celsius and converts in degree Fahrenheit\r\ncel = float(input('Enter temperature in degree Celsius '))\r\nfar = (9/5)*cel+32\r\nprint(\"Temperature in degree Fahrenheit is \", far)\r\n" }, { "alpha_fraction": 0.686274528503418, "alphanum_fraction": 0.6901960968971252, "avg_line_length": 49, "blob_id": "ef6427cd8172a2f2b9f5af8f1959940d5466428a", "content_id": "da1a4934e08ac0b890c0efd0aca8e7cebe79187a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 255, "license_type": "no_license", "max_line_length": 85, "num_lines": 5, "path": "/Using_insert.py", "repo_name": "KumarSanskar/Python-Programs", "src_encoding": "UTF-8", "text": "# Program using ***.insert() method to add element to a list in a specified position\r\nVech = [\"car\", \"bus\", \"truck\", \"bike\"]\r\nprint(\"List before adding an element using insert\", Vech)\r\nVech.insert(2, \"aeroplane\")\r\nprint(\"List after using insert\", Vech)\r\n" }, { "alpha_fraction": 0.4802744388580322, "alphanum_fraction": 0.5197255611419678, "avg_line_length": 51, "blob_id": "845ab000113017c880892fc61e954dce82249a12", "content_id": "16d136f39e671bdbc45eb60127b599739a7faf12", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 583, "license_type": "no_license", "max_line_length": 87, "num_lines": 11, "path": 
"/even_odd_in_list.py", "repo_name": "KumarSanskar/Python-Programs", "src_encoding": "UTF-8", "text": "# to create a list of even and no in list between 1 to 100\r\nls1 = [] # creation of two lists\r\nls2 = []\r\nfor i in range(1, 100): # use of for loop in range 1 to 100\r\n if i % 2 == 0: # checks for the condition whether the no is even\r\n ls1.append(i)\r\n else: # appends the no to ls1 if even else to ls2\r\n ls2.append(i)\r\ni = i+1 # increases the value after evaluation\r\nprint(\"list of even no is as follows: \", ls1)\r\nprint(\"list of odd no is as follows: \", ls2)\r\n" }, { "alpha_fraction": 0.6351351141929626, "alphanum_fraction": 0.637837827205658, "avg_line_length": 31.81818199157715, "blob_id": "4cd4d4ded06466bbdb3df3817a29c6cdeb30c35f", "content_id": "59eb38749a6323260b3c1fe84ed72cb9b291d513", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 370, "license_type": "no_license", "max_line_length": 74, "num_lines": 11, "path": "/Using_break_continue.py", "repo_name": "KumarSanskar/Python-Programs", "src_encoding": "UTF-8", "text": "# Program showing the use of break and continue statement simultaneously\r\nwhile True:\r\n val = input(\"Enter a Number\")\r\n if val == \"q\":\r\n print(\"Exiting from the program as you entered a special key \")\r\n\r\n if not val.isdigit():\r\n print(\"Enter digits only, continue statement will be used now!!!\")\r\n continue\r\n val =int(val)\r\n print(val**3)" }, { "alpha_fraction": 0.5860927104949951, "alphanum_fraction": 0.5927152037620544, "avg_line_length": 23.33333396911621, "blob_id": "5d36f48a2026d461004ecabeca16e745b10bccdc", "content_id": "fc2477e9fdc3c40e278ff4621bd8f4912d412156", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 302, "license_type": "no_license", "max_line_length": 44, "num_lines": 12, "path": "/length_of_the_list.py", "repo_name": "KumarSanskar/Python-Programs", "src_encoding": 
"UTF-8", "text": "# Program to find length of the given list:\r\n# using len() function :\r\nlst = [\"Ram\",\"Ali\",\"Hardy\",\"Joe\"]\r\nprint(\"Length of the list is: \",len(lst))\r\n\r\n\r\n# using iteration i.e. a counter variable:\r\nlst = [\"Ram\",\"Ali\",\"Hardy\",\"Joe\"]\r\nc = 0\r\nfor i in lst:\r\n c += 1\r\nprint(\"Length of the list is: \",c)" }, { "alpha_fraction": 0.5941644310951233, "alphanum_fraction": 0.6312997341156006, "avg_line_length": 35.70000076293945, "blob_id": "eb505728354d7224a298a34a57722476017d6a80", "content_id": "358017bc6fa9e025eb79aae623fc0afcc2a2a473", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 377, "license_type": "no_license", "max_line_length": 77, "num_lines": 10, "path": "/distance.py", "repo_name": "KumarSanskar/Python-Programs", "src_encoding": "UTF-8", "text": "# two find distance between two co-ordinates\r\nimport math # to use sqrt function it has been imported from math\r\n\r\nx1 = int(input(\"enter X1 point\"))\r\nx2 = int(input(\"enter X2 point\"))\r\ny1 = int(input(\"enter Y1 point\"))\r\ny2 = int(input(\"enter Y2 point\"))\r\n\r\ndist = math.sqrt((x2 - x1) ** 2 + (y2 - y1) ** 2)\r\nprint(\"Distance between two points is \", dist, \"units\")\r\n" }, { "alpha_fraction": 0.6075156331062317, "alphanum_fraction": 0.6450939178466797, "avg_line_length": 32.35714340209961, "blob_id": "4976f101775a0330516b8506fe6138d129a881de", "content_id": "5ae0a7bd2f582118aac3ea57c382f153c2ef88f9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 479, "license_type": "no_license", "max_line_length": 87, "num_lines": 14, "path": "/if_elif_example.py", "repo_name": "KumarSanskar/Python-Programs", "src_encoding": "UTF-8", "text": "# If....elif example(using program to tell grade of a student according to percentage)\r\npercen = int(input(\"Enter your percentage: \"))\r\nif percen > 95:\r\n print(\"Your grade is 
A+\")\r\nelif percen >85 and percen< 95:\r\n print(\"Your grade is B+\")\r\nelif percen > 80 and percen< 85:\r\n print(\"Your grade is B\")\r\nelif percen > 75 and percen< 80:\r\n print(\"Your grade is C+\")\r\nelif percen > 70 and percen< 75:\r\n print(\"Your grade is C\")\r\nelse:\r\n print(\"You have failed.\")" }, { "alpha_fraction": 0.620512843132019, "alphanum_fraction": 0.6307692527770996, "avg_line_length": 25.714284896850586, "blob_id": "5193cf7063bdd31b23c7465dcc43142fd06c3187", "content_id": "76c9a024fa334165eeca8d7618d2b50cf0bb9572", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 390, "license_type": "no_license", "max_line_length": 72, "num_lines": 14, "path": "/Check_Even_or_Odd.py", "repo_name": "KumarSanskar/Python-Programs", "src_encoding": "UTF-8", "text": "# To check whether number is even or odd using modulus operator\r\nnum = int(input(\"Enter a number\"))\r\nif num % 2 == 0:\r\n print(\"Number is even\")\r\nelse:\r\n print(\"Number is odd\")\r\n\r\n# To check whether number is even or odd without using modulus operator\r\nn = int(input(\"Enter a number\"))\r\nres = (n *2)/2\r\nif res == n:\r\n print(\"Number is even\")\r\nelse:\r\n print(\"Number is odd\")\r\n\r\n" }, { "alpha_fraction": 0.6602563858032227, "alphanum_fraction": 0.6602563858032227, "avg_line_length": 29.600000381469727, "blob_id": "f099d23e27ead381916d9f99a5cc0e78d19ae3c9", "content_id": "402adfe9e516f4163d0f78fdf8aefc7df38407f8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 156, "license_type": "no_license", "max_line_length": 44, "num_lines": 5, "path": "/extract_elements_from_list.py", "repo_name": "KumarSanskar/Python-Programs", "src_encoding": "UTF-8", "text": "# program to extract elements from list:\r\na = eval(input(\"Enter elements of list: \"))\r\nprint(\"Elements of the input list is :\")\r\nfor i in a:\r\n print(i)" }, { "alpha_fraction": 
0.6886447072029114, "alphanum_fraction": 0.692307710647583, "avg_line_length": 37, "blob_id": "8bcc27ecf5490d8b9f9a23db273c5006acd0a317", "content_id": "57ebb8b381b3c5481d2b6c9e62c2a8e9aa62d933", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 273, "license_type": "no_license", "max_line_length": 51, "num_lines": 7, "path": "/area_rectangle.py", "repo_name": "KumarSanskar/Python-Programs", "src_encoding": "UTF-8", "text": "# program to calculate area of rectangle.\r\nlength = int(input(\"enter length of rectangle \"))\r\nbreadth = int(input(\"enter breadth of rectangle \"))\r\narea = length*breadth\r\nperi = 2*(length+breadth)\r\nprint(\"Area is \", area, \"sq units\")\r\nprint(\"Perimeter is \", peri, \"units\")\r\n" }, { "alpha_fraction": 0.6724565625190735, "alphanum_fraction": 0.692307710647583, "avg_line_length": 34.45454406738281, "blob_id": "4dea29fa0c8e18c8d0bd067140a8d4f7b03fdbe2", "content_id": "14e4ec5b8407b80032dea535d3eb664d2e4b81b8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 403, "license_type": "no_license", "max_line_length": 98, "num_lines": 11, "path": "/GST_calculator.py", "repo_name": "KumarSanskar/Python-Programs", "src_encoding": "UTF-8", "text": "# A program which calculates and displays total amount after including CGST AND SGST on a product\r\ncp = int(input(\"Enter cost of production \"))\r\ncgst = 9\r\nsgst = 9\r\namt_cgst = (cgst/100)*cp\r\namt_sgst = (sgst/100)*cp\r\ntotal = cp+amt_cgst+amt_sgst\r\nprint(\"cost price\", cp)\r\nprint(\"Amount after CGST \", amt_cgst)\r\nprint(\"Amount after SGST \", amt_sgst)\r\nprint(\"Total amount inc. 
CGST and SGST \", total)\r\n\r\n" }, { "alpha_fraction": 0.5, "alphanum_fraction": 0.5223880410194397, "avg_line_length": 17.428571701049805, "blob_id": "b81cfac5212289f4298adbcaf01a5b9d23acb5ad", "content_id": "eb7232c1a424131269bb79702b560a9e1048d921", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 134, "license_type": "no_license", "max_line_length": 52, "num_lines": 7, "path": "/sum_of_n_natural no.py", "repo_name": "KumarSanskar/Python-Programs", "src_encoding": "UTF-8", "text": "n = int(input()) # takes input from user in integer\r\na = 1\r\nsum_n = 0\r\nwhile a <= n:\r\n sum_n = sum_n + a\r\n a += 1\r\nprint(sum_n)" }, { "alpha_fraction": 0.6369583010673523, "alphanum_fraction": 0.6582174897193909, "avg_line_length": 35.06060791015625, "blob_id": "b7f9b9c59b4051caac748c8a23382d4cf62e5e95", "content_id": "567b86f0ffcf0d1b2c5d337749847eccc21fbe14", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1223, "license_type": "no_license", "max_line_length": 72, "num_lines": 33, "path": "/Deleting_items_in_list.py", "repo_name": "KumarSanskar/Python-Programs", "src_encoding": "UTF-8", "text": "# Using various methods to delete items from list:\r\n\r\n# Using ***.remove() method:\r\nlst1 = [\"Ram\", \"Shyam\", \"Ghanshayam\"]\r\nprint(\"list before removing: \", lst1)\r\nlst1.remove(\"Ram\")\r\nprint(\"list after removing: \",lst1)\r\n\r\n# Using ***.pop() method to remove item from the list:\r\nlst1 = [\"Ram\", \"Shyam\", \"Ghanshayam\"]\r\nprint(\"list before using .pop: \", lst1)\r\nlst1.pop() # this will remove the last elemnt as index is not specified\r\nprint(\"list after using .pop(): \", lst1)\r\nlst1.pop(0) # this will remove the tem from the index 0\r\nprint(\"list after using .pop(0): \", lst1)\r\n\r\n# Using del() :\r\nlst1 = [\"Ram\", \"Shyam\", \"Ghanshayam\"]\r\nprint(\"List before using del(): \",lst1)\r\ndel lst1[0] # is used to delete at 
specified index\r\nprint(\"List after using del \", lst1)\r\n\r\n# Using ***.clear() , it removes everything from the list\r\nlst1 = [\"Ram\", \"Shyam\", \"Ghanshayam\"]\r\nprint(\"List before using .clear(): \", lst1)\r\nlst1.clear() # this will empty the elements of the list\r\nprint(\"List after using .clear(): \",lst1)\r\n\r\n# Using del keyword to delete entire list:\r\nlst1 = [\"Ram\", \"Shyam\", \"Ghanshayam\"]\r\nprint(\"List before using del keyword is: \", lst1)\r\ndel lst1 # this will delete the entire list\r\nprint(\"The list is:\",lst1)\r\n" }, { "alpha_fraction": 0.605381190776825, "alphanum_fraction": 0.6218236088752747, "avg_line_length": 34.83333206176758, "blob_id": "7f4503b51c5c06b2c97242a6058450ac5ca17978", "content_id": "caca1648f56787f5f5e8bce238c6e34ee7d1f2ec", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 669, "license_type": "no_license", "max_line_length": 106, "num_lines": 18, "path": "/Getting_items_of _list.py", "repo_name": "KumarSanskar/Python-Programs", "src_encoding": "UTF-8", "text": "# Programs to access items/elements of the list:\r\n# (1) Using index:- This can be used to get a specific item:\r\nlst = [\"Ram\",\"Ali\",\"Hardy\",\"Joe\"]\r\nprint(\"List is: \",lst)\r\nprint(\"Item at second is:\",lst[2])\r\n\r\n\r\n# (2) Using negative index:- this can be used to acces item from backwards, (-1) represents last element:\r\nlst = [\"Ram\",\"Ali\",\"Hardy\",\"Joe\"]\r\nprint(\"List is: \",lst)\r\nprint(\"Item at last is: \",lst[-1])\r\nprint(\"Item at second last is :\", lst[-2])\r\n\r\n\r\n# (3) Using index range:- By mentioning the index range it can be accessed:\r\nlst = [\"Ram\",\"Ali\",\"Hardy\",\"Joe\",\"Hariom\",\"Rebeca\"]\r\nprint(\"List is: \",lst)\r\nprint(\"Items in range [2:5] is: \",lst[2:5])\r\n\r\n\r\n\r\n" }, { "alpha_fraction": 0.6359338164329529, "alphanum_fraction": 0.6501182317733765, "avg_line_length": 50.875, "blob_id": 
"879cdde293c58ac34cf9fd9fa1b3ff67bd2c5fbb", "content_id": "b7c316b679e4ed95b73bea2f710b6dc5402e2a5d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 423, "license_type": "no_license", "max_line_length": 105, "num_lines": 8, "path": "/check_right _triangle.py", "repo_name": "KumarSanskar/Python-Programs", "src_encoding": "UTF-8", "text": "# check whether the triangle is right angled or not using Hypotenuse perpendicular and base as parameters\r\nh = int(input(\"Enter length of Hypotenuse \"))\r\np = int(input(\"Enter length of Perpendicular \"))\r\nb = int(input(\"Enter length of Base \"))\r\nif h ** 2 == p ** 2 + b ** 2: # from Pythagoras Theorem(h^2=p^2+b^2)\r\n print(\"Triangle is right angled\")\r\nelse:\r\n print(\"Triangle is not right angled\")\r\n" }, { "alpha_fraction": 0.6489726305007935, "alphanum_fraction": 0.6626712083816528, "avg_line_length": 51.272727966308594, "blob_id": "43357238b5f53e813c640d49485047492524e6b3", "content_id": "60b410e7b3b0df1dd067edb9673b5db5e9b075d9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 584, "license_type": "no_license", "max_line_length": 119, "num_lines": 11, "path": "/fibbonaci_series_printing.py", "repo_name": "KumarSanskar/Python-Programs", "src_encoding": "UTF-8", "text": "# progrm to print fibbonaci series i.e. 
1,1,2,3,5,8(a series in which in which a number is sum of two preceding terms)\r\na = 0 # setting the first term as zero\r\nb = 1 #setting second term as one\r\nnum = int(input(\"Enter number for how many times series is to be generated: \"))\r\nprint(\"The given fibbonaci series for\",num,\"terms is:\")\r\nprint(\"\",b,end = \"\")\r\nfor i in range(num):\r\n c = a + b # making third term as sum of first two terms\r\n a = b # interchanging the second term as to first term\r\n b = c # interchanging third term as to second term\r\n print(\" \",c,end =\"\")" }, { "alpha_fraction": 0.6593406796455383, "alphanum_fraction": 0.6703296899795532, "avg_line_length": 34.79999923706055, "blob_id": "a46a24fd68dcd3337c956567df69031478e42991", "content_id": "829ffe4932f5af9233625c2d645041d6b47dbf27", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 182, "license_type": "no_license", "max_line_length": 89, "num_lines": 5, "path": "/continue_statement.py", "repo_name": "KumarSanskar/Python-Programs", "src_encoding": "UTF-8", "text": "# using continue statement in a program:\r\nfor i in range(6):\r\n if i == 4:\r\n continue # it simply continues after where it encountered the continue statement\r\n print(i)" }, { "alpha_fraction": 0.5615941882133484, "alphanum_fraction": 0.5724637508392334, "avg_line_length": 44, "blob_id": "215d564746264cd39a4148c17dc212bb48fce237", "content_id": "901efdbb59502b031181c329b9b4a88cb1f45b28", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 276, "license_type": "no_license", "max_line_length": 72, "num_lines": 6, "path": "/factorial.py", "repo_name": "KumarSanskar/Python-Programs", "src_encoding": "UTF-8", "text": "# calculates factorial of no supplied by user\r\nn = int(input()) # takes input from user in type integer\r\nfact = 1 # a counter variable for calculation\r\nfor i in range(1, n+1):\r\n fact = fact*i\r\nprint(\"factorial of\", 
n, \"is\", fact)\r\n" }, { "alpha_fraction": 0.7888198494911194, "alphanum_fraction": 0.7888198494911194, "avg_line_length": 31.200000762939453, "blob_id": "5c9b560089d85523a03c2167b8028e53ac1fd902", "content_id": "04934f43b351f0dd46506ee69dd861c4406b8017", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 161, "license_type": "no_license", "max_line_length": 61, "num_lines": 5, "path": "/README.md", "repo_name": "KumarSanskar/Python-Programs", "src_encoding": "UTF-8", "text": "# Python-Programs\nContains programs in Python Language \n>Contains programs in Python Language for easy understanding.\n>Created on PyCharm IDE.\n* Author - Kumar Sanskar\n" }, { "alpha_fraction": 0.6666666865348816, "alphanum_fraction": 0.6666666865348816, "avg_line_length": 35.79999923706055, "blob_id": "cfe27fc20cc3bebb1b8545c2e9207e2d3407e344", "content_id": "330e1fc11f5e086e88a6f5973f722f44e711c68e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 189, "license_type": "no_license", "max_line_length": 49, "num_lines": 5, "path": "/use _of _escape_sequences.py", "repo_name": "KumarSanskar/Python-Programs", "src_encoding": "UTF-8", "text": "print(\"English using Escape Sequences:\")\r\nprint(\"For New Line: use \\\\n \\n\")\r\nprint(\"Languages:\\nPython\\nC\\nJava\\nC++\\nRuby\\n\")\r\nprint(\"For Using Tab Space: Use \\\\t \\t\")\r\nprint(\"\\tPython\")\r\n" }, { "alpha_fraction": 0.6271186470985413, "alphanum_fraction": 0.6652542352676392, "avg_line_length": 37.33333206176758, "blob_id": "c168e11fa626ebad3368da4665b4c4688b25c393", "content_id": "312ab3f71aba430f7bae094dc83e53ebab684c59", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 236, "license_type": "no_license", "max_line_length": 120, "num_lines": 6, "path": "/Using_else_with_for _loop.py", "repo_name": "KumarSanskar/Python-Programs", "src_encoding": 
"UTF-8", "text": "\r\n# use of else with for loop(else part gets executed only when loop is completed and there is no item left to iterate ):\r\nlst = [1,2,3,4,5,6,7,8,9]\r\nfor i in lst:\r\n print(i)\r\nelse:\r\n print(\"Items of the list have been printed.\")" }, { "alpha_fraction": 0.5234375, "alphanum_fraction": 0.52734375, "avg_line_length": 21.272727966308594, "blob_id": "cb949bfc795835c12e82bf25d2246b86997e806c", "content_id": "19ad0569ba320c8b29d0cad74bb5b219da79313c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 256, "license_type": "no_license", "max_line_length": 54, "num_lines": 11, "path": "/HCF_of_two_numbers.py", "repo_name": "KumarSanskar/Python-Programs", "src_encoding": "UTF-8", "text": "def hcf(a, b):\r\n if b == 0:\r\n return a\r\n else:\r\n return hcf(b, a % b)\r\n\r\n\r\na = int(input(\"Enter the first number: \"))\r\nb = int(input(\"Enter the second number: \"))\r\nres = hcf(a,b)\r\nprint(\"HCF of given two numbers\",a,\"and\",b,\"is: \",res)\r\n" }, { "alpha_fraction": 0.6437768340110779, "alphanum_fraction": 0.6480686664581299, "avg_line_length": 36.83333206176758, "blob_id": "008d63c78590def4a40696db0a009f59fd294c23", "content_id": "c72632883fd511b0b1d99ce8a1df4ad693b42c6c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 233, "license_type": "no_license", "max_line_length": 73, "num_lines": 6, "path": "/Palindrome_test.py", "repo_name": "KumarSanskar/Python-Programs", "src_encoding": "UTF-8", "text": "# program to check that given string is palindrome or not(case-sensitive)\r\na = str(input(\"Enter the string to be checked: \"))\r\nif a == a[::-1]:\r\n print(\"It's a palindrome string.\")\r\nelse:\r\n print(\"No, It's not a palindrome.\")\r\n" }, { "alpha_fraction": 0.6496519446372986, "alphanum_fraction": 0.6635730862617493, "avg_line_length": 33.83333206176758, "blob_id": "dece2dc4196f443cc5cbb0b52ac3936606a19089", 
"content_id": "76521de3c2993f8225c6fe4185127fd825fa1079", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 862, "license_type": "no_license", "max_line_length": 80, "num_lines": 24, "path": "/Power_of_the _Number.py", "repo_name": "KumarSanskar/Python-Programs", "src_encoding": "UTF-8", "text": "# Program to find power for a given number using function pow\r\nimport math # imports math module\r\n\r\nnum1 = int(input(\"Enter the number: \"))\r\nnum2 = int(input(\"Enter the value of power to the number: \"))\r\nres = math.pow(num1, num2) # uses pow() function from Math module to find power\r\nprint(num1, \"to the power\", num2, \"is\", res)\r\n\r\n# Program to find power of the given number without using any function\r\n\r\nnum = int(input(\"Enter the number: \"))\r\npower = int(input(\"Enter the value of power to the number: \"))\r\nsum = 1\r\ni = 1\r\nwhile i <= power:\r\n sum = sum * num\r\n i += 1\r\nprint(num, \"to the power\", power, \"is\", sum)\r\n\r\n# Program to find power of the given number using ** method\r\nnum3 = int(input(\"Enter the number: \"))\r\npowr = int(input(\"Enter the value of power to the number: \"))\r\nresult = num3**powr\r\nprint(num3,\"to the power\",powr,\"is\",result)\r\n\r\n" }, { "alpha_fraction": 0.681034505367279, "alphanum_fraction": 0.6982758641242981, "avg_line_length": 44.79999923706055, "blob_id": "20791f10329dc47c2a3dbf1b79f221ffe00ee6ac", "content_id": "dc2cfb1589bb99b191d86880073f936fbbb9eda4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 232, "license_type": "no_license", "max_line_length": 70, "num_lines": 5, "path": "/Using_append.py", "repo_name": "KumarSanskar/Python-Programs", "src_encoding": "UTF-8", "text": "# Program using ***.append() method to add item to a list in the end:\r\nlst1 = [\"Ram\", \"Shyam\", \"Ghanshayam\"]\r\nprint(\"List before using append function\", 
lst1)\r\nlst1.append(\"Tularam\")\r\nprint(\"List after using append function\", lst1)" }, { "alpha_fraction": 0.6242774724960327, "alphanum_fraction": 0.6242774724960327, "avg_line_length": 26.83333396911621, "blob_id": "5c49cff4181445feb090bdf5e427396aa817cd2e", "content_id": "a1e3e28044a6fc770bf9d2e163f3f6dff147dce5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 173, "license_type": "no_license", "max_line_length": 40, "num_lines": 6, "path": "/catenation_using_+_operand.py", "repo_name": "KumarSanskar/Python-Programs", "src_encoding": "UTF-8", "text": "# \"catenation\" using + operand\r\nfirst_name = \"Kumar\"\r\nlast_name = \"Sanskar\"\r\nfull_name = first_name + \" \" + last_name\r\nmessage = \"Hello,\" + full_name + \"!\"\r\nprint(message)\r\n" } ]
34
andrey-kulevich/leap_motion_data_collector
https://github.com/andrey-kulevich/leap_motion_data_collector
5837e67a37f19f949534ae9caa17fdad2ec8f9f9
e68fc90869bc131d990ac7da96fd098c030aa0e5
0661e4734f4d06854f0679f8f35d9c07f1e7acd4
refs/heads/master
2023-03-09T02:09:50.617501
2021-02-28T16:18:40
2021-02-28T16:18:40
337,346,991
2
0
null
2021-02-09T09:03:43
2021-02-11T10:33:23
2021-02-24T10:41:33
Python
[ { "alpha_fraction": 0.5396634340286255, "alphanum_fraction": 0.5480769276618958, "avg_line_length": 39.09638595581055, "blob_id": "c28bc7a9ad5a64f5bdbbfcd8f63628a7fc83130a", "content_id": "6b57fec9a9f1119ee955e087ec568e6d4b0c4845", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3328, "license_type": "no_license", "max_line_length": 118, "num_lines": 83, "path": "/ActionListener.py", "repo_name": "andrey-kulevich/leap_motion_data_collector", "src_encoding": "UTF-8", "text": "import Leap\nfrom datetime import datetime\nimport csv\nimport os\n\n\nclass ActionListener(Leap.Listener):\n \"\"\"\n listener for all events in Leap Motion controller (extends Leap.Listener)\n\n letter - symbol that we are currently tracking\n\n creates in folder ./collected_data csv file with all frames collected during to session\n \"\"\"\n finger_names = ['Thumb', 'Index', 'Middle', 'Ring', 'Pinky']\n\n def __init__(self, letter):\n Leap.Listener.__init__(self)\n if not os.path.exists(\"collected_data\"):\n os.mkdir(\"collected_data\")\n self.file = open(\"./collected_data/\" + letter + \"_\" + datetime.now().strftime(\"%d-%m_%H-%M-%S\") + \".csv\", \"w\")\n self.writer = csv.writer(self.file)\n header = ['hand_type',\n 'position_x', 'position_y', 'position_z',\n 'roll', 'pitch', 'yaw',\n 'arm_dir_x', 'arm_dir_y', 'arm_dir_z',\n 'wrist_pos_x', 'wrist_pos_y', 'wrist_pos_z',\n 'elbow_pos_x', 'elbow_pos_y', 'elbow_pos_z']\n\n for i in range(5):\n header.extend([self.finger_names[i] + '_finger_id', 'finger_length', 'finger_width'])\n for b in range(0, 4):\n header.extend(['prev_joint_x', 'prev_joint_y', 'prev_joint_z',\n 'next_joint_x', 'next_joint_y', 'next_joint_z',\n 'direction_x', 'direction_y', 'direction_z'])\n\n self.writer.writerow(header)\n\n def on_init(self, controller):\n print \"Initialized\"\n\n def on_connect(self, controller):\n print \"Connected\"\n\n def on_disconnect(self, controller):\n # Note: not dispatched when 
running in a debugger.\n print \"Disconnected\"\n\n def on_exit(self, controller):\n print \"Exited\"\n self.file.close()\n\n def on_frame(self, controller):\n # Get the most recent frame and report some basic information\n frame = controller.frame()\n\n print \"Frame id: %d, timestamp: %d, hands: %d, fingers: %d\" % (\n frame.id, frame.timestamp, len(frame.hands), len(frame.fingers))\n\n # Get hands\n for hand in frame.hands:\n\n vector = [-1 if hand.is_left else 1,\n hand.palm_position[0], hand.palm_position[1], hand.palm_position[2],\n hand.palm_normal.roll, hand.direction.pitch, hand.direction.yaw,\n hand.arm.direction[0], hand.arm.direction[1], hand.arm.direction[2],\n hand.arm.wrist_position[0], hand.arm.wrist_position[1], hand.arm.wrist_position[2],\n hand.arm.elbow_position[0], hand.arm.elbow_position[1], hand.arm.elbow_position[2]]\n\n # Get fingers\n for finger in hand.fingers:\n vector.extend([finger.id, finger.length, finger.width])\n # Get bones\n for b in range(0, 4):\n bone = finger.bone(b)\n vector.extend([bone.prev_joint[0], bone.prev_joint[1], bone.prev_joint[2],\n bone.next_joint[0], bone.next_joint[1], bone.next_joint[2],\n bone.direction[0], bone.direction[1], bone.direction[2]])\n\n self.writer.writerow(vector)\n\n if not frame.hands.is_empty:\n print \"\"\n" }, { "alpha_fraction": 0.5781302452087402, "alphanum_fraction": 0.5865676403045654, "avg_line_length": 33.057472229003906, "blob_id": "0879c4aa8cc24f7201b6dd6af21abad41d379f60", "content_id": "4d4597ac764498da5d994eac26d1e83de7e3e032", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3150, "license_type": "no_license", "max_line_length": 119, "num_lines": 87, "path": "/MainWindow.py", "repo_name": "andrey-kulevich/leap_motion_data_collector", "src_encoding": "UTF-8", "text": "# coding=utf-8\nfrom Tkinter import *\nfrom ttk import *\nfrom ActionListener import ActionListener\nimport Leap\nimport time\n\n\nclass MainWindow(Tk):\n 
\"\"\"\n main window that provides functionality for tracking data from Leap Motion controller (extends Tkinter.Tk)\n \"\"\"\n current_letter = 0\n letters = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U',\n 'V', 'W', 'X', 'Y', 'Z']\n images = {}\n\n def __init__(self, master=Tk):\n Tk.__init__(self)\n self.title(\"Сбор данных для распознавания азбуки жестов\")\n self.minsize(width=470, height=600)\n self.iconbitmap('./img/hand.ico')\n self.configure(bg='white')\n bg_style = Style()\n bg_style.configure('bg.TLabel', background='white')\n\n self.master = master\n self.images.fromkeys(self.letters)\n self.load_images()\n\n self.start = Button(self, text=\"НАЧАТЬ\", command=self.record)\n self.exp_letter = Label(text=self.letters[self.current_letter], font=('Roboto', 64), style='bg.TLabel')\n\n self.img = Label(image=self.images[self.letters[self.current_letter]], style='bg.TLabel')\n self.create_widgets()\n\n def create_widgets(self):\n \"\"\"\n creates widgets for window\n :return:\n \"\"\"\n header = Label(text=\"Попробуйте повторить этот жест. Затем нажмите кнопку \\\"Начать\\\" \\n\"\n \"и удерживайте этот жест в течение 4 секунд. \\n\"\n \"Держите руку перед контроллером, пока идет запись. 
\\n\",\n font=('Roboto', 11), style='bg.TLabel')\n header.pack(padx=5, pady=5)\n\n self.img.pack(padx=5, pady=5)\n self.exp_letter.pack(pady=20)\n self.start.pack(side=\"bottom\", fill=X, padx=5, pady=5)\n\n def load_images(self):\n \"\"\"\n load images with gestures for each letter\n :return:\n \"\"\"\n for i in range(len(self.letters)):\n self.images[self.letters[i]] = PhotoImage(file='./img/' + self.letters[i] + '.gif')\n\n def record(self):\n \"\"\"\n run tracking data\n\n tracking is in progress for 4 seconds\n\n collected data will be available in ./collected_data\n :return:\n \"\"\"\n # Create a sample listener and controller\n listener = ActionListener(self.letters[self.current_letter])\n controller = Leap.Controller()\n\n self.start.config(text=\"ИДЕТ ЗАПИСЬ...\")\n self.update()\n\n # Have the sample listener receive events from the controller\n controller.add_listener(listener)\n\n # Keep this process running 4 seconds\n time.sleep(4)\n\n controller.remove_listener(listener)\n\n self.start.config(text=\"НАЧАТЬ\")\n self.current_letter += 1\n self.exp_letter.config(text=self.letters[self.current_letter])\n self.img.config(image=self.images[self.letters[self.current_letter]])\n" }, { "alpha_fraction": 0.6153846383094788, "alphanum_fraction": 0.6223776340484619, "avg_line_length": 16.875, "blob_id": "7934e3c52d30870ef3551e546b1a9f9d386e662a", "content_id": "18638517d3dd89a0c4ea38b50377111c2b1b002b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 143, "license_type": "no_license", "max_line_length": 33, "num_lines": 8, "path": "/main.py", "repo_name": "andrey-kulevich/leap_motion_data_collector", "src_encoding": "UTF-8", "text": "# coding=utf-8\nfrom MainWindow import MainWindow\n\n\nif __name__ == \"__main__\":\n # launch app\n window = MainWindow()\n window.mainloop()\n" } ]
3
mooolen/CS180FinalMP
https://github.com/mooolen/CS180FinalMP
f0fcecdafcabfc7f19109d8b46fbf8a468ebe9ef
dbb7cfe171638853b14d522dc14f9871637ab365
d8dbb16ffe6d3667e51d43968c68ccb16926fbd5
refs/heads/master
2021-01-20T09:32:45.481671
2013-04-07T10:42:11
2013-04-07T10:42:11
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7233181595802307, "alphanum_fraction": 0.7319170236587524, "avg_line_length": 28.954545974731445, "blob_id": "4509f868e02e2d27f2fe34a4d9649652061429fc", "content_id": "5a28442f1b849e01368146868b8688df0d8729c0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1977, "license_type": "no_license", "max_line_length": 175, "num_lines": 66, "path": "/apriori.py", "repo_name": "mooolen/CS180FinalMP", "src_encoding": "UTF-8", "text": "def generateSingleItemSet(transfile):\n\titemSet = set()\n\n\tfor line in datfile:\n\t\tfor item in line.split(' '):\n\t\t\titemSet.add(frozenset([int(item)]))\n\n\treturn itemSet\n\ndef getSetOfFrequentKItemSet(transfile, transCount, itemSet, minSupport):\n\tdatfile.seek(0)\n\tcandidates = defaultdict(int)\n\tfreqitemset = set()\n\n\tfor item in itemSet:\n\t\tfor line in datfile:\n\t\t\tif not line.isspace() and item.issubset(set(map(int,line.split(' ')))):\n\t\t\t\tcandidates[item] += 1\n\n\tfor item, supcount in candidates.iteritems():\n\t\tif supcount/transCount >= minSupport:\n\t\t\tfreqitemset.add(item)\n\n\treturn freqitemset, supcount/transCount\n\n\ndef getTransactionCount(datfile):\n\ttransCount = 0\n\tfor line in datfile:\n\t\tif not line.isspace():\n\t\t\ttransCount += 1\n\treturn transCount\n\ndef getNextCandidateItemSets(candidates):\n\t'''joining + pruning\n\t......\n\t......\n\t......\n\n\treturn nextcandidates\n\treturn None pag wala\n\t'''\n\ndef generateStrongRules(setoffreqitemset):\n\t''' setoffreqitemset is a list of itemsets with their corresponding support\n\t\te.g. 
[ [{A},2], [{B},3], [{C},5], [{A,B},9] ]\n\t'''\n\n\ndef apriori(filename, minSupport, minConfidence):\n\tdatfile = open(filename, 'r')\t\t\t\n\ttransCount = getTransactionCount()\t#count number of transactions\n\tsetoffreqitemset = []\n\n\t'''C_1 -- 1-itemset candidates '''\n\tcandidates = generateSingleItemSet(datfile)\t# generate 1-itemset candidates -> C_1 \n\tsetoffreqitemset.append(getSetOfFrequentKItemSet(datfile, transCount, candidates, minSupport)) # count support/freq of each item, delete all items that has sup<minsup -> L_1\n\ti = 0\n\n\n\twhile not setoffreqitemset is None:\t#continue until generated frequent set is null\n\t\tcandidates = getNextCandidateItemSets(setoffreqitemset[i][0])\t#generate next candidates by joining sets, then prune -> C_2\n\t\tsetoffreqitemset.append(getSetOfFrequentKItemSet(datfile, transCount, candidates, minSupport)) # count support/freq of each item, delete all items that has sup<minsup -> L_k\n\t\ti += 1\n\n\tgenerateStrongRules(setoffreqitemset)\n" } ]
1
ampampamp/lg_test
https://github.com/ampampamp/lg_test
854340d14b55cf1e7e449d48831198f74e6db408
fa146d31780afca9b16bd86f53427abd204f44dd
9e8edb5ff3469e3e86ff31a3ae4da1633dcbf45a
refs/heads/master
2016-08-05T11:31:24.369906
2015-05-22T17:43:18
2015-05-22T17:43:18
35,849,600
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5547282099723816, "alphanum_fraction": 0.5666418671607971, "avg_line_length": 19.984375, "blob_id": "58aa2f68a3f0c59c14c5b4c7bed04a355762660b", "content_id": "4c6bd7608ab4066838fb6fd437b104ec8554cda4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1343, "license_type": "no_license", "max_line_length": 76, "num_lines": 64, "path": "/send_email.py", "repo_name": "ampampamp/lg_test", "src_encoding": "UTF-8", "text": "from smtplib import SMTP\nfrom email.mime.text import MIMEText\nimport sqlite3\nimport time\n\nfrom account_scrape import CREDS\n\ndef get_connection():\n return sqlite3.connect('test.db')\n\n\ndef get_send_id(c):\n c.execute('select coalesce(max(email_send_id), 0) + 1 from email_sends')\n return c.fetchone()[0]\n\n\ndef register_send(conn, c, send_id):\n c.execute(\n '''\n insert into email_sends \n (email_send_id, send_ts) values \n (%s, %.0f)\n ''' % (send_id, time.time())\n )\n conn.commit()\n\n\ndef send_email(address):\n conn = get_connection()\n c = conn.cursor()\n send_id = get_send_id(c)\n\n email = MIMEText(\n 'Hello world! 
' +\n '<img border=0 height=1 width=1 ' +\n 'src=\"http://localhost:5000/opens/%s\">' % send_id,\n 'html'\n )\n email['To'] = address\n email['From'] = CREDS['user']\n email['Subject'] = 'Test send'\n\n smtp = SMTP()\n try:\n smtp.connect('smtp.mail.yahoo.com', 587)\n except Exception, e:\n raise RuntimeError, repr(e)\n smtp.starttls()\n smtp.login(CREDS['user'], CREDS['pwd'])\n smtp.sendmail(\n CREDS['user'],\n address,\n email.as_string()\n )\n smtp.quit()\n register_send(conn, c, send_id)\n\ndef main():\n send_email(CREDS['user'])\n return\n\n\nif __name__ == '__main__':\n main()\n" }, { "alpha_fraction": 0.8409090638160706, "alphanum_fraction": 0.8636363744735718, "avg_line_length": 6.333333492279053, "blob_id": "fcda123491ab47dc103fb84fde52cbdf79ba1628", "content_id": "f707f9790c85eeef6f7700655e3de2371483232e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 44, "license_type": "no_license", "max_line_length": 10, "num_lines": 6, "path": "/requirements.txt", "repo_name": "ampampamp/lg_test", "src_encoding": "UTF-8", "text": "imapclient\nemail\nflask\njson\nsmtplib\nsqlite3\n" }, { "alpha_fraction": 0.6045576333999634, "alphanum_fraction": 0.6058981418609619, "avg_line_length": 20.941177368164062, "blob_id": "58c123169b266021f6c68cbd3ec087972809d8b8", "content_id": "1e677c4139309a6e5f7485757c23c547b2c73fbe", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 746, "license_type": "no_license", "max_line_length": 49, "num_lines": 34, "path": "/app.py", "repo_name": "ampampamp/lg_test", "src_encoding": "UTF-8", "text": "from flask import Flask\nimport json\nimport time\n\nfrom account_scrape import CREDS, Account, scrape\nfrom send_email import get_connection\n \napp = Flask(__name__)\n\n@app.route('/emails')\ndef show_emails():\n acct = Account(CREDS['user'], CREDS['pwd'])\n acct_pulled = scrape(acct)\n return 
json.dumps(acct_pulled.emails)\n\n\n@app.route('/opens/<int:email_send_id>')\ndef record_open(email_send_id):\n print email_send_id\n conn = get_connection()\n c = conn.cursor()\n c.execute(\n '''\n update email_sends \n set open_ts = %.0f \n where email_send_id = %s\n ''' % (time.time(), email_send_id)\n )\n conn.commit()\n return str(email_send_id) \n\n\nif __name__ == '__main__':\n app.run(debug=True)\n" }, { "alpha_fraction": 0.5709401965141296, "alphanum_fraction": 0.5772079825401306, "avg_line_length": 19.89285659790039, "blob_id": "fc6fcee4a769279628f2ebf9aeda447775505c11", "content_id": "dbfabb2ef880652f1f0be4ef0e8fe0024f888cc2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1755, "license_type": "no_license", "max_line_length": 65, "num_lines": 84, "path": "/account_scrape.py", "repo_name": "ampampamp/lg_test", "src_encoding": "UTF-8", "text": "from imapclient import IMAPClient\nfrom email.parser import HeaderParser\n\n# Probably want to put this somewhere else!\nCREDS = {\n 'user': 'lgrulesyo@yahoo.com',\n 'pwd': '4A<+Lf5Rk`7^[*3F'\n}\n\nclass Account():\n def __init__(self, account, pwd):\n\tself.account = account\n\tself.pwd = pwd\n\tself.emails = dict()\n\n def account(self):\n\treturn self.account\n\n def pwd(self):\n\treturn self.pwd\n\n def emails(self):\n\treturn self.emails\n\n\ndef scrape(acct, num_to_get=10):\n imap = IMAPClient(\n\t'imap.mail.yahoo.com', \n\tport=993,\n\tuse_uid=True,\n\tssl=True\n )\n\n try:\n\timap.login(acct.account, acct.pwd)\n except Exception, e:\n\traise RuntimeError(repr(e))\n\t\n imap.select_folder('INBOX', readonly=True)\n inbox = imap.search(['NOT DELETED'])\n\n parser = HeaderParser()\n h_fetch = 'BODY[HEADER]'\n b_fetch = 'BODY[TEXT]'\n\n emails = imap.fetch(inbox[-num_to_get:], [h_fetch, b_fetch]) \n\n for email_id, content in emails.items():\n\theader = parser.parsestr(content[h_fetch])\n\tbody = content[b_fetch]\n\tacct.emails[int(email_id)] = {\n\t 
'header': {\n\t 'to': header['To'],\n\t\t'from': header['From'],\n\t\t'received': header['Received']\n\t },\n\t 'body': body if len(body) <= 200 else body[:200] \n\t}\t\n return acct\n\n\ndef main():\n acct = Account(CREDS['user'], CREDS['pwd'])\n acct_pulled = scrape(acct)\n\n for email_id, email in acct_pulled.emails.items():\n print(\n 'Email ID {_id}\\n' \\\n\t 'To: {to}\\n' \\\n\t 'From :{_from}\\n' \\\n\t 'Received: {rec}\\n\\n' \\\n\t 'Body: {body}\\n\\n'.format(\n\t _id=email_id,\n\t to=email['header']['to'],\n\t _from=email['header']['from'],\n\t rec=email['header']['received'],\n\t body=email['body']\n\t ) \n\t)\n return\n\n\nif __name__ == '__main__':\n main()\n" }, { "alpha_fraction": 0.5284280776977539, "alphanum_fraction": 0.5351170301437378, "avg_line_length": 22, "blob_id": "5f8968b5b6985153358025a14ca09248bbed3987", "content_id": "798288b881a118185d9486e3f973e3e30d85811d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 299, "license_type": "no_license", "max_line_length": 48, "num_lines": 13, "path": "/sql_db.py", "repo_name": "ampampamp/lg_test", "src_encoding": "UTF-8", "text": "import sqlite3\n\nwith sqlite3.connect('test.db') as connection:\n c = connection.cursor()\n c.execute(\n '''\n CREATE TABLE IF NOT EXISTS email_sends (\n email_send_id integer primary key,\n send_ts integer,\n open_ts integer\n )\n '''\n )\n" } ]
5
ShubhamTatvamasi/Uadyam
https://github.com/ShubhamTatvamasi/Uadyam
69b5dfef982903370b743acb04f14ead0db7e185
b438f146c2a7c283a801ebcab62ae1e1f9625edc
ec9babf3b233ff1c16e5d2a6b09b6c873ca349ab
refs/heads/main
2023-03-02T06:08:57.287427
2021-02-08T18:42:53
2021-02-08T18:42:53
319,406,429
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6784037351608276, "alphanum_fraction": 0.7065727710723877, "avg_line_length": 27.46666717529297, "blob_id": "00edc37aaf810a52a9a8a647b988fc248032bf3a", "content_id": "41a7b4a6cfa8d0c6289cf4b839087b8c9791a043", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 426, "license_type": "no_license", "max_line_length": 63, "num_lines": 15, "path": "/app.py", "repo_name": "ShubhamTatvamasi/Uadyam", "src_encoding": "UTF-8", "text": "from flask import Flask\nfrom flask_cors import CORS\nimport os\nfrom pathlib import Path\nfull_path = os.getcwd()\nRESUME_FOLDER = str(Path(full_path).parents[0]) + '/app/resume'\nJD_FOLDER = str(Path(full_path).parents[0]) + '/app/jd'\n\n\napp = Flask(__name__)\nCORS(app)\napp.secret_key = \"secret key\"\napp.config['RESUME_FOLDER'] = RESUME_FOLDER\napp.config['JD_FOLDER'] = JD_FOLDER\napp.config['MAX_CONTENT_LENGTH'] = 16 * 1024 * 1024" }, { "alpha_fraction": 0.6094714999198914, "alphanum_fraction": 0.6122168898582458, "avg_line_length": 28.489795684814453, "blob_id": "5d4af9db1130888e47c22c53a1cc8d07ca7a11c7", "content_id": "4c6e29e0540d00f7bd852e919bba794f16dec75a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1457, "license_type": "no_license", "max_line_length": 88, "num_lines": 49, "path": "/matcher_bulk.py", "repo_name": "ShubhamTatvamasi/Uadyam", "src_encoding": "UTF-8", "text": "import nltk\nfrom gensim.models import Word2Vec\nfrom nltk.corpus import stopwords\nimport re\nimport pandas as pd\nimport numpy as np\nfrom scipy import spatial\nimport numpy as np\nimport os, sys\nimport spacy\nfrom sklearn.metrics.pairwise import cosine_similarity\nfrom jd_parsar import jdParsar\nfrom resume_parsar import resumeParsar\nfrom pathlib import Path\nimport math\nfrom collections import Counter\nfrom matcher import matcher\nimport ast\nen_nlp = spacy.load('en')\n\nclass matcher_bulk():\n def 
__init__(self):\n print('Bulk Matching started')\n self.mt = matcher()\n def match_bulk(self,resume_ls,jd_ls):\n result_final=[]\n for resume in resume_ls:\n '''try:\n resume = ast.literal_eval(resume)[0]\n except:\n resume = ast.literal_eval(resume)'''\n for jd in jd_ls:\n '''try:\n jd = ast.literal_eval(jd)[0]\n except:\n jd = ast.literal_eval(jd)'''\n result = self.mt.get_similarity_overall(jd, resume, self.mt.skill_2_vec)\n result_final.append(result)\n return result_final\n\nif __name__ == \"__main__\":\n\n mb = matcher_bulk()\n resume_ls = open(\"/home/lid/resume.txt\").readlines()\n jd_ls = open(\"/home/lid/JD.txt\").readlines()\n result_final=mb.match_bulk(resume_ls, jd_ls)\n print(len(result_final))\n df = pd.DataFrame(result_final)\n print(df)\n\n\n \n\n\n\n\n\n" }, { "alpha_fraction": 0.6247236728668213, "alphanum_fraction": 0.6347559690475464, "avg_line_length": 43.816795349121094, "blob_id": "d908461956939402ba0a8a4f00d22a95c21ef42d", "content_id": "901a6950cb83ee87d83fd892eec9fb35d042aab2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5881, "license_type": "no_license", "max_line_length": 137, "num_lines": 131, "path": "/matcher.py", "repo_name": "ShubhamTatvamasi/Uadyam", "src_encoding": "UTF-8", "text": "import nltk\nfrom gensim.models import Word2Vec\nfrom nltk.corpus import stopwords\nimport re\nimport pandas as pd\nimport numpy as np\nfrom scipy import spatial\nimport numpy as np\nimport os, sys\nimport spacy\nfrom sklearn.metrics.pairwise import cosine_similarity\nfrom jd_parsar import jdParsar\nfrom resume_parsar import resumeParsar\nfrom pathlib import Path\nimport math\nfrom collections import Counter\nen_nlp = spacy.load('en')\n\nclass matcher():\n def __init__(self):\n print('Matching started')\n self.jdparsar=jdParsar()\n self.resumeparsar=resumeParsar()\n self.full_path = os.getcwd()\n self.data_path = str(Path(self.full_path).parents[0]) + '/app/data/'\n self.df = 
pd.read_csv(self.data_path + 'Skills.csv')\n skills = self.df['skills'].to_string()\n sentences = nltk.sent_tokenize(skills)\n sentences = [nltk.word_tokenize(sentence) for sentence in sentences]\n for i in range(len(sentences)):\n sentences[i] = [word for word in sentences[i] if word not in stopwords.words('english')]\n self.skill_2_vec = Word2Vec(sentences, min_count=1)\n\n def get_propn(self,text):\n nns=[]\n en_doc = en_nlp(u'' + text)\n for token in en_doc:\n if str(token.pos_) == 'PROPN' or str(token.pos_) == 'NOUN':\n nns.append(token.text)\n return nns\n\n def counter_cosine_similarity(self,c1, c2):\n terms = set(c1).union(c2)\n dotprod = sum(c1.get(k, 0) * c2.get(k, 0) for k in terms)\n magA = math.sqrt(sum(c1.get(k, 0) ** 2 for k in terms))\n magB = math.sqrt(sum(c2.get(k, 0) ** 2 for k in terms))\n return dotprod / (magA * magB)\n\n\n\n def pre_process_text(self,text):\n text = re.sub(r'\\[[0-9]*\\]', ' ', text)\n text = re.sub(r'\\s+', ' ', text)\n text = text.lower()\n text = re.sub(r'\\d', ' ', text)\n text = re.sub(r'\\s+', ' ', text)\n return text\n\n\n def get_mean_vector(self,word2vec_model, words):\n # remove out-of-vocabulary words\n words = [word for word in words if word in word2vec_model.wv.vocab]\n if len(words) >= 1:\n return np.mean(word2vec_model[words], axis=0)\n else:\n return []\n\n\n def get_similarity_score(self,resume_vector, jd_vector):\n if len(resume_vector)==0 or len(jd_vector)==0:\n similarity=0\n else:\n similarity = 1 - spatial.distance.cosine(resume_vector, jd_vector)\n return similarity\n\n\n def get_parsed_jds(self,filename):\n print(\"jd parsing in progress\")\n result_jd = self.jdparsar.getparsedjd(filename)\n return result_jd\n\n def get_parsed_resume(self,resume_filename):\n print(\"resume parsing in progress\")\n resume_result = self.resumeparsar.generate_resume_result(resume_filename)\n return resume_result\n\n def get_similarity_overall(self,jd_result,resume_result,skill_2_vec):\n print('similarity calculation is in 
progress')\n result = {}\n resume_skill_all_vector = self.get_mean_vector(skill_2_vec, resume_result['Skills_All'].split(','))\n resume_primary_skill_vector = self.get_mean_vector( skill_2_vec, resume_result['Primary_Skills'].split(','))\n jd_skill_all_vector=self.get_mean_vector( skill_2_vec, jd_result['Skills_All'].split(','))\n jd_primary_skill_vector=self.get_mean_vector(skill_2_vec, jd_result['Primary_Skills'])\n result['Resume_filename'] = resume_result['Filename']\n result['JD_filename'] = jd_result['Filename']\n result['Skill_All_Simalrity'] = self.get_similarity_score(resume_skill_all_vector, jd_skill_all_vector)\n result['Primary_Skill_Simalrity'] = self.get_similarity_score(resume_primary_skill_vector, jd_primary_skill_vector)\n result['Weighted_Skill_Similarity'] = ((result['Primary_Skill_Simalrity'] + result['Skill_All_Simalrity']/4))/2\n resume_text=resume_result['Projects']\n jd_text=jd_result['Title']+jd_result['Skill_With_Experince']\n resume_nouns = self.get_propn(resume_text)\n jd_nouns = self.get_propn(jd_text)\n c_resume = Counter(resume_nouns)\n c_jd = Counter(jd_nouns)\n result['Content_Similarity']=self.counter_cosine_similarity(c_resume,c_jd)\n ps_resume = Counter(resume_result['Primary_Skills'].split(','))\n ps_jd = Counter(jd_result['Primary_Skills'].split(','))\n result['Primary_Skill_exact_similarity']=self.counter_cosine_similarity(ps_resume,ps_jd)\n loc_resume =Counter(resume_result['Preferred_Location'].split(','))\n loc_jd = Counter(jd_result['Location'].split(','))\n result['Location_similarity']=self.counter_cosine_similarity(loc_resume,loc_jd)\n skill_all_resume=Counter(resume_result['Skills_All'].split(','))\n skill_all_jd=Counter(jd_result['Skills_All'].split(','))\n result['Skills_All_exact_similarity'] = self.counter_cosine_similarity(skill_all_resume,skill_all_jd)\n result['Over_all_Similarity']= (result['Skills_All_exact_similarity'] + result['Skill_All_Simalrity']+ \\\n result['Primary_Skill_exact_similarity']+ \\\n 
result['Primary_Skill_Simalrity'] + result['Content_Similarity'] +result['Location_similarity'])/6\n return result\n\nif __name__ == \"__main__\":\n mp = matcher()\n # resume_filename = \"//home//lid//Downloads//SurajKumar_1.docx\"\n resume_filename = sys.argv[1]\n resume_result = mp.get_parsed_resume(resume_filename)\n print(resume_result)\n # jd_file_name=\"//home//lid//Downloads//Job Description-20201121T112409Z-001//Job Description//DevOps_JD.docx\"\n jd_file_name = sys.argv[2]\n result_jd = mp.get_parsed_jds(jd_file_name)\n print(result_jd)\n result = mp.get_similarity_overall(result_jd, resume_result, mp.skill_2_vec)\n print(result)\n \n\n\n\n\n\n" }, { "alpha_fraction": 0.7370030283927917, "alphanum_fraction": 0.7400611639022827, "avg_line_length": 16.210525512695312, "blob_id": "aebde69191b6cbc7fa3469d295d87c0251dde575", "content_id": "eed1695a776eb26376587ba3e13cd6d30bfde4f8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Dockerfile", "length_bytes": 327, "license_type": "no_license", "max_line_length": 77, "num_lines": 19, "path": "/Dockerfile", "repo_name": "ShubhamTatvamasi/Uadyam", "src_encoding": "UTF-8", "text": "FROM python:3\n\nWORKDIR /usr/src/app\n\nCOPY requirements.txt ./\n\nRUN pip install --no-cache-dir -r requirements.txt\n\nRUN python -c \"import nltk;nltk.download('stopwords');nltk.download('punkt')\"\n\nRUN python -m spacy download en\n\nCOPY . 
.\n\nRUN pip install resume_parser\n\nRUN apt update && apt install antiword\n\nCMD python main.py\n" }, { "alpha_fraction": 0.5758196711540222, "alphanum_fraction": 0.6086065769195557, "avg_line_length": 34.65853500366211, "blob_id": "b3040823ca4f5f4e3ac0bcb64933638f58bead2c", "content_id": "2660feca098861d23314764543a02a5b4b4368be", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1464, "license_type": "no_license", "max_line_length": 205, "num_lines": 41, "path": "/readme.md", "repo_name": "ShubhamTatvamasi/Uadyam", "src_encoding": "UTF-8", "text": "python -m spacy download en\nhas to be done in environment\n**************************************\nCalling various api\n**************************************\n#Steps for calling resume_parsar api\n**************************************\nimport requests\nimport json\nfrom flask import Flask, request, redirect, jsonify\nmyurl = 'http://127.0.0.1:5000/Resume_Parsar'\nfiles = {'file':open(\"resume.docx\",'rb')}\ngetdata = requests.get(myurl,files=files)\njson.loads(getdata.text)\n\n*************************************\n#Steps for calling jd_parsar api\n*************************************\nimport requests\nimport json\nfrom flask import Flask, request, redirect, jsonify\nmyurl = 'http://127.0.0.1:5000/Jd_Parsar'\nfiles = {'file':open(\"jd.docx\",'rb')}\ngetdata = requests.get(myurl,files=files)\njson.loads(getdata.text)\n***************************************\n#Steps for calling matcher api\n***************************************\nimport requests\nfrom flask import Flask, request, redirect, jsonify\nmyurl = 'http://127.0.0.1:5000/Matcher'\n#open('test.txt', 'rb')file2\nfiles = {'resume_file':open(\"//home//lid//Downloads//Omar_Nour_CV.docx\",'rb'),'jd_file':open(\"//home//lid//Downloads//Job Description-20201121T112409Z-001//Job Description//Business Analyst_JD.docx\",'rb')}\ngetdata = requests.get(myurl,files=files)\njson.loads(getdata.text)\n\n\nrunning 
diffrent scripts\npython resume_parsar.py <resume_file>\npython jd_parsar.py <jd_file>\npython matcher.py <resume_file> <jd_file>\n\n\n" }, { "alpha_fraction": 0.5554847121238708, "alphanum_fraction": 0.560222327709198, "avg_line_length": 42.02745056152344, "blob_id": "9a0645858d5b3f40be57d3595ed9f12aa773f4fd", "content_id": "1cd33c7d4d6ba2dc291841644460577e5922eb1f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 10976, "license_type": "no_license", "max_line_length": 183, "num_lines": 255, "path": "/jd_parsar.py", "repo_name": "ShubhamTatvamasi/Uadyam", "src_encoding": "UTF-8", "text": "import PyPDF2\nimport textract\nimport docx\nimport pandas as pd\nfrom nltk.stem import WordNetLemmatizer\nfrom nltk.corpus import stopwords\nimport re\nfrom docx.opc.constants import RELATIONSHIP_TYPE as RT\nimport spacy\nfrom spacy.matcher import Matcher\nimport spacy\nimport glob\nfrom pathlib import Path\nimport os\nfrom gensim import corpora, models, similarities\nfrom nltk.corpus import stopwords\nimport sys\nSTOPWORDS = set(stopwords.words('english'))\n\nclass jdParsar():\n def __init__(self):\n self.full_path = os.getcwd()\n self.data_path = str(Path(self.full_path).parents[0]) + '/app/data/'\n self.df = pd.read_csv(self.data_path + 'Skills.csv')\n self.skills = list(self.df['skills'])\n self.df_locations = pd.read_csv(self.data_path + 'Locations.csv')\n self.locations = list(self.df_locations['Cities'])\n self.education = [\n 'BE', 'B.E.', 'B.E', 'BS', 'B.S',\n 'ME', 'M.E', 'M.E.', 'MS', 'M.S',\n 'BTECH', 'B.TECH', 'M.TECH', 'MTECH', 'BCA', 'MCA', 'BSC', 'MSC', 'B.S.C', 'M.S.C', 'M.C.A'\n 'SSC', 'HSC', 'CBSE',\n 'ICSE', 'X', 'XII', '10TH', '12TH', 'BACHELORS OF ENGINEERING', 'B.E', 'PG', 'PGP', 'PGPA'\n , 'PGDBA']\n self.designations=['software engineer','trainee','intern','manager','data analyst','data engineer'\n 'architect','test engineer','qa','data scientist','analyst','consultant',\n 'business 
analyst','director','president','vice president']\n self.job_types=['contractor','contract','parttime','part time','fulltime','full time',\n 'third party payroll','remotly','wfh']\n\n def extract_jd_title(self,filename):\n title = ''\n doc = docx.Document(filename)\n fullText = \"\"\n for para in doc.paragraphs:\n text_para = (para.text)\n if len(text_para) > 5:\n title = text_para\n break\n return title\n\n def extract_job_type(self,resume_text):\n job_types_all = ''\n if any(job_type in str(resume_text).lower() for job_type in self.job_types):\n job_types_all = job_types_all + ',' + str(\n [job_type for job_type in self.job_types if (job_type in str(resume_text).lower())]).replace('[', '').replace(']',\n '').replace(\n '\\'', '').replace(', ', ',').strip()\n return job_types_all\n\n def extract_skills(self,resume_text):\n skills_all = ''\n if any(skill in str(resume_text).lower() for skill in self.skills):\n skills_all = [skill for skill in self.skills if (skill in str(resume_text).lower())]\n return self.listToString(skills_all)\n\n def extract_designation(self,resume_text):\n designations_all = ''\n if any(designation in str(resume_text).lower() for designation in self.designations):\n designations_all = [designation for designation in self.designations if (designation in str(resume_text).lower())]\n return self.listToString(designations_all)\n\n def extract_location(self,resume_text):\n location_all = ''\n if any(str(location) in str(resume_text).lower() for location in self.locations):\n location_all = [str(location) for location in self.locations if (str(location) in str(resume_text).lower())]\n return self.listToString(location_all)\n def extract_notice_period(self,resumetext):\n if 'immediate' in resumetext.lower() and 'join' in resumetext.lower():\n np='immediate'\n elif 'day' in resumetext.lower() and len(re.findall(r\"\\d{2}\", resumetext.lower()))>0:\n np=re.findall(r\"\\d{2}\", resumetext.lower())\n np=str(np)+'days'\n elif 'month' in 
resumetext.lower() and len(re.findall(r\"\\d{1}\", resumetext.lower()))>0:\n np=re.findall(r\"\\d{1}\", resumetext.lower())\n np=str(np)+'months'\n elif 'month' in resumetext.lower() and len(re.findall(r\"\\d{1}\", resumetext.lower()))>0:\n np=re.findall(r\"\\d{1}\", resumetext.lower())\n np=str(np)+'days'\n return np\n\n def listToString(self,ls):\n str1 = \"\"\n for ele in ls:\n str1 += ele.strip() + ','\n return str1\n\n def extract_primary_secondry_skill(self, filename):\n primary_skill = []\n secondry_skill = []\n count = 0\n doc = docx.Document(filename)\n for para in doc.paragraphs:\n text_para = (para.text)\n if any(skill in str(text_para).lower() for skill in self.skills) and count < 2:\n primary_skill.extend([skill for skill in self.skills if (skill in str(text_para).lower())])\n count = count + 1\n elif any(skill in str(text_para).lower() for skill in self.skills) and count > 2:\n secondry_skill.extend([skill for skill in self.skills if (skill in str(text_para).lower())])\n count = count + 1\n if len(primary_skill) > 3:\n primary_skill_final = primary_skill[0:2]\n secondry_skill.extend(primary_skill[3:len(primary_skill) - 1])\n else:\n primary_skill_final = primary_skill\n primary_skill = list(dict.fromkeys(primary_skill_final))\n secondry_skill = list(dict.fromkeys(secondry_skill))\n #return str(primary_skill).replace('[', '').replace(']', ''), str(secondry_skill).replace('[', '').replace(']','')\n return self.listToString(primary_skill),self.listToString(secondry_skill)\n\n def iter_hyperlink_rels(self,rels):\n hls = \"\"\n for rel in rels:\n if rels[rel].reltype == RT.HYPERLINK:\n hls = hls + str(rels[rel]._target)\n return hls\n\n def readtables(self,filename):\n document = docx.Document(filename)\n tables = document.tables\n data = []\n if len(tables) > 0:\n data = []\n for table in tables:\n keys = None\n for i, row in enumerate(table.rows):\n text = (cell.text for cell in row.cells)\n\n if i == 0:\n keys = tuple(text)\n continue\n row_data = 
dict(zip(keys, text))\n data.append(row_data)\n return data\n\n def extracttabletextskills(self,filename, skills):\n document = docx.Document(filename)\n tables = document.tables\n data = []\n table_text = \"\"\n if len(tables) > 0:\n data = []\n for table in tables:\n for row in table.rows:\n for cell in row.cells:\n for paragraph in cell.paragraphs:\n table_text = table_text + \" \" + paragraph.text\n if any(skill in str(table_text).lower() for skill in skills):\n break\n return table_text\n\n def extracttabletexteducation(self,filename):\n document = docx.Document(filename)\n tables = document.tables\n data = []\n table_text = \"\"\n if len(tables) > 0:\n data = []\n for table in tables:\n for row in table.rows:\n for cell in row.cells:\n for paragraph in cell.paragraphs:\n table_text = table_text + \" \" + paragraph.text\n if any(edu in str(table_text).lower() for edu in self.education):\n break\n return table_text\n\n def readdocxFile(self,filename):\n doc = docx.Document(filename)\n fullText = \"\"\n for para in doc.paragraphs:\n text_para = (para.text)\n fullText = fullText + ' ' + text_para\n rels = doc.part.rels\n hls = self.iter_hyperlink_rels(rels)\n fullText = fullText + \" \" + hls\n return fullText\n def getfilenames(self,jd_location):\n file_names = glob.glob(jd_location)\n return file_names\n\n def extract_skills_with_experience(self,filename):\n swe = ''\n doc = docx.Document(filename)\n for para in doc.paragraphs:\n text_para = (para.text)\n search_string = ['yr', 'yrs', 'year', 'years', 'exp', 'experience']\n if any(search in str(text_para).lower() for search in search_string):\n # swe=swe+','+str([search for search in search_string if(search in str(text_para).lower())]).replace('[','').replace(']','').replace('\\'','').replace(', ',',').strip()\n swe = swe + \" \" + text_para\n return swe\n def getparsedjd_multiple(self,file_names):\n fulltexts = []\n titles = []\n skills_all = []\n swe = []\n primary_skills = []\n secondry_skills = []\n for 
file in file_names:\n resume_text = self.readdocxFile(file)\n fulltexts.append(resume_text)\n titles.append(self.extract_jd_title(file))\n skills_all.append(self.extract_skills(resume_text))\n swe.append(self.extract_skills_with_experience(file))\n primary_skill, secondry_skill = self.extract_primary_secondry_skill(file)\n primary_skills.append(primary_skill)\n secondry_skills.append(secondry_skill)\n df_jds = pd.DataFrame()\n df_jds['title'] = titles\n df_jds['skills_all'] = skills_all\n df_jds['swe'] = swe\n df_jds['primary_skills'] = primary_skills\n df_jds['secondry_skills'] = secondry_skills\n return df_jds\n def get_file_nm(self, filenm):\n try:\n filenms = filenm.split('/')\n length = len(filenms) - 1\n file_name = filenms[length]\n except:\n file_name = filenm\n return file_name\n\n def getparsedjd(self,file):\n result={}\n resume_text = self.readdocxFile(file)\n file_name = self.get_file_nm(file)\n result['Filename'] = file_name\n result['Title'] = self.extract_jd_title(file)\n result['Skills_All'] = self.extract_skills(resume_text)\n primary_skill, secondry_skill = self.extract_primary_secondry_skill(file)\n result['Primary_Skills'] = primary_skill\n result['Secondry_Skills'] = secondry_skill\n result['Designation'] = self.extract_designation(resume_text)\n result['Skill_With_Experince'] = self.extract_skills_with_experience(file)\n result['Job_Type']=self.extract_job_type(resume_text)\n result['Location']=self.extract_location(resume_text)\n result['Notice_Period']=self.extract_notice_period(resume_text)\n return result\n\nif __name__ == \"__main__\":\n jdp = jdParsar()\n filename = sys.argv[1]\n # filename = \"/home/lid/Downloads/Job Description-20201121T112409Z-001/Job Description/Django Engineer_JD.docx\"\n print(jdp.getparsedjd(filename))\n\n\n\n\n" }, { "alpha_fraction": 0.6334026455879211, "alphanum_fraction": 0.6548856496810913, "avg_line_length": 17.259492874145508, "blob_id": "367cef85436031f8ab24b59acd79f8466cd3b1f8", "content_id": 
"b6bd05b9b64c5baa473fb47719b100d13ef4f1ae", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2886, "license_type": "no_license", "max_line_length": 84, "num_lines": 158, "path": "/README.md", "repo_name": "ShubhamTatvamasi/Uadyam", "src_encoding": "UTF-8", "text": "# Uadyam\n\n### Docker\n\ncreating new image from Dockerfile\n```bash\ndocker build -t shubhamtatvamasi/private:uadyam .\n```\n\nRun application\n```bash\ndocker run --rm -it -p 80:5000 shubhamtatvamasi/private:uadyam main.py\n```\n\nFor testing and building docker image\n```bash\ndocker run --rm -it -p 80:5000 \\\n -v ${PWD}:/usr/src/app \\\n -w /usr/src/app \\\n --entrypoint bash \\\n shubhamtatvamasi/private:uadyam\n```\n\nTag the docker image\n```bash\ndocker tag shubhamtatvamasi/private:uadyam shubhamtatvamasi/private:uadyam-1\n```\n\nPush the docker image\n```bash\ndocker push shubhamtatvamasi/private:uadyam-1\n```\n---\n\n### Kubernetes\n\ncreate deployment and service:\n```bash\nkubectl create deployment uadyam --image=shubhamtatvamasi/private:uadyam-18\nkubectl expose deployment uadyam --port=5000 --name=uadyam\n\n# update the image pull secret\nkubectl patch deployment uadyam \\\n --patch='{\n \"spec\": {\n \"template\": {\n \"spec\": {\n \"imagePullSecrets\":[\n {\n \"name\":\"docker-shubhamtatvamasi\"\n }\n ]\n }\n }\n }\n}'\n```\n\nIngress deployment\n```bash\nkubectl apply -f - << EOF\napiVersion: networking.k8s.io/v1beta1\nkind: Ingress\nmetadata:\n name: uadyam\n annotations:\n cert-manager.io/cluster-issuer: letsencrypt\nspec:\n tls:\n - hosts:\n - uadyam.k8s.shubhamtatvamasi.com\n secretName: letsencrypt-uadyam\n rules:\n - host: uadyam.k8s.shubhamtatvamasi.com\n http:\n paths:\n - backend:\n serviceName: uadyam\n servicePort: 5000\nEOF\n```\n\ndelete all\n```bash\nkubectl delete deploy/uadyam svc/uadyam ing/uadyam\n```\n---\n\nCreate Deployment:\n```yaml\nkubectl apply -f - << EOF\napiVersion: apps/v1\nkind: 
Deployment\nmetadata:\n name: uadyam\n labels:\n app: uadyam\nspec:\n replicas: 2\n selector:\n matchLabels:\n app: uadyam\n template:\n metadata:\n labels:\n app: uadyam\n spec:\n containers:\n - name: uadyam\n image: shubhamtatvamasi/private:uadyam-18\n imagePullSecrets:\n - name: docker-shubhamtatvamasi\nEOF\n```\n\nCreate service for Deployment:\n```bash\nkubectl expose deployment uadyam --port=5000 --name=uadyam\n```\n\nScale Deployment:\n```bash\nkubectl scale deployment uadyam --replicas=3\n```\n\n#### POD\n\nCreate a POD on k8s and expose it's service on NodePort 31001\n```bash\nkubectl run uadyam --image=shubhamtatvamasi/private:uadyam-18 --port=5000 --expose \\\n --overrides='{\n \"apiVersion\":\"v1\",\n \"spec\":{\n \"imagePullSecrets\":[\n {\n \"name\":\"docker-shubhamtatvamasi\"\n }\n ]\n }\n}'\n\n# Don't add these if using Ingress:\nkubectl patch svc uadyam \\\n --patch='{\"spec\": {\"type\": \"NodePort\"}}'\n\nkubectl patch svc uadyam \\\n --patch='{\"spec\": {\"ports\": [{\"nodePort\": 31001, \"port\": 5000}]}}'\n```\n\nUpdate the docker image\n```bash\nkubectl set image po uadyam uadyam=shubhamtatvamasi/private:uadyam-18\n```\n\nDelete deployment\n```bash\nkubectl delete pod/uadyam service/uadyam\n```\n\n" }, { "alpha_fraction": 0.4484536051750183, "alphanum_fraction": 0.6649484634399414, "avg_line_length": 13.923076629638672, "blob_id": "107e3f631ecce3136b9b6123d54f26a38d2db929", "content_id": "c7f9e7ab32b215e3ba5fdb6d1c862933cbf14ec1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 194, "license_type": "no_license", "max_line_length": 20, "num_lines": 13, "path": "/requirements.txt", "repo_name": "ShubhamTatvamasi/Uadyam", "src_encoding": "UTF-8", "text": "PyPDF2==1.26.0\ntextract==1.6.3\ndocx==0.2.4\nnltk==3.5\ngensim==3.8.3\npandas==1.1.4\nspacy==2.3.4\nscikit-learn==0.23.2\npython-docx==0.8.10\nword2number==1.1\nFlask==1.1.1\nflask-cors==3.0.9\ntika==1.24\n" }, { "alpha_fraction": 
0.5432395935058594, "alphanum_fraction": 0.5495137572288513, "avg_line_length": 39.9528923034668, "blob_id": "6db0aa10ce0a7603641bb2414f885e4bd4b1ba42", "content_id": "5fde4b8a4c95af2e9d1d01139c79b0120bd0a64a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 19126, "license_type": "no_license", "max_line_length": 141, "num_lines": 467, "path": "/resume_parsar.py", "repo_name": "ShubhamTatvamasi/Uadyam", "src_encoding": "UTF-8", "text": "import datetime\nimport math\nimport os\nimport re\nimport sys\nfrom pathlib import Path\n\nimport PyPDF2\nimport docx\nimport en_core_web_sm\nimport numpy as np\nimport pandas as pd\nimport textract\nfrom docx.opc.constants import RELATIONSHIP_TYPE as RT\nfrom word2number import w2n\n#from resume_parser import resumeparse\n#import tika\n#tika.initVM()\n#from tika import parser\n\nnlp = en_core_web_sm.load()\n\nfrom nltk.corpus import stopwords\nSTOPWORDS = set(stopwords.words('english'))\ncurdir = os.getcwd()\nsys.path.append(curdir)\nclass resumeParsar():\n def __init__(self):\n self.full_path = os.getcwd()\n self.data_path = str(Path(self.full_path).parents[0]) + '/app/data/'\n self.df = pd.read_csv(self.data_path+'Skills.csv')\n self.skills = list(self.df['skills'])\n #self.skills = [' {0} '.format(elem) for elem in self.skills]\n self.df_hobbies = pd.read_csv(self.data_path+ 'Hobbies.csv')\n self.hobbies = list(self.df_hobbies['hobbies'])\n self.hobbies = [' {0} '.format(elem) for elem in self.hobbies]\n self.education = ['B.E.', 'B.E', 'B.S',\n 'M.E', 'M.E.', 'M.S',\n 'BTECH', 'B.TECH','B.TECH', 'M.TECH',\n 'MTECH','BCA','MCA','BSC','MSC','B.S.C','M.S.C','M.C.A' \n ' SSC ', ' HSC ', 'CBSE', 'ICSE ', ' Xth ', ' XIIth ','10TH','12TH ',\n 'BACHELORS OF ENGINEERING','B.E',' PG ',' PGP ','PGPA','BBA','12th' ,'10th ',' CA '\n ,' CBSE ',' ICSE '\n ,' PGDBA ',' UNIVERISTY ',' SCHOOL ',' COLLEGE ']\n self.df_locations = pd.read_csv(self.data_path + 'Locations.csv')\n 
self.locations = list(self.df_locations['Cities'])\n self.locations = [' {0} '.format(elem) for elem in self.locations]\n\n\n def listToString(self,ls):\n str1 = \"\"\n for ele in ls:\n str1 += ele.strip() + ','\n return str1\n\n def getFileExtension(self,filename):\n extension = filename.split('.')[-1]\n return extension\n\n def checkExtension(self,filename):\n ValidExtension = [\"pdf\", \"docx\",\"doc\"]\n extension = self.getFileExtension(filename)\n if extension not in ValidExtension:\n print(\"Error:Not a valid File\")\n return False\n else:\n return True\n\n def readpdfFile(self,filename):\n content = \"\"\n pdfFileObj = open(filename, 'rb')\n pdfReader = PyPDF2.PdfFileReader(pdfFileObj)\n noofpages = pdfReader.numPages\n for i in range(0, noofpages):\n page = pdfReader.getPage(i)\n content_page = page.extractText()\n content = content + ' ' + content_page\n content = content.encode('utf-8')\n return content\n\n def iter_hyperlink_rels(self,rels):\n hls = \"\"\n for rel in rels:\n if rels[rel].reltype == RT.HYPERLINK:\n hls = hls + str(rels[rel]._target)\n return hls\n\n def readtables(self,filename):\n document = docx.Document(filename)\n tables = document.tables\n data = []\n if len(tables) > 0:\n data = []\n for table in tables:\n keys = None\n for i, row in enumerate(table.rows):\n text = (cell.text for cell in row.cells)\n\n if i == 0:\n keys = tuple(text)\n continue\n row_data = dict(zip(keys, text))\n data.append(row_data)\n return data\n\n def extracttabletextskills(self,filename):\n document = docx.Document(filename)\n tables = document.tables\n data = []\n table_text = \"\"\n if len(tables) > 0:\n data = []\n for table in tables:\n for row in table.rows:\n for cell in row.cells:\n for paragraph in cell.paragraphs:\n table_text = table_text + \" \" + paragraph.text\n if any(skill in str(table_text).lower() for skill in self.skills):\n break\n return table_text\n\n def extracttabletexteducation(self,filename):\n document = docx.Document(filename)\n 
tables = document.tables\n data = []\n table_text = \"\"\n if len(tables) > 0:\n data = []\n for table in tables:\n for row in table.rows:\n for cell in row.cells:\n for paragraph in cell.paragraphs:\n table_text = table_text + \" \" + paragraph.text\n if any(edu in str(table_text).lower() for edu in self.education):\n break\n return table_text\n else:\n return ''\n\n def readdocxFile(self,filename):\n doc = docx.Document(filename)\n fullText = \"\"\n\n for para in doc.paragraphs:\n text_para = (para.text)\n fullText = fullText + ' ' + text_para\n rels = doc.part.rels\n hls = self.iter_hyperlink_rels(rels)\n fullText = fullText + \" \" + hls\n return fullText\n\n def doc_to_docx(self,filename):\n text = textract.process(filename)\n text = text.decode(\"utf-8\")\n texts = text.split('\\n')\n doc = docx.Document()\n para_change_flag = 0\n para_text = ''\n\n for text in texts:\n # print(text)\n if text.strip() == '':\n para_change_flag = 1\n else:\n para_change_flag = 0\n if para_change_flag == 1 and para_text.strip() != '':\n doc_para = doc.add_paragraph(para_text)\n para_text = ''\n else:\n para_text = para_text + '\\n' + text\n new_filename = filename + 'x'\n doc.save(new_filename)\n return new_filename\n\n def extract_name(self,fulltext):\n name = re.findall(\"[\\dA-Za-z+\\' ']*\", fulltext)[0]\n return name\n\n def extract_mobile_number(self,text):\n phone = re.findall(re.compile('([0-9]{10}|[0-9]{4}\\s[0-9]{3}\\s[0-9]{3})'), text)\n if phone:\n number = ''.join(phone[0])\n if len(number) > 10:\n return '+' + number\n else:\n return number\n\n def extract_email(self,text):\n email = re.findall(r\"[a-z0-9\\.\\-+_]+@[a-z0-9\\.\\-+_]+\\.[a-z]+\", text)\n if email:\n try:\n return email\n except IndexError:\n return None\n\n def extract_primary_secondry_skill(self,filename):\n primary_skill = []\n secondry_skill = []\n count = 0\n doc = docx.Document(filename)\n for para in doc.paragraphs:\n text_para = (para.text)\n if any(skill in str(text_para).lower() for skill 
in self.skills) and count < 2:\n primary_skill.extend([skill for skill in self.skills if (skill in str(text_para).lower())])\n count = count + 1\n elif any(skill in str(text_para).lower() for skill in self.skills) and count > 2:\n secondry_skill.extend([skill for skill in self.skills if (skill in str(text_para).lower())])\n count = count + 1\n if len(primary_skill) > 3:\n primary_skill_final = primary_skill[0:2]\n secondry_skill.extend(primary_skill[3:len(primary_skill) - 1])\n else:\n primary_skill_final=primary_skill\n primary_skill = list(dict.fromkeys(primary_skill_final))\n secondry_skill = list(dict.fromkeys(secondry_skill))\n #return str(primary_skill).replace('[','').replace(']',''), str(secondry_skill).replace('[','').replace(']','')\n return self.listToString(primary_skill),self.listToString(secondry_skill)\n\n def extract_current_preferred_location(self,filename):\n count = 0\n Current_Location=[]\n Preferred_Location=[]\n doc = docx.Document(filename)\n for para in doc.paragraphs:\n text_para = (para.text)\n if any(str(location) in str(text_para).lower() for location in self.locations) and count < 2:\n Current_Location = [str(location) for location in self.locations if (str(location) in str(text_para).lower())]\n count = count + 1\n elif any(str(location) in str(text_para).lower() for location in self.locations) and count >= 2:\n Preferred_Location = [str(location) for location in self.locations if (str(location) in str(text_para).lower())]\n count = count + 1\n Current_Location = list(set(Current_Location))\n Preferred_Location = list(set(Preferred_Location))\n return self.listToString(Current_Location), self.listToString(Preferred_Location)\n\n def extract_location(self,resume_text):\n location_all = []\n if any(str(location) in str(resume_text).lower() for location in self.locations):\n location_all = [str(location) for location in self.locations if (str(location) in str(resume_text).lower())]\n return self.listToString(location_all)\n\n def 
extract_hobbies(self,resume_text):\n hobbies_all = []\n if any(str(hobby) in str(resume_text).lower() for hobby in self.hobbies):\n hobbies_all = [str(hobby) for hobby in self.hobbies if (str(hobby) in str(resume_text).lower())]\n return self.listToString(hobbies_all)\n\n def visa_check(self,resume_text):\n visa_all=[]\n visas = ['h1b','h1n1','l1','schengen']\n if any(str(visa) in str(resume_text).lower() for visa in visas):\n visa_all = [str(visa) for visa in visas if (str(visa) in str(resume_text).lower())]\n return self.listToString(visa_all)\n\n def extract_skills(self,resume_text):\n skills_all = ''\n if any(skill in str(resume_text).lower() for skill in self.skills):\n skills_all = [skill for skill in self.skills if (skill in str(resume_text).lower())]\n return self.listToString(skills_all)\n\n def extract_education(self,filename):\n ls_edu = []\n doc = docx.Document(filename)\n escape_text = ['personal', 'course', 'certification', 'certifications', 'certificate',\n 'declaration', 'declare',\n 'skill', 'project', 'experience', 'projects','company','companies']\n escape_text.extend(self.skills)\n for para in doc.paragraphs:\n text_para = (para.text)\n if any(ext in text_para.lower() for ext in escape_text):\n ls_edu = ls_edu\n elif any(edu.lower() in text_para.lower() for edu in self.education):\n ls_edu.append(text_para.strip())\n if len(ls_edu) > 0:\n return self.listToString(ls_edu)\n else:\n return self.listToString(ls_edu)\n\n def getexpr(self,filename):\n text_exp = \"\"\n exclude_text=['name','father','address','personal','married','single']\n exclude_text.extend(self.education)\n exclude_text.extend(self.locations)\n doc = docx.Document(filename)\n for para in doc.paragraphs:\n text_para = (para.text)\n match = re.findall(r'.*(\\s[1-3][0-9]{3}|3000)', text_para)\n if match:\n if any(ext.lower() in text_para.lower() for ext in exclude_text):\n text_exp = text_exp\n else:\n text_exp = text_exp + \"\\n\" + text_para + \" \"\n return text_exp\n\n def 
project_details(self,filename):\n doc = docx.Document(filename)\n project_text = ''\n exclude_texts = ['education', 'skill', 'certificate', 'windows', 'win', 'version', 'university', 'school',\n 'college']\n exclude_texts.extend(self.education)\n start_para = 6\n para_size = len(doc.paragraphs)\n end_para = math.ceil(para_size * 80 / 100)\n for para in doc.paragraphs[start_para:end_para]:\n text_para = (para.text)\n if any(et.lower() in str(text_para).lower() for et in exclude_texts):\n project_text = project_text\n else:\n project_text = project_text + ' ' + text_para\n return project_text\n\n def get_project_regex(self,filename):\n pro_text = []\n project_texts = ['project']\n exclude_texts = ['education', 'skill', 'experience']\n exclude_texts.extend(self.education)\n doc = docx.Document(filename)\n for para in doc.paragraphs:\n text_para = (para.text)\n if any(pt in str(text_para).lower() for pt in project_texts):\n if any(et in str(text_para).lower() for et in exclude_texts):\n pro_text = pro_text\n else:\n pro_text.append(text_para.replace('project', ''))\n return pro_text\n\n def extractoverall_experience_through_yrs(self,filename):\n doc = docx.Document(filename)\n now = datetime.datetime.now()\n curr_yr = now.year\n max_working_yr = curr_yr - 40\n yrs = []\n exclude_texts = ['education', 'skill', 'certificate', 'windows', 'win', 'version']\n exclude_texts.extend(self.education)\n for para in doc.paragraphs:\n text_para = (para.text)\n match = re.findall(r'.*(\\s[1-3][0-9]{3}|3000)', text_para)\n if match:\n if any(et.lower() in str(text_para).lower() for et in exclude_texts):\n yrs = yrs\n else:\n yrs.extend(match)\n if len(yrs) > 0:\n yrs = [str(yr).strip() for yr in yrs if\n (str(yr).strip() > str(max_working_yr) and str(yr).strip() <= str(curr_yr))]\n yrs.append(curr_yr)\n yrs = list(set(yrs))\n yrs = [int(yr) for yr in yrs]\n yrs.sort()\n ex_limt = np.mean(np.diff(yrs)) + 2\n ex_limit_pos = []\n pos = 0\n for i in np.diff(yrs):\n if i > ex_limt:\n 
ex_limit_pos.append(pos)\n pos = pos + 1\n for pos in ex_limit_pos:\n del yrs[pos]\n overall_exp = max(yrs) - min(yrs)\n else:\n overall_exp = ''\n return str(overall_exp) + ' years'\n\n def extract_overall_experience(self,filename):\n doc = docx.Document(filename)\n yrs=[]\n for para in doc.paragraphs:\n text_para = (para.text)\n exp_tokens = ['yrs','years','year','exp','experience']\n for para in doc.paragraphs:\n text_para = (para.text)\n yrs=[int(i) for i in text_para.split() if i.isdigit() and int(i)<40]\n try:\n flag = w2n.word_to_num(text_para)\n if flag > 100:\n flag = 0\n except:\n flag = 0\n if any(ext in text_para.lower() for ext in exp_tokens) and (len(yrs)>0 or flag >0):\n yrs.append(flag)\n if len(yrs)>0 and max(yrs) >0:\n return str(max(yrs))+' '+'years'\n else:\n return self.extractoverall_experience_through_yrs(filename)\n #return ''\n\n def get_file_nm(self, filenm):\n try:\n filenms = filenm.split('/')\n length = len(filenms) - 1\n file_name = filenms[length]\n except:\n file_name = filenm\n return file_name\n\n def parse_direct(self,filename):\n '''try:\n result_direct = resumeparse.read_file(filename)\n return result_direct\n except:'''\n return {\"degree\":[],\"designition\":[],\"email\":\"\",\"name\":\"\",\"phone\":\"\",\"skills\":[],\"total_exp\":'NA',\"university\":[]}\n\n def generate_resume_result(self,filename):\n result = {}\n if self.checkExtension(filename):\n extension = self.getFileExtension(filename)\n if extension == 'docx' or extension == 'doc':\n if extension == 'doc':\n filename = self.doc_to_docx(filename)\n fulltext = self.readdocxFile(filename)\n tables = self.readtables(filename)\n else:\n fulltext = self.readpdfFile(filename)\n tabular_data = self.readtables(filename)\n result_direct = self.parse_direct(filename)\n if self.extract_name(fulltext) and self.extract_name(fulltext).strip()!='':\n result['Name'] = self.extract_name(fulltext)\n else:\n result['Name'] = 
str(result_direct['name']).replace('Phone','').replace('phone','').replace('Mobile','').replace('mobile','')\n\n if self.extract_mobile_number(fulltext) and self.extract_mobile_number(fulltext).strip()!='':\n result['Mobile_number'] = self.extract_mobile_number(fulltext)\n else:\n result['Mobile_number'] = result_direct['phone']\n if self.extract_email(fulltext) and len(self.extract_email(fulltext)) > 0:\n result['Email'] = self.extract_email(fulltext)\n else:\n result['Email'] = result_direct['email']\n if self.extract_skills(fulltext):\n result['Skills_All'] = self.extract_skills(fulltext)\n elif len(self.extracttabletextskills(fulltext, self.skills)) > 0:\n result['Skills_All'] = self.extracttabletextskills(fulltext, self.skills)\n if self.extract_education(filename) and len(self.extract_education(filename)) >0:\n result['Education'] = self.extract_education(filename)\n elif len(self.extracttabletexteducation(filename)) > 0:\n result['Education'] = self.extracttabletexteducation(filename)\n else:\n result['Education'] = result_direct['degree']\n result['Experience'] = self.getexpr(filename)\n Primary_skills, Secondary_skills = self.extract_primary_secondry_skill(filename)\n Current_Location,Preferred_Location = self.extract_current_preferred_location(filename)\n Overall_Experience = self.extract_overall_experience(filename)\n result['Primary_Skills'] = Primary_skills\n result['Secondary_Skills'] = Secondary_skills\n result['Current_Location'] = Current_Location\n result['Preferred_Location'] = self.extract_location(fulltext)\n result['Projects'] = self.project_details(filename)\n if Overall_Experience and Overall_Experience.strip()!='':\n result['Overall_Experience'] = Overall_Experience\n else:\n result['Overall_Experience'] = result_direct['total_exp']\n result['Designation'] = result_direct['designition']\n result['Hobbies'] = self.extract_hobbies(fulltext)\n result['Visa'] = self.visa_check(fulltext)\n result['Filename'] = self.get_file_nm(filename)\n return 
result\n\n\n\n\nif __name__ == \"__main__\":\n rp = resumeParsar()\n # filename= \"//home/lid/Downloads//Omar_Nour_CV.docx\"\n filename = sys.argv[1]\n print(filename)\n result = rp.generate_resume_result(filename)\n print(result)\n\n" }, { "alpha_fraction": 0.595204770565033, "alphanum_fraction": 0.6083915829658508, "avg_line_length": 33.75694274902344, "blob_id": "dadad71b16494f3222027282cdfaa4e47f76d9cf", "content_id": "49ca2744a379f884fd5160c31d65ec347def5b5c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5005, "license_type": "no_license", "max_line_length": 87, "num_lines": 144, "path": "/main.py", "repo_name": "ShubhamTatvamasi/Uadyam", "src_encoding": "UTF-8", "text": "import os\nimport urllib.request\nfrom app import app\nfrom flask import Flask, request, redirect, jsonify\nfrom werkzeug.utils import secure_filename\nfrom resume_parsar import resumeParsar\nfrom jd_parsar import jdParsar\nfrom matcher import matcher\nfrom matcher_bulk import matcher_bulk\n\nALLOWED_EXTENSIONS = set(['docx','doc'])\nrp = resumeParsar()\njdp = jdParsar()\nmp = matcher()\nmb = matcher_bulk()\n\n\ndef allowed_file(filename):\n return '.' 
in filename and filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS\n\n\n@app.route('/Resume_Parsar', methods=['POST'])\ndef resume_parsar():\n # check if the post request has the file part\n if 'file' not in request.files:\n resp = jsonify({'message': 'No file part in the request'})\n resp.status_code = 400\n return resp\n file = request.files['file']\n # print(file)\n if file.filename == '':\n resp = jsonify({'message': 'No file selected for uploading'})\n resp.status_code = 401\n return resp\n if file and allowed_file(file.filename):\n # print('in')\n filename = secure_filename(file.filename)\n file.save(os.path.join(app.config['RESUME_FOLDER'], filename))\n full_file_name = str(app.config['RESUME_FOLDER']) + '/' + filename\n print(type(full_file_name))\n print(full_file_name)\n resp = rp.generate_resume_result(full_file_name)\n resp['status_code'] = 200\n return jsonify(resp)\n else:\n resp = jsonify({'message': 'Allowed file types are docx'})\n resp.status_code = 400\n return resp\n\n\n@app.route('/Jd_Parsar', methods=['POST'])\ndef jd_parsar():\n # check if the post request has the file part\n if 'file' not in request.files:\n resp = jsonify({'message': 'No file part in the request'})\n resp.status_code = 400\n return resp\n file = request.files['file']\n # print(file)\n if file.filename == '':\n resp = jsonify({'message': 'No file selected for uploading'})\n resp.status_code = 401\n return resp\n if file and allowed_file(file.filename):\n # print('in')\n filename = secure_filename(file.filename)\n file.save(os.path.join(app.config['JD_FOLDER'], filename))\n full_file_name = str(app.config['JD_FOLDER']) + '/' + filename\n resp = jdp.getparsedjd(full_file_name)\n resp['status_code'] = 200\n return jsonify(resp)\n else:\n resp = jsonify({'message': 'Allowed file types are docx'})\n resp.status_code = 400\n return resp\n\n\n@app.route('/Matcher', methods=['POST'])\ndef matcher():\n # check if the post request has the file part\n if 'resume_file' not in 
request.files:\n resp = jsonify({'message': 'No resume file in the request'})\n resp.status_code = 400\n return resp\n if 'jd_file' not in request.files:\n resp = jsonify({'message': 'No jd file in the request'})\n resp.status_code = 400\n return resp\n resume_file = request.files.getlist(\"resume_file\")\n jd_file = request.files.getlist(\"jd_file\")\n print('**********')\n print(resume_file)\n print(\"*****************\")\n print(type(jd_file[0]))\n if jd_file and allowed_file(jd_file[0].filename):\n filename = secure_filename(jd_file[0].filename)\n jd_file[0].save(os.path.join(app.config['JD_FOLDER'], filename))\n jd_file_name = str(app.config['JD_FOLDER']) + '/' + filename\n resp_jd = jdp.getparsedjd(jd_file_name)\n else:\n resp = jsonify({'message': 'Allowed file types for jd are docx'})\n resp.status_code = 400\n return resp\n if resume_file and allowed_file(resume_file[0].filename):\n filename = secure_filename(resume_file[0].filename)\n resume_file[0].save(os.path.join(app.config['RESUME_FOLDER'], filename))\n resume_file_name = str(app.config['RESUME_FOLDER']) + '/' + filename\n resp_resume = rp.generate_resume_result(resume_file_name)\n else:\n resp = jsonify({'message': 'Allowed file types for resume are docx'})\n resp.status_code = 400\n return resp\n resp = mp.get_similarity_overall(resp_jd, resp_resume, mp.skill_2_vec)\n resp['status_code']=200\n return jsonify(resp)\n\n@app.route('/Matcher_Bulk', methods=['POST'])\ndef Matcher_Bulk():\n if 'resume_ls' in request.json:\n resumes = request.json['resume_ls']\n if 'jd_ls' in request.json:\n jds = request.json['jd_ls']\n result = mb.match_bulk(resumes, jds)\n print(result)\n resp = jsonify({'result': result})\n print(resp)\n resp.status_code = 200\n return resp\n else:\n resp = jsonify({'message': 'No jd output in the request'})\n resp.status_code = 400\n return resp\n else:\n resp = jsonify({'message': 'No resume output in the request'})\n resp.status_code = 400\n return resp\n\n\n\n\n\nif __name__ == 
\"__main__\":\n # app.debug = True\n app.run(host='0.0.0.0', port=5000)\n" }, { "alpha_fraction": 0.574999988079071, "alphanum_fraction": 0.7099999785423279, "avg_line_length": 24, "blob_id": "89e0d5160f341e2868a359c2f77c48ef79feba16", "content_id": "590c0c771ad537b3453b52bb9c1c3513233609e4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 200, "license_type": "no_license", "max_line_length": 89, "num_lines": 8, "path": "/test_app.py", "repo_name": "ShubhamTatvamasi/Uadyam", "src_encoding": "UTF-8", "text": "import requests\n\nurl = 'http://127.0.0.1:5000/jd_parsar'\nmyobj = \"//home//lid//Downloads//Job Description-20201121T112409Z-001//Job Description//\"\n\nx = requests.post(url, data = myobj)\n\nprint(x.text)\n" } ]
11
gleb89/shitcoinback
https://github.com/gleb89/shitcoinback
d5bad91adc160fa32c8fd08c35f12da0dce1bf9f
fb5172a6b1d978efeef04b40b306ecc072091112
d7c3160133c1bb2abe37e32f2eac8dac1977f03b
refs/heads/main
2023-05-06T20:36:18.512323
2021-06-19T11:38:43
2021-06-19T11:38:43
366,483,659
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5378972887992859, "alphanum_fraction": 0.591687023639679, "avg_line_length": 21.72222137451172, "blob_id": "a0de981fca60ff6e63d68daba24428de58f32d43", "content_id": "66913ba4790457ccc372090cacb4d3f04225d81c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 409, "license_type": "no_license", "max_line_length": 72, "num_lines": 18, "path": "/apps/coins/migrations/0008_alter_exchange_slug.py", "repo_name": "gleb89/shitcoinback", "src_encoding": "UTF-8", "text": "# Generated by Django 3.2.1 on 2021-06-03 11:10\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('coin', '0007_alter_coins_market_cap'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='exchange',\n name='slug',\n field=models.SlugField(max_length=100, verbose_name='slug'),\n ),\n ]\n" }, { "alpha_fraction": 0.5618686676025391, "alphanum_fraction": 0.6085858345031738, "avg_line_length": 28.33333396911621, "blob_id": "95f0f44349adb2ffae5bc929269c633adf40994d", "content_id": "7dad5cd191735dd2b1b3a4e915cf188f09f54c2f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 792, "license_type": "no_license", "max_line_length": 123, "num_lines": 27, "path": "/apps/comments/migrations/0005_auto_20210514_0716.py", "repo_name": "gleb89/shitcoinback", "src_encoding": "UTF-8", "text": "# Generated by Django 3.2.1 on 2021-05-14 07:16\n\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('contenttypes', '0002_remove_content_type_name'),\n ('comment', '0004_auto_20210514_0703'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='comments',\n name='content_type',\n field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to='contenttypes.contenttype'),\n preserve_default=False,\n ),\n 
migrations.AddField(\n model_name='comments',\n name='object_id',\n field=models.PositiveIntegerField(default=2),\n preserve_default=False,\n ),\n ]\n" }, { "alpha_fraction": 0.5663634538650513, "alphanum_fraction": 0.5735945701599121, "avg_line_length": 23.220338821411133, "blob_id": "eddadc97dd59abde16b03b991aa3e7e9c247e6a4", "content_id": "8a627ac7a01bba8fd4352b0c72b9af9770381c32", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4296, "license_type": "no_license", "max_line_length": 98, "num_lines": 177, "path": "/apps/coins/service.py", "repo_name": "gleb89/shitcoinback", "src_encoding": "UTF-8", "text": "import datetime\nimport time\n\n\nfrom requests import Session\n\n\nfrom .models import Coins, Exchange\n\nheaders = {\n 'Accepts': 'application/json'\n\n}\n\nsession = Session()\nsession.headers.update(headers)\ndelay = 4500\n\n\ndef save_exchange(pk):\n\n \"\"\" save in database data exchange\"\"\"\n\n url = f'https://api.coingecko.com/api/v3/exchanges/{pk}'\n data = session.get(url)\n response_data = data.json()\n name = response_data['name'].lower()\n image = response_data['image']\n slug = response_data['name']\n trade_url = response_data['url']\n exchange = Exchange(\n name=name,\n image=image,\n slug=slug,\n trade_url=trade_url\n )\n exchange.save()\n print('сохранено-', exchange.name)\n return exchange\n\n\ndef get_exchange(pk):\n\n \"\"\"\n get exchange data for name exchange\n \"\"\"\n\n url = f'https://api.coingecko.com/api/v3/exchanges/{pk}'\n data = session.get(url)\n print(data, pk)\n echange_db = Exchange.objects.filter(name=pk).first()\n if echange_db:\n return\n else:\n try:\n return save_exchange(pk)\n except:\n print('none')\n\n\ndef get_exchanges_list():\n\n \"\"\"\n get names list coins all \n \"\"\"\n \n counter = 0\n url = 'https://api.coingecko.com/api/v3/exchanges/list'\n response_exchanges = session.get(url)\n data = response_exchanges.json()\n for exchange in data:\n counter = 
counter+1\n exchange_pk = exchange['id']\n print(counter)\n time.sleep(2)\n get_exchange(exchange_pk)\n\n\ndef get_chart_data(id):\n\n \"\"\"\n history data (price 7d) for coin name\n \"\"\"\n\n list_price_7d = {}\n days = 7\n today_date = datetime.date.today() - datetime.timedelta(days=days)\n\n while days >= 1:\n url_price_7d = f'''\n https://api.coingecko.com/api/v3/coins/{id}/history?date={today_date.strftime(\"%d-%m-%Y\")}\n '''\n response_price_7d = session.get(url_price_7d)\n data_price = response_price_7d.json()\n data_price_today = data_price['market_data']['current_price']['usd']\n today_date = datetime.date.today() - datetime.timedelta(days=days-1)\n list_price_7d[str(days)] = data_price_today\n days = days - 1\n return list_price_7d\n\n\ndef update_price_coin(coin_symbol):\n\n \"\"\"\n request for name_coin data\n \"\"\"\n\n name_coin = coin_symbol.lower()\n url = f'https://api.coingecko.com/api/v3/coins/{name_coin}/'\n response = session.get(url)\n data = response.json()\n price_7d = get_chart_data(coin_symbol)\n price = data['market_data']['current_price']['usd']\n market_cap = data['market_data']['market_cap']['usd']\n volume = int(data['market_data']['total_volume']['usd'])\n image = str(data['image']['small'])\n price_exc = int(data['market_data']['price_change_percentage_24h'])\n\n return price, market_cap, volume, image, price_exc, price_7d\n\n\ndef get_update_price_coins():\n \"\"\"\n update data for coins\n \"\"\"\n\n for coin in Coins.objects.all():\n print(coin)\n time.sleep(5)\n try:\n (\n coin.price,\n coin.market_cap,\n coin.volume,\n coin.image,\n coin.price_exc,\n coin.board_price\n ) = update_price_coin(coin.name)\n coin.save()\n except:\n print('except')\n\n\ndef add_market_for_coin(market_id, coin):\n\n \"\"\"\n add many to many Exchange for coin\n \"\"\"\n\n exchange = Exchange.objects.filter(name=market_id).first()\n print(exchange, market_id)\n if not exchange:\n try:\n new_exchange = save_exchange(market_id)\n 
coin.market_exchange.add(new_exchange)\n except:\n print('error')\n else:\n print('add')\n coin.market_exchange.add(exchange)\n\n\ndef get_market_coins(coins):\n\n \"\"\"\n get markets for coin id\n \"\"\"\n\n for coin in coins:\n url = f'https://api.coingecko.com/api/v3/coins/{coin}/tickers'\n response = session.get(url)\n data = response.json()\n for market in data['tickers']:\n print(coin)\n market_name = market['market']['identifier']\n time.sleep(2)\n add_market_for_coin(market_name.lower(), coin)\n" }, { "alpha_fraction": 0.502170741558075, "alphanum_fraction": 0.7018813490867615, "avg_line_length": 16.71794891357422, "blob_id": "d9758c140b7d139b46e44ce7ef9611f39eb94243", "content_id": "7af5d4d144f27587346fdc61155eb4b462d5f880", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 691, "license_type": "no_license", "max_line_length": 36, "num_lines": 39, "path": "/requirements.txt", "repo_name": "gleb89/shitcoinback", "src_encoding": "UTF-8", "text": "amqp==5.0.6\nasgiref==3.3.4\nautopep8==1.5.7\nbilliard==3.6.4.0\ncelery==5.1.1\ncertifi==2020.12.5\nchardet==4.0.0\nclick==7.1.2\nclick-didyoumean==0.0.3\nclick-plugins==1.1.1\nclick-repl==0.2.0\nDjango==3.2.1\ndjango-cors-headers==3.7.0\ndjango-filter==2.4.0\ndjangorestframework==3.12.4\ndjangorestframework-jwt==1.11.0\ndjangorestframework-simplejwt==4.6.0\ngunicorn==20.1.0\nidna==2.10\nkombu==5.1.0\nMarkdown==3.3.4\nPillow==8.2.0\nprompt-toolkit==3.0.18\npsycopg2-binary==2.8.6\npycodestyle==2.7.0\nPyJWT==1.7.1\npython-dotenv==0.17.1\npytz==2021.1\nPyYAML==5.4.1\nredis==3.5.3\nrequests==2.25.1\nsix==1.16.0\nsqlparse==0.4.1\ntoml==0.10.2\nuritemplate==3.0.1\nurllib3==1.26.4\nuWSGI==2.0.19.1\nvine==5.0.0\nwcwidth==0.2.5\n" }, { "alpha_fraction": 0.645487368106842, "alphanum_fraction": 0.645487368106842, "avg_line_length": 29.44444465637207, "blob_id": "4874ef1f0a06bff9f0812f1e81e176430e942cd7", "content_id": "ac58a485c56d9a12d1c9d764cf4c11051f0d97c9", 
"detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1472, "license_type": "no_license", "max_line_length": 90, "num_lines": 45, "path": "/apps/comments/views.py", "repo_name": "gleb89/shitcoinback", "src_encoding": "UTF-8", "text": "from django.shortcuts import render\n\nfrom rest_framework import viewsets\nfrom rest_framework.response import Response\nfrom rest_framework.permissions import IsAuthenticated,AllowAny\n\nfrom .models import Comments\nfrom .serializer import CommentsSerializer, CommentsPostSerializer\n\n\nclass CommentsViewSet(viewsets.ModelViewSet):\n\n queryset = Comments.objects.all()\n serializer_class = CommentsPostSerializer\n # # permission_classes = (IsAuthenticated,)\n def list(self, request):\n \"\"\"\n если есть get параметры :\n -вывод списка коментариев по id coin\n иначе:\n -вывод списка всех обьектов\n\n \"\"\"\n params = self.request.query_params\n if params:\n coin_id = params['coin_id']\n queryset = Comments.objects.filter(object_id=coin_id).order_by('-updated')\n\n else:\n queryset = Comments.objects.all().order_by('updated')\n serializer = CommentsSerializer(queryset, many=True, read_only=True)\n\n return Response(serializer.data)\n\n # def create(self, request):\n # \"\"\" \n\n # создание коментария\n\n # \"\"\"\n \n # serializer = CommentsPostSerializer(data=request.data, many=True,read_only=True)\n # serializer.is_valid(raise_exception=True)\n # serializer.save()\n # return Response(serializer.data)\n \n \n\n\n\n\n" }, { "alpha_fraction": 0.6063055992126465, "alphanum_fraction": 0.6063055992126465, "avg_line_length": 21.436363220214844, "blob_id": "a23dbe893a0d476be872eccb1aa0b4fd4f2cc61e", "content_id": "71f227c1bb5e05b7de0ae422fd62efc5789d2033", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1276, "license_type": "no_license", "max_line_length": 78, "num_lines": 55, "path": "/apps/comments/serializer.py", 
"repo_name": "gleb89/shitcoinback", "src_encoding": "UTF-8", "text": "from rest_framework import serializers\n\nfrom .models import Comments\n\n\nclass RecursiveSerializer(serializers.Serializer):\n \"\"\"\n\n Вывод рекурсивно children\n\n \"\"\"\n\n def to_representation(self, value):\n serializer = self.parent.parent.__class__(value, context=self.context)\n return serializer.data\n\n\nclass FilterReviewListSerializer(serializers.ListSerializer):\n \"\"\"\n\n Фильтр комментариев, только parents\n \n \"\"\"\n\n def to_representation(self, data):\n data = data.filter(parent=None)\n return super().to_representation(data)\n\n\nclass CommentsSerializer(serializers.ModelSerializer):\n\n children = RecursiveSerializer(many=True, allow_null=True)\n\n class Meta:\n list_serializer_class = FilterReviewListSerializer\n model = Comments\n fields = '__all__'\n\n\nclass CommentsPostSerializer(serializers.ModelSerializer):\n children = [],\n class Meta:\n model = Comments\n fields = [\n 'id',\n 'user_id',\n 'text_comment',\n 'children',\n 'user_parent',\n 'object_id',\n 'parent',\n 'content_type',\n 'updated'\n ]\n read_only_fields = ['children']\n\n\n\n" }, { "alpha_fraction": 0.6488147377967834, "alphanum_fraction": 0.6540825366973877, "avg_line_length": 48.5217399597168, "blob_id": "cd5d7fdadd2740fb1fc38e872ce1847eaaef8427", "content_id": "6a1a1d6231daf8714d1b6c834ef16bb93ef88457", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1214, "license_type": "no_license", "max_line_length": 100, "num_lines": 23, "path": "/apps/comments/models.py", "repo_name": "gleb89/shitcoinback", "src_encoding": "UTF-8", "text": "from django.contrib import contenttypes\nfrom django.contrib.auth.models import User\nfrom django.conf import settings\nfrom django.contrib.contenttypes.models import ContentType\nfrom django.contrib.contenttypes.fields import GenericForeignKey\nfrom django.db import models\n\n\nclass Comments(models.Model):\n 
user_id = models.CharField('Юсер комментария', max_length=255)\n text_comment = models.TextField(verbose_name='текст коментария')\n parent = models.ForeignKey('self',\n verbose_name='Родительский коментарий',\n blank=True,\n null=True,\n related_name='children',\n on_delete=models.CASCADE\n )\n user_parent = models.CharField('Юсер комментария родителя', max_length=255,null=True,blank=True)\n content_type = models.ForeignKey(ContentType, on_delete=models.CASCADE)\n object_id = models.PositiveIntegerField()\n content_object = GenericForeignKey('content_type', 'object_id')\n updated = models.DateTimeField(auto_now=True, auto_now_add=False)\n" }, { "alpha_fraction": 0.6740740537643433, "alphanum_fraction": 0.6740740537643433, "avg_line_length": 18.285715103149414, "blob_id": "7e0e3dcdcc3c3b882ea635e0524497cd544e7055", "content_id": "d79cf861041e4a87e44957abdf4541abd8159f8e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 135, "license_type": "no_license", "max_line_length": 33, "num_lines": 7, "path": "/apps/coins/apps.py", "repo_name": "gleb89/shitcoinback", "src_encoding": "UTF-8", "text": "from django.apps import AppConfig\n\n\nclass CoinsConfig(AppConfig):\n name = 'apps.coins'\n verbose_name = 'Coin'\n label = 'coin'\n" }, { "alpha_fraction": 0.6590909361839294, "alphanum_fraction": 0.6613636612892151, "avg_line_length": 19.952381134033203, "blob_id": "74ed16a42e2a781d32b187d831e0966f17597450", "content_id": "cca5b5d10fa51651f025fbea6738c1581c2e8874", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 450, "license_type": "no_license", "max_line_length": 54, "num_lines": 21, "path": "/apps/coins/serializer.py", "repo_name": "gleb89/shitcoinback", "src_encoding": "UTF-8", "text": "from django.contrib.auth.models import User, Group\nfrom rest_framework import serializers\n\n\nfrom .models import Coins, Exchange\n\n\n\nclass 
ExchangeSerializer(serializers.ModelSerializer):\n\n class Meta:\n model = Exchange\n fields = ['id', 'name', 'slug','image']\n\n\nclass CoinsSerializer(serializers.ModelSerializer):\n \"\"\" Coin Коментарий\"\"\"\n class Meta:\n model = Coins\n fields = '__all__'\n depth = 1\n" }, { "alpha_fraction": 0.5607476830482483, "alphanum_fraction": 0.605140209197998, "avg_line_length": 22.77777862548828, "blob_id": "e99c482f1f6fa5ae26019b44fe4ee0ed26ef6d9d", "content_id": "f352258f18763265c21825428e60dd804dd40cd4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 441, "license_type": "no_license", "max_line_length": 91, "num_lines": 18, "path": "/apps/coins/migrations/0005_alter_coins_market_cap.py", "repo_name": "gleb89/shitcoinback", "src_encoding": "UTF-8", "text": "# Generated by Django 3.2.1 on 2021-06-02 15:46\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('coin', '0004_alter_exchange_slug'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='coins',\n name='market_cap',\n field=models.IntegerField(blank=True, null=True, verbose_name='Капитализация'),\n ),\n ]\n" }, { "alpha_fraction": 0.5401844382286072, "alphanum_fraction": 0.5757575631141663, "avg_line_length": 22.244897842407227, "blob_id": "1bd3cdf270d63c3b20879d9d31ab270dafe79db1", "content_id": "8a5de6f340523512ccf6e68ec118237f95831ce1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "YAML", "length_bytes": 2277, "license_type": "no_license", "max_line_length": 111, "num_lines": 98, "path": "/docker-compose.yml", "repo_name": "gleb89/shitcoinback", "src_encoding": "UTF-8", "text": "version: '3'\n\nservices:\n pgdata:\n restart: always\n image: \"postgres:13-alpine\"\n ports:\n - \"5432\"\n volumes:\n - ./db_data/:/var/lib/postgresql/data/\n environment:\n - \"POSTGRES_HOST_AUTH_METHOD=trust\"\n - POSTGRES_USER=${POSTGRES_USER}\n - 
POSTGRES_PASSWORD=${POSTGRES_PASSWORD}\n - POSTGRES_DB=${POSTGRES_DB}\n # - DB_HOST=0.0.0.0\n networks:\n - backands_network\n\n\n djapi:\n restart: always\n build: .\n depends_on:\n - pgdata\n command: gunicorn config.wsgi --bind 0.0.0.0:80\n # command: python manage.py runserver 0.0.0.0:8080 \n\n networks:\n - backands_network\n\n nginx:\n image: nginx:1.13\n restart: always\n volumes:\n - ./data/nginx:/etc/nginx/conf.d\n - ./data/certbot/conf:/etc/letsencrypt\n - ./data/certbot/www:/var/www/certbot\n depends_on: \n - djapi\n networks:\n - backands_network\n ports:\n - \"80:80\"\n - \"443:443\"\n command: \"/bin/sh -c 'while :; do sleep 6h & wait $${!}; nginx -s reload; done & nginx -g \\\"daemon off;\\\"'\"\n\n redis:\n image: redis:alpine\n depends_on: \n - djapi\n networks:\n - backands_network\n \n celery:\n build: .\n command: celery -A config worker -l info\n # volumes:\n # - ./project/:/usr/src/app/\n environment:\n # - DEBUG=1\n - SECRET_KEY=django-insecure-s%2p*3c2-qj89ew(a2%oy#5ntt8ee*u^2v9=n_vd-e!*1l#-8u\n - DJANGO_ALLOWED_HOSTS=localhost 127.0.0.1 [::1]\n depends_on:\n - redis\n networks:\n - backands_network\n \n celery-beat:\n build: .\n command: celery -A config beat -l info\n # volumes:\n # - ./project/:/usr/src/app/\n environment:\n # - DEBUG=1\n - SECRET_KEY=django-insecure-s%2p*3c2-qj89ew(a2%oy#5ntt8ee*u^2v9=n_vd-e!*1l#-8u\n - DJANGO_ALLOWED_HOSTS=localhost 127.0.0.1 [::1]\n depends_on:\n - redis\n networks:\n - backands_network\n\n\n certbot:\n image: certbot/certbot\n restart: unless-stopped\n volumes:\n - ./data/certbot/conf:/etc/letsencrypt\n - ./data/certbot/www:/var/www/certbot\n entrypoint: \"/bin/sh -c 'trap exit TERM; while :; do certbot renew; sleep 12h & wait $${!}; done;'\"\n \n\nvolumes:\n pgdata: \n\nnetworks:\n backands_network:\n driver: bridge" }, { "alpha_fraction": 0.5832217931747437, "alphanum_fraction": 0.5983935594558716, "avg_line_length": 35.14516067504883, "blob_id": "90040e50343856a41eea88b946d51e0c3ba109df", 
"content_id": "33011f27ba074010a22f5703bb275ab6c6fea196", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2388, "license_type": "no_license", "max_line_length": 89, "num_lines": 62, "path": "/apps/coins/models.py", "repo_name": "gleb89/shitcoinback", "src_encoding": "UTF-8", "text": "from django.db import models\nfrom django.urls import reverse\n\nfrom django.contrib.contenttypes.fields import GenericRelation\nfrom apps.comments.models import Comments\n\nclass Exchange(models.Model):\n name = models.CharField('Название рынка', max_length=255)\n image = models.CharField('Изображение биржи', max_length=255, null=True)\n slug = models.SlugField('slug', max_length=100)\n trade_url = models.CharField('link', max_length=255)\n\n def __str__(self):\n return self.name\n\n def get_absolute_url(self):\n return reverse('еxchange:slug', kwargs={'slug': self.slug})\n\n class Meta:\n \n verbose_name = 'Рынок'\n verbose_name_plural = 'Рынки'\n\n\n\nclass Coins(models.Model):\n symbol = models.CharField('Символ монеты', max_length=55, null=True)\n name = models.CharField('Название монеты', max_length=255)\n description = models.TextField('Описание монеты', blank=True, null=True)\n image = models.CharField('Изображение монеты', max_length=255, null=True)\n updated = models.DateTimeField(auto_now=True, auto_now_add=False)\n price = models.DecimalField('Цена', max_digits=12, decimal_places=10)\n market_cap = models.FloatField('Капитализация', blank=True, null=True)\n volume = models.TextField('Обьемы', blank=True, null=True)\n market_exchange = models.ManyToManyField(\n Exchange,\n related_name=\"market_list\",\n verbose_name='торгуется на рынках',\n blank=True\n \n )\n price_exc = models.CharField('Изменение % 24ч', max_length=255,blank=True, null=True)\n board_price = models.JSONField(encoder=None)\n comments = GenericRelation(Comments)\n \n \n\n def __str__(self):\n return self.name\n\n\n class Meta:\n verbose_name = 
'Coin'\n verbose_name_plural = 'Coins'\n\n\nclass Note(models.Model):\n \"\"\"\n A note consists of some text, and 0 or more descriptive tags.\n \"\"\"\n text = models.CharField(max_length=1000)\n tags = GenericRelation(Comments)\n" }, { "alpha_fraction": 0.5463182926177979, "alphanum_fraction": 0.5961995124816895, "avg_line_length": 22.38888931274414, "blob_id": "fc7a8b6e2ebd92755c6d7b33eb47618dbb57c173", "content_id": "674c104bd2de83639d75a7a7d6e23f2bbb1cc132", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 425, "license_type": "no_license", "max_line_length": 91, "num_lines": 18, "path": "/apps/coins/migrations/0013_alter_coins_price.py", "repo_name": "gleb89/shitcoinback", "src_encoding": "UTF-8", "text": "# Generated by Django 3.2.1 on 2021-06-06 17:32\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('coin', '0012_alter_coins_price'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='coins',\n name='price',\n field=models.DecimalField(decimal_places=2, max_digits=2, verbose_name='Цена'),\n ),\n ]\n" }, { "alpha_fraction": 0.5450236797332764, "alphanum_fraction": 0.5971564054489136, "avg_line_length": 22.44444465637207, "blob_id": "dc022c9e946714cc97addfa0012f6f20963d9461", "content_id": "8bc2009a598342b9cfb29073f00a878fd1fbe39d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 426, "license_type": "no_license", "max_line_length": 92, "num_lines": 18, "path": "/apps/coins/migrations/0015_alter_coins_price.py", "repo_name": "gleb89/shitcoinback", "src_encoding": "UTF-8", "text": "# Generated by Django 3.2.1 on 2021-06-06 17:35\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('coin', '0014_alter_coins_price'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='coins',\n name='price',\n 
field=models.DecimalField(decimal_places=8, max_digits=10, verbose_name='Цена'),\n ),\n ]\n" }, { "alpha_fraction": 0.5366614460945129, "alphanum_fraction": 0.5772230625152588, "avg_line_length": 25.70833396911621, "blob_id": "6f9478e03d99f1eb09160994b82dddd98fca104e", "content_id": "b758bdbc5f930fb4f5e3f01ef64c61a085080794", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 657, "license_type": "no_license", "max_line_length": 96, "num_lines": 24, "path": "/apps/coins/migrations/0003_auto_20210525_0814.py", "repo_name": "gleb89/shitcoinback", "src_encoding": "UTF-8", "text": "# Generated by Django 3.2.1 on 2021-05-25 08:14\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('coin', '0002_note'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='exchange',\n name='trade_url',\n field=models.CharField(default=1, max_length=255, verbose_name='link'),\n preserve_default=False,\n ),\n migrations.AlterField(\n model_name='exchange',\n name='image',\n field=models.CharField(max_length=255, null=True, verbose_name='Изображение биржи'),\n ),\n ]\n" }, { "alpha_fraction": 0.7171717286109924, "alphanum_fraction": 0.7171717286109924, "avg_line_length": 8, "blob_id": "0869068542dd7ffe20046210e4d990cc5677ca7e", "content_id": "4274c1e8cd56302f72a337cf72c6240d5796d452", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 99, "license_type": "no_license", "max_line_length": 30, "num_lines": 11, "path": "/config/keysetting.py", "repo_name": "gleb89/shitcoinback", "src_encoding": "UTF-8", "text": "import os\nfrom dotenv import load_dotenv\n\nload_dotenv()\n\n\nclass Config:\n pass\n\n\nconfig = Config " }, { "alpha_fraction": 0.5820451974868774, "alphanum_fraction": 0.5933412313461304, "avg_line_length": 22.20833396911621, "blob_id": "a79a01e12cccf882c4ad3a79ff51ba89c830db51", "content_id": 
"3b5bebc6b2c5434a6c8312adf9b4f581db1a30bc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1800, "license_type": "no_license", "max_line_length": 140, "num_lines": 72, "path": "/README.md", "repo_name": "gleb89/shitcoinback", "src_encoding": "UTF-8", "text": "# shitcoinsmarketcap\n![Test Image 3](/api.png)\n### Service to display scam coins and tokens \n***\n \n### Application structure \n ├── apps\n | ├── coins\n | ├── comments\n | ├── core\n | └── users\n ├── config\n | ├── settings.py\n | ├── urls.py\n | ├── wsgi.py\n ├── celery.py\n ├── tasks.py\n | ├── asgi.py\n | └── keysetting.py\n ├── data\n | ├── nginx\n | └──app.conf\n ├── static\n ├── Dockerfile\n ├── docker-compose.yml\n └─ requirements.txt\n \n \n \n\n\n\n## Build and run the container\n\n1. Install Docker.\n\n2. Create a `.env` file \n\n ```\n # Environment settings for local development.\n POSTGRES_USER=postgres\n POSTGRES_PASSWORD=postgres\n POSTGRES_DB=241281\n ```\n\n\n3. On the command line, within this directory, do this to build the image and\n start the container:\n\n docker-compose run djapi python manage.py migrate --noinput\n docker-compose run djapi python manage.py createsuperuser\n docker-compose up --build\n docker-compose up -d\n docker-compose -f docker-compose.yml logs -f\n\n\n4. Open http://0.0.0.0:80/api/v1 in your browser.\n\n## If deploy - \n - rename file <strike>docker-compose.prod.yml</strike> docker-compose.yml and <strike>docker-compose.yml </strike> docker-compose.dev.yml \n\n \n chmod +x init-letsencrypt.sh\n ./init-letsencrypt.sh\n docker-compose run djapi python manage.py migrate --noinput\n docker-compose run djapi python manage.py createsuperuser\n docker-compose up --build\n docker-compose up -d\n\n5 . 
Requests api JWT\n\n - Authorization :Bearer 'token' \n \n \n" }, { "alpha_fraction": 0.5135746598243713, "alphanum_fraction": 0.5565611124038696, "avg_line_length": 20.047618865966797, "blob_id": "99dc5756840a2631655218588ce5ff85c26d7130", "content_id": "db33f60c430bbaeca86d9ae021790f3b674b135f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 442, "license_type": "no_license", "max_line_length": 47, "num_lines": 21, "path": "/apps/comments/migrations/0003_auto_20210513_1939.py", "repo_name": "gleb89/shitcoinback", "src_encoding": "UTF-8", "text": "# Generated by Django 3.2.1 on 2021-05-13 19:39\n\nfrom django.db import migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('comment', '0002_comments_updated'),\n ]\n\n operations = [\n migrations.RemoveField(\n model_name='comments',\n name='content_type',\n ),\n migrations.RemoveField(\n model_name='comments',\n name='object_id',\n ),\n ]\n" }, { "alpha_fraction": 0.5853658318519592, "alphanum_fraction": 0.5853658318519592, "avg_line_length": 16.636363983154297, "blob_id": "7e2e0177d8a0cb4f454381fb9003a6c8f03a9f09", "content_id": "993bc55a304d308ab96afa1d1af2db4d0423568c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 205, "license_type": "no_license", "max_line_length": 40, "num_lines": 11, "path": "/config/tasks.py", "repo_name": "gleb89/shitcoinback", "src_encoding": "UTF-8", "text": "from celery import shared_task\n\n\n \n@shared_task\ndef sample_task():\n try:\n from apps.coins import service\n service.get_update_price_coins()\n except:\n print('error')\n\n\n\n\n\n\n\n\n " }, { "alpha_fraction": 0.5857142806053162, "alphanum_fraction": 0.6244897842407227, "avg_line_length": 26.22222137451172, "blob_id": "4fb6427f9d0b968423f3f4fbfb3b1abf84279a2e", "content_id": "962449317a9dcd69a899a2fa0852721c5aaa10e1", "detected_licenses": [], "is_generated": false, "is_vendor": false, 
"language": "Python", "length_bytes": 507, "license_type": "no_license", "max_line_length": 148, "num_lines": 18, "path": "/apps/coins/migrations/0009_alter_coins_market_exchange.py", "repo_name": "gleb89/shitcoinback", "src_encoding": "UTF-8", "text": "# Generated by Django 3.2.1 on 2021-06-03 19:09\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('coin', '0008_alter_exchange_slug'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='coins',\n name='market_exchange',\n field=models.ManyToManyField(blank=True, null=True, related_name='market_list', to='coin.Exchange', verbose_name='торгуется на рынках'),\n ),\n ]\n" }, { "alpha_fraction": 0.8113207817077637, "alphanum_fraction": 0.8113207817077637, "avg_line_length": 25.33333396911621, "blob_id": "9a9f144b53cdd5b708a57a5ba0179f4123ba4d39", "content_id": "6d0b7d9bf8e68f62a8c889f728dc9b494bc09c71", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 159, "license_type": "no_license", "max_line_length": 40, "num_lines": 6, "path": "/apps/coins/admin.py", "repo_name": "gleb89/shitcoinback", "src_encoding": "UTF-8", "text": "from django.contrib import admin\nfrom .models import Coins, Exchange,Note\n\nadmin.site.register(Coins)\nadmin.site.register(Note)\nadmin.site.register(Exchange)\n\n" }, { "alpha_fraction": 0.5566037893295288, "alphanum_fraction": 0.6014150977134705, "avg_line_length": 22.55555534362793, "blob_id": "63f9ffa0b56cd1016e2f4c558f88d2f3afd0ba2c", "content_id": "08f618b31192dccf2e7f462d438887567224a482", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 437, "license_type": "no_license", "max_line_length": 89, "num_lines": 18, "path": "/apps/coins/migrations/0007_alter_coins_market_cap.py", "repo_name": "gleb89/shitcoinback", "src_encoding": "UTF-8", "text": "# Generated by Django 3.2.1 on 2021-06-03 07:45\n\nfrom django.db 
import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('coin', '0006_alter_coins_price'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='coins',\n name='market_cap',\n field=models.FloatField(blank=True, null=True, verbose_name='Капитализация'),\n ),\n ]\n" }, { "alpha_fraction": 0.5439024567604065, "alphanum_fraction": 0.5926828980445862, "avg_line_length": 21.77777862548828, "blob_id": "d3fb03dd95313279b9d2158d70b5feb395bf075a", "content_id": "57395ff62ffb8d8688bc80a9dd7e3468d67aa95e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 420, "license_type": "no_license", "max_line_length": 75, "num_lines": 18, "path": "/apps/coins/migrations/0006_alter_coins_price.py", "repo_name": "gleb89/shitcoinback", "src_encoding": "UTF-8", "text": "# Generated by Django 3.2.1 on 2021-06-03 07:38\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('coin', '0005_alter_coins_market_cap'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='coins',\n name='price',\n field=models.FloatField(default=0, verbose_name='Цена монеты'),\n ),\n ]\n" }, { "alpha_fraction": 0.5537587404251099, "alphanum_fraction": 0.5690559148788452, "avg_line_length": 46.66666793823242, "blob_id": "a4e1c8a6601f0e499b89a54f8070ca5262cd71f7", "content_id": "a7b2780ffae23ccabf4a8c1d730a87b31c99919a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2447, "license_type": "no_license", "max_line_length": 144, "num_lines": 48, "path": "/apps/coins/migrations/0001_initial.py", "repo_name": "gleb89/shitcoinback", "src_encoding": "UTF-8", "text": "# Generated by Django 3.2.1 on 2021-05-12 09:19\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n 
name='Exchange',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('name', models.CharField(max_length=255, verbose_name='Название рынка')),\n ('image', models.ImageField(blank=True, null=True, upload_to='images', verbose_name='Главное изображение')),\n ('slug', models.SlugField(max_length=100, unique=True, verbose_name='URL рынка')),\n ],\n options={\n 'verbose_name': 'Рынок',\n 'verbose_name_plural': 'Рынки',\n },\n ),\n migrations.CreateModel(\n name='Coins',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('symbol', models.CharField(max_length=55, null=True, verbose_name='Символ монеты')),\n ('name', models.CharField(max_length=255, verbose_name='Название монеты')),\n ('description', models.TextField(blank=True, null=True, verbose_name='Описание монеты')),\n ('image', models.CharField(max_length=255, null=True, verbose_name='Изображение монеты')),\n ('updated', models.DateTimeField(auto_now=True)),\n ('price', models.IntegerField(default=0, verbose_name='Цена монеты')),\n ('market_cap', models.TextField(blank=True, null=True, verbose_name='Капитализация')),\n ('volume', models.TextField(blank=True, null=True, verbose_name='Обьемы')),\n ('price_exc', models.CharField(blank=True, max_length=255, null=True, verbose_name='Изменение % 24ч')),\n ('board_price', models.JSONField()),\n ('market_exchange', models.ManyToManyField(related_name='market_list', to='coin.Exchange', verbose_name='торгуется на рынках')),\n ],\n options={\n 'verbose_name': 'Coin',\n 'verbose_name_plural': 'Coins',\n },\n ),\n ]\n" }, { "alpha_fraction": 0.698113203048706, "alphanum_fraction": 0.698113203048706, "avg_line_length": 15, "blob_id": "463b4573055fb15e0b82f96b3d97c93b32f93813", "content_id": "d33af0705d7d8758cdfcef984d095f086432ac65", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 159, 
"license_type": "no_license", "max_line_length": 53, "num_lines": 10, "path": "/apps/coins/urls.py", "repo_name": "gleb89/shitcoinback", "src_encoding": "UTF-8", "text": "from django.urls import path, include\n\nfrom .views import (\n redirect_home_on_admin\n)\n\n\nurlpatterns = [\n path('', redirect_home_on_admin, name='homepage')\n]" }, { "alpha_fraction": 0.5794947743415833, "alphanum_fraction": 0.6255571842193604, "avg_line_length": 28.2608699798584, "blob_id": "730f1db31bcb42811e68cb3e6bc83caf71a6fbd9", "content_id": "3bda7d8d5449047f017677622d3064eb1298ed00", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 695, "license_type": "no_license", "max_line_length": 192, "num_lines": 23, "path": "/apps/comments/migrations/0004_auto_20210514_0703.py", "repo_name": "gleb89/shitcoinback", "src_encoding": "UTF-8", "text": "# Generated by Django 3.2.1 on 2021-05-14 07:03\n\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('comment', '0003_auto_20210513_1939'),\n ]\n\n operations = [\n migrations.RemoveField(\n model_name='comments',\n name='parent_coment',\n ),\n migrations.AddField(\n model_name='comments',\n name='parent',\n field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='children', to='comment.comments', verbose_name='Родительский коментарий'),\n ),\n ]\n" }, { "alpha_fraction": 0.5143540501594543, "alphanum_fraction": 0.5956937670707703, "avg_line_length": 22.22222137451172, "blob_id": "8c5086f9011f52ae92476d28292996eb4e77caac", "content_id": "adc76f0afb9f6878b15f331d0689e9b4f6a1b366", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 418, "license_type": "no_license", "max_line_length": 85, "num_lines": 18, "path": "/apps/coins/migrations/0004_alter_exchange_slug.py", "repo_name": "gleb89/shitcoinback", 
"src_encoding": "UTF-8", "text": "# Generated by Django 3.2.1 on 2021-05-25 13:07\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('coin', '0003_auto_20210525_0814'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='exchange',\n name='slug',\n field=models.SlugField(max_length=100, unique=True, verbose_name='slug'),\n ),\n ]\n" }, { "alpha_fraction": 0.6474103331565857, "alphanum_fraction": 0.649402379989624, "avg_line_length": 37.61538314819336, "blob_id": "a2373bbf124b9d9956b06ab86635cb2a54ea2670", "content_id": "c74c320fb4c3bd7d6076ac0b4281fb5f68c2a4a0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2010, "license_type": "no_license", "max_line_length": 78, "num_lines": 52, "path": "/config/urls.py", "repo_name": "gleb89/shitcoinback", "src_encoding": "UTF-8", "text": "from django.urls import include, path\nfrom django.contrib import admin\nfrom django.conf import settings\nfrom django.conf.urls.static import static\n\nfrom rest_framework import routers\nfrom rest_framework.schemas import get_schema_view\nfrom rest_framework_simplejwt import views as jwt_views\n\nfrom apps.coins.views import (\n CoinsViewSet,\n CoinsPaginationViewSet,\n ExchangeViewSet,\n CoinsNewViewSet,\n snippet_list,\n redirect_home_on_admin\n )\nfrom apps.users.views import UserCreate\nfrom apps.comments.views import CommentsViewSet\n\nrouter = routers.DefaultRouter()\nrouter.register(r'coins', CoinsPaginationViewSet, basename='Coinspaginator')\nrouter.register(r'coinslist', CoinsViewSet, basename='Coins')\nrouter.register(r'user', UserCreate, basename='User')\nrouter.register(r'comments', CommentsViewSet, basename='Commentsr')\nrouter.register(r'exchange', ExchangeViewSet, basename='Exchange')\nrouter.register(r'updatecoins', CoinsNewViewSet, basename='updatecoins')\n\n\n\n\nurlpatterns = [\n path('',redirect_home_on_admin,name='redirect_admin'),\n 
path('', include('apps.coins.urls')),\n path('api/v1/', include(router.urls)),\n path('api/admin/', admin.site.urls),\n path('api/openapi/', get_schema_view(\n title=\"Your Project\",\n description=\"API for all things …\",\n version=\"1.0.0\"\n ), name='openapi-schema'),\n path('api/update/<str:name>/', snippet_list,\n name='updatetokendata'),\n path('api/login/', jwt_views.TokenObtainPairView.as_view(),\n name='token_obtain_pair'),\n path('api/ref/', jwt_views.TokenRefreshView.as_view(),\n name='token_refresh'),\n\n]\n\nurlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)\nurlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n" }, { "alpha_fraction": 0.544018030166626, "alphanum_fraction": 0.6139954924583435, "avg_line_length": 23.61111068725586, "blob_id": "40ba0e43f327927834006dc636ca9fc63a1da5eb", "content_id": "960ae5680f71bb03ce75da28d63d8d06c8bcc5d6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 465, "license_type": "no_license", "max_line_length": 102, "num_lines": 18, "path": "/apps/comments/migrations/0006_comments_user_parent.py", "repo_name": "gleb89/shitcoinback", "src_encoding": "UTF-8", "text": "# Generated by Django 3.2.1 on 2021-05-15 20:43\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('comment', '0005_auto_20210514_0716'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='comments',\n name='user_parent',\n field=models.IntegerField(blank=True, null=True, verbose_name='Юсер коментария родителя'),\n ),\n ]\n" }, { "alpha_fraction": 0.7431694269180298, "alphanum_fraction": 0.7431694269180298, "avg_line_length": 15.454545021057129, "blob_id": "5b02c9a0f8b4f84b72b677c3764f4f4e12119fc2", "content_id": "efb20971e351fa0f676cde58808daa11478dfbb8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 183, "license_type": 
"no_license", "max_line_length": 43, "num_lines": 11, "path": "/apps/coins/tasks.py", "repo_name": "gleb89/shitcoinback", "src_encoding": "UTF-8", "text": "import django\ndjango.setup()\n\n\nfrom config.celery import app as celery_app\n\nfrom .service import get_update_price_coins\n\n@celery_app.task\ndef gettts():\n get_update_price_coins()\n\n\n" }, { "alpha_fraction": 0.7933130860328674, "alphanum_fraction": 0.7933130860328674, "avg_line_length": 28.909090042114258, "blob_id": "dfa3898947a22d83a255a3e89b71cf0305b49c9d", "content_id": "4ed2519b1c27d6afba1925e09557e9da13e39ecf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 329, "license_type": "no_license", "max_line_length": 50, "num_lines": 11, "path": "/apps/users/views.py", "repo_name": "gleb89/shitcoinback", "src_encoding": "UTF-8", "text": "from django.contrib.auth.models import User, Group\n\nfrom rest_framework import viewsets\nfrom rest_framework.permissions import AllowAny\n\nfrom .serializers import UserSerializer\n\nclass UserCreate(viewsets.ModelViewSet):\n queryset = User.objects.all()\n serializer_class = UserSerializer\n permission_classes = (AllowAny, )\n" }, { "alpha_fraction": 0.6141790747642517, "alphanum_fraction": 0.6145522594451904, "avg_line_length": 24.028038024902344, "blob_id": "51a1d196bb1bbdb87a4723f43e82f7e6eb3ed2c3", "content_id": "c707097be99d1912908bb7572ced1217085afdbe", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2742, "license_type": "no_license", "max_line_length": 81, "num_lines": 107, "path": "/apps/coins/views.py", "repo_name": "gleb89/shitcoinback", "src_encoding": "UTF-8", "text": "import threading\n\n\nfrom django.shortcuts import redirect\n\nfrom rest_framework import viewsets\nfrom rest_framework.response import Response\nfrom rest_framework import viewsets\nfrom rest_framework.pagination import PageNumberPagination\nfrom rest_framework.decorators 
import action\n\nfrom .models import Coins, Exchange\nfrom .service import (\n get_market_coins,\n update_price_coin,\n get_exchanges_list\n )\nfrom .serializer import CoinsSerializer, ExchangeSerializer\nfrom .tasks import gettts\n\n\n\ndef redirect_home_on_admin(request):\n return redirect('/api/admin/')\n\n\nclass CoinsPaginationViewSet(viewsets.ModelViewSet):\n\n queryset = Coins.objects.all().order_by('-market_cap')\n serializer_class = CoinsSerializer\n paginate_by = 1\n pagination_class = PageNumberPagination\n\n\n\nclass CoinsViewSet(viewsets.ViewSet):\n\n def list(self, request):\n \"\"\" \n\n список обьектов без пагинации\n\n \"\"\"\n\n queryset = Coins.objects.all()\n serializer = CoinsSerializer(queryset, many=True, read_only=True)\n return Response(serializer.data)\n\n @action(detail=False, methods=['get'])\n def update_one_data_coin(self,request,name):\n\n \"\"\" \n update data coin name\n \"\"\"\n\n coin = Coins.objects.get(name=name)\n \n (\n coin.price,\n coin.market_cap,\n coin.volume,\n coin.image,\n coin.price_exc,\n coin.board_price\n ) = update_price_coin(name)\n coin.save()\n serializer = CoinsSerializer(coin)\n return Response(serializer.data)\n\n\nsnippet_list = CoinsViewSet.as_view({\n 'get': 'update_one_data_coin',\n \n})\n\nclass ExchangeViewSet(viewsets.ViewSet):\n\n def list(self, request):\n\n \"\"\" \n список бирж\n \"\"\" \n\n thread = threading.Thread(target=get_exchanges_list)\n thread.start()\n \n try:\n coins_not_echange = Coins.objects.filter(market_exchange=None)\n thread = threading.Thread(target=get_market_coins(coins_not_echange))\n thread.start()\n except:\n pass \n queryset = Exchange.objects.all()\n serializer = ExchangeSerializer(queryset, many=True, read_only=True)\n return Response(serializer.data)\n\nclass CoinsNewViewSet(viewsets.ViewSet):\n\n def list(self, request):\n\n \"\"\" \n список обьектов без пагинации\n \"\"\"\n \n queryset = Coins.objects.all()\n serializer = CoinsSerializer(queryset, many=True, 
read_only=True)\n return Response(serializer.data)\n\n\n" }, { "alpha_fraction": 0.5391498804092407, "alphanum_fraction": 0.6152125000953674, "avg_line_length": 23.83333396911621, "blob_id": "4de1460e40dcad393ad59ce8d079d3bd31c5b402", "content_id": "ff2a2fc760872af2f82aa8142cec3345009a1397", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 470, "license_type": "no_license", "max_line_length": 104, "num_lines": 18, "path": "/apps/comments/migrations/0008_alter_comments_user_parent.py", "repo_name": "gleb89/shitcoinback", "src_encoding": "UTF-8", "text": "# Generated by Django 3.2.1 on 2021-06-14 07:53\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('comment', '0007_auto_20210614_0748'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='comments',\n name='user_parent',\n field=models.CharField(max_length=255, null=True, verbose_name='Юсер комментария родителя'),\n ),\n ]\n" } ]
33
GEEK1050/AirBnB_clone_v2
https://github.com/GEEK1050/AirBnB_clone_v2
8e8698a43b2edc8c16fe8c27f64a98c58746790c
4982fe4ae7abe1e0614df1145c75087477397255
a293fef9bafe8cd1e5e8fe41caf504a7f7cd21ec
refs/heads/master
2022-12-07T21:35:58.343565
2020-09-02T15:54:59
2020-09-02T15:54:59
288,158,893
0
0
null
2020-08-17T11:11:11
2020-08-14T22:19:00
2020-08-14T22:18:58
null
[ { "alpha_fraction": 0.6341463327407837, "alphanum_fraction": 0.6368563771247864, "avg_line_length": 22.0625, "blob_id": "590e4296beec52d03c4d891cd7c9b90aee971c24", "content_id": "4378ebe49d09cfa5cade6c6b6db4df0248b36004", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 369, "license_type": "no_license", "max_line_length": 72, "num_lines": 16, "path": "/1-pack_web_static.py", "repo_name": "GEEK1050/AirBnB_clone_v2", "src_encoding": "UTF-8", "text": "#!/usr/bin/python3\n\"\"\"files compression\"\"\"\n\nfrom datetime import datetime\nfrom fabric.api import run, put, local\n\n\ndef do_pack():\n \"\"\"compress file\"\"\"\n day = datetime.today()\n path = \"versions/web_static_{}.tgz\".format(day.isoformat())\n\n if local(\"mkdir -p versions; tar -vfzc {} web_static\".format(path)):\n return path\n else:\n return None\n" } ]
1
coolharsh55/easyarm
https://github.com/coolharsh55/easyarm
fa7164464b83dee0da3f3eabfcc4d0e8c2e29cda
3dd536f7d08ac0c9be9b70f3177c87a1ce956761
335bf275d6067e8c6bae65dc34d44dd2165f1475
refs/heads/master
2022-08-27T12:08:14.944035
2020-05-23T08:51:39
2020-05-23T08:51:39
106,132,709
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5042158365249634, "alphanum_fraction": 0.5126475691795349, "avg_line_length": 27.238094329833984, "blob_id": "5422de08de34a478fa5d1cbefa53b42d1efac531", "content_id": "835d09ba81c37973d0e0bd7faec7572456180350", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 593, "license_type": "permissive", "max_line_length": 52, "num_lines": 21, "path": "/parsefile.py", "repo_name": "coolharsh55/easyarm", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n\nif __name__ == '__main__':\n import sys\n if len(sys.argv) < 2:\n print('needs filename')\n sys.exit()\n filename = sys.argv[1]\n with open(filename, 'r') as fd:\n lines = fd.readlines()\n lines = [l.strip() for l in lines]\n lines = [l for l in lines if l]\n while lines and lines[0].lower() != 'start':\n lines.pop(0)\n if not lines:\n print('nothing to process')\n sys.exit()\n from sleeve import parser\n for line in lines:\n # print('>>> ', line)\n parser.parse(line)\n" }, { "alpha_fraction": 0.78125, "alphanum_fraction": 0.796875, "avg_line_length": 41.66666793823242, "blob_id": "0427b433c22c1a3c7f5f1f8b5dc6fd56c1e3c08f", "content_id": "3563b709446ded286fa3d01cced60b3d20ebc3ec", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 256, "license_type": "permissive", "max_line_length": 78, "num_lines": 6, "path": "/README.md", "repo_name": "coolharsh55/easyarm", "src_encoding": "UTF-8", "text": "# easyarm\nA small hobby project that uses ply (python lexer-parser) to convert ARM codes\nsubmitted as part of CS1021 assignment (SCSS - Trinity College Dublin) into\nconcise descriptions of what the code is doing. 
Something like pseudocode.\n\n> requires ply\n" }, { "alpha_fraction": 0.521182119846344, "alphanum_fraction": 0.5346771478652954, "avg_line_length": 18.384105682373047, "blob_id": "fb6425f57921fbca58be9fa6391bb4aa1636f9b7", "content_id": "74855e8b8474657c4ab8cd355064455577305076", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5854, "license_type": "permissive", "max_line_length": 69, "num_lines": 302, "path": "/sleeve.py", "repo_name": "coolharsh55/easyarm", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n\n# tokenise the program\n# uses: ply\n\n# handles following instructions -\n# LDR RX, =N\n# MOV RX, RY\n# ADD RX, RY, RZ\n# SUB RX, RY, RZ\n# MUL RX, RY, RZ\n\nNO_REGISTERS = 15\nregisters = dict((r, 0) for r in range(1, NO_REGISTERS + 1))\nSTACK = []\nTABS = 0\nBYTES = {}\n\nimport ply.lex as lex\nfrom colorama import init\ninit()\nfrom colorama import Fore, Back, Style\n\n# tokens\ntokens = (\n 'INSTRUCTION_LDR',\n 'INSTRUCTION_MOV',\n 'INSTRUCTION_ADD',\n 'INSTRUCTION_SUB',\n 'INSTRUCTION_MUL',\n 'INSTRUCTION_LDRB',\n 'INSTRUCTION_CMP',\n 'INSTRUCTION_BEQ',\n 'INSTRUCTION_B',\n 'INSTRUCTION_BLT',\n 'INSTRUCTION_DCB',\n 'INSTRUCTION_DCD',\n 'INSTRUCTION_SPACE',\n 'REGISTER',\n 'REGISTER_ADDR',\n 'NUMBER',\n 'COMMENT',\n 'LABEL',\n 'LABEL_REF',\n)\n\n\ndef t_INSTRUCTION_LDRB(t):\n r'LDRB '\n t.value = t.value.strip()\n return t\n\n\ndef t_INSTRUCTION_MOV(t):\n r'MOV '\n t.value = t.value.strip()\n return t\n\n\ndef t_INSTRUCTION_ADD(t):\n r'ADD '\n t.value = t.value.strip()\n return t\n\n\ndef t_INSTRUCTION_SUB(t):\n r'SUB '\n t.value = t.value.strip()\n return t\n\n\ndef t_INSTRUCTION_MUL(t):\n r'MUL '\n t.value = t.value.strip()\n return t\n\n\ndef t_INSTRUCTION_LDR(t):\n r'LDR '\n t.value = t.value.strip()\n return t\n\n\ndef t_INSTRUCTION_CMP(t):\n r'CMP '\n t.value = t.value.strip()\n return t\n\n\ndef t_INSTRUCTION_BEQ(t):\n r'BEQ '\n t.value = t.value.strip()\n return t\n\n\ndef 
t_INSTRUCTION_BLT(t):\n r'BLT '\n t.value = t.value.strip()\n return t\n\n\ndef t_INSTRUCTION_B(t):\n r'[bB] '\n t.value = t.value.strip()\n return t\n\n\ndef t_INSTRUCTION_DCB(t):\n r'DCB\\s+.+'\n t.value = t.value[4:].strip()\n return t\n\n\ndef t_INSTRUCTION_DCD(t):\n r'DCD\\s+.+'\n t.value = t.value[4:].strip()\n return t\n\n\ndef t_INSTRUCTION_SPACE(t):\n r'SPACE\\s+\\d+'\n t.value = int(t.value[5:].strip())\n return t\n\n\ndef t_REGISTER(t):\n r'R\\d{1}'\n t.value = int(t.value[1:])\n return t\n\n\ndef t_REGISTER_ADDR(t):\n r'\\[R\\d{1}\\]'\n t.value = t.value[2:-1]\n return t\n\n\ndef t_NUMBER(t):\n r'[=#]{1}\\d+'\n t.value = int(t.value[1:])\n return t\n\n\ndef t_COMMENT(t):\n r';.+'\n t.value = t.value[1:]\n return t\n\n\ndef t_LABEL(t):\n r'\\w+'\n return t\n\n\ndef t_LABEL_REF(t):\n r'=\\w+'\n t.value = t.value[1:]\n return t\n\n\ndef t_newline(t):\n r'\\n+'\n t.lexer.lineno += len(t.value)\n\n\ndef t_error(t):\n t.lexer.skip(1)\n\n\nt_ignore = ' \\t,'\nlexer = lex.lex()\n\nimport ply.yacc as yacc\n\ndef _tabs(value=0):\n global TABS\n if value:\n TABS += value\n if TABS < 0:\n TABS = 0\n return '\\t' * TABS\n\n\ndef p_expression(p):\n 'expression : expression COMMENT'\n print(_tabs(), Fore.CYAN, '#', p[2], Style.RESET_ALL)\n\n\ndef p_label(p):\n 'expression : LABEL'\n style = Fore.RED\n if p[1] == 'while' or p[1].startswith('while'):\n print(_tabs(), style, p[1], Style.RESET_ALL)\n _tabs(1)\n elif p[1] in ('endwhile', 'endwh'):\n _tabs(-1)\n print(_tabs(), style, p[1], Style.RESET_ALL)\n elif p[1].startswith('end'):\n _tabs(-1)\n print(_tabs(), style, p[1], Style.RESET_ALL)\n else:\n print(_tabs(), style, p[1], Style.RESET_ALL)\n\n\ndef p_databytes(p):\n '''expression : LABEL INSTRUCTION_DCB\n | LABEL INSTRUCTION_DCD'''\n BYTES[p[1]] = p[2]\n print(_tabs(), '%s = %s' % (p[1], p[2]))\n\n\ndef p_allocate_space(p):\n 'expression : LABEL INSTRUCTION_SPACE'\n BYTES[p[1]] = '%sbytes' % p[2]\n print(_tabs(), '%s = [%s]bytes' % (p[1], p[2]))\n\n\ndef 
p_load(p):\n 'expression : INSTRUCTION_LDR REGISTER NUMBER'\n print(_tabs(), 'R%s = %s' % (p[2], p[3]))\n\n\ndef p_load_addr(p):\n 'expression : INSTRUCTION_LDR REGISTER REGISTER_ADDR'\n print(_tabs(), 'R%s = *R%s' % (p[2], p[3]))\n\n\ndef p_loadbyte(p):\n 'expression : INSTRUCTION_LDRB REGISTER REGISTER_ADDR'\n print(_tabs(), 'R%s = *R%s' % (p[2], p[3]))\n\n\ndef p_loadstring(p):\n 'expression : INSTRUCTION_LDR REGISTER LABEL_REF'\n print(_tabs(), 'R%s = addr(%s)' % (p[2], p[3]))\n\n\ndef p_mov(p):\n '''expression : INSTRUCTION_MOV REGISTER REGISTER\n | INSTRUCTION_MOV REGISTER NUMBER'''\n print(_tabs(), 'R%s = %s' % (p[2], p[3]))\n\n\ndef p_compare(p):\n '''expression : INSTRUCTION_CMP REGISTER NUMBER\n | INSTRUCTION_CMP REGISTER REGISTER'''\n STACK.append((p[2], p[3]))\n\n\ndef p_branch(p):\n 'expression : INSTRUCTION_B LABEL'\n print(_tabs(), 'goto %s' % p[2])\n\n\ndef p_branch_equal(p):\n '''expression : INSTRUCTION_BEQ LABEL\n | INSTRUCTION_BLT LABEL'''\n registers = STACK.pop()\n if p[1] == 'BEQ':\n print(_tabs(), 'if R%s == %s' % registers)\n elif p[1] == 'BLT':\n print(_tabs(), 'if R%s < %s' % registers)\n print(_tabs(), '\\tgoto %s' % p[2])\n\n\ndef p_mads(p):\n '''expression : INSTRUCTION_ADD REGISTER REGISTER REGISTER\n | INSTRUCTION_SUB REGISTER REGISTER REGISTER\n | INSTRUCTION_MUL REGISTER REGISTER REGISTER'''\n if p[1] == 'ADD':\n sign = '+'\n elif p[1] == 'SUB':\n sign = '-'\n elif p[1] == 'MUL':\n sign = '*'\n if p[2] == p[3]:\n print(_tabs(), 'R%s %s= R%s' % (p[2], sign, p[4]))\n else:\n print(_tabs(), 'R%s = R%s %s R%s' % (p[2], p[3], sign, p[4]))\n\n\ndef p_mads_value(p):\n '''expression : INSTRUCTION_ADD REGISTER REGISTER NUMBER\n | INSTRUCTION_SUB REGISTER REGISTER NUMBER\n | INSTRUCTION_MUL REGISTER REGISTER NUMBER'''\n if p[1] == 'ADD':\n sign = '+'\n elif p[1] == 'SUB':\n sign = '-'\n elif p[1] == 'MUL':\n sign = '*'\n if p[2] == p[3]:\n print(_tabs(), 'R%s %s= %s' % (p[2], sign, p[4]))\n else:\n print(_tabs(), 'R%s = R%s %s %s' % (p[2], 
p[3], sign, p[4]))\n\n\ndef p_error(p):\n pass\n\n\nparser = yacc.yacc()\n" }, { "alpha_fraction": 0.4290030300617218, "alphanum_fraction": 0.461228609085083, "avg_line_length": 19.26530647277832, "blob_id": "b5ad5994d6f87799daf717de54c79b3dd3882092", "content_id": "0dfd8253bdc2e27b0bc0ec76cbba078ed44075e7", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 993, "license_type": "permissive", "max_line_length": 50, "num_lines": 49, "path": "/test.py", "repo_name": "coolharsh55/easyarm", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n\n# tests\n\nfrom sleeve import parser\nfrom sleeve import registers\n\n\ndata = {\n 'simple': '''\n LDR R1, =3 ; some vanity comment\n MOV R3, R1\n ADD R1, R2, R3 ; more comments\n SUB R1, R2, R3\n ; comment between lines\n ; for testing purposes\n MUL R1, R2, R3\n ''',\n 'stringlength': '''\n start\n LDR R1, =str1\n LDR R2, =0 \n LDRB R3, [R1] \n while \n CMP R3, #0\n BEQ endwhile\n ADD R1, R1, #1\n LDRB R3, [R1] \n ADD R2, R2, #1\n B while \n endwhile \n stop B stop\n AREA TestData, DATA, READWRITE\n str1 DCB \"Friday\",0\n END''',\n}\n\n\ndef test_parser():\n '''test parser'''\n for line in data['stringlength'].splitlines():\n line = line.strip()\n # print('>>', line)\n parser.parse(line)\n\n\nif __name__ == '__main__':\n # do things\n test_parser()\n" } ]
4
olneyhymn/presbyterian_stats
https://github.com/olneyhymn/presbyterian_stats
fd2dea78e4d0e571d45e5534fda988bca01c3ea2
e5fd916a5fbe292b6e6dab5cf4d9bc48a570b793
bef5bf735bd32683a6e886f1c9b0c99cc7ee3092
refs/heads/main
2023-03-22T00:27:01.798545
2021-03-05T01:55:45
2021-03-05T01:55:45
344,661,410
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5273584723472595, "alphanum_fraction": 0.5424528121948242, "avg_line_length": 13.708333015441895, "blob_id": "b769609b3cd82d7848c51ed7c7d964aaac799a60", "content_id": "f2674ba5d26456f467a8e5e6fc568986a24314ef", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1060, "license_type": "no_license", "max_line_length": 160, "num_lines": 72, "path": "/generate.py", "repo_name": "olneyhymn/presbyterian_stats", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\nimport pandas as pd\nimport altair as alt\n\n\n# In[2]:\n\n\ndf = pd.read_csv(\"data/raw.csv\")\ndf.head()\n\n\n# In[3]:\n\n\nmelted = df.melt(['Year', 'Denomination', 'Source'], ['Ministers', 'Communicant Members', 'Presbyteries', 'Congregations'], value_name=\"count\", var_name=\"Type\")\n\n\n# In[4]:\n\n\nmelted.head()\n\n\n# In[5]:\n\n\n\nmelted['Year'] = pd.to_datetime(melted['Year'], format=\"%Y\")\n\n\n# In[6]:\n\n\ncharts = []\nfor d in melted['Denomination'].unique():\n c = alt.Chart(melted[melted[\"Denomination\"] == d].dropna()).mark_circle().encode(\n alt.X(\n 'Year',\n title='Year',\n scale=alt.Scale(zero=False),\n ),\n alt.Y('count'),\n row=alt.Row(\"Type\"),\n tooltip=[\"count\", \"Source\"], \n ).resolve_scale(y='independent').properties(\n title=d\n ).properties(width=600, height=100)\n charts.append(c)\n\n\n# In[7]:\n\n\nchart = alt.vconcat(*charts)\n\n\n# In[8]:\n\n\nchart\n\n\n# In[9]:\n\n\nchart.save(\"site/data/chart.json\")\n\n" } ]
1
tipfom/website-corona-data
https://github.com/tipfom/website-corona-data
5e6a026539b91508f3bac16b9ecb40a8315c9e12
b5df3c9653f356b77af5d65ae2a0b81e2b0c5c7f
5cc10f09de0820962f727d5cfcc8889d966f407a
refs/heads/master
2023-05-02T05:02:05.169903
2021-05-22T16:11:42
2021-05-22T16:11:42
369,851,694
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5981087684631348, "alphanum_fraction": 0.6087470650672913, "avg_line_length": 34.20833206176758, "blob_id": "1fda1fcd4e011b6f6409e03e9bf47e309f37c41a", "content_id": "41941e6d7fbe67b361ca2fca311e32e08cc08e8a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 846, "license_type": "no_license", "max_line_length": 65, "num_lines": 24, "path": "/requesthandler.py", "repo_name": "tipfom/website-corona-data", "src_encoding": "UTF-8", "text": "from http.server import BaseHTTPRequestHandler\nfrom urllib.parse import urlparse\n\nfrom corona.data import get_overview_dataset, get_detail_dataset\n\nclass HTTPRequestHandler(BaseHTTPRequestHandler):\n def __init__(self, request, client_address, server):\n super().__init__(request, client_address, server)\n\n def do_GET(self):\n parsed_path = urlparse(self.path)\n splitted = parsed_path.path.split(\"/\")\n\n if len(splitted) == 2:\n self.send_response(200)\n self.send_header(\"Access-Control-Allow-Origin\", \"*\")\n self.end_headers()\n if splitted[1] == \"\":\n self.wfile.write(get_overview_dataset())\n else:\n self.wfile.write(get_detail_dataset(splitted[1]))\n else:\n self.send_response(400)\n self.end_headers()\n\n" }, { "alpha_fraction": 0.605481743812561, "alphanum_fraction": 0.6072466969490051, "avg_line_length": 37.22222137451172, "blob_id": "7b734faad128212255ff81d036f6cf63a5b4d55a", "content_id": "2b7a48e87a1edc44d5169079dd98af3c1afeb915", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9632, "license_type": "no_license", "max_line_length": 129, "num_lines": 252, "path": "/corona/regions.py", "repo_name": "tipfom/website-corona-data", "src_encoding": "UTF-8", "text": "MAINLAND_CHINA = 0\nWESTERN_PACIFIC_REGION = 1\nEUROPEAN_REGION = 2\nSOUTH_EAST_ASIA_REGION = 3\nEASTERN_MEDITERRANEAN_REGION = 4\nREGION_OF_THE_AMERICANS = 5\nAFRICAN_REGION = 6\nOTHER = 7\n\nregion_map = {\n 
\"China\": MAINLAND_CHINA,\n \"Taiwan*\": MAINLAND_CHINA,\n #####################################################################\n \"Korea, South\": WESTERN_PACIFIC_REGION,\n \"Japan\": WESTERN_PACIFIC_REGION,\n \"Singapore\": WESTERN_PACIFIC_REGION,\n \"Australia\": WESTERN_PACIFIC_REGION,\n \"Malaysia\": WESTERN_PACIFIC_REGION,\n \"Vietnam\": WESTERN_PACIFIC_REGION,\n \"Philippines\": WESTERN_PACIFIC_REGION,\n \"Cambodia\": WESTERN_PACIFIC_REGION,\n \"New Zealand\": WESTERN_PACIFIC_REGION,\n \"Fiji\": WESTERN_PACIFIC_REGION,\n \"Papua New Guinea\": WESTERN_PACIFIC_REGION,\n #####################################################################\n \"Italy\": EUROPEAN_REGION,\n \"France\": EUROPEAN_REGION,\n \"Germany\": EUROPEAN_REGION,\n \"Spain\": EUROPEAN_REGION,\n \"United Kingdom\": EUROPEAN_REGION,\n \"Switzerland\": EUROPEAN_REGION,\n \"Norway\": EUROPEAN_REGION,\n \"Sweden\": EUROPEAN_REGION,\n \"Austria\": EUROPEAN_REGION,\n \"Croatia\": EUROPEAN_REGION,\n \"Netherlands\": EUROPEAN_REGION,\n \"Azerbaijan\": EUROPEAN_REGION,\n \"Denmark\": EUROPEAN_REGION,\n \"Georgia\": EUROPEAN_REGION,\n \"Greece\": EUROPEAN_REGION,\n \"Romania\": EUROPEAN_REGION,\n \"Finland\": EUROPEAN_REGION,\n \"Russia\": EUROPEAN_REGION,\n \"Belarus\": EUROPEAN_REGION,\n \"Belgium\": EUROPEAN_REGION,\n \"Estonia\": EUROPEAN_REGION,\n \"Ireland\": EUROPEAN_REGION,\n \"Lithuania\": EUROPEAN_REGION,\n \"Monaco\": EUROPEAN_REGION,\n \"North Macedonia\": EUROPEAN_REGION,\n \"San Marino\": EUROPEAN_REGION,\n \"Luxembourg\": EUROPEAN_REGION,\n \"Iceland\": EUROPEAN_REGION,\n \"Czechia\": EUROPEAN_REGION,\n \"Andorra\": EUROPEAN_REGION,\n \"Portugal\": EUROPEAN_REGION,\n \"Latvia\": EUROPEAN_REGION,\n \"Ukraine\": EUROPEAN_REGION,\n \"Hungary\": EUROPEAN_REGION,\n \"Liechtenstein\": EUROPEAN_REGION,\n \"Poland\": EUROPEAN_REGION,\n \"Bosnia and Herzegovina\": EUROPEAN_REGION,\n \"Slovenia\": EUROPEAN_REGION,\n \"Serbia\": EUROPEAN_REGION,\n \"Slovakia\": EUROPEAN_REGION,\n \"Malta\": 
EUROPEAN_REGION,\n \"Bulgaria\": EUROPEAN_REGION,\n \"Moldova\": EUROPEAN_REGION,\n \"Albania\": EUROPEAN_REGION,\n \"Cyprus\": EUROPEAN_REGION,\n \"Turkey\": EUROPEAN_REGION, # ?????????????????????\n \"Holy See\": EUROPEAN_REGION,\n \"Kosovo\": EUROPEAN_REGION,\n \"Montenegro\": EUROPEAN_REGION,\n #####################################################################\n \"Thailand\": SOUTH_EAST_ASIA_REGION,\n \"Indonesia\": SOUTH_EAST_ASIA_REGION,\n \"India\": SOUTH_EAST_ASIA_REGION,\n \"Nepal\": SOUTH_EAST_ASIA_REGION,\n \"Sri Lanka\": SOUTH_EAST_ASIA_REGION,\n \"Bhutan\": SOUTH_EAST_ASIA_REGION,\n \"Maldives\": SOUTH_EAST_ASIA_REGION,\n \"Bangladesh\": SOUTH_EAST_ASIA_REGION,\n \"Brunei\": SOUTH_EAST_ASIA_REGION,\n \"Mongolia\": SOUTH_EAST_ASIA_REGION, # ??????????\n \"Uzbekistan\": SOUTH_EAST_ASIA_REGION, # ?????????\n \"Kazakhstan\": SOUTH_EAST_ASIA_REGION, # ????????????\n \"Kyrgyzstan\": SOUTH_EAST_ASIA_REGION, # ??????????????\n \"Timor-Leste\": SOUTH_EAST_ASIA_REGION,\n \"Laos\": SOUTH_EAST_ASIA_REGION,\n \"Burma\": SOUTH_EAST_ASIA_REGION,\n #####################################################################\n \"Armenia\": EASTERN_MEDITERRANEAN_REGION, # ????????????\n \"Iran\": EASTERN_MEDITERRANEAN_REGION,\n \"Kuwait\": EASTERN_MEDITERRANEAN_REGION,\n \"Bahrain\": EASTERN_MEDITERRANEAN_REGION,\n \"United Arab Emirates\": EASTERN_MEDITERRANEAN_REGION,\n \"Iraq\": EASTERN_MEDITERRANEAN_REGION,\n \"Oman\": EASTERN_MEDITERRANEAN_REGION,\n \"Pakistan\": EASTERN_MEDITERRANEAN_REGION,\n \"Lebanon\": EASTERN_MEDITERRANEAN_REGION,\n \"Afghanistan\": EASTERN_MEDITERRANEAN_REGION,\n \"Egypt\": EASTERN_MEDITERRANEAN_REGION,\n \"Qatar\": EASTERN_MEDITERRANEAN_REGION,\n \"Saudi Arabia\": EASTERN_MEDITERRANEAN_REGION,\n \"Jordan\": EASTERN_MEDITERRANEAN_REGION, # ??????????????\n \"Israel\": EASTERN_MEDITERRANEAN_REGION,\n \"Syria\": EASTERN_MEDITERRANEAN_REGION,\n \"West Bank and Gaza\": EASTERN_MEDITERRANEAN_REGION,\n \"Yemen\": EASTERN_MEDITERRANEAN_REGION,\n 
\"Tajikistan\": EASTERN_MEDITERRANEAN_REGION,\n #####################################################################\n \"US\": REGION_OF_THE_AMERICANS,\n \"Canada\": REGION_OF_THE_AMERICANS,\n \"Brazil\": REGION_OF_THE_AMERICANS,\n \"Mexico\": REGION_OF_THE_AMERICANS,\n \"Ecuador\": REGION_OF_THE_AMERICANS,\n \"Dominican Republic\": REGION_OF_THE_AMERICANS, # ????????????\n \"Chile\": REGION_OF_THE_AMERICANS, # ?????????????????\n \"Argentina\": REGION_OF_THE_AMERICANS, # ????????????\n \"Peru\": REGION_OF_THE_AMERICANS,\n \"Colombia\": REGION_OF_THE_AMERICANS,\n \"Costa Rica\": REGION_OF_THE_AMERICANS,\n \"Paraguay\": REGION_OF_THE_AMERICANS,\n \"Honduras\": REGION_OF_THE_AMERICANS,\n \"Jamaica\": REGION_OF_THE_AMERICANS,\n \"Cuba\": REGION_OF_THE_AMERICANS,\n \"Guyana\": REGION_OF_THE_AMERICANS,\n \"Panama\": REGION_OF_THE_AMERICANS,\n \"Bolivia\": REGION_OF_THE_AMERICANS,\n \"Venezuela\": REGION_OF_THE_AMERICANS,\n \"Guatemala\": REGION_OF_THE_AMERICANS,\n \"Saint Lucia\": REGION_OF_THE_AMERICANS,\n \"Saint Vincent and the Grenadines\": REGION_OF_THE_AMERICANS,\n \"Antigua and Barbuda\": REGION_OF_THE_AMERICANS,\n \"Uruguay\": REGION_OF_THE_AMERICANS,\n \"Trinidad and Tobago\": REGION_OF_THE_AMERICANS,\n \"Suriname\": REGION_OF_THE_AMERICANS,\n \"Bahamas\": REGION_OF_THE_AMERICANS,\n \"Barbados\": REGION_OF_THE_AMERICANS,\n \"Nicaragua\": REGION_OF_THE_AMERICANS,\n \"El Salvador\": REGION_OF_THE_AMERICANS,\n \"Haiti\": REGION_OF_THE_AMERICANS,\n \"Dominica\": REGION_OF_THE_AMERICANS,\n \"Grenada\": REGION_OF_THE_AMERICANS,\n \"Belize\": REGION_OF_THE_AMERICANS,\n \"Saint Kitts and Nevis\": REGION_OF_THE_AMERICANS,\n #####################################################################\n \"Algeria\": AFRICAN_REGION,\n \"Nigeria\": AFRICAN_REGION,\n \"Morocco\": AFRICAN_REGION,\n \"Senegal\": AFRICAN_REGION,\n \"Tunisia\": AFRICAN_REGION,\n \"South Africa\": AFRICAN_REGION,\n \"Togo\": AFRICAN_REGION,\n \"Cameroon\": AFRICAN_REGION,\n \"Congo (Kinshasa)\": 
AFRICAN_REGION,\n \"Cote d'Ivoire\": AFRICAN_REGION,\n \"Burkina Faso\": AFRICAN_REGION,\n \"Ghana\": AFRICAN_REGION,\n \"Namibia\": AFRICAN_REGION,\n \"Seychelles\": AFRICAN_REGION,\n \"Eswatini\": AFRICAN_REGION,\n \"Gabon\": AFRICAN_REGION,\n \"Mauritania\": AFRICAN_REGION,\n \"Rwanda\": AFRICAN_REGION,\n \"Sudan\": AFRICAN_REGION,\n \"Kenya\": AFRICAN_REGION,\n \"Guinea\": AFRICAN_REGION,\n \"Congo (Brazzaville)\": AFRICAN_REGION,\n \"Equatorial Guinea\": AFRICAN_REGION,\n \"Central African Republic\": AFRICAN_REGION,\n \"Ethiopia\": AFRICAN_REGION,\n \"Benin\": AFRICAN_REGION,\n \"Liberia\": AFRICAN_REGION,\n \"Somalia\": AFRICAN_REGION,\n \"Tanzania\": AFRICAN_REGION,\n \"Mauritius\": AFRICAN_REGION, # ???????????\n \"Zambia\": AFRICAN_REGION,\n \"Djibouti\": AFRICAN_REGION, # ??????????\n \"Chad\": AFRICAN_REGION,\n \"Zimbabwe\": AFRICAN_REGION,\n \"Niger\": AFRICAN_REGION,\n \"Madagascar\": AFRICAN_REGION,\n \"Cabo Verde\": AFRICAN_REGION,\n \"Angola\": AFRICAN_REGION,\n \"Eritrea\": AFRICAN_REGION,\n \"Uganda\": AFRICAN_REGION,\n \"Mozambique\": AFRICAN_REGION,\n \"Gambia\": AFRICAN_REGION,\n \"Libya\": AFRICAN_REGION,\n \"Guinea-Bissau\": AFRICAN_REGION,\n \"Mali\": AFRICAN_REGION,\n \"Botswana\": AFRICAN_REGION,\n \"Burundi\": AFRICAN_REGION,\n \"Sierra Leone\": AFRICAN_REGION,\n \"Sao Tome and Principe\": AFRICAN_REGION,\n \"Western Sahara\": AFRICAN_REGION,\n \"South Sudan\": AFRICAN_REGION,\n \"Malawi\": AFRICAN_REGION,\n \"Comoros\": AFRICAN_REGION,\n \"Lesotho\": AFRICAN_REGION,\n \"Comoros\": AFRICAN_REGION,\n #####################################################################\n \"Diamond Princess\": OTHER,\n \"MS Zaandam\": OTHER,\n}\n\nregion_names = {\n MAINLAND_CHINA: \"china\",\n WESTERN_PACIFIC_REGION: \"western_pacific_region\",\n EUROPEAN_REGION: \"european_region\",\n SOUTH_EAST_ASIA_REGION: \"south_east_asia_region\",\n EASTERN_MEDITERRANEAN_REGION: \"eastern_mediterranean_region\",\n REGION_OF_THE_AMERICANS: 
\"region_of_the_americans\",\n AFRICAN_REGION: \"african_region\",\n OTHER: \"other\",\n}\n\nREGION_COUNT = OTHER + 1\n\nif __name__ == \"__main__\":\n import csv\n regions_in_file = []\n with open(\"./corona/data/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_confirmed_global.csv\") as datafile:\n datafile_reader = csv.reader(datafile, delimiter=\",\", quotechar='\"')\n for row in datafile_reader:\n if not regions_in_file.__contains__(row[1]):\n regions_in_file.append(row[1])\n\n regions_in_regionmap = [key for key in region_map.keys()]\n for region in regions_in_file:\n if not regions_in_regionmap.__contains__(region):\n print(\"missing \" + region)\n else:\n regions_in_regionmap.remove(region)\n \n print(regions_in_regionmap)\n\n if False:\n for region in range(REGION_COUNT):\n print('<optgroup label=\"{'+ \"{ 'pages.corona.names.\" + region_names[region] + \"' | translate }\" + '}\">')\n for key in region_map.keys():\n if region_map[key] == region:\n escaped_key = key.replace(\" \", \"_\") \n print('<option value=\"' + escaped_key + '\" translate>pages.corona.names.' + escaped_key + \"</option>\")\n print('</optgroup>')\n\n if False:\n for key in region_map.keys():\n escaped_key = key.replace(\" \", \"_\") \n print('\"' + escaped_key + '\": \"' + key + '\",')\n" }, { "alpha_fraction": 0.41838350892066956, "alphanum_fraction": 0.42736396193504333, "avg_line_length": 29.532258987426758, "blob_id": "a8d3b13a458d4407ccde8f8407f6c2a3d6a941c9", "content_id": "5a8743a6765ab86cd0cc312b8889cacf21e49292", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1893, "license_type": "no_license", "max_line_length": 81, "num_lines": 62, "path": "/corona/fetch_tests.py", "repo_name": "tipfom/website-corona-data", "src_encoding": "UTF-8", "text": "country_map = {\n \"World\": \"global\",\n \"USA\": \"US\",\n \"S. 
Korea\": \"Korea, South\",\n \"UK\": \"United Kingdom\",\n \"TOTAL\": \"global\",\n \"Myanmar\": \"Burma\",\n}\n\n\ndef try_get_tests():\n url = \"https://www.worldometers.info/coronavirus/\"\n\n from urllib.request import Request, urlopen\n from datetime import datetime, timedelta\n import json\n\n htmlcontent = (\n urlopen(Request(url, headers={\"User-Agent\": \"Mozilla/5.0\"}))\n .read()\n .decode(\"utf8\")\n )\n table = htmlcontent.split('<table id=\"main_table_countries_today\"')[1].split(\n \"</table>\"\n )[0]\n tablebody = table.split(\"<tbody>\")[1].split(\"</tbody>\")[0]\n test_data = {}\n for row in tablebody.split(\"</tr>\"):\n columns = row.split(\"</td>\")\n if len(columns) > 12:\n try:\n name = columns[1].split(\"<a \")[1]\n name = name.split(\">\")[1]\n name = name.replace(\"</a\", \"\")\n country = name\n if country_map.__contains__(country):\n country = country_map[country]\n country = country.replace(\" \", \"_\")\n cases = columns[2].split(\">\")[1]\n tests = columns[12].split(\">\")[1]\n if tests != \"\":\n test_data.update(\n {\n country: {\n \"total\": tests.replace(\",\", \"\"),\n \"original_name\": name,\n \"confirmed_cases\": cases.replace(\",\", \"\"),\n \"updated\": datetime.now().isoformat(),\n }\n }\n )\n except Exception as e:\n print(e)\n pass\n\n return test_data\n\n\nif __name__ == \"__main__\":\n ret = try_get_tests()\n print(ret)\n pass\n" }, { "alpha_fraction": 0.5207148790359497, "alphanum_fraction": 0.5393988490104675, "avg_line_length": 34.17142868041992, "blob_id": "4190549191cad6197cf7c631393d02862759b472", "content_id": "d00d2ab2416d68501c6d7fd23dba24688b2468f4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1231, "license_type": "no_license", "max_line_length": 104, "num_lines": 35, "path": "/corona/fetch_bno.py", "repo_name": "tipfom/website-corona-data", "src_encoding": "UTF-8", "text": "country_map = {\n \"United States\": \"US\",\n \"Czech Republic\": 
\"Czechia\",\n \"TOTAL\": \"global\"\n}\n\ndef try_get_bno_seriouscases():\n import urllib.request\n from urllib.request import Request, urlopen\n\n try:\n url = \"https://bnonews.com/index.php/2020/03/the-latest-coronavirus-cases/\"\n mybytes = urlopen(Request(url, headers={'User-Agent': 'Mozilla/5.0'})).read()\n htmlcontent = mybytes.decode(\"utf8\")\n data_url = htmlcontent.split(\"</iframe>\")[0].split(\"<iframe\")[1].split(\"src=\")[1].split(\"\\\"\")[1]\n\n mybytes = urlopen(Request(data_url)).read()\n htmlcontent = mybytes.decode(\"utf8\")\n\n table = htmlcontent.split(\"<tbody>\")[1].split(\"</tbody>\")[0]\n rows = table.split(\"<tr style='height:39px;'>\")\n serious = {}\n for row in rows:\n columns = row.split(\"</td>\")\n try:\n country = columns[0].split(\"th>\")[1].split(\">\")[1]\n if country_map.__contains__(country):\n country = country_map[country]\n cases = columns[6].split(\">\")[1]\n serious.update({country: cases})\n except:\n pass\n return serious\n except:\n return None\n" }, { "alpha_fraction": 0.557369589805603, "alphanum_fraction": 0.560997724533081, "avg_line_length": 34, "blob_id": "493353811968cc7d6a34875c3cf11e00978ee756", "content_id": "f1c825689bab60f5ce354e3d356218fb3f597f42", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2205, "license_type": "no_license", "max_line_length": 81, "num_lines": 63, "path": "/corona/load.py", "repo_name": "tipfom/website-corona-data", "src_encoding": "UTF-8", "text": "import corona.regions \nimport csv\nimport numpy as np\nfrom importlib import reload\n\nclass Data:\n def __init__(self):\n self.by_region = []\n for i in range(corona.regions.REGION_COUNT):\n self.by_region.append(np.array([]))\n\n self.total = np.array([])\n self.by_country = {}\n\n def to_json(self):\n by_region_dict = {}\n for i in range(len(self.by_region)):\n by_region_dict.update({region_names[i]: self.by_region[i].tolist()})\n return {\"by_region\": by_region_dict, 
\"total\": self.total.tolist()}\n\ndef get_data_from_file(filename):\n corona.regions = reload(corona.regions)\n\n data_raw = []\n with open(filename) as datafile:\n datafile_reader = csv.reader(datafile, delimiter=\",\", quotechar='\"')\n for row in datafile_reader:\n data_raw.append(row)\n\n data = Data()\n for i in range(4, len(data_raw[0])):\n column_by_region = np.zeros(corona.regions.REGION_COUNT)\n column_by_country = {}\n for j in range(1, len(data_raw)):\n country = data_raw[j][1]\n if not column_by_country.__contains__(country):\n column_by_country.update({country: 0})\n try:\n column_by_country[country] += int(float(data_raw[j][i]))\n except Exception as e:\n column_by_country[country] += 0\n\n if corona.regions.region_map.__contains__(country):\n region = corona.regions.region_map[country]\n if data_raw[j][i] != \"\":\n column_by_region[region] += int(float(data_raw[j][i]))\n elif i == 4:\n print(\"could not find region for \" + country)\n\n for k, v in column_by_country.items():\n if not data.by_country.__contains__(k):\n data.by_country.update({k:np.array([])})\n data.by_country[k] = np.append(data.by_country[k], v)\n\n column_total = 0\n for i in range(corona.regions.REGION_COUNT):\n data.by_region[i] = np.append(data.by_region[i], column_by_region[i])\n\n column_total += column_by_region[i]\n\n data.total = np.append(data.total, column_total)\n\n return data\n" }, { "alpha_fraction": 0.4728572964668274, "alphanum_fraction": 0.4789001941680908, "avg_line_length": 30.028125762939453, "blob_id": "f106b3828dadbc4617b6f603596ad30c088992d5", "content_id": "0c5a017d91d994b5d4b4c0df952d171cd4844e88", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9929, "license_type": "no_license", "max_line_length": 97, "num_lines": 320, "path": "/corona/data.py", "repo_name": "tipfom/website-corona-data", "src_encoding": "UTF-8", "text": "import json\nimport math\nimport sched\nimport threading\nimport time\nfrom 
datetime import date, datetime\nfrom importlib import reload\n\nimport git\nimport numpy as np\nimport scipy.optimize\n\nimport corona.fetch_bno\nimport corona.fetch_tests\nfrom corona.load import get_data_from_file\nfrom corona.regions import *\n\n\ndef exp_fit_function(x, a, b):\n return a * np.exp(b * x)\n\n\ndef exp_fit_jacobian(x, a, b):\n return np.transpose([np.exp(b * x), a * x * np.exp(b * x)])\n\n\ndef sig_fit_function(x, a, b, c):\n return a / (1 + np.exp(-b * (x - c)))\n\n\ndef sig_fit_jacobian(x, a, b, c):\n return np.transpose(\n [\n 1 / (1 + np.exp(-b * (x - c))),\n -a / ((1 + np.exp(-b * (x - c))) ** 2) * (c - x) * np.exp(-b * (x - c)),\n -a / ((1 + np.exp(-b * (x - c))) ** 2) * b * np.exp(-b * (x - c)),\n ]\n )\n\n\ndef generate_fits(x, y, start, p0, function, jacobian):\n result = []\n for i in range(start, len(x) + 1):\n try:\n popt, pcov = scipy.optimize.curve_fit(\n function, x[:i], y[:i], p0, jac=jacobian, maxfev=10000\n )\n perr = np.sqrt(np.diag(pcov))\n done = False\n for k in range(len(popt)):\n if math.isnan(popt[k]) or math.isinf(popt[k]):\n result.append({\"param\": \"undefined\", \"err\": \"undefined\"})\n done = True\n break\n if done:\n break\n for k in range(len(perr)):\n if math.isnan(perr[k]) or math.isinf(perr[k]):\n result.append({\"param\": \"undefined\", \"err\": \"undefined\"})\n done = True\n break\n if done:\n break\n result.append({\"param\": popt.tolist(), \"err\": perr.tolist()})\n except:\n result.append({\"param\": \"undefined\", \"err\": \"undefined\"})\n return result\n\n\nsubmodule_path = \"./data/\"\ndata_submodule_path = submodule_path + \"csse_covid_19_data/csse_covid_19_time_series/\"\ndatafile_confirmed = data_submodule_path + \"time_series_covid19_confirmed_global.csv\"\ndatafile_deaths = data_submodule_path + \"time_series_covid19_deaths_global.csv\"\ndatafile_recovered = data_submodule_path + \"time_series_covid19_recovered_global.csv\"\n\nlast_tests_data = None\nlast_tests_data_refresh = 
date.today().isoformat()\nlast_serious_data = None\nlast_serious_data_refresh = date.today().isoformat()\n\n\ndef prepare_data():\n global last_tests_data\n global last_tests_data_refresh\n global last_serious_data\n global last_serious_data_refresh\n\n recovered = get_data_from_file(datafile_recovered)\n confirmed = get_data_from_file(datafile_confirmed)\n dead = get_data_from_file(datafile_deaths)\n\n entries = len(confirmed.total)\n\n recovered_china = recovered.by_region[MAINLAND_CHINA]\n dead_china = dead.by_region[MAINLAND_CHINA]\n confirmed_china = confirmed.by_region[MAINLAND_CHINA]\n\n recovered_row = confirmed.total[: len(recovered_china)] - recovered_china\n dead_row = dead.total - dead_china\n confirmed_row = confirmed.total - confirmed_china\n\n fit_start = 16\n fit_data_x = np.arange(0, entries)\n\n corona.fetch_bno = reload(corona.fetch_bno)\n serious_data = {}\n try:\n serious_data = corona.fetch_bno.try_get_bno_seriouscases()\n except:\n pass\n if len(serious_data) == 0:\n print(\"serious cases pull failed\")\n serious_data = last_serious_data\n else:\n last_serious_data = serious_data\n last_serious_data_refresh = date.today().isoformat()\n\n corona.fetch_tests = reload(corona.fetch_tests)\n tests_data = {}\n try:\n tests_data = corona.fetch_tests.try_get_tests()\n except:\n pass\n if len(tests_data) == 0:\n print(\"tests pull failed\")\n tests_data = last_tests_data\n else:\n last_tests_data = tests_data\n last_tests_data_refresh = date.today().isoformat()\n\n temp_overview_dataset = {}\n temp_detail_datasets = {}\n\n for i in range(REGION_COUNT):\n temp_detail_datasets.update(\n {\n region_names[i]: {\n \"exp\": generate_fits(\n fit_data_x,\n confirmed.by_region[i],\n fit_start,\n [confirmed.by_region[i][0], 0.1],\n exp_fit_function,\n exp_fit_jacobian,\n ),\n \"sig\": generate_fits(\n fit_data_x,\n confirmed.by_region[i],\n fit_start,\n [np.max(confirmed.by_region[i]), 0.2, len(confirmed.by_region[i]) / 2],\n sig_fit_function,\n 
sig_fit_jacobian,\n ),\n }\n }\n )\n temp_overview_dataset.update(\n {\n region_names[i]: {\n \"confirmed\": confirmed.by_region[i].tolist(),\n \"dead\": dead.by_region[i].tolist(),\n \"recovered\": recovered.by_region[i].tolist(),\n }\n }\n )\n\n temp_detail_datasets.update(\n {\n \"row\": {\n \"exp\": generate_fits(\n fit_data_x,\n confirmed_row,\n fit_start,\n [confirmed_row[0], 0.1],\n exp_fit_function,\n exp_fit_jacobian,\n ),\n \"sig\": generate_fits(\n fit_data_x,\n confirmed_row,\n fit_start,\n [np.max(confirmed_row), 0.2, len(confirmed_row) / 2],\n sig_fit_function,\n sig_fit_jacobian,\n ),\n }\n }\n )\n temp_overview_dataset.update(\n {\n \"row\": {\n \"confirmed\": confirmed_row.tolist(),\n \"dead\": dead_row.tolist(),\n \"recovered\": recovered_row.tolist(),\n }\n }\n )\n\n for n in confirmed.by_country.keys():\n temp_detail_datasets.update(\n {\n n.replace(\" \", \"_\"): {\n \"exp\": generate_fits(\n fit_data_x,\n confirmed.by_country[n],\n fit_start,\n [confirmed.by_country[n][0], 0.1],\n exp_fit_function,\n exp_fit_jacobian,\n ),\n \"sig\": generate_fits(\n fit_data_x,\n confirmed.by_country[n],\n fit_start,\n [np.max(confirmed.by_country[n]), 0.2, len(confirmed.by_country[n]) / 2],\n sig_fit_function,\n sig_fit_jacobian,\n ),\n }\n }\n )\n temp_overview_dataset.update(\n {\n n.replace(\" \", \"_\"): {\n \"confirmed\": confirmed.by_country[n].tolist(),\n \"dead\": dead.by_country[n].tolist(),\n \"recovered\": recovered.by_country[n].tolist(),\n \"tests\": tests_data[n]\n if tests_data.__contains__(n)\n else \"undefined\",\n \"serious\": {\n \"value\": serious_data[n]\n if serious_data.__contains__(n)\n else \"undefined\",\n \"updated\": last_serious_data_refresh,\n },\n }\n }\n )\n\n temp_detail_datasets.update(\n {\n \"global\": {\n \"exp\": generate_fits(\n fit_data_x,\n confirmed.total,\n fit_start,\n [confirmed.total[0], 0.1],\n exp_fit_function,\n exp_fit_jacobian,\n ),\n \"sig\": generate_fits(\n fit_data_x,\n confirmed.total,\n fit_start,\n 
[np.max(confirmed.total), 0.2, len(confirmed.total) / 2],\n sig_fit_function,\n sig_fit_jacobian,\n ),\n },\n }\n )\n temp_overview_dataset.update(\n {\n \"global\": {\n \"confirmed\": confirmed.total.tolist(),\n \"dead\": dead.total.tolist(),\n \"recovered\": recovered.total.tolist(),\n \"tests\": tests_data[\"global\"]\n if tests_data.__contains__(\"global\")\n else \"undefined\",\n \"serious\": {\n \"value\": serious_data[\"global\"]\n if serious_data.__contains__(\"global\")\n else \"undefined\",\n \"updated\": last_serious_data_refresh,\n },\n }\n }\n )\n\n temp_details_json = {}\n for k in temp_detail_datasets.keys():\n temp_details_json.update({k: json.dumps(temp_detail_datasets[k]).encode()})\n\n return (json.dumps(temp_overview_dataset).encode(), temp_details_json)\n\n\noverview_json, details_json = prepare_data()\n\n\ndef get_detail_dataset(country):\n return details_json[country]\n\n\ndef get_overview_dataset():\n return overview_json\n\n\ndef update_data():\n global overview_json\n global details_json\n\n scheduler.enter(60 * 60 * 4, 1, update_data)\n\n print(\"Updating Corona Data\")\n repo = git.cmd.Git(\"./data\")\n repo.fetch(\"--all\")\n repo.reset(\"--hard\", \"origin/master\")\n repo.pull(\"origin\", \"master\")\n repo.checkout(\"master\")\n print(\"Git Pull completed\")\n\n overview_json, details_json = prepare_data()\n\nscheduler = sched.scheduler(time.time, time.sleep)\n\nupdate_data()\nt = threading.Thread(target=scheduler.run)\nt.start()\n" }, { "alpha_fraction": 0.7277777791023254, "alphanum_fraction": 0.7444444298744202, "avg_line_length": 15.454545021057129, "blob_id": "adaebb7023726535ed18dff2ae25e0e75b106083", "content_id": "3999cddbe3caffc728f59b53751028d496ecd965", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Dockerfile", "length_bytes": 180, "license_type": "no_license", "max_line_length": 35, "num_lines": 11, "path": "/Dockerfile", "repo_name": "tipfom/website-corona-data", "src_encoding": 
"UTF-8", "text": "FROM python:3\n\nWORKDIR /server/\n\nRUN python -m pip install scipy\nRUN python -m pip install numpy\nRUN python -m pip install GitPython\n\nEXPOSE 80\n\nCMD [\"python\", \"/server/server.py\"]" } ]
7
Gaoyifei1011/AmapProgram
https://github.com/Gaoyifei1011/AmapProgram
88a5ff5b2d337c94b3db6dff7f51c74d2142bf21
d45a27abf9f508d922f37abc34f00da6d0aab4a0
169b5030807c38e6b55fd8fa767fa1836737e899
refs/heads/main
2023-04-30T13:53:28.303296
2021-05-20T02:31:08
2021-05-20T02:31:08
368,448,437
3
1
null
null
null
null
null
[ { "alpha_fraction": 0.5244251489639282, "alphanum_fraction": 0.5317136645317078, "avg_line_length": 34.015625, "blob_id": "a4b9e61f0a161ddbfa212b9350f9f7d68b01e636", "content_id": "3ea936f1c529133158c572d0953aeb05beb3b52a", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 13213, "license_type": "permissive", "max_line_length": 118, "num_lines": 320, "path": "/FundamentalFunctions/TrafficInformationExecuteOperation.py", "repo_name": "Gaoyifei1011/AmapProgram", "src_encoding": "UTF-8", "text": "import inspect\r\nimport os\r\nimport time\r\n\r\nimport xlrd # 读取excel文件\r\nimport xlwt # 写入excel文件\r\nfrom xlutils.copy import copy\r\nfrom xlwt import XFStyle\r\n\r\nfrom logrecord.WriteLog import WriteLog\r\n\r\n\r\nclass TrafficInformationWriteOperation:\r\n \"\"\"\r\n Class:交通信息执行操作\r\n \"\"\"\r\n\r\n def __init__(self, RoadNameList):\r\n self.file_path = None\r\n self.font_style = None\r\n self.name = None\r\n self.height = None\r\n self.bold = None\r\n self.path = None\r\n self.sheet_name = None\r\n self.value = None\r\n self.cityName = None\r\n self.resultInformation = None\r\n self.path = None\r\n self.RoadNameList = RoadNameList\r\n self.sheet_init()\r\n\r\n # 写入日志\r\n writeLog = WriteLog()\r\n class_name = self.__class__.__name__\r\n log_filename = writeLog.create_filename(class_name=class_name)\r\n writeLog.write_to_log(file_name=log_filename, log_level=1, context='Class name:{0} start'.format(class_name))\r\n\r\n def sheet_init(self) -> None:\r\n \"\"\"\r\n 函数:单元表格初始化\r\n \"\"\"\r\n\r\n # 写入日志\r\n writeLog = WriteLog()\r\n class_name = self.__class__.__name__\r\n function_name = inspect.stack()[0][3]\r\n log_filename = writeLog.create_filename(class_name=class_name)\r\n\r\n # 文件保存路径\r\n local_appdata_directory = os.getenv('LOCALAPPDATA')\r\n\r\n # 根目录\r\n temp_directory = '\\\\'.join([local_appdata_directory, 'AmapProgram'])\r\n # 目录不存在,创建\r\n if not os.path.exists(temp_directory):\r\n # only for 
debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=6,\r\n context='Function name:{0} - directory successfully created'.format(function_name)\r\n )\r\n os.mkdir(temp_directory)\r\n\r\n # 数据目录\r\n data_directory = '\\\\'.join([temp_directory, 'Data'])\r\n # 目录不存在,创建\r\n if not os.path.exists(data_directory):\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=6,\r\n context='Function name:{0} - directory successfully created'.format(function_name)\r\n )\r\n os.mkdir(data_directory)\r\n\r\n # 文件绝对路径\r\n list_filename = [data_directory, 'TrafficInformation.XLS']\r\n self.file_path = '\\\\'.join(list_filename)\r\n\r\n # 字体样式\r\n self.font_style = self.set_style('微软雅黑 Light', 12 * 20, False)\r\n\r\n # 文件不存在,创建并初始化excel表格文件\r\n if not os.path.exists(self.file_path):\r\n book = xlwt.Workbook(encoding='utf-8')\r\n # cell_overwrite_ok=True表示一个单元格可以被多次覆盖写入\r\n # 标题行\r\n row_head = [['日期', '时间', '状态码', '响应信息', '道路名称', '路况语义化描述', '路况整体评价', '路况整体评价的语义化描述',\r\n '路段拥堵语义化描述', '路段拥堵评价', '平均通行速度', '拥堵距离', '较10分钟前拥堵趋势'\r\n ]]\r\n # 初始化单元格\r\n for city in self.RoadNameList:\r\n book.add_sheet(city)\r\n book.save(self.file_path)\r\n time.sleep(3)\r\n # 初始化单元格每一个标题\r\n for city in self.RoadNameList:\r\n self.write_excel_xls_append(self.file_path, city, row_head)\r\n\r\n # 设置字体样式\r\n def set_style(self, name: str,\r\n height: int,\r\n bold: bool = False\r\n ) -> XFStyle:\r\n \"\"\"\r\n 函数:设置字体的表格样式\r\n Args:\r\n name: 字体的名称\r\n height: 字体的大小\r\n bold: 表格的字体是否为粗体\r\n Returns: 表格的字体格式对象\r\n \"\"\"\r\n\r\n self.name = name\r\n self.height = height\r\n self.bold = bold\r\n\r\n style = xlwt.XFStyle()\r\n font = xlwt.Font()\r\n font.name = self.name\r\n font.bold = self.bold\r\n font.color_index = 4\r\n font.height = self.height\r\n\r\n global rows_num\r\n\r\n style.font = font\r\n return style\r\n\r\n def write_excel_xls(self, path: str,\r\n sheet_name: str,\r\n value: list\r\n ) -> None:\r\n \"\"\"\r\n 函数:向excel中写入数据\r\n Args:\r\n 
path: 文件路径\r\n sheet_name: 表格名称\r\n value: 要写入的内容\r\n \"\"\"\r\n\r\n self.path = path\r\n self.sheet_name = sheet_name\r\n self.value = value\r\n\r\n index = len(self.value) # 获取需要写入数据的行数\r\n workbook = xlwt.Workbook() # 新建一个工作簿\r\n sheet = workbook.add_sheet(self.sheet_name, cell_overwrite_ok=True) # 在工作簿中新建一个表格\r\n for i in range(0, index):\r\n for j in range(0, len(self.value[i])):\r\n sheet.write(i, j, self.value[i][j]) # 像表格中写入数据(对应的行和列)\r\n workbook.save(self.path) # 保存工作簿\r\n\r\n def write_excel_xls_append(self, path: str,\r\n sheet_name: str,\r\n value: list\r\n ) -> None:\r\n \"\"\"\r\n 函数:向excel中追加写入数据\r\n Args:\r\n path: 文件路径\r\n sheet_name: 表格名称\r\n value: 要写入的内容\r\n \"\"\"\r\n self.path = path\r\n self.sheet_name = sheet_name\r\n self.value = value\r\n\r\n index = len(self.value) # 获取需要写入数据的行数\r\n workbook = xlrd.open_workbook(self.path) # 打开工作簿\r\n # sheets = workbook.sheet_names() # 获取工作簿中的所有表格\r\n worksheet = workbook.sheet_by_name(self.sheet_name) # 获取工作簿中所有表格中的的sheet_name表格\r\n rows_old = worksheet.nrows # 获取表格中已存在的数据的行数\r\n new_workbook = copy(workbook) # 将xlrd对象拷贝转化为xlwt对象\r\n new_worksheet = new_workbook.get_sheet(self.sheet_name) # 获取转化后工作簿中的sheet_name表格\r\n for i in range(0, index):\r\n for j in range(0, len(self.value[i])):\r\n new_worksheet.write(i + rows_old, j, self.value[i][j]) # 追加写入数据,注意是从i+rows_old行开始写入\r\n new_workbook.save(self.path) # 保存工作簿\r\n\r\n def write_to_excel(self, cityName: str,\r\n resultInformation: dict\r\n ) -> None:\r\n \"\"\"\r\n 将获取到的数据写入excel文件中\r\n Args:\r\n cityName: 城市名称,对应sheet_name\r\n resultInformation: 获取到的数据\r\n \"\"\"\r\n\r\n self.cityName = cityName\r\n self.resultInformation = resultInformation\r\n\r\n # 写入表格的内容\r\n writeList = []\r\n\r\n # 写入数据\r\n # 1.获取当前系统的日期\r\n local_date = time.strftime(\"%Y-%m-%d\", time.localtime())\r\n writeList.append(local_date)\r\n # 2.获取当前系统的时间\r\n local_time = time.strftime(\"%H:%M:%S\", time.localtime())\r\n writeList.append(local_time)\r\n # 3.状态码\r\n if 'status' in 
resultInformation:\r\n status = resultInformation['status']\r\n writeList.append(status)\r\n # 4.响应信息\r\n if 'message' in resultInformation:\r\n message = resultInformation['message']\r\n writeList.append(message)\r\n # 5.道路名称\r\n if 'road_traffic' in resultInformation:\r\n road_traffic = resultInformation['road_traffic'][0]\r\n if 'road_name' in road_traffic:\r\n road_name = road_traffic['road_name']\r\n writeList.append(road_name)\r\n # 6.路况语义化描述\r\n if 'description' in resultInformation:\r\n description = resultInformation['description']\r\n writeList.append(description)\r\n if 'evaluation' in resultInformation:\r\n evaluation = resultInformation['evaluation']\r\n # 7.路况整体评价\r\n if 'status' in evaluation:\r\n status = evaluation['status']\r\n writeList.append(status)\r\n # 8.路况整体评价的语义化描述\r\n if 'status_desc' in evaluation:\r\n status_desc = evaluation['status_desc']\r\n writeList.append(status_desc)\r\n if 'road_traffic' in resultInformation:\r\n road_traffic = resultInformation['road_traffic'][0]\r\n if 'congestion_sections' in road_traffic:\r\n congestion_sections = road_traffic['congestion_sections'][0]\r\n # 9.路段拥堵语义化描述\r\n if 'section_desc' in congestion_sections:\r\n section_desc = congestion_sections['section_desc']\r\n writeList.append(section_desc)\r\n # 10.路段拥堵评价\r\n if 'status' in congestion_sections:\r\n status = congestion_sections['status']\r\n writeList.append(status)\r\n # 11.平均通行速度\r\n if 'speed' in congestion_sections:\r\n speed = congestion_sections['speed']\r\n writeList.append(speed)\r\n # 12.拥堵距离\r\n if 'congestion_distance' in congestion_sections:\r\n congestion_distance = congestion_sections['congestion_distance']\r\n writeList.append(congestion_distance)\r\n # 13.较10分钟前拥堵趋势\r\n if 'congestion_trend' in congestion_sections:\r\n congestion_trend = congestion_sections['congestion_trend']\r\n writeList.append(congestion_trend)\r\n\r\n self.write_excel_xls_append(self.file_path, cityName, [writeList])\r\n\r\n\r\nclass 
TrafficInformationReadOperation:\r\n def __init__(self):\r\n self.path = None\r\n\r\n def read_excel_xls(self, sheet_name: str\r\n ) -> dict:\r\n \"\"\"\r\n 函数:读取excel文件内容\r\n Args:\r\n sheet_name: 表格名称\r\n Returns:\r\n 返回获取的状态信息\r\n \"\"\"\r\n\r\n # 文件保存路径\r\n local_appdata_directory = os.getenv('LOCALAPPDATA')\r\n # 根目录\r\n temp_directory = '\\\\'.join([local_appdata_directory, 'AmapProgram'])\r\n # 数据目录\r\n data_directory = '\\\\'.join([temp_directory, 'Data'])\r\n # 文件绝对路径\r\n list_filename = [data_directory, 'TrafficInformation.XLS']\r\n self.path = '\\\\'.join(list_filename)\r\n\r\n # 输出结果\r\n resultDict = {}\r\n\r\n workbook = xlrd.open_workbook(self.path) # 打开工作簿\r\n # sheets = workbook.sheet_names() # 获取工作簿中的所有表格\r\n worksheet = workbook.sheet_by_name(sheet_name) # 获取工作簿中所有表格中的的sheet_name表格\r\n\r\n wholeDataLength = 0\r\n effectiveDataLength = 0\r\n # 统计获取的数据个数\r\n for i in range(1, worksheet.nrows):\r\n if worksheet.cell_value(i, 6):\r\n wholeDataLength = wholeDataLength + 1\r\n\r\n # 统计有效值数据个数\r\n for i in range(1, worksheet.nrows):\r\n if worksheet.cell_value(i, 9) != '':\r\n effectiveDataLength = effectiveDataLength + 1\r\n\r\n # 道路通行状况语言描述\r\n percent = float(effectiveDataLength / wholeDataLength * 100)\r\n\r\n resultDict.update(effectiveDataLength=effectiveDataLength)\r\n resultDict.update(wholeDataLength=wholeDataLength)\r\n resultDict.update(percent=percent)\r\n\r\n if 0 <= percent < 10:\r\n resultDict.update(percentContext=\"该城市道路拥堵占比{0:.2f}%,基本上没有拥堵路段,请继续保持\".format(percent))\r\n elif 10 <= percent < 30:\r\n resultDict.update(percentContext=\"该城市道路拥堵占比{0:.2f}%,有较少路段经常发生拥堵,请继续保持\".format(percent))\r\n elif 30 <= percent < 50:\r\n resultDict.update(percentContext=\"该城市道路拥堵占比{0:.2f}%,有部分路段经常发生拥堵,建议您绕行这部分路段\".format(percent))\r\n elif 50 <= percent < 75:\r\n resultDict.update(percentContext=\"该城市道路拥堵占比{0:.2f}%,有大部分路段经常发生拥堵,建议您合理安排出行计划,避开拥堵路段\".format(percent))\r\n elif 75 <= percent < 100:\r\n 
resultDict.update(percentContext=\"该城市道路拥堵占比{0:.2f}%,经常发生拥堵,建议您采取其他方式或乘坐公共交通出行,以免耽误您的出行计划\".format(percent))\r\n\r\n return resultDict\r\n" }, { "alpha_fraction": 0.34293216466903687, "alphanum_fraction": 0.34941136837005615, "avg_line_length": 54.357994079589844, "blob_id": "333df5ca09ad44f645c51aeea8418ad91d102264", "content_id": "600e6c916069a7ae0d4cdce44871fd9570d086d4", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 25250, "license_type": "permissive", "max_line_length": 119, "num_lines": 419, "path": "/AmapFunctions/InputPrompt.py", "repo_name": "Gaoyifei1011/AmapProgram", "src_encoding": "UTF-8", "text": "# -*- coding:utf-8 -*-\r\n# 导入的库\r\nimport inspect\r\nimport json\r\nimport time\r\n\r\nimport requests\r\n\r\nfrom SelfExpection.CustomExpection import CustomExpection\r\nfrom SelfExpection.OfficialException import OfficialException\r\nfrom logrecord.WriteLog import WriteLog\r\n\r\n\r\nclass InputPrompt:\r\n \"\"\"\r\n Class:输入提示\r\n 输入提示是一类简单的HTTP接口,提供根据用户输入的关键词查询返回建议列表。\r\n \"\"\"\r\n\r\n def __init__(self):\r\n self.city = None\r\n self.cityLimit = None\r\n self.datatype = None\r\n self.input_type = None\r\n self.json_decode = None\r\n self.keywords = None\r\n self.location = None\r\n self.output = None\r\n\r\n writeLog = WriteLog()\r\n class_name = self.__class__.__name__\r\n log_filename = writeLog.create_filename(class_name=class_name)\r\n writeLog.write_to_log(file_name=log_filename, log_level=1, context='Class name:{0} start'.format(class_name))\r\n\r\n # 获取高德地图数据API的钥匙\r\n APIkey = '<请自己输入自己申请的API Key>'\r\n\r\n def get_input_prompt(self, keywords: str,\r\n **kwargs\r\n ) -> dict:\r\n \"\"\"\r\n 函数:提供根据用户输入的关键词查询返回建议列表。\r\n Args:\r\n keywords:查询关键词,必填。\r\n kwargs:\r\n input_type:POI分类,可选。服务可支持传入多个分类,多个类型剑用“|”分隔。可选值:POI分类名称、分类代码。此处强烈建议使用分类代码,否则可能会得到不符合预期的结果\r\n location:坐标,可选。格式:“X,Y”(经度,纬度),不可以包含空格。建议使用location参数,可在此location附近优先返回搜索关键词信息。在请求参数city不为空时生效\r\n 
city:搜索城市,可选,默认在全国范围内搜索。可选值:citycode、adcode,不支持县级市。如:010/110000\r\n 填入此参数后,会尽量优先返回此城市数据,但是不一定仅局限此城市结果,若仅需要某个城市数据请调用cityLimit参数。如:在深圳市搜天安门,返回北京天安门结果。\r\n cityLimit:仅返回指定城市数据,可选,默认false。可选值:true/false\r\n datatype:返回的数据类型,可选,默认all。多种数据类型用“|”分隔,可选值:all-返回所有数据类型、poi-返回POI数据类型、bus-返回公交站点数据类型、busLine-返回公交线路数据类型\r\n output:返回数据格式类型,可选,默认JSON格式。可选值:JSON,XML\r\n Returns:返回获得的json格式数据或错误信息\r\n \"\"\"\r\n\r\n self.keywords = keywords\r\n\r\n if 'city' in kwargs:\r\n self.city = kwargs['city']\r\n if 'cityLimit' in kwargs:\r\n self.cityLimit = kwargs['cityLimit']\r\n if 'datatype' in kwargs:\r\n self.datatype = kwargs['datatype']\r\n if 'input_type' in kwargs:\r\n self.input_type = kwargs['input_type']\r\n if 'location' in kwargs:\r\n self.location = kwargs['location']\r\n if 'output' in kwargs:\r\n self.output = kwargs['output']\r\n\r\n # 写入日志\r\n writeLog = WriteLog()\r\n class_name = self.__class__.__name__\r\n function_name = inspect.stack()[0][3]\r\n log_filename = writeLog.create_filename(class_name=class_name)\r\n\r\n # 传入参数\r\n parameters = {'key': InputPrompt.APIkey,\r\n 'keywords': self.keywords\r\n }\r\n\r\n if self.city is not None:\r\n parameters.update(city=self.city)\r\n if self.cityLimit is not None:\r\n parameters.update(cityLimit=self.cityLimit)\r\n if self.datatype is not None:\r\n parameters.update(datatype=self.datatype)\r\n if self.input_type is not None:\r\n parameters.update(type=self.input_type)\r\n if self.location is not None:\r\n parameters.update(location=self.location)\r\n if self.output is not None:\r\n parameters.update(output=self.output)\r\n\r\n # 获取数据\r\n try:\r\n request_information = requests.get(\"https://restapi.amap.com/v3/assistant/inputtips?parameters\",\r\n params=parameters)\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=1,\r\n context='Function name:{0} - request_information:{1}'.format(function_name,\r\n request_information)\r\n )\r\n request_information.close() # 关闭访问\r\n 
request_information.raise_for_status() # 如果响应状态码不是 200,就主动抛出异常\r\n # 返回格式化后的JSON数据\r\n json_decode = json.loads(request_information.text)\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=6,\r\n context='Function name:{0} - Input prompt data successful get.'.format(\r\n function_name)\r\n )\r\n return json_decode\r\n\r\n except requests.exceptions.ConnectionError as e:\r\n time.sleep(1)\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=5,\r\n context='Function name:{0} - {1} has occured.'.format(function_name,\r\n e.__class__.__name__)\r\n )\r\n # 异常信息\r\n error_connection = 'ConnectionError -- please wait 3 seconds'\r\n error_connection_dict = {'status': '2',\r\n 'info': 'requests.exceptions.ConnectionError',\r\n 'detail_information': requests.exceptions.ConnectionError,\r\n 'error_prompt': error_connection\r\n }\r\n return error_connection_dict\r\n\r\n except requests.exceptions.ChunkedEncodingError as e:\r\n time.sleep(1)\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=5,\r\n context='Function name:{0} - {1} has occured.'.format(function_name,\r\n e.__class__.__name__\r\n )\r\n )\r\n # 异常信息\r\n error_chuck_encoding = 'ChunkedEncodingError -- please wait 3 seconds'\r\n error_chuck_encoding_dict = {'status': '2',\r\n 'info': 'HTTPError',\r\n 'detail_information': requests.exceptions.ChunkedEncodingError,\r\n 'error_prompt': error_chuck_encoding\r\n }\r\n return error_chuck_encoding_dict\r\n\r\n except Exception as e:\r\n time.sleep(1)\r\n error_information = 'Unfortunately -- An Unknown Error Happened, Please wait 3 seconds'\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=5,\r\n context='Function name:{0} - {1} has occured.'.format(function_name,\r\n e.__class__.__name__)\r\n )\r\n # 异常信息\r\n error_information_dict = {'status': '2',\r\n 'info': 'HTTPError',\r\n 'detail_information': 
requests.exceptions.ChunkedEncodingError,\r\n 'error_prompt': error_information\r\n }\r\n return error_information_dict\r\n\r\n def parse_input_prompt(self, json_decode: dict,\r\n datatype: str\r\n ) -> None:\r\n \"\"\"\r\n 函数:解析提供根据用户输入的关键词得到的返回建议列表。\r\n Args:\r\n json_decode:get_input_prompt()方法从网络中获取的数据\r\n datatype:获取的数据类型\r\n \"\"\"\r\n\r\n # TODO:未来版本升级为查询框输入的提示预备词\r\n self.datatype = datatype\r\n self.json_decode = json_decode\r\n\r\n # 写入日志\r\n writeLog = WriteLog()\r\n class_name = self.__class__.__name__\r\n function_name = inspect.stack()[0][3]\r\n log_filename = writeLog.create_filename(class_name=class_name)\r\n\r\n try:\r\n if json_decode['status'] == '0':\r\n # 官方文档异常\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=3,\r\n context='Function name:{0} - status:{1}'.format(function_name,\r\n json_decode['status'])\r\n )\r\n raise OfficialException\r\n\r\n elif json_decode['status'] == '2':\r\n # 自定义异常\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=3,\r\n context='Function name:{0} - status:{1}'.format(function_name,\r\n json_decode['status'])\r\n )\r\n raise CustomExpection\r\n\r\n elif json_decode['status'] == '1':\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=6,\r\n context='Function name:{0} - status:{1}'.format(function_name,\r\n json_decode['status'])\r\n )\r\n if json_decode['infocode'] == \"10000\": # 请求数据成功的状态码\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=6,\r\n context='Function name:{0} - infocode:{1}'.format(function_name,\r\n json_decode['infocode'])\r\n )\r\n # 提示信息,返回结果总数目\r\n tips = json_decode['tips']\r\n tips_count = json_decode['count']\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=1,\r\n context='Function name:{0} - tips_count:{1}'.format(function_name,\r\n tips_count)\r\n )\r\n\r\n if tips is not None:\r\n # 所有数据类型\r\n if 
datatype == 'all' or datatype == 'poi':\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=1,\r\n context='Function name:{0} - all or poi:{1}'.format(function_name,\r\n 1)\r\n )\r\n print(\"根据您的关键字已查询到以下相关信息\")\r\n print(\"共包含{0}条记录\".format(tips_count))\r\n for item, tip in enumerate(tips):\r\n name = tip['name']\r\n district = tip['district']\r\n adcode = tip['adcode']\r\n location = tip['location']\r\n address = tip['address']\r\n typeCode = tip['typeCode']\r\n\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=1,\r\n context='Function name:{0} - name:{1}'.format(function_name,\r\n name)\r\n )\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=1,\r\n context='Function name:{0} - district:{1}'.format(function_name,\r\n district)\r\n )\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=1,\r\n context='Function name:{0} - adcode:{1}'.format(function_name,\r\n adcode)\r\n )\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=1,\r\n context='Function name:{0} - location:{1}'.format(function_name,\r\n location)\r\n )\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=1,\r\n context='Function name:{0} - address:{1}'.format(function_name,\r\n address)\r\n )\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=1,\r\n context='Function name:{0} - typeCode:{1}'.format(function_name,\r\n typeCode)\r\n )\r\n\r\n print(\"============================\")\r\n print(\"第{0}条\".format(item + 1))\r\n if address:\r\n print(\"名称:{0},地址:{1}\".format(name, address))\r\n else:\r\n print(\"名称:{0}\".format(name))\r\n print(\"具体位置信息:{0}\".format(district))\r\n\r\n elif datatype == 'bus':\r\n print(\"根据您的关键字已查询到以下相关公交站或地铁站\")\r\n print(\"共包含{0}条记录\".format(tips_count))\r\n for item, tip in enumerate(tips):\r\n name = tip['name']\r\n district = tip['district']\r\n adcode = tip['adcode']\r\n location = tip['location']\r\n address = tip['address']\r\n 
typeCode = tip['typeCode']\r\n\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=1,\r\n context='Function name:{0} - name:{1}'.format(function_name,\r\n name)\r\n )\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=1,\r\n context='Function name:{0} - district:{1}'.format(function_name,\r\n district)\r\n )\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=1,\r\n context='Function name:{0} - adcode:{1}'.format(function_name,\r\n adcode)\r\n )\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=1,\r\n context='Function name:{0} - location:{1}'.format(function_name,\r\n location)\r\n )\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=1,\r\n context='Function name:{0} - address:{1}'.format(function_name,\r\n address)\r\n )\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=1,\r\n context='Function name:{0} - typeCode:{1}'.format(function_name,\r\n typeCode)\r\n )\r\n\r\n print(\"============================\")\r\n print(\"第{0}条\".format(item + 1))\r\n print(\"公交/地铁站名称:{0},公交/地铁线路名称:{1}\".format(name, address))\r\n print(\"具体位置信息:{0}\".format(district))\r\n\r\n elif datatype == 'busline':\r\n print(\"根据您的关键字已查询到以下相关公交线路\")\r\n print(\"共包含{0}条记录\".format(tips_count))\r\n for item, tip in enumerate(tips):\r\n name = tip['name']\r\n district = tip['district']\r\n adcode = tip['adcode']\r\n typeCode = tip['typeCode']\r\n\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=1,\r\n context='Function name:{0} - name:{1}'.format(function_name,\r\n name)\r\n )\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=1,\r\n context='Function name:{0} - district:{1}'.format(function_name,\r\n district)\r\n )\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=1,\r\n context='Function name:{0} - adcode:{1}'.format(function_name,\r\n adcode)\r\n )\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=1,\r\n 
context='Function name:{0} - typeCode:{1}'.format(function_name,\r\n typeCode)\r\n )\r\n\r\n print(\"============================\")\r\n print(\"第{0}条\".format(item + 1))\r\n print(\"公交/地铁线路名称:{0}\".format(name))\r\n print(\"具体位置信息:{0}\".format(district))\r\n else:\r\n print(\"暂未查询到相关信息,请尝试更换关键字查询\")\r\n else:\r\n print(\"暂未查询到相关信息,请尝试更换关键字查询\")\r\n\r\n except OfficialException as officialException:\r\n # 获得的错误信息\r\n errcode, errorInfo, solution = officialException.get_error_info(json_decode)\r\n # 打印到日志文件中\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=3,\r\n context='Function name:{0} - errcode:{1}'.format(function_name,\r\n errcode)\r\n )\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=3,\r\n context='Function name:{0} - errorInfo:{1}'.format(function_name,\r\n errorInfo)\r\n )\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=3,\r\n context='Function name:{0} - solution:{1}'.format(function_name,\r\n solution)\r\n )\r\n\r\n except CustomExpection as customException:\r\n info, detail_information, error_prompt = customException.get_error_info(json_decode)\r\n # 打印到日志文件中\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=3,\r\n context='Function name:{0} - info:{1}'.format(function_name,\r\n info)\r\n )\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=3,\r\n context='Function name:{0} - detail_information:{1}'.format(function_name,\r\n detail_information)\r\n )\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=3,\r\n context='error_prompt:{0}'.format(error_prompt)\r\n )\r\n" }, { "alpha_fraction": 0.42092224955558777, "alphanum_fraction": 0.427610844373703, "avg_line_length": 47.66080856323242, "blob_id": "f37eb951578a0f77708ba1ffd12a5cf7724507f9", "content_id": "ab3b6e51373f83f0bdd65b1d13a6b54354bcc026", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 31537, 
"license_type": "permissive", "max_line_length": 135, "num_lines": 569, "path": "/AmapFunctions/GeographicCoding.py", "repo_name": "Gaoyifei1011/AmapProgram", "src_encoding": "UTF-8", "text": "# -*- coding:utf-8 -*-\r\n# 导入的库\r\nimport inspect\r\nimport json\r\nimport time\r\nfrom typing import Any\r\n\r\nimport requests\r\n\r\nfrom SelfExpection import CustomExpection\r\nfrom SelfExpection.OfficialException import OfficialException\r\nfrom logrecord.WriteLog import WriteLog\r\n\r\n\r\nclass GeographicCoding:\r\n \"\"\"\r\n Class:地理/逆地理编码\r\n 地理编码:将详细的结构化地址转换为高德经纬度坐标。且支持对地标性名胜景区、建筑物名称解析为高德经纬度坐标。\r\n 逆地理编码:将经纬度转换为详细结构化的地址,且返回附近周边的POI、AOI信息。\r\n \"\"\"\r\n\r\n def __init__(self) -> None:\r\n self.address = None\r\n self.batch = None\r\n self.city = None\r\n self.extensions = None\r\n self.flag_batch = None\r\n self.homeorcrop = None\r\n self.inverse_json_decode = None\r\n self.json_decode = None\r\n self.location = None\r\n self.output = None\r\n self.poitype = None\r\n self.radius = None\r\n self.roadLevel = None\r\n\r\n # 写入日志\r\n writeLog = WriteLog()\r\n class_name = self.__class__.__name__\r\n log_filename = writeLog.create_filename(class_name=class_name)\r\n writeLog.write_to_log(file_name=log_filename, log_level=1, context='Class name:{0} start'.format(class_name))\r\n\r\n # 获取高德地图数据API的钥匙\r\n APIkey = '<请自己输入自己申请的API Key>'\r\n\r\n def get_geographic_coding(self, address: str,\r\n city: str,\r\n **kwargs: dict[str, Any]\r\n ) -> dict:\r\n \"\"\"\r\n 函数:获取地理编码数据。\\n\r\n Args:\r\n address:结构化地址信息,必填。规则遵循:国家、省份、城市、区县、城镇、乡村、街道、门牌号码、屋邨、大厦,如:北京市朝阳区阜通东大街6号。如果需要解析多个地址的话,请用\"|\"进行间隔,并且将 batch参数设置\r\n 为 true,最多支持 10 个地址进进行\"|\"分割形式的请求。\r\n city:指定查询的城市,可选。可选输入内容包括:指定城市的中文(如北京)、指定城市的中文全拼(beijing)、citycode(010)、adcode(110000),不支持县级市。当指定城市查询内容为空时,会进行全国范围内的地址转换检索。\r\n kwargs:\r\n output:返回数据格式类型,可选,默认JSON格式。可选输入内容包括:JSON,XML。设置 JSON 返回结果数据将会以JSON结构构成;如果设置 XML 返回结果数据将以 XML 结构构成。\r\n batch:批量查询控制,可选,默认False。batch 参数设置为 true 时进行批量查询操作,最多支持 10 个地址进行批量查询。batch 参数设置为 false 
时进行单点查询,此时即使传入多个地址也只返\r\n 回第一个地址的解析查询结果。\r\n Returns:返回获得的json格式数据或错误信息\r\n \"\"\"\r\n\r\n self.address = address\r\n self.city = city\r\n\r\n if 'batch' in kwargs:\r\n self.batch = kwargs['batch']\r\n if 'output' in kwargs:\r\n self.output = kwargs['output']\r\n\r\n # 写入日志\r\n writeLog = WriteLog()\r\n class_name = self.__class__.__name__\r\n function_name = inspect.stack()[0][3]\r\n log_filename = writeLog.create_filename(class_name=class_name)\r\n\r\n # 传入参数\r\n parameters = {'key': self.APIkey,\r\n 'address': self.address,\r\n 'city': self.city\r\n }\r\n\r\n if self.batch is not None:\r\n parameters.update(batch=self.batch)\r\n if self.output is not None:\r\n parameters.update(output=self.output)\r\n\r\n # 获取数据\r\n try:\r\n # 以下except都是用来捕获当requests请求出现异常时,\r\n # 通过捕获然后等待网络情况的变化,以此来保护程序的不间断运行\r\n request_information = requests.get(\"https://restapi.amap.com/v3/geocode/geo?parameters\",\r\n params=parameters)\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=1,\r\n context='Function name:{0} - request_information:{1}'.format(function_name,\r\n request_information)\r\n )\r\n request_information.close() # 关闭访问\r\n request_information.raise_for_status() # 如果响应状态码不是 200,就主动抛出异常\r\n # 返回格式化后的JSON数据\r\n json_decode = json.loads(request_information.text)\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=6,\r\n context='Function name:{0} - Geographic coding data successful get.'.format(\r\n function_name)\r\n )\r\n return json_decode\r\n\r\n except requests.exceptions.ConnectionError as e:\r\n time.sleep(1)\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=5,\r\n context='Function name:{0} - {1} has occured.'.format(function_name,\r\n e.__class__.__name__)\r\n )\r\n # 异常信息\r\n error_connection = 'ConnectionError -- please wait 3 seconds'\r\n error_connection_dict = {'status': '2',\r\n 'info': 'requests.exceptions.ConnectionError',\r\n 
'detail_information': requests.exceptions.ConnectionError,\r\n 'error_prompt': error_connection\r\n }\r\n return error_connection_dict\r\n\r\n except requests.exceptions.ChunkedEncodingError as e:\r\n time.sleep(1)\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=5,\r\n context='Function name:{0} - {1} has occured.'.format(function_name,\r\n e.__class__.__name__)\r\n )\r\n # 异常信息\r\n error_chuck_encoding = 'ChunkedEncodingError -- please wait 3 seconds'\r\n error_chuck_encoding_dict = {'status': '2',\r\n 'info': 'HTTPError',\r\n 'detail_information': requests.exceptions.ChunkedEncodingError,\r\n 'error_prompt': error_chuck_encoding\r\n }\r\n return error_chuck_encoding_dict\r\n\r\n except Exception as e:\r\n time.sleep(1)\r\n error_information = 'Unfortunately -- An unknown Error Happened, Please wait 3 seconds'\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=5,\r\n context='Function name:{0} - {1} has occured.'.format(function_name,\r\n e.__class__.__name__)\r\n )\r\n # 异常信息\r\n error_information_dict = {'status': '2',\r\n 'info': 'HTTPError',\r\n 'detail_information': requests.exceptions.ChunkedEncodingError,\r\n 'error_prompt': error_information\r\n }\r\n\r\n return error_information_dict\r\n\r\n def get_inverse_geographic_coding(self, location: str,\r\n **kwargs\r\n ) -> dict:\r\n\r\n \"\"\"\r\n 函数:获取逆地理编码数据。\\n\r\n Args:\r\n location:经纬度坐标,必填。传入内容规则:经度在前,纬度在后,经纬度间以“,”分割,经纬度小数点后不要超过 6 位。如果需要解析多个经纬度的话,请用\"|\"进行间隔,并且将 batch 参数设置为 true,最\r\n 多支持传入 20 对坐标点。每对点坐标之间用\"|\"分割。\r\n kwargs:\r\n radius:搜索半径,可选,默认1000。radius取值范围在0~3000。单位:米。\r\n roadLevel:道路等级,可选。以下内容需要 extensions 参数为 all时才生效。可选值:0,1当roadlevel=0时,显示所有道路。当roadlevel=1时,过滤非主干道路,仅输出主干道路数据。\r\n extensions:返回结果控制,可选,默认base。extensions 参数默认取值是 base,也就是返回基本地址信息;extensions 参数取值为 all 时会返回基本地址信息、附近 POI内容、道路信息以及道路交叉\r\n 口信息。\r\n poitype:返回附近POI类型,可选。以下内容需要 extensions 参数为 all 时才生效。逆地理编码在进行坐标解析之后不仅可以返回地址描述,也可以返回经纬度附近符合限定要求的POI内容(在\r\n 
extensions 字段值为 all 时才会返回POI内容)。设置 POI 类型参数相当于为上述操作限定要求。参数仅支持传入POI TYPECODE,可以传入多个POITYPECODE,相互之间用“|”分隔。该参\r\n 数在 batch 取值为 true 时不生效。\r\n output:返回数据格式类型,可选,默认JSON格式。可选输入内容包括:JSON,XML。设置JSON 返回结果数据将会以JSON结构构成;如果设置 XML 返回结果数据将以 XML 结构构成。\r\n batch:批量查询控制,可选,默认False。batch 参数设置为 true 时进行批量查询操作,最多支持 20 个经纬度点进行批量地址查询操作。batch 参数设置为 false 时进行单点查询,此时即使传入多个经纬度也只返回第一个\r\n 经纬度的地址解析查询结果。\r\n homeorcorp:是否优化POI返回顺序,可选,默认0。以下内容需要 extensions 参数为 all时才生效。homeorcorp 参数的设置可以影响召回 POI 内容的排序策略,目前提供三个可选参数:0:不对召回\r\n 的排序策略进行干扰。1:综合大数据分析将居家相关的 POI 内容优先返回,即优化返回结果中 pois字段的poi顺序。2:综合大数据分析将公司相关的 POI 内容优先返回,即优化返回结果中 pois\r\n 字段的poi顺序。\r\n Returns:返回获得的json格式数据或错误信息\r\n \"\"\"\r\n\r\n self.location = location\r\n\r\n if 'batch' in kwargs:\r\n self.batch = kwargs['batch']\r\n if 'extensions' in kwargs:\r\n self.extensions = kwargs['extensions']\r\n if 'homeorcorp' in kwargs:\r\n self.homeorcrop = kwargs['homeorcorp']\r\n if 'poitype' in kwargs:\r\n self.poitype = kwargs['poitype']\r\n if 'radius' in kwargs:\r\n self.radius = kwargs['radius']\r\n if 'roadlevel' in kwargs:\r\n self.roadLevel = kwargs['roadlevel']\r\n\r\n # 写入日志\r\n writeLog = WriteLog()\r\n class_name = self.__class__.__name__\r\n function_name = inspect.stack()[0][3]\r\n log_filename = writeLog.create_filename(class_name=class_name)\r\n\r\n # 传入参数\r\n parameters = {'key': GeographicCoding.APIkey,\r\n 'location': self.location,\r\n }\r\n\r\n if self.batch is not None:\r\n parameters.update(batch=self.batch)\r\n if self.extensions is not None:\r\n parameters.update(extensions=self.extensions)\r\n if self.homeorcrop is not None:\r\n parameters.update(homeorcorp=self.homeorcrop)\r\n if self.poitype is not None:\r\n parameters.update(poitype=self.poitype)\r\n if self.radius is not None:\r\n parameters.update(radius=self.radius)\r\n if self.roadLevel is not None:\r\n parameters.update(roadlevel=self.roadLevel)\r\n\r\n # 获取数据\r\n try:\r\n request_information = requests.get(\"https://restapi.amap.com/v3/geocode/regeo?parameters\",\r\n 
params=parameters)\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=1,\r\n context='Function name:{0} - request_information:{1}'.format(function_name,\r\n request_information)\r\n )\r\n request_information.close() # 关闭访问\r\n request_information.raise_for_status() # 如果响应状态码不是 200,就主动抛出异常\r\n inverse_json_decode = json.loads(request_information.text)\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=6,\r\n context='Function name:{0} - Inverse geographic coding data successful get.'.format(\r\n function_name)\r\n )\r\n return inverse_json_decode\r\n\r\n except requests.exceptions.ConnectionError as e:\r\n time.sleep(1)\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=5,\r\n context='Function name:{0} - {1} has occured.'.format(function_name,\r\n e.__class__.__name__)\r\n )\r\n\r\n # 异常信息\r\n error_connection = 'ConnectionError -- please wait 3 seconds'\r\n error_connection_dict = {'status': '2',\r\n 'info': 'requests.exceptions.ConnectionError',\r\n 'detail_information': requests.exceptions.ConnectionError,\r\n 'error_prompt': error_connection\r\n }\r\n\r\n return error_connection_dict\r\n\r\n except requests.exceptions.ChunkedEncodingError as e:\r\n time.sleep(1)\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=5,\r\n context='Function name:{0} - {1} has occured.'.format(function_name,\r\n e.__class__.__name__)\r\n )\r\n\r\n # 异常信息\r\n error_chuck_encoding = 'ChunkedEncodingError -- please wait 3 seconds'\r\n error_chuck_encoding_dict = {'status': '2',\r\n 'info': 'HTTPError',\r\n 'detail_information': requests.exceptions.ChunkedEncodingError,\r\n 'error_prompt': error_chuck_encoding\r\n }\r\n\r\n return error_chuck_encoding_dict\r\n\r\n except Exception as e:\r\n time.sleep(1)\r\n # only for debugging\r\n error_information = 'Unfortunately -- An unknown Error Happened, Please wait 3 seconds'\r\n 
writeLog.write_to_log(file_name=log_filename,\r\n log_level=5,\r\n context='Function name:{0} - {1} has occured.'.format(function_name,\r\n e.__class__.__name__)\r\n )\r\n\r\n # 异常信息\r\n error_information_dict = {'status': '2',\r\n 'info': 'HTTPError',\r\n 'detail_information': requests.exceptions.ChunkedEncodingError,\r\n 'error_prompt': error_information\r\n }\r\n\r\n return error_information_dict\r\n\r\n def parse_geographic_coding(self, json_decode: dict\r\n ) -> dict:\r\n \"\"\"\r\n 函数:解析地理编码数据\r\n Args:\r\n json_decode:get_geographic_coding()方法从网络中获取到的数据\r\n Returns:返回得到的经纬度值\r\n \"\"\"\r\n\r\n self.json_decode = json_decode\r\n\r\n # 输出结果\r\n resultContext = {}\r\n\r\n # 写入日志\r\n writeLog = WriteLog()\r\n class_name = self.__class__.__name__\r\n function_name = inspect.stack()[0][3]\r\n log_filename = writeLog.create_filename(class_name=class_name)\r\n\r\n try:\r\n if self.json_decode['status'] == '0':\r\n # 官方文档异常\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=3,\r\n context='Function name:{0} - status:{1}'.format(function_name,\r\n self.json_decode['status'])\r\n )\r\n raise OfficialException\r\n\r\n elif self.json_decode['status'] == '2':\r\n # 自定义异常\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=3,\r\n context='Function name:{0} - status:{1}'.format(function_name,\r\n self.json_decode['status'])\r\n )\r\n raise CustomExpection\r\n\r\n elif self.json_decode['status'] == '1':\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=6,\r\n context='Function name:{0} - status:{1}'.format(function_name,\r\n self.json_decode['status'])\r\n )\r\n\r\n if self.json_decode['infocode'] == \"10000\": # 请求数据成功的状态码\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=6,\r\n context='Function name:{0} - infocode:{1}'.format(function_name,\r\n self.json_decode['infocode'])\r\n )\r\n\r\n if 
self.json_decode['geocodes']:\r\n # 地理位置\r\n geographic_position = self.json_decode['geocodes'][0]['location']\r\n # 地理位置对应的城市\r\n geographic_city = self.json_decode['geocodes'][0]['city']\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=1,\r\n context='Function name:{0} - geographic_position:{1}'.format(\r\n function_name,\r\n geographic_position)\r\n )\r\n\r\n resultContext['geographic_position'] = geographic_position\r\n resultContext['geographic_city'] = geographic_city\r\n return resultContext\r\n\r\n else:\r\n context = \"您提供的地点信息查询失败,换个词进行搜索吧\"\r\n resultContext['error_context'] = context\r\n return resultContext\r\n\r\n except OfficialException as officialException:\r\n # 获得的错误信息\r\n errcode, errorInfo, solution = officialException.get_error_info(self.json_decode)\r\n # 打印到日志文件中\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=3,\r\n context='Function name:{0} - errcode:{1}'.format(function_name,\r\n errcode)\r\n )\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=3,\r\n context='Function name:{0} - errorInfo:{1}'.format(function_name,\r\n errorInfo)\r\n )\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=3,\r\n context='Function name:{0} - solution:{1}'.format(function_name,\r\n solution)\r\n )\r\n\r\n context = \"您提供的地点信息查询失败,换个词进行搜索吧\"\r\n resultContext['error_context'] = context\r\n return resultContext\r\n\r\n except CustomExpection as customException:\r\n info, detail_information, error_prompt = customException.get_error_info(self.json_decode)\r\n # 打印到日志文件中\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=3,\r\n context='Function name:{0} - info:{1}'.format(function_name,\r\n info)\r\n )\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=3,\r\n context='Function name:{0} - detail_information:{1}'.format(function_name,\r\n detail_information)\r\n )\r\n 
writeLog.write_to_log(file_name=log_filename,\r\n log_level=3,\r\n context='error_prompt:{0}'.format(error_prompt)\r\n )\r\n\r\n context = \"您提供的地点信息查询失败,换个词进行搜索吧\"\r\n resultContext['error_context'] = context\r\n return resultContext\r\n\r\n def parse_inverse_geographic_coding(self, inverse_json_decode: dict,\r\n flag_batch: bool\r\n ) -> str or None:\r\n \"\"\"\r\n 函数:解析逆地理编码数据\r\n Args:\r\n inverse_json_decode:get_inverse_geographic_coding()方法从网络中获取到的数据\r\n flag_batch:是否为多值查询\r\n Returns:返回得到的经纬度值\r\n \"\"\"\r\n\r\n # TODO:未来版本将返回数据从str/None升级为dict\r\n self.inverse_json_decode = inverse_json_decode\r\n\r\n # 写入日志\r\n writeLog = WriteLog()\r\n class_name = self.__class__.__name__\r\n function_name = inspect.stack()[0][3]\r\n log_filename = writeLog.create_filename(class_name=class_name)\r\n\r\n try:\r\n if inverse_json_decode['status'] == '0':\r\n # 官方文档异常\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=3,\r\n context='Function name:{0} - status:{1}'.format(function_name,\r\n inverse_json_decode['status'])\r\n )\r\n raise OfficialException\r\n\r\n elif inverse_json_decode['status'] == '2':\r\n # 自定义异常\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=3,\r\n context='Function name:{0} - status:{1}'.format(function_name,\r\n inverse_json_decode['status'])\r\n )\r\n raise CustomExpection\r\n\r\n elif inverse_json_decode['status'] == '1':\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=1,\r\n context='Function name:{0} - status:{1}'.format(function_name,\r\n inverse_json_decode['status'])\r\n )\r\n\r\n if inverse_json_decode['infocode'] == \"10000\": # 请求数据成功的状态码\r\n if not flag_batch: # 单点查询\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=1,\r\n context='Function name:{0} - flag_batch:{1}'.format(function_name,\r\n flag_batch)\r\n )\r\n # 逆解析后的地理实际位置名称\r\n inverse_geographic_information = 
inverse_json_decode['regeocode']['formatted_address']\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=1,\r\n context='Function name:{0} - inverse_geographic_information:{1}'.format(\r\n function_name,\r\n inverse_geographic_information)\r\n )\r\n\r\n return inverse_geographic_information\r\n\r\n else: # 多值查询\r\n len_regeocodes = len(inverse_json_decode['regeocodes'])\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=1,\r\n context='Function name:{0} - lenRegeocodes:{1}'.format(function_name,\r\n len_regeocodes)\r\n )\r\n\r\n for item in range(len_regeocodes):\r\n # 逆解析后的地理实际位置名称\r\n formatted_address = inverse_json_decode['regeocodes'][item]['formatted_address']\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=1,\r\n context='Function name:{0} - formatted_address:{1}'.format(\r\n function_name,\r\n formatted_address)\r\n )\r\n\r\n return inverse_json_decode['regeocodes']\r\n\r\n except OfficialException as officialException:\r\n # 获得的错误信息\r\n errcode, errorInfo, solution = officialException.get_error_info(inverse_json_decode)\r\n # 打印到日志文件中\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=3,\r\n context='Function name:{0} - errcode:{1}'.format(function_name,\r\n errcode)\r\n )\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=3,\r\n context='Function name:{0} - errorInfo:{1}'.format(function_name,\r\n errorInfo)\r\n )\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=3,\r\n context='Function name:{0} - solution:{1}'.format(function_name,\r\n solution)\r\n )\r\n\r\n resultContext = \"Error\"\r\n return resultContext\r\n\r\n except CustomExpection as customException:\r\n info, detail_information, error_prompt = customException.get_error_info(inverse_json_decode)\r\n # 打印到日志文件中\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=3,\r\n 
context='Function name:{0} - info:{1}'.format(function_name,\r\n info)\r\n )\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=3,\r\n context='Function name:{0} - detail_information:{1}'.format(function_name,\r\n detail_information)\r\n )\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=3,\r\n context='error_prompt:{0}'.format(error_prompt)\r\n )\r\n\r\n resultContext = \"Error\"\r\n return resultContext\r\n" }, { "alpha_fraction": 0.4739641547203064, "alphanum_fraction": 0.48390257358551025, "avg_line_length": 36.793479919433594, "blob_id": "ae23e4293da38334b921ea22000f6447fa4f46ea", "content_id": "1457bc9127a77faf10951ed37ea80caa28147986", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9116, "license_type": "permissive", "max_line_length": 137, "num_lines": 184, "path": "/AmapFunctions/TrafficSituation.py", "repo_name": "Gaoyifei1011/AmapProgram", "src_encoding": "UTF-8", "text": "# -*- coding:utf-8 -*-\r\n# 导入的库\r\nimport json\r\n\r\nimport requests\r\n\r\n\r\nclass TrafficSituation:\r\n \"\"\"\r\n 高德地图API已停止使用,该项目目前暂时停止使用,目前使用百度地图API爬取。\r\n \"\"\"\r\n def __init__(self):\r\n pass\r\n\r\n # 获取高德地图数据API的钥匙\r\n APIkey = '<请自己输入自己申请的API Key>'\r\n fileitem = 1\r\n\r\n def get_traffic_situation_by_rectangle(self, rectangle: str,\r\n level: int = 5,\r\n extensions: str = 'base',\r\n output: str = 'JSON',\r\n ) -> dict:\r\n \"\"\"\r\n 函数:矩形区域交通态势\r\n Args:\r\n rectangle:代表此为矩形区域查询,必填。左下右上顶点坐标对。矩形对角线不能超过10公里。两个坐标对之间用”;”间隔。xy之间用”,”间隔\r\n level:道路等级。指定道路等级,可选,默认5。下面各值代表的含义:1:高速(京藏高速)2:城市快速路、国道(西三环、103国道)3:高速辅路(G6辅路)4:主要道路(长安街、三环辅路路)5:一般道路(彩和坊路)6:无名道路\r\n extensions:返回结果控制,可选,默认base。可选值:base,all\r\n output:返回数据格式类型,可选,默认JSON。可选值:JSON,XML\r\n Returns:返回矩形区域交通态势的json格式数据\r\n \"\"\"\r\n\r\n self.rectangle = rectangle\r\n self.level = level\r\n self.extensions = extensions\r\n self.output = output\r\n\r\n # 传入参数\r\n parameters = {'key': TrafficSituation.APIkey,\r\n 'level': 
level,\r\n 'extensions': extensions,\r\n 'output': output,\r\n 'rectangle': rectangle\r\n }\r\n\r\n # 获取数据\r\n try:\r\n request_information = requests.get(\"https://restapi.amap.com/v3/traffic/status/rectangle?parameters\",\r\n params=parameters)\r\n request_information.raise_for_status() # 如果响应状态码不是 200,就主动抛出异常\r\n # only for debugging\r\n # print(\"请求状态结果:\" + str(request_information))\r\n # 返回格式化后的JSON数据\r\n json_decode = json.loads(request_information.text)\r\n return json_decode\r\n except requests.RequestException:\r\n return dict()\r\n\r\n def get_traffic_situation_by_circle(self, location: str,\r\n level: int = 5,\r\n extensions: str = 'base',\r\n output: str = 'JSON',\r\n radius: int = 1000,\r\n ) -> dict:\r\n \"\"\"\r\n 函数:圆形区域交通态势\r\n Args:\r\n location:中心点坐标,必填。经度在前,纬度在后。经度和纬度用\",\"分割。经纬度小数点后不得超过6位。\r\n level:道路等级。指定道路等级,可选,默认5。下面各值代表的含义:1:高速(京藏高速)2:城市快速路、国道(西三环、103国道)3:高速辅路(G6辅路)4:主要道路(长安街、三环辅路路)5:一般道路(彩和坊路)6:无名道路\r\n extensions:返回结果控制,可选,默认base。可选值:base,all\r\n output:返回数据格式类型,可选,默认JSON。可选值:JSON,XML\r\n radius:半径,可选,默认1000。单位:米,最大取值5000米。\r\n Returns:返回圆形区域交通态势的json格式数据\r\n \"\"\"\r\n\r\n self.location = location\r\n self.level = level\r\n self.extensions = extensions\r\n self.output = output\r\n self.radius = radius\r\n\r\n # 传入参数\r\n parameters = {'key': TrafficSituation.APIkey,\r\n 'level': level,\r\n 'extensions': extensions,\r\n 'output': output,\r\n 'radius': radius\r\n }\r\n\r\n # 获取数据\r\n try:\r\n request_information = requests.get(\"https://restapi.amap.com/v3/traffic/status/circle?parameters\",\r\n params=parameters)\r\n request_information.raise_for_status() # 如果响应状态码不是 200,就主动抛出异常\r\n # only for debugging\r\n # print(\"请求状态结果:\" + str(request_information))\r\n # 返回格式化后的JSON数据\r\n json_decode = json.loads(request_information.text)\r\n return json_decode\r\n except requests.RequestException:\r\n return dict()\r\n\r\n def get_traffic_situation_by_road(self, name: str,\r\n level: int = 5,\r\n extensions: str = 'base',\r\n output: str = 
'JSON',\r\n city: str = '',\r\n adcode: str = ''\r\n ) -> dict:\r\n \"\"\"\r\n 函数:指定线路交通态势\r\n Args:\r\n name:道路名,必填。\r\n level:道路等级。指定道路等级,可选,默认5。下面各值代表的含义:1:高速(京藏高速)2:城市快速路、国道(西三环、103国道)3:高速辅路(G6辅路)4:主要道路(长安街、三环辅路路)5:一般道路(彩和坊路)6:无名道路\r\n extensions:返回结果控制,可选。可选值:base,all\r\n output:返回数据格式类型,可选。可选值:JSON,XML\r\n city:城市名,非必填(city和adcode必填一个)。由于开发者可能对城市称呼和高德的称呼存在差异(例如开发者称呼为深圳,但高德仅识别深圳市)故强烈建议使用adcode,不使用city字段。另外此处的adcode仅识别市级的adcode。\r\n adcode:城市编码,非必填(city和adcode必填一个)。由于开发者可能对城市称呼和高德的称呼存在差异(例如开发者称呼为深圳,但高德仅识别深圳市)故强烈建议使用adcode,不使用city字段。另外此处的adcode仅识别市级的adcode。\r\n Returns:返回指定线路交通态势的json格式数据\r\n \"\"\"\r\n\r\n self.name = name\r\n self.level = level\r\n self.extensions = extensions\r\n self.output = output\r\n self.city = city\r\n self.adcode = adcode\r\n\r\n # 传入参数\r\n parameters = {'key': TrafficSituation.APIkey,\r\n 'name': name,\r\n 'level': level,\r\n 'extensions': extensions,\r\n 'output': output,\r\n 'city': city,\r\n 'adcode': adcode\r\n }\r\n\r\n # 获取数据\r\n try:\r\n request_information = requests.get(\"https://restapi.amap.com/v3/traffic/status/road?parameters\",\r\n params=parameters)\r\n request_information.raise_for_status() # 如果响应状态码不是 200,就主动抛出异常\r\n # only for debugging\r\n # print(\"请求状态结果:\" + str(request_information))\r\n # 返回格式化后的JSON数据\r\n json_decode = json.loads(request_information.text)\r\n return json_decode\r\n except requests.RequestException:\r\n return dict()\r\n\r\n def parse_traffic_situation(self, json_decode: dict,\r\n extensions: str\r\n ) -> None:\r\n \"\"\"\r\n 函数:解析交通态势的json格式数据\r\n Args:\r\n json_decode:交通态势的json格式数据\r\n extensions:返回结果控制,可选。可选值:base,all\r\n \"\"\"\r\n\r\n self.json_decode = json_decode\r\n self.extensions = extensions\r\n\r\n if not json_decode:\r\n print(\"返回异常\")\r\n if json_decode['status'] == '1':\r\n if json_decode['infocode'] == \"10000\": # 请求数据成功的状态码\r\n trafficinfo = json_decode['trafficinfo']\r\n trafficinfo_description = trafficinfo['description']\r\n evaluation = trafficinfo['evaluation']\r\n\r\n 
expedite = evaluation['expedite']\r\n congested = evaluation['congested']\r\n blocked = evaluation['blocked']\r\n unknown = evaluation['unknown']\r\n status = evaluation['status']\r\n evaluation_description = evaluation['description']\r\n\r\n if extensions == 'all':\r\n roads = trafficinfo['roads']\r\n print(roads)\r\n pass\r\n\r\n\r\n\r\n" }, { "alpha_fraction": 0.5097001791000366, "alphanum_fraction": 0.5167548656463623, "avg_line_length": 34.412845611572266, "blob_id": "25af13d693983ca5a7792ab70de706b191342e02", "content_id": "fe5288d80cfbffcb02b1b5be8dd8f012c55d9a3d", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4237, "license_type": "permissive", "max_line_length": 117, "num_lines": 109, "path": "/FundamentalFunctions/GetTrafficData.py", "repo_name": "Gaoyifei1011/AmapProgram", "src_encoding": "UTF-8", "text": "import inspect\r\nimport time\r\n\r\nfrom PyQt5.QtCore import QThread, pyqtSignal\r\nfrom apscheduler.schedulers.blocking import BlockingScheduler\r\n\r\nfrom AmapFunctions.TrafficSituationByBaiduMap import TrafficSituationByBaiduMap\r\nfrom FundamentalFunctions.TrafficInformationExecuteOperation import TrafficInformationWriteOperation\r\nfrom logrecord.WriteLog import WriteLog\r\n\r\n\r\nclass GetTrafficData(QThread):\r\n \"\"\"\r\n Class:批量获取交通数据(新线程)\r\n \"\"\"\r\n # 定义信号\r\n signal = pyqtSignal(str)\r\n\r\n def __init__(self, RoadNameList, parent=None):\r\n super(GetTrafficData, self).__init__()\r\n self.trafficSituationByBaiduMap = TrafficSituationByBaiduMap()\r\n self.flag = 1 # 自定义开关变量\r\n # 初始化创建excel数据表\r\n self.RoadNameList = RoadNameList\r\n self.trafficInformationSavingOperation = TrafficInformationWriteOperation(self.RoadNameList)\r\n\r\n # 写入日志\r\n writeLog = WriteLog()\r\n class_name = self.__class__.__name__\r\n log_filename = writeLog.create_filename(class_name=class_name)\r\n writeLog.write_to_log(file_name=log_filename, log_level=1, context='Class name:{0} 
start'.format(class_name))\r\n\r\n def __del__(self):\r\n self.wait()\r\n\r\n def run(self) -> None:\r\n \"\"\"\r\n 进行多线程任务操作,主要的逻辑操作,返回结果\r\n \"\"\"\r\n\r\n self.flag = 1\r\n\r\n # 写入日志\r\n writeLog = WriteLog()\r\n class_name = self.__class__.__name__\r\n function_name = inspect.stack()[0][3]\r\n log_filename = writeLog.create_filename(class_name=class_name)\r\n\r\n # 定时运行\r\n scheduler = BlockingScheduler()\r\n # 每天定时在6-23点执行\r\n scheduler.add_job(func=self.job, trigger='cron', month='*', day='*', hour='6-23', minute='0')\r\n scheduler.start()\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=1,\r\n context='Function name:{0} - Successfully Start'.format(\r\n function_name)\r\n )\r\n\r\n def job(self) -> None:\r\n \"\"\"\r\n 函数:程序具体运行内容\r\n \"\"\"\r\n\r\n # 写入日志\r\n writeLog = WriteLog()\r\n class_name = self.__class__.__name__\r\n function_name = inspect.stack()[0][3]\r\n log_filename = writeLog.create_filename(class_name=class_name)\r\n\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=1,\r\n context='Function name:{0} - Data get Time:{1}'.format(\r\n function_name,\r\n time.asctime(time.localtime(time.time())))\r\n )\r\n\r\n for road in self.RoadNameList:\r\n if self.flag == 1:\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=1,\r\n context='Function name:{0} - Successfully Executed'.format(\r\n function_name)\r\n )\r\n\r\n # 依次遍历每一个城市\r\n # 城市名称\r\n cityName = road\r\n roadName = self.RoadNameList[cityName]\r\n for item in roadName:\r\n if self.flag == 1:\r\n # 获取实时路况信息\r\n resultInformation = self.trafficSituationByBaiduMap.get_traffic_situation_by_road(\r\n road_name=item, city=cityName)\r\n # 将获取的路况信息保存到excel数据库文件中\r\n self.trafficInformationSavingOperation.write_to_excel(cityName, resultInformation)\r\n time.sleep(3)\r\n else:\r\n self.flag = 0\r\n\r\n else:\r\n self.signal.emit(\"successfully Saved\")\r\n\r\n # 重写程序停止运行\r\n def 
stop(self) -> None:\r\n self.flag = 0\r\n" }, { "alpha_fraction": 0.5266019701957703, "alphanum_fraction": 0.5304854512214661, "avg_line_length": 40.21311569213867, "blob_id": "4e31a20f14158ec03368bfeb4c130b3e6bae1d11", "content_id": "6cf725dd37d43758ecfc177979eee4f25f110d8f", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8379, "license_type": "permissive", "max_line_length": 134, "num_lines": 183, "path": "/FundamentalFunctions/DriveRoutePlanningOperation.py", "repo_name": "Gaoyifei1011/AmapProgram", "src_encoding": "UTF-8", "text": "import inspect\r\n\r\nfrom AmapFunctions.GeographicCoding import GeographicCoding\r\nfrom AmapFunctions.RoutePlanning import RoutePlanning\r\nfrom logrecord.WriteLog import WriteLog\r\n\r\n\r\nclass DriveRoutePlanningOperation:\r\n \"\"\"\r\n Class:驾驶路径规划操作\r\n \"\"\"\r\n def __init__(self):\r\n self.driveDepartureAddress = None\r\n self.driveDestinationAddress = None\r\n self.driveDepartureCity = None\r\n self.driveDestinationCity = None\r\n\r\n # 写入日志\r\n writeLog = WriteLog()\r\n class_name = self.__class__.__name__\r\n log_filename = writeLog.create_filename(class_name=class_name)\r\n writeLog.write_to_log(file_name=log_filename, log_level=1, context='Class name:{0} start'.format(class_name))\r\n\r\n def check_drive_departure_information(self, driveDepartureAddress: str,\r\n ) -> int:\r\n \"\"\"\r\n 函数:检测用户提供的驾驶路径出发点是否符合规范要求\r\n Args:\r\n driveDepartureAddress: 用户输入的出发点\r\n Returns:\r\n 检测类型识别码\r\n \"\"\"\r\n\r\n self.driveDepartureAddress = driveDepartureAddress\r\n\r\n # 写入日志\r\n writeLog = WriteLog()\r\n class_name = self.__class__.__name__\r\n function_name = inspect.stack()[0][3]\r\n log_filename = writeLog.create_filename(class_name=class_name)\r\n\r\n # 检查结果\r\n checkedResult = self.driveDepartureAddress is None or self.driveDepartureAddress == ''\r\n\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=1,\r\n 
context='Function name:{0} - drive departure address check result:{1}'.format(\r\n function_name,\r\n checkedResult)\r\n )\r\n\r\n if checkedResult:\r\n return 2\r\n # TODO:\r\n # 使用python正则表达式验证用户名格式\r\n # 此处检测格式错误返回false\r\n else:\r\n return True\r\n\r\n def check_drive_destination_information(self, driveDestinationAddress: str\r\n ) -> int:\r\n \"\"\"\r\n 函数:检测用户提供的步行路径终点是否符合规范要求\r\n Args:\r\n driveDestinationAddress: 用户输入的终点\r\n Returns:\r\n 检测类型识别码\r\n \"\"\"\r\n\r\n self.driveDestinationAddress = driveDestinationAddress\r\n\r\n # 写入日志\r\n writeLog = WriteLog()\r\n class_name = self.__class__.__name__\r\n function_name = inspect.stack()[0][3]\r\n log_filename = writeLog.create_filename(class_name=class_name)\r\n\r\n # 检查结果\r\n checkedResult = self.driveDestinationAddress is None or self.driveDestinationAddress == ''\r\n\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=1,\r\n context='Function name:{0} - drive destination address check result:{1}'.format(\r\n function_name,\r\n checkedResult)\r\n )\r\n\r\n if checkedResult:\r\n return 2\r\n # TODO:\r\n # 检测用户提供的步行路径出发点是否符合规范要求\r\n # 此处检测格式错误返回false\r\n else:\r\n return True\r\n\r\n def get_drive_route_planning_information(self, driveDepartureAddress: str,\r\n driveDestinationAddress: str,\r\n driveDepartureCity: str = '',\r\n driveDestinationCity: str = ''\r\n ) -> list:\r\n \"\"\"\r\n 函数:获取驾驶路径规划的具体信息\r\n Args:\r\n driveDepartureAddress: 用户输入的出发点\r\n driveDestinationAddress: 用户输入的终点\r\n driveDepartureCity: 用户输入的出发点对应的城市\r\n driveDestinationCity: 用户输入的终点对应的城市\r\n Returns:\r\n 返回获取的驾驶路径规划对应的具体信息\r\n \"\"\"\r\n\r\n # TODO:未来版本将返回数据从list升级为dict\r\n self.driveDepartureAddress = driveDepartureAddress\r\n self.driveDestinationAddress = driveDestinationAddress\r\n # 在以后的版本中添加\r\n self.driveDepartureCity = driveDepartureCity\r\n self.driveDestinationCity = driveDestinationCity\r\n\r\n # 写入日志\r\n writeLog = WriteLog()\r\n class_name = self.__class__.__name__\r\n function_name = 
inspect.stack()[0][3]\r\n log_filename = writeLog.create_filename(class_name=class_name)\r\n\r\n # 驾驶路径规划\r\n geographicCoding = GeographicCoding()\r\n # 获取起点终点对应的初始编码信息\r\n # TODO:优化city参数\r\n driveDepartureJsonDecode = geographicCoding.get_geographic_coding(address=self.driveDepartureAddress,\r\n city='')\r\n driveDestinationJsonDecode = geographicCoding.get_geographic_coding(address=self.driveDestinationAddress,\r\n city='')\r\n\r\n parseDriveDepartureInformation = geographicCoding.parse_geographic_coding(driveDepartureJsonDecode)\r\n parseDriveDestinationInformation = geographicCoding.parse_geographic_coding(driveDestinationJsonDecode)\r\n\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=1,\r\n context='Function name:{0} - drive departure information:{1}'.format(function_name,\r\n parseDriveDepartureInformation)\r\n )\r\n\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=1,\r\n context='Function name:{0} - drive destination information:{1}'.format(function_name,\r\n parseDriveDestinationInformation)\r\n )\r\n\r\n # 起点位置编码\r\n if 'error_context' not in parseDriveDepartureInformation:\r\n resultDepartureGeographicCoding = parseDriveDepartureInformation['geographic_position']\r\n else:\r\n return [parseDriveDepartureInformation['error_context']]\r\n\r\n # 终点位置编码\r\n if 'error_context' not in parseDriveDestinationInformation:\r\n resultDestinationGeographicCoding = parseDriveDestinationInformation['geographic_position']\r\n else:\r\n return [parseDriveDestinationInformation['error_context']]\r\n\r\n # TODO: 未来将strategy和extensions放入设置选项中\r\n routePlanning = RoutePlanning()\r\n driveRoutePlanning = routePlanning.get_drive_route_planning(origin=resultDepartureGeographicCoding,\r\n destination=resultDestinationGeographicCoding,\r\n strategy=10,\r\n extensions='base')\r\n\r\n # 获取内容\r\n resultDriveRoutePlanning = routePlanning.parse_drive_route_planning(driveRoutePlanning)\r\n 
promptInformation = \"从{0}到{1}的驾驶导航信息如下所示\".format(self.driveDepartureAddress, self.driveDestinationAddress)\r\n resultDriveRoutePlanning.insert(0, promptInformation)\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=1,\r\n context='Function name:{0} - result drive route planning:{1}'.format(function_name,\r\n resultDriveRoutePlanning)\r\n )\r\n\r\n return resultDriveRoutePlanning\r\n" }, { "alpha_fraction": 0.6391566395759583, "alphanum_fraction": 0.6668674945831299, "avg_line_length": 36.38150405883789, "blob_id": "94fd3ca432e2a64f955ae17a05c720ddaa5bb48c", "content_id": "27a12248364a6bbcf82d70301990af77392918dd", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8194, "license_type": "permissive", "max_line_length": 120, "num_lines": 173, "path": "/FundamentalFunctions/NorthShanxiAreaDataVisualization.py", "repo_name": "Gaoyifei1011/AmapProgram", "src_encoding": "UTF-8", "text": "import matplotlib.pyplot as plt\r\nimport numpy as np\r\nimport pandas as pd\r\n\r\n\"\"\"\r\nCopy from Jupyter Notebook\r\n\"\"\"\r\n# TODO: In the future version will insert into the 山西省道路信息分析系统 page.\r\n\r\n# 设置字体,否则中文会显示异常\r\nplt.rcParams['font.sans-serif'] = ['Microsoft YaHei']\r\nplt.rcParams['axes.unicode_minus'] = False\r\nplt.rcParams['figure.figsize'] = (22.0, 14.0)\r\nplt.title(\"晋北地区各城市道路通行情况\")\r\n\r\n# 使用pandas读取excel文件\r\ndf_datong = pd.read_excel(r'F:/01.XLS', sheet_name='大同市')\r\ndf_shuozhou = pd.read_excel(r'F:/01.XLS', sheet_name='朔州市')\r\ndf_xinzhou = pd.read_excel(r'F:/01.XLS', sheet_name='忻州市')\r\n\r\n# 设置子图默认间距\r\nplt.subplots_adjust(hspace=0.5)\r\n# 大同市数据可视化\r\nplt.subplot(2, 2, 1)\r\n# 添加条形图的标题\r\nplt.title('大同市各道路通行情况', fontsize=16)\r\nplt.ylabel(\"拥堵路段信息统计(单位:出现次数)\", fontsize=16)\r\nplt.xlabel(\"道路名称\", fontsize=16)\r\n\r\n# 获取指定道路名称的拥堵评价信息\r\ndatong_road_name = df_datong.groupby(['道路名称']).agg({'道路名称': 'count'}).to_dict()\r\ndatong_road_cong = 
df_datong.groupby(['道路名称']).agg({'路段拥堵评价': 'count'}).to_dict()\r\n\r\n# 两个道路信息进行合并\r\ndatong_combination = tuple(zip(datong_road_cong['路段拥堵评价'].values(), datong_road_name['道路名称'].values()))\r\n\r\n# 获取拥堵占比信息,道路通畅次数\r\ndatong_cong_proportion = []\r\ndatong_clear_road = []\r\n\r\nfor item in list(datong_combination):\r\n # print(\"{:.2f}\".format(item[0]/item[1]))\r\n datong_cong_proportion.append(float(\"{:.2f}\".format(item[0] / item[1])))\r\n datong_clear_road.append(int(\"{0}\".format(item[1] - item[0])))\r\n\r\ndatong_information = df_datong.groupby(['道路名称']).agg({'道路名称': 'count', '路段拥堵评价': 'count'})\r\n# print(datong_information)\r\n# print(type(datong_information))\r\ndatong_information['拥堵占比'] = datong_cong_proportion\r\ndatong_information['道路畅通评价'] = datong_clear_road\r\n# print(datong_information)\r\n# print(list(datong_information['道路名称']))\r\n# print(list(datong_information['路段拥堵评价']))\r\n# print(list(datong_information['拥堵占比']))\r\n# print(datong_information['道路畅通评价'])\r\n# 纵轴列表数据\r\ny = range(0, 101, 10)\r\n# print(y)\r\n\r\n# 大同市道路名称\r\ndatong_road_name_list = ['云中路', '北都街', '南环路', '同煤快线', '御河东路', '御河西路', '文兴路', '迎宾街', '魏都大道']\r\nplt.xticks(range(len(datong_road_name_list)), datong_road_name_list, rotation=70, fontsize=16)\r\n\r\nplt.yticks(range(0, 101, 10), fontsize=16)\r\n\r\nplt.bar(np.arange(9) - 0.3, height=list(datong_road_name['道路名称'].values()), alpha=0.5, width=0.3, color='skyblue',\r\n edgecolor='red', label='道路获取次数')\r\nplt.bar(np.arange(9), height=list(datong_information['道路畅通评价']), alpha=0.5, width=0.3, color='green', edgecolor='blue',\r\n label='道路畅通次数')\r\nplt.bar(np.arange(9) + 0.3, height=list(datong_information['路段拥堵评价']), alpha=0.5, width=0.3, color='yellow',\r\n edgecolor='blue', label='道路拥堵次数')\r\n\r\n# 朔州市数据可视化\r\nplt.subplot(2, 2, 2)\r\n# 添加条形图的标题\r\nplt.title('朔州市各道路通行情况', fontsize=16)\r\nplt.ylabel(\"拥堵路段信息统计(单位:出现次数)\", fontsize=16)\r\nplt.xlabel(\"道路名称\", fontsize=16)\r\n\r\n# 获取指定道路名称的拥堵评价信息\r\nshuozhou_road_name = 
df_shuozhou.groupby(['道路名称']).agg({'道路名称': 'count'}).to_dict()\r\nshuozhou_road_cong = df_shuozhou.groupby(['道路名称']).agg({'路段拥堵评价': 'count'}).to_dict()\r\n\r\n# 两个道路信息进行合并\r\nshuozhou_combination = tuple(zip(shuozhou_road_cong['路段拥堵评价'].values(), shuozhou_road_name['道路名称'].values()))\r\n\r\n# 获取拥堵占比信息,道路通畅次数\r\nshuozhou_cong_proportion = []\r\nshuozhou_clear_road = []\r\n\r\nfor item in list(shuozhou_combination):\r\n # print(\"{:.2f}\".format(item[0]/item[1]))\r\n shuozhou_cong_proportion.append(float(\"{:.2f}\".format(item[0] / item[1])))\r\n shuozhou_clear_road.append(int(\"{0}\".format(item[1] - item[0])))\r\n\r\nshuozhou_information = df_shuozhou.groupby(['道路名称']).agg({'道路名称': 'count', '路段拥堵评价': 'count'})\r\n\r\n# print(type(shuozhou_information))\r\nshuozhou_information['拥堵占比'] = shuozhou_cong_proportion\r\nshuozhou_information['道路畅通评价'] = shuozhou_clear_road\r\n\r\n# print(list(shuozhou_information['道路名称']))\r\n# print(list(shuozhou_information['路段拥堵评价']))\r\n# print(list(shuozhou_information['拥堵占比']))\r\n# print(list(shuozhou_information['道路畅通评价']))\r\n# 纵轴列表数据\r\ny = range(0, 101, 10)\r\n# print(y)\r\n\r\n# 朔州市道路名称\r\nshuozhou_road_name_list = ['开发北路', '开发南路', '张辽北路', '张辽南路', '文远路', '民福东街', '民福西街']\r\nplt.xticks(range(len(shuozhou_road_name_list)), shuozhou_road_name_list, rotation=70, fontsize=16)\r\n\r\nplt.yticks(range(0, 101, 10), fontsize=16)\r\n\r\nplt.bar(np.arange(7) - 0.3, height=list(shuozhou_road_name['道路名称'].values()), alpha=0.5, width=0.3, color='skyblue',\r\n edgecolor='red', label='道路获取次数')\r\nplt.bar(np.arange(7), height=list(shuozhou_information['道路畅通评价']), alpha=0.5, width=0.3, color='green',\r\n edgecolor='blue', label='道路畅通次数')\r\nplt.bar(np.arange(7) + 0.3, height=list(shuozhou_information['路段拥堵评价']), alpha=0.5, width=0.3, color='yellow',\r\n edgecolor='blue', label='道路拥堵次数')\r\n\r\n# 忻州市数据可视化\r\nplt.subplot(2, 2, 3)\r\n# 添加条形图的标题\r\nplt.title('忻州市各道路通行情况', fontsize=16)\r\nplt.ylabel(\"拥堵路段信息统计(单位:出现次数)\", 
fontsize=16)\r\nplt.xlabel(\"道路名称\", fontsize=16)\r\n\r\n# 获取指定道路名称的拥堵评价信息\r\nxinzhou_road_name = df_xinzhou.groupby(['道路名称']).agg({'道路名称': 'count'}).to_dict()\r\nxinzhou_road_cong = df_xinzhou.groupby(['道路名称']).agg({'路段拥堵评价': 'count'}).to_dict()\r\n\r\n# 两个道路信息进行合并\r\nxinzhou_combination = tuple(zip(xinzhou_road_cong['路段拥堵评价'].values(), xinzhou_road_name['道路名称'].values()))\r\n\r\n# 获取拥堵占比信息,道路通畅次数\r\nxinzhou_cong_proportion = []\r\nxinzhou_clear_road = []\r\n\r\nfor item in list(xinzhou_combination):\r\n # print(\"{:.2f}\".format(item[0]/item[1]))\r\n xinzhou_cong_proportion.append(float(\"{:.2f}\".format(item[0] / item[1])))\r\n xinzhou_clear_road.append(int(\"{0}\".format(item[1] - item[0])))\r\n\r\nxinzhou_information = df_xinzhou.groupby(['道路名称']).agg({'道路名称': 'count', '路段拥堵评价': 'count'})\r\n\r\n# print(type(xinzhou_information))\r\nxinzhou_information['拥堵占比'] = xinzhou_cong_proportion\r\nxinzhou_information['道路畅通评价'] = xinzhou_clear_road\r\n\r\n# print(list(xinzhou_information['道路名称']))\r\n# print(list(xinzhou_information['路段拥堵评价']))\r\n# print(list(xinzhou_information['拥堵占比']))\r\n# print(list(xinzhou_information['道路畅通评价']))\r\n# 纵轴列表数据\r\ny = range(0, 101, 10)\r\n# print(y)\r\n\r\n# 忻州市道路名称\r\nxinzhou_road_name_list = ['七一北路', '七一南路', '和平东街', '和平西街', '建设北路', '建设南路', '慕山北路', '慕山南路', '雁门西大道']\r\nplt.xticks(range(len(xinzhou_road_name_list)), xinzhou_road_name_list, rotation=70, fontsize=16)\r\n\r\nplt.yticks(range(0, 101, 10), fontsize=16)\r\n\r\nplt.bar(np.arange(9) - 0.3, height=list(xinzhou_road_name['道路名称'].values()), alpha=0.5, width=0.3, color='skyblue',\r\n edgecolor='red', label='道路获取次数')\r\nplt.bar(np.arange(9), height=list(xinzhou_information['道路畅通评价']), alpha=0.5, width=0.3, color='green', edgecolor='blue',\r\n label='道路畅通次数')\r\nplt.bar(np.arange(9) + 0.3, height=list(xinzhou_information['路段拥堵评价']), alpha=0.5, width=0.3, color='yellow',\r\n edgecolor='blue', label='道路拥堵次数')\r\n\r\nplt.savefig(r\"C:\\Users\\高怡飞\\Desktop\\02.png\", dpi=600)\r\n" }, 
{ "alpha_fraction": 0.42683807015419006, "alphanum_fraction": 0.4399048686027527, "avg_line_length": 51.91883087158203, "blob_id": "9e6b196622bc352c14130e6a03f9ac99e0a37a85", "content_id": "3b3d502fd1d45f2f4d1ca59fd35e4fdff50a0588", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 36648, "license_type": "permissive", "max_line_length": 229, "num_lines": 616, "path": "/AmapFunctions/TrafficSituationByBaiduMap.py", "repo_name": "Gaoyifei1011/AmapProgram", "src_encoding": "UTF-8", "text": "# -*- coding:utf-8 -*-\r\n# 导入的库\r\nimport hashlib\r\nimport inspect\r\nimport json\r\nimport time\r\nimport urllib.parse as parse\r\n\r\nimport requests\r\n\r\nfrom logrecord.WriteLog import WriteLog\r\n\r\n\r\nclass TrafficSituationByBaiduMap:\r\n def __init__(self):\r\n self.bounds = None\r\n self.center = None\r\n self.city = None\r\n self.congestion_section = None\r\n self.coord_type_input = None\r\n self.coord_type_output = None\r\n self.json_decode = None\r\n self.radius = None\r\n self.road_name = None\r\n self.road_grade = None\r\n self.vertexes = None\r\n\r\n writeLog = WriteLog()\r\n class_name = self.__class__.__name__\r\n log_filename = writeLog.create_filename(class_name=class_name)\r\n writeLog.write_to_log(file_name=log_filename, log_level=1, context='Class name:{0} start'.format(class_name))\r\n\r\n # 获取百度地图数据API的钥匙\r\n APIkey = '<请自己输入自己申请的API Key>'\r\n sk = '<请自己输入自己申请的sk 校验码>'\r\n\r\n def get_traffic_situation_by_road(self, road_name: str,\r\n city: str,\r\n ) -> dict:\r\n \"\"\"\r\n 函数:道路路况查询\r\n Args:\r\n road_name:道路名称,必填。\t如:\"北五环\"、\"信息路\"。目前支持除多方向立交桥和多方向道路以外的各类道路名称(注:多方向是指道路方向多于2个方向,如:南向北、北向南、西向东、东向西,称为4方向)。\r\n city:城市名,必填。1. 全国城市名称,如:\"北京市\"、\"上海市\"等。2. 
百度地图行政区划adcode,仅支持城市级别(adcode映射表),如\"110000\"\r\n Returns:返回道路路况查询的json格式数据\r\n \"\"\"\r\n\r\n self.road_name = road_name\r\n self.city = city\r\n\r\n # 写入日志\r\n writeLog = WriteLog()\r\n class_name = self.__class__.__name__\r\n function_name = inspect.stack()[0][3]\r\n log_filename = writeLog.create_filename(class_name=class_name)\r\n\r\n queryStr = '/traffic/v1/road?road_name={0}&city={1}&ak={2}'.format(self.road_name, self.city, self.APIkey)\r\n # 对queryStr进行转码,safe内的保留字符不转换\r\n encodedStr = parse.quote(queryStr, safe=\"/:=&?#+!$,;'@()*[]\")\r\n # 在最后直接追加上yoursk\r\n rawStr = encodedStr + self.sk\r\n # 计算sn\r\n sn = (hashlib.md5(parse.quote_plus(rawStr).encode(\"utf8\")).hexdigest())\r\n # 由于URL里面含有中文,所以需要用parse.quote进行处理,然后返回最终可调用的url\r\n url = parse.quote(\"http://api.map.baidu.com\" + queryStr + \"&sn=\" + sn, safe=\"/:=&?#+!$,;'@()*[]\")\r\n\r\n # 获取数据\r\n try:\r\n request_information = requests.get(url, headers={\"content-type\": \"application/json\"})\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=1,\r\n context='Function name:{0} - request_information:{1}'.format(function_name,\r\n request_information)\r\n )\r\n request_information.close() # 关闭访问\r\n request_information.raise_for_status() # 如果响应状态码不是 200,就主动抛出异常\r\n # 返回格式化后的JSON数据\r\n json_decode = json.loads(request_information.text)\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=6,\r\n context='Function name:{0} - Traffic road situation data successful get.'.format(\r\n function_name)\r\n )\r\n return json_decode\r\n\r\n except requests.exceptions.ConnectionError as e:\r\n time.sleep(1)\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=5,\r\n context='Function name:{0} - {1} has occured.'.format(function_name,\r\n e.__class__.__name__)\r\n )\r\n # 异常信息\r\n error_connection = 'ConnectionError -- please wait 3 seconds'\r\n error_connection_dict = {'status': '2',\r\n 'info': 
'requests.exceptions.ConnectionError',\r\n 'detail_information': requests.exceptions.ConnectionError,\r\n 'error_prompt': error_connection\r\n }\r\n return error_connection_dict\r\n\r\n except requests.exceptions.ChunkedEncodingError as e:\r\n time.sleep(1)\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=5,\r\n context='Function name:{0} - {1} has occured.'.format(function_name,\r\n e.__class__.__name__\r\n )\r\n )\r\n # 异常信息\r\n error_chuck_encoding = 'ChunkedEncodingError -- please wait 3 seconds'\r\n error_chuck_encoding_dict = {'status': '2',\r\n 'info': 'HTTPError',\r\n 'detail_information': requests.exceptions.ChunkedEncodingError,\r\n 'error_prompt': error_chuck_encoding\r\n }\r\n return error_chuck_encoding_dict\r\n\r\n except Exception as e:\r\n time.sleep(1)\r\n error_information = 'Unfortunately -- An Unknown Error Happened, Please wait 3 seconds'\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=5,\r\n context='Function name:{0} - {1} has occured.'.format(function_name,\r\n e.__class__.__name__)\r\n )\r\n # 异常信息\r\n error_information_dict = {'status': '2',\r\n 'info': 'HTTPError',\r\n 'detail_information': requests.exceptions.ChunkedEncodingError,\r\n 'error_prompt': error_information\r\n }\r\n return error_information_dict\r\n\r\n # 这里有一些异常发生,将在未来的某一个版本进行修复\r\n # There are some exceptions occurring here that will be fixed in a future release\r\n # def get_traffic_situation_by_rectangle(self, bounds: str,\r\n # road_grade: int = 0,\r\n # coord_type_input: str = 'bd09ll',\r\n # coord_type_output: str = 'bd09ll',\r\n # ) -> dict:\r\n # \"\"\"\r\n # 函数:矩形区域交通态势\r\n # Args:\r\n # bounds:矩形区域, 左下角和右上角的经纬度坐标点,必填。坐标点顺序为\"左下;右上\",坐标对间使用;号分隔,格式为:纬度,经度;纬度,经度。对角线距离不超过2公里。示例: 39.912078,116.464303\r\n # ;39.918276,116.475442。\r\n # road_grade:道路等级,可选。用户可进行道路等级筛选,支持选择多个道路等级。道路等级之间使用英文“,”分隔。默认值:road_grade=0 道路等级对应表如下: 0:全部驾车道路 1:高速路 2:环路及快速\r\n # 路 3:主干路 4:次干路 5:支干路。示例: 
查询全部驾车道路路况:road_grade:0 查询高速道路路况:road_grade:1 查询高速路、环路及快速路、主干路的路况:road_grade\r\n # =1,2,3。\r\n # coord_type_input:请求参数 bounds的坐标类型,可选。默认值:bd09ll。bd09ll:百度经纬度坐标 gcj02:国测局加密坐标 wgs84:gps 坐标\r\n # coord_type_output:返回结果的坐标类型,可选。默认值:bd09ll。该字段用于控制返回结果中坐标的类型。可选值为: bd09ll:百度经纬度坐标 gcj02:国测局加密坐标\r\n # Returns:返回矩形区域交通态势的json格式数据\r\n # \"\"\"\r\n #\r\n # self.bounds = bounds\r\n # self.coord_type_input = coord_type_input\r\n # self.coord_type_output = coord_type_output\r\n # self.road_grade = road_grade\r\n #\r\n # # 写入日志\r\n # writeLog = WriteLog()\r\n # class_name = self.__class__.__name__\r\n # function_name = inspect.stack()[0][3]\r\n # log_filename = writeLog.create_filename(class_name=class_name)\r\n #\r\n # queryStr = '/traffic/v1/bound?bounds={0}&road_grade={1}&coord_type_input={2}&coord_type_output={3}&ak={4}'.format(\r\n # bounds, road_grade, coord_type_input, coord_type_output, self.APIkey)\r\n # # 对queryStr进行转码,safe内的保留字符不转换\r\n # encodedStr = parse.quote(queryStr, safe=\"/:=&?#+!$,;'@()*[]\")\r\n # # 在最后直接追加上yoursk\r\n # rawStr = encodedStr + self.sk\r\n # # 计算sn\r\n # sn = (hashlib.md5(parse.quote_plus(rawStr).encode(\"utf8\")).hexdigest())\r\n # # 由于URL里面含有中文,所以需要用parse.quote进行处理,然后返回最终可调用的url\r\n # url = parse.quote(\"http://api.map.baidu.com\" + queryStr + \"&sn=\" + sn, safe=\"/:=&?#+!$,;'@()*[]\")\r\n # print(url)\r\n #\r\n # # 获取数据\r\n # try:\r\n # request_information = requests.get(url, headers={\"content-type\": \"application/json\"})\r\n # # only for debugging\r\n # writeLog.write_to_log(file_name=log_filename,\r\n # log_level=1,\r\n # context='Function name:{0} - request_information:{1}'.format(function_name,\r\n # request_information)\r\n # )\r\n # request_information.close() # 关闭访问\r\n # request_information.raise_for_status() # 如果响应状态码不是 200,就主动抛出异常\r\n # json_decode = json.loads(request_information.text)\r\n # # only for debugging\r\n # writeLog.write_to_log(file_name=log_filename,\r\n # log_level=6,\r\n # context='Function name:{0} - Traffic 
rectangle situation data successful get.'.format(\r\n # function_name)\r\n # )\r\n # return json_decode\r\n #\r\n # except requests.exceptions.ConnectionError as e:\r\n # time.sleep(1)\r\n # # only for debugging\r\n # writeLog.write_to_log(file_name=log_filename,\r\n # log_level=5,\r\n # context='Function name:{0} - {1} has occured.'.format(function_name,\r\n # e.__class__.__name__)\r\n # )\r\n # error_connection = 'ConnectionError -- please wait 3 seconds'\r\n # error_connection_dict = {'status': '2',\r\n # 'info': 'requests.exceptions.ConnectionError',\r\n # 'detail_information': requests.exceptions.ConnectionError,\r\n # 'error_prompt': error_connection\r\n # }\r\n # return error_connection_dict\r\n #\r\n # except requests.exceptions.ChunkedEncodingError as e:\r\n # time.sleep(1)\r\n # # only for debugging\r\n # writeLog.write_to_log(file_name=log_filename,\r\n # log_level=5,\r\n # context='Function name:{0} - {1} has occured.'.format(function_name,\r\n # e.__class__.__name__\r\n # )\r\n # )\r\n # error_chuck_encoding = 'ChunkedEncodingError -- please wait 3 seconds'\r\n # error_chuck_encoding_dict = {'status': '2',\r\n # 'info': 'HTTPError',\r\n # 'detail_information': requests.exceptions.ChunkedEncodingError,\r\n # 'error_prompt': error_chuck_encoding\r\n # }\r\n # return error_chuck_encoding_dict\r\n #\r\n # except Exception as e:\r\n # time.sleep(1)\r\n # error_information = 'Unfortunately -- An Unknown Error Happened, Please wait 3 seconds'\r\n # # only for debugging\r\n # writeLog.write_to_log(file_name=log_filename,\r\n # log_level=5,\r\n # context='Function name:{0} - {1} has occured.'.format(function_name,\r\n # e.__class__.__name__)\r\n # )\r\n # error_information_dict = {'status': '2',\r\n # 'info': 'HTTPError',\r\n # 'detail_information': requests.exceptions.ChunkedEncodingError,\r\n # 'error_prompt': error_information\r\n # }\r\n # return error_information_dict\r\n #\r\n # def get_traffic_situation_by_polygon(self, vertexes: str,\r\n # road_grade: 
int = 0,\r\n # coord_type_input: str = 'bd09ll',\r\n # coord_type_output: str = 'bd09ll',\r\n # ) -> dict:\r\n # \"\"\"\r\n # 函数:矩形区域交通态势\r\n # Args:\r\n # vertexes:多边形边界点, 必填。多边形顶点,规则: 经纬度顺序为:纬度,经度; 顶点顺序需按逆时针排列。多边形外接矩形对角线距离不超过2公里。 示例: vertexes=39.910528,116.47292\r\n # 6;39.918276,116.475442;39.916671,116.459056;39.912078,116.464303\r\n # road_grade:道路等级,可选。用户可进行道路等级筛选,支持选择多个道路等级。道路等级之间使用英文“,”分隔。默认值:road_grade=0 道路等级对应表如下: 0:全部驾车道路 1:高速路 2:环路及快速\r\n # 路 3:主干路 4:次干路 5:支干路。示例: 查询全部驾车道路路况:road_grade:0 查询高速道路路况:road_grade:1 查询高速路、环路及快速路、主干路的路况:road_grade\r\n # =1,2,3。\r\n # coord_type_input:请求参数 bounds的坐标类型,可选。默认值:bd09ll。bd09ll:百度经纬度坐标 gcj02:国测局加密坐标 wgs84:gps 坐标\r\n # coord_type_output:返回结果的坐标类型,可选。默认值:bd09ll。该字段用于控制返回结果中坐标的类型。可选值为: bd09ll:百度经纬度坐标 gcj02:国测局加密坐标\r\n # Returns:返回矩形区域交通态势的json格式数据\r\n # \"\"\"\r\n #\r\n # self.coord_type_input = coord_type_input\r\n # self.coord_type_output = coord_type_output\r\n # self.road_grade = road_grade\r\n # self.vertexes = vertexes\r\n #\r\n # # 写入日志\r\n # writeLog = WriteLog()\r\n # class_name = self.__class__.__name__\r\n # function_name = inspect.stack()[0][3]\r\n # log_filename = writeLog.create_filename(class_name=class_name)\r\n #\r\n # queryStr = '/traffic/v1/polygon?vertexes={0}&road_grade={1}&coord_type_input={2}&coord_type_output={3}&ak={4}'.format(\r\n # vertexes, road_grade, coord_type_input, coord_type_output, self.APIkey)\r\n # # 对queryStr进行转码,safe内的保留字符不转换\r\n # encodedStr = parse.quote(queryStr, safe=\"/:=&?#+!$,;'@()*[]\")\r\n # # 在最后直接追加上yoursk\r\n # rawStr = encodedStr + self.sk\r\n # # 计算sn\r\n # sn = (hashlib.md5(parse.quote_plus(rawStr).encode(\"utf8\")).hexdigest())\r\n # # 由于URL里面含有中文,所以需要用parse.quote进行处理,然后返回最终可调用的url\r\n # url = parse.quote(\"http://api.map.baidu.com\" + queryStr + \"&sn=\" + sn, safe=\"/:=&?#+!$,;'@()*[]\")\r\n #\r\n # # 获取数据\r\n # try:\r\n # request_information = requests.get(url, headers={\"content-type\": \"application/json\"})\r\n # # only for debugging\r\n # 
writeLog.write_to_log(file_name=log_filename,\r\n # log_level=1,\r\n # context='Function name:{0} - request_information:{1}'.format(function_name,\r\n # request_information)\r\n # )\r\n # request_information.close() # 关闭访问\r\n # request_information.raise_for_status() # 如果响应状态码不是 200,就主动抛出异常\r\n # # 返回格式化后的JSON数据\r\n # json_decode = json.loads(request_information.text)\r\n # # only for debugging\r\n # writeLog.write_to_log(file_name=log_filename,\r\n # log_level=6,\r\n # context='Function name:{0} - Traffic polygon situation data successful get.'.format(\r\n # function_name)\r\n # )\r\n # return json_decode\r\n #\r\n # except requests.exceptions.ConnectionError as e:\r\n # time.sleep(1)\r\n # # only for debugging\r\n # writeLog.write_to_log(file_name=log_filename,\r\n # log_level=5,\r\n # context='Function name:{0} - {1} has occured.'.format(function_name,\r\n # e.__class__.__name__)\r\n # )\r\n # error_connection = 'ConnectionError -- please wait 3 seconds'\r\n # error_connection_dict = {'status': '2',\r\n # 'info': 'requests.exceptions.ConnectionError',\r\n # 'detail_information': requests.exceptions.ConnectionError,\r\n # 'error_prompt': error_connection\r\n # }\r\n # return error_connection_dict\r\n #\r\n # except requests.exceptions.ChunkedEncodingError as e:\r\n # time.sleep(1)\r\n # # only for debugging\r\n # writeLog.write_to_log(file_name=log_filename,\r\n # log_level=5,\r\n # context='Function name:{0} - {1} has occured.'.format(function_name,\r\n # e.__class__.__name__\r\n # )\r\n # )\r\n # error_chuck_encoding = 'ChunkedEncodingError -- please wait 3 seconds'\r\n # error_chuck_encoding_dict = {'status': '2',\r\n # 'info': 'HTTPError',\r\n # 'detail_information': requests.exceptions.ChunkedEncodingError,\r\n # 'error_prompt': error_chuck_encoding\r\n # }\r\n # return error_chuck_encoding_dict\r\n #\r\n # except Exception as e:\r\n # time.sleep(1)\r\n # error_information = 'Unfortunately -- An Unknown Error Happened, Please wait 3 seconds'\r\n # # only for 
debugging\r\n # writeLog.write_to_log(file_name=log_filename,\r\n # log_level=5,\r\n # context='Function name:{0} - {1} has occured.'.format(function_name,\r\n # e.__class__.__name__)\r\n # )\r\n # error_information_dict = {'status': '2',\r\n # 'info': 'HTTPError',\r\n # 'detail_information': requests.exceptions.ChunkedEncodingError,\r\n # 'error_prompt': error_information\r\n # }\r\n # return error_information_dict\r\n #\r\n # def get_traffic_situation_by_circle(self, center: str,\r\n # radius: int,\r\n # road_grade: int = 0,\r\n # coord_type_input: str = 'bd09ll',\r\n # coord_type_output: str = 'bd09ll',\r\n # ) -> dict:\r\n # \"\"\"\r\n # 函数:矩形区域交通态势\r\n # Args:\r\n # center:中心点坐标,必填。示例:center=39.912078,116.464303\r\n # radius:查询半径,必填。单位:米,取值范围[1,1000],示例: radius=200。\r\n # road_grade:道路等级,可选。用户可进行道路等级筛选,支持选择多个道路等级。道路等级之间使用英文“,”分隔。默认值:road_grade=0 道路等级对应表如下: 0:全部驾车道路 1:高速路 2:环路及快速路 3:主干路 4:次干路 5:支干路。示例: 查询全部驾车道路路况:road_grade:0 查询高速道路路况:road_grade:1 查询高速路、环路及快速路、主干路的路况:road_grade=1,2,3。\r\n # coord_type_input:请求参数 bounds的坐标类型,可选。默认值:bd09ll。bd09ll:百度经纬度坐标 gcj02:国测局加密坐标 wgs84:gps 坐标\r\n # coord_type_output:返回结果的坐标类型,可选。默认值:bd09ll。该字段用于控制返回结果中坐标的类型。可选值为: bd09ll:百度经纬度坐标 gcj02:国测局加密坐标\r\n # Returns:返回矩形区域交通态势的json格式数据\r\n # \"\"\"\r\n #\r\n # self.center = center\r\n # self.coord_type_input = coord_type_input\r\n # self.coord_type_output = coord_type_output\r\n # self.radius = radius\r\n # self.road_grade = road_grade\r\n #\r\n # # 写入日志\r\n # writeLog = WriteLog()\r\n # class_name = self.__class__.__name__\r\n # function_name = inspect.stack()[0][3]\r\n # log_filename = writeLog.create_filename(class_name=class_name)\r\n #\r\n # queryStr = '/traffic/v1/around?center={0}&radius={1}&road_grade={2}&coord_type_input={3}&coord_type_output={4}&ak={5}' \\\r\n # .format(center, radius, road_grade, coord_type_input, coord_type_output, self.APIkey)\r\n # # 对queryStr进行转码,safe内的保留字符不转换\r\n # encodedStr = parse.quote(queryStr, safe=\"/:=&?#+!$,;'@()*[]\")\r\n # # 
在最后直接追加上yoursk\r\n # rawStr = encodedStr + self.sk\r\n # # 计算sn\r\n # sn = (hashlib.md5(parse.quote_plus(rawStr).encode(\"utf8\")).hexdigest())\r\n # # 由于URL里面含有中文,所以需要用parse.quote进行处理,然后返回最终可调用的url\r\n # url = parse.quote(\"http://api.map.baidu.com\" + queryStr + \"&sn=\" + sn, safe=\"/:=&?#+!$,;'@()*[]\")\r\n #\r\n # # 获取数据\r\n # try:\r\n # request_information = requests.get(url, headers={\"content-type\": \"application/json\"}) # only for debugging\r\n # writeLog.write_to_log(file_name=log_filename,\r\n # log_level=1,\r\n # context='Function name:{0} - request_information:{1}'.format(function_name,\r\n # request_information)\r\n # )\r\n # request_information.close() # 关闭访问\r\n # request_information.raise_for_status() # 如果响应状态码不是 200,就主动抛出异常\r\n # # 返回格式化后的JSON数据\r\n # json_decode = json.loads(request_information.text)\r\n # # only for debugging\r\n # writeLog.write_to_log(file_name=log_filename,\r\n # log_level=6,\r\n # context='Function name:{0} - Traffic circle situation data successful get.'.format(\r\n # function_name)\r\n # )\r\n # return json_decode\r\n #\r\n # except requests.exceptions.ConnectionError as e:\r\n # time.sleep(1)\r\n # # only for debugging\r\n # writeLog.write_to_log(file_name=log_filename,\r\n # log_level=5,\r\n # context='Function name:{0} - {1} has occured.'.format(function_name,\r\n # e.__class__.__name__)\r\n # )\r\n # error_connection = 'ConnectionError -- please wait 3 seconds'\r\n # error_connection_dict = {'status': '2',\r\n # 'info': 'requests.exceptions.ConnectionError',\r\n # 'detail_information': requests.exceptions.ConnectionError,\r\n # 'error_prompt': error_connection\r\n # }\r\n # return error_connection_dict\r\n #\r\n # except requests.exceptions.ChunkedEncodingError as e:\r\n # time.sleep(1)\r\n # # only for debugging\r\n # writeLog.write_to_log(file_name=log_filename,\r\n # log_level=5,\r\n # context='Function name:{0} - {1} has occured.'.format(function_name,\r\n # e.__class__.__name__\r\n # )\r\n # )\r\n # 
error_chuck_encoding = 'ChunkedEncodingError -- please wait 3 seconds'\r\n # error_chuck_encoding_dict = {'status': '2',\r\n # 'info': 'HTTPError',\r\n # 'detail_information': requests.exceptions.ChunkedEncodingError,\r\n # 'error_prompt': error_chuck_encoding\r\n # }\r\n # return error_chuck_encoding_dict\r\n #\r\n # except Exception as e:\r\n # time.sleep(1)\r\n # error_information = 'Unfortunately -- An Unknown Error Happened, Please wait 3 seconds'\r\n # # only for debugging\r\n # writeLog.write_to_log(file_name=log_filename,\r\n # log_level=5,\r\n # context='Function name:{0} - {1} has occured.'.format(function_name,\r\n # e.__class__.__name__)\r\n # )\r\n # error_information_dict = {'status': '2',\r\n # 'info': 'HTTPError',\r\n # 'detail_information': requests.exceptions.ChunkedEncodingError,\r\n # 'error_prompt': error_information\r\n # }\r\n # return error_information_dict\r\n\r\n def parse_traffic_situation(self, json_decode: dict,\r\n ) -> list:\r\n \"\"\"\r\n 函数:解析交通态势的json格式数据\r\n Args:\r\n json_decode:交通态势的json格式数据\r\n \"\"\"\r\n\r\n # TODO:未来版本将返回数据从list升级为dict\r\n self.json_decode = json_decode\r\n\r\n resultContext = []\r\n\r\n # 写入日志\r\n writeLog = WriteLog()\r\n class_name = self.__class__.__name__\r\n function_name = inspect.stack()[0][3]\r\n log_filename = writeLog.create_filename(class_name=class_name)\r\n\r\n if not self.json_decode:\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=3,\r\n context='Function name:{0} - data error'.format(function_name)\r\n )\r\n if self.json_decode['status'] == 0:\r\n description = self.json_decode['description']\r\n evaluation = self.json_decode['evaluation']\r\n road_traffic = self.json_decode['road_traffic']\r\n status = evaluation['status']\r\n status_desc = evaluation['status_desc']\r\n\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=1,\r\n context='Function name:{0} - description:{1}'.format(function_name,\r\n description)\r\n 
)\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=1,\r\n context='Function name:{0} - evaluation:{1}'.format(function_name,\r\n evaluation)\r\n )\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=1,\r\n context='Function name:{0} - road traffic:{1}'.format(function_name,\r\n road_traffic)\r\n )\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=1,\r\n context='Function name:{0} - status:{1}'.format(function_name,\r\n status)\r\n )\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=1,\r\n context='Function name:{0} - status desc:{1}'.format(function_name,\r\n status_desc)\r\n )\r\n\r\n resultContext.append(\"当前区域路况信息整体如下:\")\r\n resultContext.append(description)\r\n resultContext.append(\"该区域的所有道路整体通行情况是{0}\".format(status_desc))\r\n\r\n resultContext.append(\"您查询的信息包含以下道路\")\r\n for item in road_traffic:\r\n road_name = item['road_name']\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=1,\r\n context='Function name:{0} - road name:{1}'.format(function_name,\r\n road_name)\r\n )\r\n # 显示道路名称\r\n if road_name != 'UNKNOWN' or '':\r\n resultContext.append(road_name)\r\n\r\n if 'congestion_sections' in item:\r\n # 若有congestion_sections该字段\r\n congestion_sections = item['congestion_sections']\r\n for congestion_section in congestion_sections:\r\n context = self.condition_analysis(congestion_section, road_name)\r\n resultContext.extend(context)\r\n # else:\r\n # print(\"该区域道路名为{0}的道路暂无路况数据。\".format(road_name))\r\n return resultContext\r\n\r\n def condition_analysis(self, congestion_section: dict,\r\n road_name: str\r\n ) -> list:\r\n \"\"\"\r\n 函数:路况分析\r\n Args:\r\n congestion_section:拥堵路段详情\r\n road_name:道路名称\r\n \"\"\"\r\n self.congestion_section = congestion_section\r\n self.road_name = road_name\r\n\r\n resultContext = []\r\n\r\n # 写入日志\r\n writeLog = WriteLog()\r\n class_name = self.__class__.__name__\r\n function_name = inspect.stack()[0][3]\r\n log_filename = 
writeLog.create_filename(class_name=class_name)\r\n\r\n status_dict = {0: '未知路况', 1: '畅通', 2: '缓行', 3: '拥堵', 4: '严重拥堵'}\r\n\r\n section_desc = self.congestion_section['section_desc']\r\n congestion_status = self.congestion_section['status']\r\n speed = self.congestion_section['speed']\r\n congestion_distance = self.congestion_section['congestion_distance']\r\n congestion_trend = self.congestion_section['congestion_trend']\r\n\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=1,\r\n context='Function name:{0} - section desc:{1}'.format(function_name,\r\n section_desc)\r\n )\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=1,\r\n context='Function name:{0} - congestion status:{1}'.format(function_name,\r\n congestion_status)\r\n )\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=1,\r\n context='Function name:{0} - speed:{1}'.format(function_name,\r\n speed)\r\n )\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=1,\r\n context='Function name:{0} - congestion distance:{1}'.format(function_name,\r\n congestion_distance)\r\n )\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=1,\r\n context='Function name:{0} - congestion trend:{1}'.format(function_name,\r\n congestion_trend)\r\n )\r\n\r\n resultContext.append(\"==================\")\r\n resultContext.append(\r\n \"当前区域拥堵路段位于{0},大体方向是{1},相比于10分钟前拥堵趋势{2}\".format(self.road_name, section_desc, congestion_trend))\r\n resultContext.append(\"{0}拥堵距离大约是{1}米,平均车速是{2}km/h\".format(self.road_name, congestion_distance, speed))\r\n resultContext.append(\"当前道路整体通行状况是{0}\".format(status_dict[congestion_status]))\r\n return resultContext\r\n" }, { "alpha_fraction": 0.3886704742908478, "alphanum_fraction": 0.3937254250049591, "avg_line_length": 42.19503402709961, "blob_id": "f8e72d10a05ecc79f0e8f6201f2f4bb267a19173", "content_id": "5a7ebe41a38c4a7f70a0de88a1f15b39a0c62bf8", "detected_licenses": [ "MIT" ], "is_generated": false, 
"is_vendor": false, "language": "Python", "length_bytes": 13155, "license_type": "permissive", "max_line_length": 125, "num_lines": 282, "path": "/FundamentalFunctions/StaticMapsOperation.py", "repo_name": "Gaoyifei1011/AmapProgram", "src_encoding": "UTF-8", "text": "import inspect\r\n\r\nfrom AmapFunctions.GeographicCoding import GeographicCoding\r\nfrom AmapFunctions.StaticMaps import StaticMaps\r\nfrom logrecord.WriteLog import WriteLog\r\n\r\n\r\nclass StaticMapsOperation:\r\n \"\"\"\r\n Class:静态地图查询操作\r\n \"\"\"\r\n def __init__(self):\r\n self.staticMapsPosition = None\r\n self.zoom = None\r\n self.size = None\r\n self.scale = None\r\n self.traffic = None\r\n self.context = None\r\n\r\n # 写入日志\r\n writeLog = WriteLog()\r\n class_name = self.__class__.__name__\r\n log_filename = writeLog.create_filename(class_name=class_name)\r\n writeLog.write_to_log(file_name=log_filename, log_level=1, context='Class name:{0} start'.format(class_name))\r\n\r\n def check_static_maps_information(self, staticMapsPosition: str\r\n ) -> int:\r\n\r\n \"\"\"\r\n 函数:检测用户提供的静态地图地点是否符合规范要求\r\n Args:\r\n staticMapsPosition: 用户输入的地理位置\r\n Returns:\r\n 检测类型识别码\r\n \"\"\"\r\n\r\n # TODO:未来版本将返回数据从list升级为dict\r\n self.staticMapsPosition = staticMapsPosition\r\n\r\n # 写入日志\r\n writeLog = WriteLog()\r\n class_name = self.__class__.__name__\r\n function_name = inspect.stack()[0][3]\r\n log_filename = writeLog.create_filename(class_name=class_name)\r\n\r\n # 检测结果\r\n checkedResult = self.staticMapsPosition is None or self.staticMapsPosition == ''\r\n\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=1,\r\n context='Function name:{0} - static maps position check result:{1}'.format(function_name,\r\n checkedResult)\r\n )\r\n\r\n if checkedResult:\r\n return 2\r\n else:\r\n return True\r\n\r\n def get_static_maps(self, staticMapsPosition: str,\r\n zoom: int,\r\n size: str,\r\n scale: int,\r\n traffic: int\r\n ) -> str:\r\n \"\"\"\r\n 函数:获取静态地图\r\n Args:\r\n 
staticMapsPosition:地理位置\r\n zoom:缩放级别\r\n size:图片尺寸大小\r\n scale:是否为高清图\r\n traffic:是否包含交通状况\r\n \"\"\"\r\n\r\n # TODO:未来版本将返回数据从list升级为dict\r\n self.staticMapsPosition = staticMapsPosition\r\n self.zoom = zoom\r\n self.size = size\r\n self.scale = scale\r\n self.traffic = traffic\r\n\r\n # 写入日志\r\n writeLog = WriteLog()\r\n class_name = self.__class__.__name__\r\n function_name = inspect.stack()[0][3]\r\n log_filename = writeLog.create_filename(class_name=class_name)\r\n\r\n # 获取地理位置信息的地理编码\r\n geographicCoding = GeographicCoding()\r\n # 获取地理位置信息对应的初始编码信息\r\n # TODO:优化City参数\r\n staticMapsJsonDecode = geographicCoding.get_geographic_coding(address=self.staticMapsPosition,\r\n city='')\r\n\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=1,\r\n context='Function name:{0} - staticMapsJsonDecode:{1}'.format(function_name,\r\n staticMapsJsonDecode)\r\n )\r\n\r\n parseStaticMapsInformation = geographicCoding.parse_geographic_coding(staticMapsJsonDecode)\r\n\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=1,\r\n context='Function name:{0} - parseStaticMapsInformation:{1}'.format(function_name,\r\n parseStaticMapsInformation)\r\n )\r\n\r\n # 地理位置编码\r\n if 'error_context' not in parseStaticMapsInformation:\r\n resultStaticMapsGeographicCoding = parseStaticMapsInformation['geographic_position']\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=1,\r\n context='Function name:{0} - resultStaticMapsGeographicCoding:{1}'.format(\r\n function_name,\r\n resultStaticMapsGeographicCoding)\r\n )\r\n\r\n else:\r\n return \"3\"\r\n\r\n staticMaps = StaticMaps()\r\n\r\n markers = staticMaps.user_define_markers(location=resultStaticMapsGeographicCoding,\r\n size='large',\r\n color='0x0085FF',\r\n label=\"\"\r\n )\r\n\r\n labels = staticMaps.user_define_labels(content=self.staticMapsPosition,\r\n font=0,\r\n font_size=20,\r\n font_color='0xFFFFFF',\r\n 
background='0x0000FF',\r\n location=resultStaticMapsGeographicCoding\r\n )\r\n\r\n # TODO:\r\n # 将这些选项放入设置中\r\n check_result = staticMaps.check_static_maps_url(location=resultStaticMapsGeographicCoding,\r\n zoom=self.zoom,\r\n size=self.size,\r\n scale=self.scale,\r\n markers=markers,\r\n labels=labels,\r\n traffic=self.traffic\r\n )\r\n\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=1,\r\n context='Function name:{0} - static maps check result:{1}'.format(function_name,\r\n check_result)\r\n )\r\n\r\n # 获取静态地图数据\r\n result_static_maps = staticMaps.get_static_maps_url(location=resultStaticMapsGeographicCoding,\r\n zoom=self.zoom,\r\n size=self.size,\r\n scale=self.scale,\r\n markers=markers,\r\n labels=labels,\r\n check_result=check_result,\r\n traffic=self.traffic\r\n )\r\n\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=1,\r\n context='Function name:{0} - result_static_maps:{1}'.format(function_name,\r\n result_static_maps)\r\n )\r\n\r\n if result_static_maps == 'Error':\r\n return \"1\" # '获取地图图片失败'\r\n else:\r\n result_Photo = staticMaps.parse_static_maps_url(result_static_maps)\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=1,\r\n context='Function name:{0} - result Photo:{1}'.format(function_name,\r\n result_Photo)\r\n )\r\n\r\n if result_Photo == 'Error':\r\n return \"2\" # '图片保存失败,请检查您的网络链接或是否有保存文件的权限'\r\n else:\r\n return result_Photo\r\n\r\n def get_static_maps_by_location(self, staticMapsPosition: str,\r\n zoom: int,\r\n size: str,\r\n scale: int,\r\n traffic: int,\r\n context: str = 'Unknown',\r\n ) -> str:\r\n \"\"\"\r\n 函数:获取静态地图(通过地理位置)\r\n Args:\r\n staticMapsPosition:地理位置坐标\r\n zoom:缩放级别\r\n size:图片尺寸大小\r\n scale:是否为高清图\r\n traffic:是否包含交通状况\r\n context:查询的地理位置名称\r\n \"\"\"\r\n\r\n # TODO:未来版本将返回数据从list升级为dict\r\n self.staticMapsPosition = staticMapsPosition\r\n self.zoom = zoom\r\n self.size = size\r\n self.scale = scale\r\n 
self.traffic = traffic\r\n self.context = context\r\n\r\n # 写入日志\r\n writeLog = WriteLog()\r\n class_name = self.__class__.__name__\r\n function_name = inspect.stack()[0][3]\r\n log_filename = writeLog.create_filename(class_name=class_name)\r\n\r\n staticMaps = StaticMaps()\r\n\r\n markers = staticMaps.user_define_markers(location=self.staticMapsPosition,\r\n size='large',\r\n color='0x0085FF',\r\n )\r\n\r\n # TODO:\r\n # 将这些选项放入设置中\r\n check_result = staticMaps.check_static_maps_url(location=self.staticMapsPosition,\r\n zoom=self.zoom,\r\n size=self.size,\r\n scale=self.scale,\r\n markers=markers,\r\n traffic=self.traffic\r\n )\r\n\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=1,\r\n context='Function name:{0} - static maps check result:{1}'.format(function_name,\r\n check_result)\r\n )\r\n\r\n result_static_maps = staticMaps.get_static_maps_url(location=self.staticMapsPosition,\r\n zoom=self.zoom,\r\n size=self.size,\r\n scale=self.scale,\r\n markers=markers,\r\n check_result=check_result,\r\n traffic=self.traffic\r\n )\r\n\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=1,\r\n context='Function name:{0} - result_static_maps:{1}'.format(function_name,\r\n result_static_maps)\r\n )\r\n\r\n if result_static_maps == 'Error':\r\n return \"1\" # '获取地图图片失败'\r\n else:\r\n # 获取静态地图数据\r\n result_Photo = staticMaps.parse_static_maps_url(result_static_maps)\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=1,\r\n context='Function name:{0} - result Photo:{1}'.format(function_name,\r\n result_Photo)\r\n )\r\n\r\n if result_Photo == 'Error':\r\n return \"2\" # '图片保存失败,请检查您的网络链接或是否有保存文件的权限'\r\n else:\r\n return result_Photo\r\n" }, { "alpha_fraction": 0.6988985538482666, "alphanum_fraction": 0.7187246084213257, "avg_line_length": 56.67346954345703, "blob_id": "e2e1edbe86b361c4f89b5ee549d6738d65da7075", "content_id": 
"b1ef6c630c7b2b0ae57b8b6b73da7931fa2d4957", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8737, "license_type": "permissive", "max_line_length": 122, "num_lines": 147, "path": "/Window/loginUI.py", "repo_name": "Gaoyifei1011/AmapProgram", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\r\n\r\n# Form implementation generated from reading ui file 'F:\\AmapProgram\\Window\\login.ui'\r\n#\r\n# Created by: PyQt5 UI code generator 5.15.4\r\n#\r\n# WARNING: Any manual changes made to this file will be lost when pyuic5 is\r\n# run again. Do not edit this file unless you know what you are doing.\r\n\r\nfrom PyQt5 import QtGui, QtWidgets\r\nfrom PyQt5.QtGui import QIcon\r\nfrom PyQt5.QtWidgets import QLineEdit\r\n\r\nfrom Resources.Background.background import *\r\n\r\n\r\nclass Ui_AmapLoginUI(object):\r\n def setupUi(self, AmapLoginUiMainWindow):\r\n AmapLoginUiMainWindow.setObjectName(\"AmapLoginUiMainWindow\")\r\n AmapLoginUiMainWindow.resize(800, 468)\r\n self.amapLoginUiWidget = QtWidgets.QWidget(AmapLoginUiMainWindow)\r\n self.amapLoginUiWidget.setObjectName(\"amapLoginUiWidget\")\r\n self.AmapLoginUiFrame = QtWidgets.QFrame(self.amapLoginUiWidget)\r\n self.AmapLoginUiFrame.setGeometry(QtCore.QRect(0, 0, 800, 468))\r\n self.AmapLoginUiFrame.setStyleSheet(\"#AmapLoginUiFrame {\\n\"\r\n \" border-image: url(:/background.jpg);\\n\"\r\n \"}\\n\"\r\n \"\")\r\n self.AmapLoginUiFrame.setFrameShape(QtWidgets.QFrame.StyledPanel)\r\n self.AmapLoginUiFrame.setFrameShadow(QtWidgets.QFrame.Raised)\r\n self.AmapLoginUiFrame.setObjectName(\"AmapLoginUiFrame\")\r\n self.verticalLayoutWidget = QtWidgets.QWidget(self.AmapLoginUiFrame)\r\n self.verticalLayoutWidget.setGeometry(QtCore.QRect(0, 0, 801, 471))\r\n self.verticalLayoutWidget.setObjectName(\"verticalLayoutWidget\")\r\n self.loginVerticalLayout1 = QtWidgets.QVBoxLayout(self.verticalLayoutWidget)\r\n self.loginVerticalLayout1.setContentsMargins(0, 0, 0, 
0)\r\n self.loginVerticalLayout1.setObjectName(\"loginVerticalLayout1\")\r\n self.loginHorizontalLayout1 = QtWidgets.QHBoxLayout()\r\n self.loginHorizontalLayout1.setObjectName(\"loginHorizontalLayout1\")\r\n self.AmapSmallProgramLabel = QtWidgets.QLabel(self.verticalLayoutWidget)\r\n sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.MinimumExpanding,\r\n QtWidgets.QSizePolicy.MinimumExpanding)\r\n sizePolicy.setHorizontalStretch(0)\r\n sizePolicy.setVerticalStretch(0)\r\n sizePolicy.setHeightForWidth(self.AmapSmallProgramLabel.sizePolicy().hasHeightForWidth())\r\n self.AmapSmallProgramLabel.setSizePolicy(sizePolicy)\r\n self.AmapSmallProgramLabel.setMinimumSize(QtCore.QSize(797, 82))\r\n self.AmapSmallProgramLabel.setMaximumSize(QtCore.QSize(797, 82))\r\n font = QtGui.QFont()\r\n font.setFamily(\"微软雅黑\")\r\n font.setPointSize(26)\r\n self.AmapSmallProgramLabel.setFont(font)\r\n self.AmapSmallProgramLabel.setStyleSheet(\"color: rgb(0,0,0);\")\r\n self.AmapSmallProgramLabel.setAlignment(QtCore.Qt.AlignCenter)\r\n self.AmapSmallProgramLabel.setObjectName(\"AmapSmallProgramLabel\")\r\n self.loginHorizontalLayout1.addWidget(self.AmapSmallProgramLabel)\r\n self.loginVerticalLayout1.addLayout(self.loginHorizontalLayout1)\r\n spacerItem = QtWidgets.QSpacerItem(20, 100, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)\r\n self.loginVerticalLayout1.addItem(spacerItem)\r\n self.loginHorizontalLayout2 = QtWidgets.QHBoxLayout()\r\n self.loginHorizontalLayout2.setContentsMargins(150, -1, 150, -1)\r\n self.loginHorizontalLayout2.setObjectName(\"loginHorizontalLayout2\")\r\n self.loginHorizontalLayout3 = QtWidgets.QHBoxLayout()\r\n self.loginHorizontalLayout3.setObjectName(\"loginHorizontalLayout3\")\r\n self.loginVerticalLayout2 = QtWidgets.QVBoxLayout()\r\n self.loginVerticalLayout2.setObjectName(\"loginVerticalLayout2\")\r\n self.loginHorizontalLayout4 = QtWidgets.QHBoxLayout()\r\n self.loginHorizontalLayout4.setObjectName(\"loginHorizontalLayout4\")\r\n 
self.userNameLabel = QtWidgets.QLabel(self.verticalLayoutWidget)\r\n self.userNameLabel.setMinimumSize(QtCore.QSize(57, 76))\r\n font = QtGui.QFont()\r\n font.setFamily(\"微软雅黑\")\r\n font.setPointSize(14)\r\n self.userNameLabel.setFont(font)\r\n self.userNameLabel.setStyleSheet(\"color: rgb(255, 255, 255);\")\r\n self.userNameLabel.setObjectName(\"userNameLabel\")\r\n self.loginHorizontalLayout4.addWidget(self.userNameLabel)\r\n self.userNameLineEdit = QtWidgets.QLineEdit(self.verticalLayoutWidget)\r\n font = QtGui.QFont()\r\n font.setFamily(\"微软雅黑 Light\")\r\n font.setPointSize(14)\r\n self.userNameLineEdit.setFont(font)\r\n self.userNameLineEdit.setObjectName(\"userNameLineEdit\")\r\n self.userNameLineEdit.setContextMenuPolicy(QtCore.Qt.NoContextMenu)\r\n self.loginHorizontalLayout4.addWidget(self.userNameLineEdit)\r\n self.loginVerticalLayout2.addLayout(self.loginHorizontalLayout4)\r\n self.loginHorizontalLayout5 = QtWidgets.QHBoxLayout()\r\n self.loginHorizontalLayout5.setObjectName(\"loginHorizontalLayout5\")\r\n self.passwordLabel = QtWidgets.QLabel(self.verticalLayoutWidget)\r\n self.passwordLabel.setMinimumSize(QtCore.QSize(57, 76))\r\n font = QtGui.QFont()\r\n font.setFamily(\"微软雅黑\")\r\n font.setPointSize(14)\r\n self.passwordLabel.setFont(font)\r\n self.passwordLabel.setStyleSheet(\"color: rgb(255, 255, 255);\")\r\n self.passwordLabel.setObjectName(\"passwordLabel\")\r\n self.loginHorizontalLayout5.addWidget(self.passwordLabel)\r\n self.passwordLineEdit = QtWidgets.QLineEdit(self.verticalLayoutWidget)\r\n font = QtGui.QFont()\r\n font.setFamily(\"微软雅黑 Light\")\r\n font.setPointSize(14)\r\n self.passwordLineEdit.setFont(font)\r\n self.passwordLineEdit.setObjectName(\"passwordLineEdit\")\r\n self.passwordLineEdit.setEchoMode(QLineEdit.Password)\r\n self.passwordLineEdit.setContextMenuPolicy(QtCore.Qt.NoContextMenu)\r\n self.loginHorizontalLayout5.addWidget(self.passwordLineEdit)\r\n self.loginVerticalLayout2.addLayout(self.loginHorizontalLayout5)\r\n 
self.loginHorizontalLayout3.addLayout(self.loginVerticalLayout2)\r\n self.loginHorizontalLayout2.addLayout(self.loginHorizontalLayout3)\r\n self.loginHorizontalLayout6 = QtWidgets.QHBoxLayout()\r\n self.loginHorizontalLayout6.setObjectName(\"loginHorizontalLayout6\")\r\n self.loginInButton = QtWidgets.QPushButton(self.verticalLayoutWidget)\r\n font = QtGui.QFont()\r\n font.setFamily(\"微软雅黑 Light\")\r\n font.setPointSize(14)\r\n self.loginInButton.setFont(font)\r\n self.loginInButton.setObjectName(\"loginInButton\")\r\n self.loginHorizontalLayout6.addWidget(self.loginInButton)\r\n self.loginHorizontalLayout2.addLayout(self.loginHorizontalLayout6)\r\n self.loginHorizontalLayout2.setStretch(0, 2)\r\n self.loginHorizontalLayout2.setStretch(1, 1)\r\n self.loginVerticalLayout1.addLayout(self.loginHorizontalLayout2)\r\n spacerItem1 = QtWidgets.QSpacerItem(20, 100, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)\r\n self.loginVerticalLayout1.addItem(spacerItem1)\r\n self.loginVerticalLayout1.setStretch(0, 1)\r\n self.loginVerticalLayout1.setStretch(2, 2)\r\n AmapLoginUiMainWindow.setCentralWidget(self.amapLoginUiWidget)\r\n\r\n self.retranslateUi(AmapLoginUiMainWindow)\r\n QtCore.QMetaObject.connectSlotsByName(AmapLoginUiMainWindow)\r\n\r\n def retranslateUi(self, AmapLoginUiMainWindow):\r\n _translate = QtCore.QCoreApplication.translate\r\n AmapLoginUiMainWindow.setWindowTitle(\"高德地图小程序\")\r\n AmapLoginUiMainWindow.setWindowFlags(\r\n QtCore.Qt.WindowMinimizeButtonHint | QtCore.Qt.WindowCloseButtonHint | QtCore.Qt.MSWindowsFixedSizeDialogHint)\r\n AmapLoginUiMainWindow.setWindowIcon(QIcon(\":/logo.png\"))\r\n self.AmapSmallProgramLabel.setText(_translate(\"AmapLoginUiMainWindow\", \"高德地图小程序\"))\r\n self.userNameLabel.setText(_translate(\"AmapLoginUiMainWindow\", \"账号:\"))\r\n self.userNameLineEdit.setPlaceholderText(_translate(\"AmapLoginUiMainWindow\", \"请输入账号\"))\r\n self.userNameText = self.userNameLineEdit.text()\r\n str1 = 
self.userNameLineEdit.text()\r\n print(str1)\r\n self.passwordLabel.setText(_translate(\"AmapLoginUiMainWindow\", \"密码:\"))\r\n self.passwordLineEdit.setPlaceholderText(_translate(\"AmapLoginUiMainWindow\", \"请输入密码\"))\r\n self.passwordText = self.passwordLineEdit.text()\r\n self.loginInButton.setText(_translate(\"AmapLoginUiMainWindow\", \"登录\"))\r\n" }, { "alpha_fraction": 0.5009387731552124, "alphanum_fraction": 0.5039429068565369, "avg_line_length": 27.58888816833496, "blob_id": "15458a7be1bbc7b5a2215e715dada4301daedf37", "content_id": "9d4d347e45c559da06faae4bc0c202bd3982aa83", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2917, "license_type": "permissive", "max_line_length": 66, "num_lines": 90, "path": "/logrecord/WriteLog.py", "repo_name": "Gaoyifei1011/AmapProgram", "src_encoding": "UTF-8", "text": "import os\r\nimport time\r\nfrom pathlib import Path\r\n\r\nfrom loguru import logger\r\n\r\n\r\nclass WriteLog:\r\n def __init__(self):\r\n self.class_name = None\r\n self.file_name = None\r\n self.log_level = None\r\n self.context = None\r\n\r\n def create_filename(self, class_name: str\r\n ) -> str:\r\n \"\"\"\r\n 函数:创建日志记录文件\r\n Args:\r\n class_name: 日志记录被调用所属的类的名称\r\n Returns:\r\n\r\n \"\"\"\r\n self.class_name = class_name\r\n\r\n # TODO:优化代码,递归创建目录\r\n # 项目目录\r\n project_dir = os.getenv('LOCALAPPDATA')\r\n project_dir_name = '\\\\'.join([project_dir, 'AmapProgram'])\r\n # 若当前的目录是文件,删除\r\n if os.path.isfile(project_dir_name):\r\n os.remove(project_dir_name)\r\n # 若当前目录不存在,创建\r\n if not os.path.exists(project_dir_name):\r\n os.mkdir(project_dir_name)\r\n\r\n # 日志总目录\r\n log_dir = [project_dir_name, 'Log']\r\n log_dir_name = '\\\\'.join(log_dir)\r\n # 若当前的目录是文件,删除\r\n if os.path.isfile(log_dir_name):\r\n os.remove(log_dir_name)\r\n # 若当前目录不存在,创建\r\n if not os.path.exists(log_dir_name):\r\n os.mkdir(log_dir_name)\r\n\r\n # 各个功能对应的目录\r\n class_dir = [log_dir_name, class_name]\r\n 
class_dir_name = '\\\\'.join(class_dir)\r\n # 若当前的目录是文件,删除\r\n if os.path.isfile(class_dir_name):\r\n os.remove(class_dir_name)\r\n # 若当前目录不存在,创建\r\n if not os.path.exists(class_dir_name):\r\n os.mkdir(class_dir_name)\r\n\r\n # 文件名称\r\n time_now = time.strftime(\"%Y-%m-%d\", time.localtime())\r\n time_file = time_now + '.log'\r\n file_list = [class_dir_name, time_file]\r\n file_name = '\\\\'.join(file_list)\r\n if not os.path.exists(file_name):\r\n Path(file_name).touch()\r\n return file_name\r\n\r\n def write_to_log(self, file_name: str,\r\n log_level: int,\r\n context: str = ''\r\n ) -> None:\r\n\r\n self.file_name = file_name\r\n self.log_level = log_level\r\n self.context = context\r\n\r\n logger.remove(handler_id=None)\r\n logger.add(file_name, encoding='utf-8')\r\n if log_level == 0:\r\n logger.debug(context)\r\n elif log_level == 1:\r\n logger.info(context)\r\n elif log_level == 2:\r\n logger.warning(context)\r\n elif log_level == 3:\r\n logger.error(context)\r\n elif log_level == 4:\r\n logger.critical(context)\r\n elif log_level == 5:\r\n logger.exception(context)\r\n elif log_level == 6:\r\n logger.success(context)\r\n" }, { "alpha_fraction": 0.4320482313632965, "alphanum_fraction": 0.442004531621933, "avg_line_length": 40.19136428833008, "blob_id": "18f73aa851a5b5d9af1b5408e93330dbb92eb586", "content_id": "5589c909cecd32da239cf73e5e837c5a6cb28d32", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 39506, "license_type": "permissive", "max_line_length": 151, "num_lines": 857, "path": "/AmapFunctions/StaticMaps.py", "repo_name": "Gaoyifei1011/AmapProgram", "src_encoding": "UTF-8", "text": "# -*- coding:utf-8 -*-\r\n# 导入的库\r\nimport inspect\r\nimport json\r\nimport os\r\nimport re\r\nimport urllib.request\r\nfrom http.client import IncompleteRead, RemoteDisconnected\r\nfrom urllib.error import HTTPError, URLError\r\n\r\nimport requests\r\n\r\nfrom SelfExpection.CustomExpection import 
CustomExpection\r\nfrom SelfExpection.StringFormatException import StringFormatException\r\nfrom logrecord.WriteLog import WriteLog\r\n\r\n\r\nclass StaticMaps:\r\n \"\"\"\r\n Class:静态地图\r\n 静态地图服务通过返回一张地图图片响应HTTP请求,使用户能够将高德地图以图片形式嵌入自己的网页中。用户可以指定请求的地图位置、图片大小、以及在地图上添加覆盖物,如标签、标注、折线、多边形。\r\n \"\"\"\r\n\r\n def __init__(self):\r\n self.location = None\r\n self.zoom = None\r\n self.scale = None\r\n self.size = None\r\n self.markers = None\r\n self.labels = None\r\n self.traffic = None\r\n self.paths = None\r\n self.check_result = None\r\n self.url = None\r\n self.filename = None\r\n self.fillColor = None\r\n self.fill_transparency = None\r\n self.num_retries = None\r\n self.color = None\r\n self.color = None\r\n self.label = None\r\n self.content = None\r\n self.font = None\r\n self.bold = None\r\n self.fontSize = None\r\n self.fontColor = None\r\n self.background = None\r\n self.rgbColor = None\r\n self.transparency = None\r\n self.weight = None\r\n\r\n # 写入日志\r\n writeLog = WriteLog()\r\n class_name = self.__class__.__name__\r\n log_filename = writeLog.create_filename(class_name=class_name)\r\n writeLog.write_to_log(file_name=log_filename, log_level=1, context='Class name:{0} start'.format(class_name))\r\n\r\n # 获取高德地图数据API的钥匙\r\n APIkey = '<请自己输入自己申请的API Key>'\r\n fileItem = 1\r\n\r\n def check_static_maps_url(self, location: str,\r\n zoom: int,\r\n **kwargs\r\n ) -> bool:\r\n\r\n \"\"\"\r\n 函数:检测静态地图数据\r\n Args:\r\n location:地图中心点,部分条件必填。中心点坐标。规则:经度和纬度用\",\"分隔 经纬度小数点后不得超过6位。\r\n zoom:地图级别,必填。地图缩放级别:[1,17]\r\n **kwargs:\r\n size:地图大小,必填,默认400*400。图片宽度*图片高度。最大值为1024*1024\r\n scale:普通/高清,可选,默认1。1:返回普通图;2:调用高清图,图片高度和宽度都增加一倍,zoom也增加一倍(当zoom为最大值时,zoom不再改变)。\r\n traffic:交通路况标识,可选,默认0。底图是否展现实时路况。 可选值: 0,不展现;1,展现。\r\n markers:标注,可选。使用规则见markers详细说明,标注最大数10个\r\n labels:标签,可选。使用规则见labels详细说明,标签最大数10个\r\n paths:折线,可选。使用规则见paths详细说明,折线和多边形最大数4个\r\n 
注:如果有标注/标签/折线等覆盖物,则中心点(location)和地图级别(zoom)可选填。当请求中无location值时,地图区域以包含请求中所有的标注/标签/折线的几何中心为中心点;如请求中无zoom,地图区域以包含请求中所有的标注/标签/折线为准,系统计算出zoom值。\r\n Returns:返回数据的检测格式是否符合要求\r\n \"\"\"\r\n\r\n self.location = location\r\n self.zoom = zoom\r\n\r\n if 'markers' in kwargs:\r\n self.markers = kwargs['markers']\r\n if 'labels' in kwargs:\r\n self.labels = kwargs['labels']\r\n if 'paths' in kwargs:\r\n self.paths = kwargs['paths']\r\n if 'scale' in kwargs:\r\n self.scale = kwargs['scale']\r\n if 'size' in kwargs:\r\n self.size = kwargs['size']\r\n if 'traffic' in kwargs:\r\n self.traffic = kwargs['traffic']\r\n\r\n # 写入日志\r\n writeLog = WriteLog()\r\n class_name = self.__class__.__name__\r\n function_name = inspect.stack()[0][3]\r\n log_filename = writeLog.create_filename(class_name=class_name)\r\n\r\n # 传入参数\r\n parameters = {'key': self.APIkey,\r\n 'location': self.location,\r\n 'zoom': self.zoom\r\n }\r\n\r\n if self.markers is not None:\r\n parameters.update(markers=self.markers)\r\n if self.labels is not None:\r\n parameters.update(labels=self.labels)\r\n if self.paths is not None:\r\n parameters.update(paths=self.paths)\r\n if self.size is not None:\r\n parameters.update(size=self.size)\r\n if self.scale is not None:\r\n parameters.update(scale=self.scale)\r\n if self.traffic is not None:\r\n parameters.update(traffic=self.traffic)\r\n\r\n # 获取数据\r\n try:\r\n request_information = requests.get(\"https://restapi.amap.com/v3/staticmap?parameters\",\r\n params=parameters)\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=1,\r\n context='Function name:{0} - request_information:{1}'.format(function_name,\r\n request_information)\r\n )\r\n request_information.close() # 关闭访问\r\n request_information.raise_for_status() # 如果响应状态码不是 200,就主动抛出异常\r\n json.loads(request_information.text)\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=3,\r\n context='Function name:{0} - Json data error.'.format(\r\n 
function_name)\r\n )\r\n raise CustomExpection\r\n\r\n except requests.RequestException as e:\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=5,\r\n context='Function name:{0} - {1} has occured.'.format(function_name,\r\n e.__class__.__name__)\r\n )\r\n exit(0)\r\n\r\n except json.decoder.JSONDecodeError as e:\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=5,\r\n context='Function name:{0} - {1} has occured.'.format(function_name,\r\n e.__class__.__name__)\r\n )\r\n return True\r\n\r\n except CustomExpection as e:\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=5,\r\n context='Function name:{0} - {1} has occured.'.format(function_name,\r\n\r\n e.__class__.__name__)\r\n )\r\n return False\r\n\r\n except Exception as e:\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=5,\r\n context='Function name:{0} - {1} has occured.'.format(function_name,\r\n e.__class__.__name__)\r\n )\r\n return False\r\n\r\n def get_static_maps_url(self, location: str,\r\n zoom: int,\r\n check_result: bool,\r\n size: str = '400*400',\r\n scale: int = 1,\r\n traffic: int = 0,\r\n **kwargs\r\n ) -> str:\r\n\r\n \"\"\"\r\n 函数:获取静态地图数据\r\n Args:\r\n location:地图中心点,部分条件必填。中心点坐标。规则:经度和纬度用\",\"分隔 经纬度小数点后不得超过6位。\r\n zoom:地图级别,必填。地图缩放级别:[1,17]\r\n check_result:数据的检测格式符合要求标志\r\n size:地图大小,必填,默认400*400。图片宽度*图片高度。最大值为1024*1024\r\n scale:普通/高清,可选,默认1。1:返回普通图;2:调用高清图,图片高度和宽度都增加一倍,zoom也增加一倍(当zoom为最大值时,zoom不再改变)。\r\n kwargs:\r\n markers:标注,可选。使用规则见markers详细说明,标注最大数10个\r\n labels:标签,可选。使用规则见labels详细说明,标签最大数10个\r\n paths:折线,可选。使用规则见paths详细说明,折线和多边形最大数4个\r\n traffic:交通路况标识,可选,默认0。底图是否展现实时路况。 可选值: 0,不展现;1,展现。\r\n 注:如果有标注/标签/折线等覆盖物,则中心点(location)和地图级别(zoom)可选填。当请求中无location值时,地图区域以包含请求中所有的标注/标签/折线的几何中心为中心点;如请求中无zoom,地图区域以包含请求中所有的标注/标签/折线为准,系统计算出zoom值。\r\n Returns:返回获得的json格式数据或错误信息\r\n \"\"\"\r\n\r\n # TODO:未来版本将返回数据从str升级为dict\r\n self.location = 
location\r\n self.zoom = zoom\r\n self.check_result = check_result\r\n self.size = size\r\n self.scale = scale\r\n self.traffic = traffic\r\n\r\n if 'markers' in kwargs:\r\n self.markers = kwargs['markers']\r\n if 'labels' in kwargs:\r\n self.labels = kwargs['labels']\r\n if 'paths' in kwargs:\r\n self.paths = kwargs['paths']\r\n if 'scale' in kwargs:\r\n self.scale = kwargs['scale']\r\n if 'size' in kwargs:\r\n self.size = kwargs['size']\r\n if 'traffic' in kwargs:\r\n self.traffic = kwargs['traffic']\r\n\r\n # 写入日志\r\n writeLog = WriteLog()\r\n class_name = self.__class__.__name__\r\n function_name = inspect.stack()[0][3]\r\n log_filename = writeLog.create_filename(class_name=class_name)\r\n\r\n # 传入参数\r\n parameters = {'key': self.APIkey,\r\n 'location': self.location,\r\n 'zoom': self.zoom,\r\n }\r\n\r\n if self.markers is not None:\r\n parameters.update(markers=self.markers)\r\n if self.labels is not None:\r\n parameters.update(labels=self.labels)\r\n if self.paths is not None:\r\n parameters.update(paths=self.paths)\r\n if self.size is not None:\r\n parameters.update(size=self.size)\r\n if self.scale is not None:\r\n parameters.update(scale=self.scale)\r\n if self.traffic is not None:\r\n parameters.update(traffic=self.traffic)\r\n\r\n # 获取数据\r\n if check_result:\r\n request_information = requests.get(\"https://restapi.amap.com/v3/staticmap?parameters\",\r\n params=parameters)\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=6,\r\n context='Function name:{0} - request successfully'.format(function_name)\r\n )\r\n request_information.close() # 关闭访问\r\n return request_information.url\r\n else:\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=3,\r\n context='Function name:{0} - request failed'.format(function_name)\r\n )\r\n return 'Error'\r\n\r\n def parse_static_maps_url(self, url: str\r\n ) -> str:\r\n \"\"\"\r\n 函数:解析图片网页链接\r\n Args:\r\n url:网页图片链接\r\n \"\"\"\r\n\r\n # 
TODO:未来版本将返回数据从str升级为dict\r\n self.url = url\r\n\r\n # 写入日志\r\n writeLog = WriteLog()\r\n class_name = self.__class__.__name__\r\n function_name = inspect.stack()[0][3]\r\n log_filename = writeLog.create_filename(class_name=class_name)\r\n\r\n # TODO:优化代码,递归创建目录\r\n # 图片保存的位置\r\n local_appdata_directory = os.getenv('LOCALAPPDATA')\r\n temp_directory = '\\\\'.join([local_appdata_directory, 'AmapProgram'])\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=1,\r\n context='Function name:{0} - windows temp directory:{1}'.format(function_name,\r\n temp_directory)\r\n )\r\n\r\n # 目录不存在,创建\r\n if not os.path.exists(temp_directory):\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=6,\r\n context='Function name:{0} - directory successfully created'.format(function_name)\r\n )\r\n os.mkdir(temp_directory)\r\n\r\n # Photo目录文件夹\r\n list_photo = [temp_directory, 'StaticMaps']\r\n photo_directory = '\\\\'.join(list_photo)\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=1,\r\n context='Function name:{0} - photo directory:{1}'.format(function_name,\r\n temp_directory)\r\n )\r\n\r\n # 目录不存在,创建\r\n if not os.path.exists(photo_directory):\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=6,\r\n context='Function name:{0} - directory successfully created'.format(function_name)\r\n )\r\n os.mkdir(photo_directory)\r\n\r\n # 文件绝对路径\r\n list_filename = [photo_directory, 'StaticMap{0}.png'.format(StaticMaps.fileItem)]\r\n filename = '\\\\'.join(list_filename)\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=1,\r\n context='Function name:{0} - file name:{1}'.format(function_name,\r\n filename)\r\n )\r\n\r\n # 保存图片\r\n savePhotoResult = self.save_photo(self.url, filename=filename)\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=6,\r\n context='Function 
name:{0} - photo successfully saved in local directory.'.format(\r\n function_name)\r\n )\r\n\r\n if savePhotoResult:\r\n # 图片保存成功,返回图片保存的文件位置信息\r\n return filename\r\n else:\r\n return \"Error\"\r\n\r\n def save_photo(self, url: str,\r\n filename: str,\r\n num_retries: int = 3\r\n ) -> bool:\r\n \"\"\"\r\n 函数:将网页url对应的图片保存到本地目录下\r\n Args:\r\n url:网页图片连接\r\n filename:保存到本地图片的位置\r\n num_retries:重连次数\r\n \"\"\"\r\n\r\n self.url = url\r\n self.filename = filename\r\n self.num_retries = num_retries\r\n\r\n # 写入日志\r\n writeLog = WriteLog()\r\n class_name = self.__class__.__name__\r\n function_name = inspect.stack()[0][3]\r\n log_filename = writeLog.create_filename(class_name=class_name)\r\n\r\n img_src = self.url\r\n\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=1,\r\n context='Function name:{0} - img_src:{1}'.format(function_name,\r\n img_src)\r\n )\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=1,\r\n context='Function name:{0} - file name:{1}'.format(function_name,\r\n self.filename)\r\n )\r\n\r\n header = {\r\n 'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) \\\r\n AppleWebKit/537.36 (KHTML, like Gecko) \\\r\n Chrome/35.0.1916.114 Safari/537.36',\r\n 'Cookie': 'AspxAutoDetectCookieSupport=1'\r\n }\r\n # Request类可以使用给定的header访问URL\r\n result = urllib.request.Request(url=img_src, headers=header)\r\n\r\n try:\r\n response = urllib.request.urlopen(result, timeout=15) # 得到访问的网址\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=6,\r\n context='Function name:{0} - response successfully'.format(function_name)\r\n )\r\n\r\n # 保存图片文件\r\n with open(self.filename, 'wb') as file:\r\n content = response.read() # 获得图片\r\n file.write(content) # 保存图片\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=6,\r\n context='Function name:{0} - picture saved successfully'.format(function_name)\r\n )\r\n return True\r\n\r\n except 
HTTPError as e: # HTTP响应异常处理\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=3,\r\n context='Function name:{0} - error reason:{1}'.format(function_name,\r\n e.reason)\r\n )\r\n return False\r\n\r\n except URLError as e: # 一定要放到HTTPError之后,因为它包含了前者\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=3,\r\n context='Function name:{0} - error reason:{1}'.format(function_name,\r\n e.reason)\r\n )\r\n return False\r\n\r\n except IncompleteRead or RemoteDisconnected:\r\n if self.num_retries == 0: # 重连机制\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=1,\r\n context='Function name:{0} - retries time:{1}'.format(function_name,\r\n self.num_retries)\r\n )\r\n return False\r\n\r\n else:\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=1,\r\n context='Function name:{0} - retries time:{1}'.format(function_name,\r\n self.num_retries)\r\n )\r\n self.save_photo(self.url, self.filename, self.num_retries - 1)\r\n\r\n def user_define_markers(self, location: str,\r\n size: str = 'small',\r\n color: str = '0xFC6054',\r\n **kwargs,\r\n ) -> str:\r\n \"\"\"\r\n 函数:自定义标注\r\n Args:\r\n location:地理位置坐标\r\n size:自定义标注的大小,可选值: small,mid,large,默认为small\r\n color:自定义标注的颜色,选值范围:[0x000000, 0xffffff],默认为0xFC6054\r\n kwargs:\r\n label:[0-9]、[A-Z]、[单个中文字] 当size为small时,图片不展现标注名。\r\n Returns:返回格式化后的单个marker字符串\r\n \"\"\"\r\n\r\n self.size = size\r\n self.color = color\r\n self.location = location\r\n\r\n if 'label' in kwargs:\r\n self.label = kwargs['label']\r\n else:\r\n self.label = ''\r\n\r\n # 写入日志\r\n writeLog = WriteLog()\r\n class_name = self.__class__.__name__\r\n function_name = inspect.stack()[0][3]\r\n log_filename = writeLog.create_filename(class_name=class_name)\r\n\r\n try:\r\n # 检测字符格式正误\r\n if not self.is_rgb_color(self.color):\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=3,\r\n 
context='Function name:{0} - exception:StringFormatException'.format(\r\n function_name)\r\n )\r\n raise StringFormatException\r\n\r\n if not self.is_label(self.label):\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=3,\r\n context='Function name:{0} - exception:StringFormatException'.format(\r\n function_name)\r\n )\r\n raise StringFormatException\r\n\r\n list_location = self.location.split(\";\")\r\n for item in list_location:\r\n if not self.is_location(item):\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=3,\r\n context='Function name:{0} - exception:StringFormatException'.format(\r\n function_name)\r\n )\r\n raise StringFormatException\r\n\r\n # 字符串合并\r\n # 合并markerStyle\r\n list_markers = [self.size, self.color, self.label]\r\n temp_markers = ','.join(list_markers)\r\n # 合并marker字符串\r\n list_temp = [temp_markers, self.location]\r\n result_markers = \":\".join(list_temp)\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=3,\r\n context='Function name:{0} - markers:{1}'.format(function_name,\r\n result_markers)\r\n )\r\n return result_markers\r\n\r\n except StringFormatException as e:\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=3,\r\n context='Function name:{0} - error reason:{1}'.format(function_name,\r\n e.error_reason())\r\n )\r\n\r\n def user_define_labels(self, content: str,\r\n location: str,\r\n **kwargs\r\n ) -> str:\r\n \"\"\"\r\n 函数:自定义标签\r\n Args:\r\n content:标签内容,字符最大数目为15\r\n location:地理位置坐标\r\n kwargs:\r\n font:0:微软雅黑;1:宋体;2:Times New Roman;3:Helvetica\r\n bold:0:非粗体;1:粗体\r\n fontSize:字体大小,可选值[1,72]\r\n font_color:字体颜色,取值范围:[0x000000, 0xffffff],默认为0xFFFFFF\r\n background:背景色,取值范围:[0x000000, 0xffffff],默认为0x5288d8\r\n Returns:返回格式化后的单个label字符串\r\n \"\"\"\r\n\r\n self.content = content\r\n self.location = location\r\n\r\n if 'background' in kwargs:\r\n self.background = 
kwargs['background']\r\n if 'bold' in kwargs:\r\n self.bold = kwargs['bold']\r\n if 'font' in kwargs:\r\n self.font = kwargs['font']\r\n if 'fontColor' in kwargs:\r\n self.fontColor = kwargs['fontColor']\r\n if 'fontSize' in kwargs:\r\n self.fontSize = kwargs['fontSize']\r\n\r\n # 写入日志\r\n writeLog = WriteLog()\r\n class_name = self.__class__.__name__\r\n function_name = inspect.stack()[0][3]\r\n log_filename = writeLog.create_filename(class_name=class_name)\r\n\r\n try:\r\n # 检测字符串格式\r\n # 检测字体\r\n if self.font not in [0, 1, 2, 3]:\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=3,\r\n context='Function name:{0} - exception:StringFormatException'.format(\r\n function_name)\r\n )\r\n raise StringFormatException\r\n\r\n # 检测文字粗体\r\n if self.bold not in [0, 1]:\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=3,\r\n context='Function name:{0} - exception:StringFormatException'.format(\r\n function_name)\r\n )\r\n raise StringFormatException\r\n\r\n # 检测文字字体大小\r\n if self.fontSize not in list(range(1, 73)):\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=3,\r\n context='Function name:{0} - exception:StringFormatException'.format(\r\n function_name)\r\n )\r\n raise StringFormatException\r\n\r\n # 检测字体颜色格式\r\n if not self.is_rgb_color(self.fontColor):\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=3,\r\n context='Function name:{0} - exception:StringFormatException'.format(\r\n function_name)\r\n )\r\n raise StringFormatException\r\n\r\n # 检测背景颜色格式\r\n if not self.is_rgb_color(self.background):\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=3,\r\n context='Function name:{0} - exception:StringFormatException'.format(\r\n function_name)\r\n )\r\n raise StringFormatException\r\n\r\n list_location = self.location.split(\";\")\r\n for item in list_location:\r\n if not 
self.is_location(item):\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=3,\r\n context='Function name:{0} - exception:StringFormatException'.format(\r\n function_name)\r\n )\r\n raise StringFormatException\r\n\r\n # 字符串合并\r\n # 合并labelStyle\r\n list_labels = [self.content, self.font, self.bold, self.fontSize, self.fontColor, self.background]\r\n temp_labels = ','.join('%s' % item for item in list_labels)\r\n # 合并\r\n list_temp = [temp_labels, self.location]\r\n result_labels = ':'.join(list_temp)\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=3,\r\n context='Function name:{0} - labels:{1}'.format(function_name,\r\n result_labels)\r\n )\r\n return result_labels\r\n\r\n except StringFormatException as e:\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=3,\r\n context='Function name:{0} - error reason:{1}'.format(function_name,\r\n e.error_reason())\r\n )\r\n\r\n def user_define_paths(self, location: str,\r\n **kwargs\r\n ) -> str:\r\n \"\"\"\r\n 函数:自定义路线\r\n Args:\r\n location:地理位置坐标\r\n kwargs:\r\n weight: 线条粗细,可选值: [2,15],默认5\r\n color:折线颜色。 选值范围:[0x000000, 0xffffff]\r\n transparency:透明度。可选值[0,1],小数后最多2位,0表示完全透明,1表示完全不透明。默认1\r\n fillcolor:多边形的填充颜色,此值不为空时折线封闭成多边形。取值规则同color\r\n fill_transparency:填充面透明度。可选值[0,1],小数后最多2位,0表示完全透明,1表示完全不透明。默认0.5\r\n Returns:返回格式化后的单个path字符串\r\n \"\"\"\r\n\r\n self.location = location\r\n\r\n if 'widget' in kwargs:\r\n self.weight = kwargs['widget']\r\n if 'color' in kwargs:\r\n self.color = kwargs['color']\r\n if 'transparency' in kwargs:\r\n self.transparency = kwargs['transparency']\r\n if 'fillColor' in kwargs:\r\n self.fillColor = kwargs['fillColor']\r\n if 'fill_transparency' in kwargs:\r\n self.fill_transparency = kwargs['fill_transparency']\r\n\r\n # 写入日志\r\n writeLog = WriteLog()\r\n class_name = self.__class__.__name__\r\n function_name = inspect.stack()[0][3]\r\n log_filename = 
writeLog.create_filename(class_name=class_name)\r\n\r\n try:\r\n # 检测字符串格式\r\n if self.weight not in list(range(2, 16)):\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=3,\r\n context='Function name:{0} - exception:StringFormatException'.format(\r\n function_name)\r\n )\r\n raise StringFormatException\r\n\r\n if 0 > self.transparency > 1 and len(str(self.transparency).split(\".\")) != 2:\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=3,\r\n context='Function name:{0} - exception:StringFormatException'.format(\r\n function_name)\r\n )\r\n raise StringFormatException\r\n\r\n if not self.is_rgb_color(self.color):\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=3,\r\n context='Function name:{0} - exception:StringFormatException'.format(\r\n function_name)\r\n )\r\n raise StringFormatException\r\n\r\n if 0 > self.fill_transparency > 1 and len(str(self.fill_transparency).split(\".\")) != 2:\r\n raise StringFormatException\r\n\r\n if self.fillColor != '':\r\n if not self.is_rgb_color(self.fillColor):\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=3,\r\n context='Function name:{0} - exception:StringFormatException'.format(\r\n function_name)\r\n )\r\n raise StringFormatException\r\n\r\n list_location = self.location.split(\";\")\r\n\r\n for item in list_location:\r\n if not self.is_location(item):\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=3,\r\n context='Function name:{0} - exception:StringFormatException'.format(\r\n function_name)\r\n )\r\n raise StringFormatException\r\n\r\n # 字符串合并\r\n # 合并labelStyle\r\n list_paths = [self.weight, self.color, self.transparency, self.fillColor, self.fill_transparency]\r\n temp_paths = ','.join('%s' % item for item in list_paths)\r\n # 合并\r\n list_temp = [temp_paths, self.location]\r\n result_paths = ':'.join(list_temp)\r\n # 
only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=3,\r\n context='Function name:{0} - paths:{1}'.format(function_name,\r\n result_paths)\r\n )\r\n return result_paths\r\n\r\n except StringFormatException as e:\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=3,\r\n context='Function name:{0} - error reason:{1}'.format(function_name,\r\n e.error_reason())\r\n )\r\n\r\n def is_rgb_color(self, rgbColor: str\r\n ) -> bool:\r\n \"\"\"\r\n 函数:判断是否符合rgb格式字符串\r\n Args:\r\n rgbColor: rgb颜色值\r\n Returns:返回判断值\r\n \"\"\"\r\n\r\n self.rgbColor = rgbColor\r\n\r\n # 写入日志\r\n writeLog = WriteLog()\r\n class_name = self.__class__.__name__\r\n function_name = inspect.stack()[0][3]\r\n log_filename = writeLog.create_filename(class_name=class_name)\r\n\r\n # 检测字符串是否符合rgb格式字符串\r\n rgbString = re.compile(r'0x[a-fA-F0-9]{6}$')\r\n result = bool(rgbString.match(self.rgbColor))\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=6,\r\n context='Function name:{0} - rgb color string checked result:{1}'.format(function_name,\r\n result)\r\n )\r\n return result\r\n\r\n def is_label(self, label: str\r\n ) -> bool:\r\n \"\"\"\r\n 函数:判断是否符合label字符串\r\n Args:\r\n label: label字符串\r\n Returns:返回判断值\r\n \"\"\"\r\n\r\n self.label = label\r\n\r\n # 写入日志\r\n writeLog = WriteLog()\r\n class_name = self.__class__.__name__\r\n function_name = inspect.stack()[0][3]\r\n log_filename = writeLog.create_filename(class_name=class_name)\r\n\r\n labelString = re.compile(r'[0-9A-Z\\u4e00-\\u9fa5]$') # 检测字符串是否符合[0-9]、[A-Z]、[单个中文字]格式\r\n\r\n result = bool(labelString.match(self.label))\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=6,\r\n context='Function name:{0} - label string checked result:{1}'.format(function_name,\r\n result)\r\n )\r\n return result\r\n\r\n def is_location(self, location: str\r\n ) -> bool:\r\n \"\"\"\r\n 函数:判断是否符合经纬度值格式\r\n Args:\r\n 
location: 经纬度值\r\n Returns:返回判断值\r\n \"\"\"\r\n self.location = location\r\n\r\n # 写入日志\r\n writeLog = WriteLog()\r\n class_name = self.__class__.__name__\r\n function_name = inspect.stack()[0][3]\r\n log_filename = writeLog.create_filename(class_name=class_name)\r\n\r\n locationString = re.compile(r'\\d{1,3}.\\d{5,6},\\d{1,3}.\\d{5,6}$')\r\n result = bool(locationString.match(location))\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=6,\r\n context='Function name:{0} - location string checked result:{1}'.format(function_name,\r\n result)\r\n )\r\n return bool(locationString.match(location))\r\n" }, { "alpha_fraction": 0.5427362322807312, "alphanum_fraction": 0.5462623238563538, "avg_line_length": 38.98265838623047, "blob_id": "2d80325abf72ddd73846128ccc7e3e878931fe4f", "content_id": "cbdf29345c916f1658e421958ad69426833ac722", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7722, "license_type": "permissive", "max_line_length": 132, "num_lines": 173, "path": "/FundamentalFunctions/RideRoutePlanningOperation.py", "repo_name": "Gaoyifei1011/AmapProgram", "src_encoding": "UTF-8", "text": "import inspect\r\n\r\nfrom AmapFunctions.GeographicCoding import GeographicCoding\r\nfrom AmapFunctions.RoutePlanning import RoutePlanning\r\nfrom logrecord.WriteLog import WriteLog\r\n\r\n\r\nclass RideRoutePlanningOperation:\r\n \"\"\"\r\n Class:骑行路径规划操作\r\n \"\"\"\r\n def __init__(self):\r\n self.rideDepartureAddress = None\r\n self.rideDestinationAddress = None\r\n self.rideDepartureCity = None\r\n self.rideDestinationCity = None\r\n\r\n # 写入日志\r\n writeLog = WriteLog()\r\n class_name = self.__class__.__name__\r\n log_filename = writeLog.create_filename(class_name=class_name)\r\n writeLog.write_to_log(file_name=log_filename, log_level=1, context='Class name:{0} start'.format(class_name))\r\n\r\n def check_ride_departure_information(self, rideDepartureAddress: str,\r\n ) -> 
int:\r\n \"\"\"\r\n 函数:检测用户提供的骑行路径出发点是否符合规范要求\r\n Args:\r\n rideDepartureAddress: 用户输入的出发点\r\n Returns:\r\n 检测类型识别码\r\n \"\"\"\r\n\r\n self.rideDepartureAddress = rideDepartureAddress\r\n\r\n # 写入日志\r\n writeLog = WriteLog()\r\n class_name = self.__class__.__name__\r\n function_name = inspect.stack()[0][3]\r\n log_filename = writeLog.create_filename(class_name=class_name)\r\n\r\n checkedResult = self.rideDepartureAddress is None or self.rideDepartureAddress == ''\r\n\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=1,\r\n context='Function name:{0} - ride departure address information :{1}'.format(\r\n function_name,\r\n checkedResult)\r\n )\r\n\r\n if checkedResult:\r\n return 2\r\n # TODO:\r\n # 使用python正则表达式验证用户名格式\r\n # 此处检测格式错误返回false\r\n else:\r\n return True\r\n\r\n def check_ride_destination_information(self, rideDestinationAddress: str\r\n ) -> int:\r\n \"\"\"\r\n 函数:检测用户提供的骑行路径终点是否符合规范要求\r\n Args:\r\n rideDestinationAddress: 用户输入的终点\r\n Returns:\r\n 检测类型识别码\r\n \"\"\"\r\n\r\n self.rideDestinationAddress = rideDestinationAddress\r\n\r\n # 写入日志\r\n writeLog = WriteLog()\r\n class_name = self.__class__.__name__\r\n function_name = inspect.stack()[0][3]\r\n log_filename = writeLog.create_filename(class_name=class_name)\r\n\r\n # 检测结果\r\n checkedResult = self.rideDestinationAddress is None or self.rideDestinationAddress == ''\r\n\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=1,\r\n context='Function name:{0} - ride destination address information :{1}'.format(\r\n function_name,\r\n checkedResult)\r\n )\r\n\r\n if checkedResult:\r\n return 2\r\n # TODO:\r\n # 检测用户提供的骑行路径出发点是否符合规范要求\r\n # 此处检测格式错误返回false\r\n else:\r\n return True\r\n\r\n def get_ride_route_planning_information(self, rideDepartureAddress: str,\r\n rideDestinationAddress: str,\r\n rideDepartureCity: str = '',\r\n rideDestinationCity: str = ''\r\n ) -> list:\r\n \"\"\"\r\n 函数:获取骑行路径规划的具体信息\r\n Args:\r\n 
rideDepartureAddress: 用户输入的出发点\r\n rideDestinationAddress: 用户输入的终点\r\n rideDepartureCity: 用户输入的出发点对应的城市\r\n rideDestinationCity: 用户输入的终点对应的城市\r\n Returns:\r\n 返回获取的骑行路径规划对应的具体信息\r\n \"\"\"\r\n\r\n # TODO:未来版本将返回数据从list升级为dict\r\n self.rideDepartureAddress = rideDepartureAddress\r\n self.rideDestinationAddress = rideDestinationAddress\r\n # 在以后的版本中添加\r\n self.rideDepartureCity = rideDepartureCity\r\n self.rideDestinationCity = rideDestinationCity\r\n\r\n # 写入日志\r\n writeLog = WriteLog()\r\n class_name = self.__class__.__name__\r\n function_name = inspect.stack()[0][3]\r\n log_filename = writeLog.create_filename(class_name=class_name)\r\n\r\n # 骑行路径规划\r\n geographicCoding = GeographicCoding()\r\n # 获取起点终点对应的初始编码信息\r\n # TODO:优化city参数\r\n rideDepartureJsonDecode = geographicCoding.get_geographic_coding(address=self.rideDepartureAddress,\r\n city='')\r\n rideDestinationJsonDecode = geographicCoding.get_geographic_coding(address=self.rideDestinationAddress,\r\n city='')\r\n\r\n parseRideDepartureInformation = geographicCoding.parse_geographic_coding(rideDepartureJsonDecode)\r\n parseRideDestinationInformation = geographicCoding.parse_geographic_coding(rideDestinationJsonDecode)\r\n\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=1,\r\n context='Function name:{0} - ride departure information:{1}'.format(function_name,\r\n parseRideDepartureInformation)\r\n )\r\n\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=1,\r\n context='Function name:{0} - ride destination information:{1}'.format(function_name,\r\n parseRideDestinationInformation)\r\n )\r\n\r\n # 起点终点位置编码\r\n if 'error_context' not in parseRideDepartureInformation:\r\n resultDepartureGeographicCoding = parseRideDepartureInformation['geographic_position']\r\n else:\r\n return [parseRideDepartureInformation['error_context']]\r\n if 'error_context' not in parseRideDestinationInformation:\r\n resultDestinationGeographicCoding = 
parseRideDestinationInformation['geographic_position']\r\n else:\r\n return [parseRideDestinationInformation['error_context']]\r\n\r\n routePlanning = RoutePlanning()\r\n rideRoutePlanning = routePlanning.get_ride_route_planning(origin=resultDepartureGeographicCoding,\r\n destination=resultDestinationGeographicCoding)\r\n # only for debugging\r\n # print(\"详细信息\")\r\n # print(walkingRoutePlanning)\r\n\r\n # 输出路径规划信息\r\n resultRideRoutePlanning = routePlanning.parse_ride_route_planning(rideRoutePlanning)\r\n promptInformation = \"从{0}到{1}的骑行导航信息如下所示\".format(self.rideDepartureAddress, self.rideDestinationAddress)\r\n resultRideRoutePlanning.insert(0, promptInformation)\r\n return resultRideRoutePlanning\r\n" }, { "alpha_fraction": 0.6495794653892517, "alphanum_fraction": 0.6660179495811462, "avg_line_length": 57.45041275024414, "blob_id": "6474739d35218cc981cd7fc558a5ea158e0a63b5", "content_id": "422a5e15ef0b1bdee065d2bc48588d20e069d303", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 29538, "license_type": "permissive", "max_line_length": 211, "num_lines": 484, "path": "/Window/SettingsUI.py", "repo_name": "Gaoyifei1011/AmapProgram", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\r\n\r\n# Form implementation generated from reading ui file 'F:\\AmapProgram\\Window\\ui\\SettingUI.ui'\r\n#\r\n# Created by: PyQt5 UI code generator 5.15.4\r\n#\r\n# WARNING: Any manual changes made to this file will be lost when pyuic5 is\r\n# run again. 
Do not edit this file unless you know what you are doing.\r\nimport sys\r\n\r\nfrom PyQt5 import QtCore, QtGui, QtWidgets\r\nfrom PyQt5.QtGui import QIcon\r\nfrom PyQt5.QtWidgets import QApplication, QMainWindow\r\nfrom Resources.Icon.Icon import *\r\n\r\n\r\nclass Ui_SettingsUI(object):\r\n def setupUi(self, SettingsUIMainWindow):\r\n SettingsUIMainWindow.setObjectName(\"SettingsUIMainWindow\")\r\n SettingsUIMainWindow.resize(600, 400)\r\n self.settingsUiWidget = QtWidgets.QWidget(SettingsUIMainWindow)\r\n self.settingsUiWidget.setObjectName(\"settingsUiWidget\")\r\n self.settingsUiFrame = QtWidgets.QFrame(self.settingsUiWidget)\r\n self.settingsUiFrame.setGeometry(QtCore.QRect(0, 0, 600, 400))\r\n self.settingsUiFrame.setFrameShape(QtWidgets.QFrame.StyledPanel)\r\n self.settingsUiFrame.setFrameShadow(QtWidgets.QFrame.Raised)\r\n self.settingsUiFrame.setObjectName(\"settingsUiFrame\")\r\n self.horizontalLayoutWidget = QtWidgets.QWidget(self.settingsUiFrame)\r\n self.horizontalLayoutWidget.setGeometry(QtCore.QRect(0, 0, 601, 401))\r\n self.horizontalLayoutWidget.setObjectName(\"horizontalLayoutWidget\")\r\n self.settingsUiHorizontal1 = QtWidgets.QHBoxLayout(self.horizontalLayoutWidget)\r\n self.settingsUiHorizontal1.setContentsMargins(0, 0, 0, 0)\r\n self.settingsUiHorizontal1.setObjectName(\"settingsUiHorizontal1\")\r\n self.verticalLayout = QtWidgets.QVBoxLayout()\r\n self.verticalLayout.setObjectName(\"verticalLayout\")\r\n self.settingsLabel = QtWidgets.QLabel(self.horizontalLayoutWidget)\r\n font = QtGui.QFont()\r\n font.setFamily(\"微软雅黑 Light\")\r\n font.setPointSize(18)\r\n self.settingsLabel.setFont(font)\r\n self.settingsLabel.setAlignment(QtCore.Qt.AlignCenter)\r\n self.settingsLabel.setObjectName(\"settingsLabel\")\r\n self.verticalLayout.addWidget(self.settingsLabel)\r\n self.itemListWidget = QtWidgets.QListWidget(self.horizontalLayoutWidget)\r\n self.itemListWidget.setStyleSheet(\"QListWidget, QListView, QTreeWidget, QTreeView {\\n\"\r\n \" outline: 
0px;\\n\"\r\n \"}\\n\"\r\n \"\\n\"\r\n \"QListWidget {\\n\"\r\n \" color: Black;\\n\"\r\n \" background: #F0F0F0;\\n\"\r\n \" NoFrame\\n\"\r\n \"}\\n\"\r\n \"QListWidget::Item {\\n\"\r\n \" color: rgb(0,0,0);\\n\"\r\n \" border-left: 4px solid #F5F5F5;\\n\"\r\n \" height:35px;\\n\"\r\n \"}\\n\"\r\n \"QListWidget::Item:selected {\\n\"\r\n \" background: rgb(204,232,255);\\n\"\r\n \" color: rgb(0,0,0);\\n\"\r\n \" border-left: 4px solid #0961B0;\\n\"\r\n \"}\\n\"\r\n \"\")\r\n self.itemListWidget.setFrameShape(QtWidgets.QFrame.NoFrame)\r\n self.itemListWidget.setObjectName(\"itemListWidget\")\r\n item = QtWidgets.QListWidgetItem()\r\n item.setTextAlignment(QtCore.Qt.AlignJustify | QtCore.Qt.AlignVCenter)\r\n font = QtGui.QFont()\r\n font.setFamily(\"微软雅黑 Light\")\r\n font.setPointSize(12)\r\n item.setFont(font)\r\n self.itemListWidget.addItem(item)\r\n item = QtWidgets.QListWidgetItem()\r\n item.setTextAlignment(QtCore.Qt.AlignJustify | QtCore.Qt.AlignVCenter)\r\n font = QtGui.QFont()\r\n font.setFamily(\"微软雅黑 Light\")\r\n font.setPointSize(12)\r\n item.setFont(font)\r\n self.itemListWidget.addItem(item)\r\n item = QtWidgets.QListWidgetItem()\r\n font = QtGui.QFont()\r\n font.setFamily(\"微软雅黑 Light\")\r\n font.setPointSize(12)\r\n item.setFont(font)\r\n self.itemListWidget.addItem(item)\r\n item = QtWidgets.QListWidgetItem()\r\n item.setTextAlignment(QtCore.Qt.AlignJustify | QtCore.Qt.AlignVCenter)\r\n font = QtGui.QFont()\r\n font.setFamily(\"微软雅黑 Light\")\r\n font.setPointSize(12)\r\n item.setFont(font)\r\n self.itemListWidget.addItem(item)\r\n item = QtWidgets.QListWidgetItem()\r\n font = QtGui.QFont()\r\n font.setFamily(\"微软雅黑 Light\")\r\n font.setPointSize(12)\r\n item.setFont(font)\r\n self.itemListWidget.addItem(item)\r\n self.verticalLayout.addWidget(self.itemListWidget)\r\n spacerItem = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)\r\n self.verticalLayout.addItem(spacerItem)\r\n 
self.verticalLayout.setStretch(0, 1)\r\n self.verticalLayout.setStretch(1, 5)\r\n self.verticalLayout.setStretch(2, 1)\r\n self.settingsUiHorizontal1.addLayout(self.verticalLayout)\r\n self.settingsUiLine = QtWidgets.QFrame(self.horizontalLayoutWidget)\r\n self.settingsUiLine.setFrameShape(QtWidgets.QFrame.VLine)\r\n self.settingsUiLine.setFrameShadow(QtWidgets.QFrame.Sunken)\r\n self.settingsUiLine.setObjectName(\"settingsUiLine\")\r\n self.settingsUiHorizontal1.addWidget(self.settingsUiLine)\r\n self.stackedWidget = QtWidgets.QStackedWidget(self.horizontalLayoutWidget)\r\n self.stackedWidget.setObjectName(\"stackedWidget\")\r\n self.personalCenterPage = QtWidgets.QWidget()\r\n self.personalCenterPage.setObjectName(\"personalCenterPage\")\r\n self.verticalLayoutWidget_2 = QtWidgets.QWidget(self.personalCenterPage)\r\n self.verticalLayoutWidget_2.setGeometry(QtCore.QRect(0, 0, 431, 391))\r\n self.verticalLayoutWidget_2.setObjectName(\"verticalLayoutWidget_2\")\r\n self.personalCenterVertical = QtWidgets.QVBoxLayout(self.verticalLayoutWidget_2)\r\n self.personalCenterVertical.setContentsMargins(173, 0, 172, 0)\r\n self.personalCenterVertical.setObjectName(\"personalCenterVertical\")\r\n self.userLogoLabel = QtWidgets.QLabel(self.verticalLayoutWidget_2)\r\n self.userLogoLabel.setText(\"\")\r\n self.userLogoLabel.setObjectName(\"userLogoLabel\")\r\n self.personalCenterVertical.addWidget(self.userLogoLabel)\r\n self.userNameLabel = QtWidgets.QLabel(self.verticalLayoutWidget_2)\r\n font = QtGui.QFont()\r\n font.setFamily(\"微软雅黑 Light\")\r\n font.setPointSize(14)\r\n self.userNameLabel.setFont(font)\r\n self.userNameLabel.setAlignment(QtCore.Qt.AlignCenter)\r\n self.userNameLabel.setObjectName(\"userNameLabel\")\r\n self.personalCenterVertical.addWidget(self.userNameLabel)\r\n self.logoutButton = QtWidgets.QPushButton(self.verticalLayoutWidget_2)\r\n font = QtGui.QFont()\r\n font.setFamily(\"微软雅黑 Light\")\r\n font.setPointSize(12)\r\n self.logoutButton.setFont(font)\r\n 
self.logoutButton.setObjectName(\"logoutButton\")\r\n self.personalCenterVertical.addWidget(self.logoutButton)\r\n spacerItem1 = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)\r\n self.personalCenterVertical.addItem(spacerItem1)\r\n self.personalCenterVertical.setStretch(0, 1)\r\n self.personalCenterVertical.setStretch(1, 1)\r\n self.personalCenterVertical.setStretch(3, 2)\r\n self.stackedWidget.addWidget(self.personalCenterPage)\r\n self.staticMapsPage = QtWidgets.QWidget()\r\n self.staticMapsPage.setObjectName(\"staticMapsPage\")\r\n self.verticalLayoutWidget_3 = QtWidgets.QWidget(self.staticMapsPage)\r\n self.verticalLayoutWidget_3.setGeometry(QtCore.QRect(0, 0, 431, 391))\r\n self.verticalLayoutWidget_3.setObjectName(\"verticalLayoutWidget_3\")\r\n self.staticMapsVertical = QtWidgets.QVBoxLayout(self.verticalLayoutWidget_3)\r\n self.staticMapsVertical.setContentsMargins(0, 0, 0, 0)\r\n self.staticMapsVertical.setObjectName(\"staticMapsVertical\")\r\n self.gridLayout = QtWidgets.QGridLayout()\r\n self.gridLayout.setContentsMargins(80, 40, 80, -1)\r\n self.gridLayout.setObjectName(\"gridLayout\")\r\n self.scaleLabel = QtWidgets.QLabel(self.verticalLayoutWidget_3)\r\n font = QtGui.QFont()\r\n font.setFamily(\"微软雅黑 Light\")\r\n font.setPointSize(12)\r\n self.scaleLabel.setFont(font)\r\n self.scaleLabel.setObjectName(\"scaleLabel\")\r\n self.gridLayout.addWidget(self.scaleLabel, 2, 0, 1, 1)\r\n self.trafficLabel = QtWidgets.QLabel(self.verticalLayoutWidget_3)\r\n font = QtGui.QFont()\r\n font.setFamily(\"微软雅黑 Light\")\r\n font.setPointSize(12)\r\n self.trafficLabel.setFont(font)\r\n self.trafficLabel.setObjectName(\"trafficLabel\")\r\n self.gridLayout.addWidget(self.trafficLabel, 3, 0, 1, 1)\r\n self.scaleComboBox = QtWidgets.QComboBox(self.verticalLayoutWidget_3)\r\n font = QtGui.QFont()\r\n font.setFamily(\"微软雅黑 Light\")\r\n font.setPointSize(12)\r\n self.scaleComboBox.setFont(font)\r\n 
self.scaleComboBox.setObjectName(\"scaleComboBox\")\r\n self.scaleComboBox.addItem(\"\")\r\n self.scaleComboBox.addItem(\"\")\r\n self.gridLayout.addWidget(self.scaleComboBox, 2, 1, 1, 1)\r\n self.zoomLabel = QtWidgets.QLabel(self.verticalLayoutWidget_3)\r\n font = QtGui.QFont()\r\n font.setFamily(\"微软雅黑 Light\")\r\n font.setPointSize(12)\r\n self.zoomLabel.setFont(font)\r\n self.zoomLabel.setObjectName(\"zoomLabel\")\r\n self.gridLayout.addWidget(self.zoomLabel, 1, 0, 1, 1)\r\n self.staticMapsLabel = QtWidgets.QLabel(self.verticalLayoutWidget_3)\r\n font = QtGui.QFont()\r\n font.setFamily(\"微软雅黑 Light\")\r\n font.setPointSize(16)\r\n self.staticMapsLabel.setFont(font)\r\n self.staticMapsLabel.setStyleSheet(\"\")\r\n self.staticMapsLabel.setAlignment(QtCore.Qt.AlignCenter)\r\n self.staticMapsLabel.setObjectName(\"staticMapsLabel\")\r\n self.gridLayout.addWidget(self.staticMapsLabel, 0, 0, 1, 2)\r\n self.zoomComboBox = QtWidgets.QComboBox(self.verticalLayoutWidget_3)\r\n font = QtGui.QFont()\r\n font.setFamily(\"微软雅黑 Light\")\r\n font.setPointSize(12)\r\n self.zoomComboBox.setFont(font)\r\n self.zoomComboBox.setObjectName(\"zoomComboBox\")\r\n self.zoomComboBox.addItem(\"\")\r\n self.zoomComboBox.addItem(\"\")\r\n self.zoomComboBox.addItem(\"\")\r\n self.zoomComboBox.addItem(\"\")\r\n self.zoomComboBox.addItem(\"\")\r\n self.zoomComboBox.addItem(\"\")\r\n self.zoomComboBox.addItem(\"\")\r\n self.zoomComboBox.addItem(\"\")\r\n self.zoomComboBox.addItem(\"\")\r\n self.zoomComboBox.addItem(\"\")\r\n self.zoomComboBox.addItem(\"\")\r\n self.zoomComboBox.addItem(\"\")\r\n self.zoomComboBox.addItem(\"\")\r\n self.zoomComboBox.addItem(\"\")\r\n self.zoomComboBox.addItem(\"\")\r\n self.zoomComboBox.addItem(\"\")\r\n self.zoomComboBox.addItem(\"\")\r\n self.zoomComboBox.addItem(\"\")\r\n self.zoomComboBox.addItem(\"\")\r\n self.zoomComboBox.addItem(\"\")\r\n self.gridLayout.addWidget(self.zoomComboBox, 1, 1, 1, 1)\r\n self.trafficComboBox = 
QtWidgets.QComboBox(self.verticalLayoutWidget_3)\r\n font = QtGui.QFont()\r\n font.setFamily(\"微软雅黑 Light\")\r\n font.setPointSize(12)\r\n self.trafficComboBox.setFont(font)\r\n self.trafficComboBox.setObjectName(\"trafficComboBox\")\r\n self.trafficComboBox.addItem(\"\")\r\n self.trafficComboBox.addItem(\"\")\r\n self.gridLayout.addWidget(self.trafficComboBox, 3, 1, 1, 1)\r\n spacerItem2 = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)\r\n self.gridLayout.addItem(spacerItem2, 4, 0, 1, 2)\r\n self.staticMapsVertical.addLayout(self.gridLayout)\r\n self.stackedWidget.addWidget(self.staticMapsPage)\r\n self.weatherTypePage = QtWidgets.QWidget()\r\n self.weatherTypePage.setObjectName(\"weatherTypePage\")\r\n self.verticalLayoutWidget_6 = QtWidgets.QWidget(self.weatherTypePage)\r\n self.verticalLayoutWidget_6.setGeometry(QtCore.QRect(0, 0, 431, 391))\r\n self.verticalLayoutWidget_6.setObjectName(\"verticalLayoutWidget_6\")\r\n self.weatherTypeVertical = QtWidgets.QVBoxLayout(self.verticalLayoutWidget_6)\r\n self.weatherTypeVertical.setContentsMargins(0, 40, 0, 0)\r\n self.weatherTypeVertical.setObjectName(\"weatherTypeVertical\")\r\n self.weatherTypeLabel = QtWidgets.QLabel(self.verticalLayoutWidget_6)\r\n font = QtGui.QFont()\r\n font.setFamily(\"微软雅黑 Light\")\r\n font.setPointSize(16)\r\n self.weatherTypeLabel.setFont(font)\r\n self.weatherTypeLabel.setAlignment(QtCore.Qt.AlignCenter)\r\n self.weatherTypeLabel.setObjectName(\"weatherTypeLabel\")\r\n self.weatherTypeVertical.addWidget(self.weatherTypeLabel)\r\n self.weatherTypeHorizontal = QtWidgets.QHBoxLayout()\r\n self.weatherTypeHorizontal.setContentsMargins(80, -1, 80, -1)\r\n self.weatherTypeHorizontal.setObjectName(\"weatherTypeHorizontal\")\r\n self.weatherTypeSearchLabel = QtWidgets.QLabel(self.verticalLayoutWidget_6)\r\n font = QtGui.QFont()\r\n font.setFamily(\"微软雅黑 Light\")\r\n font.setPointSize(12)\r\n self.weatherTypeSearchLabel.setFont(font)\r\n 
self.weatherTypeSearchLabel.setObjectName(\"weatherTypeSearchLabel\")\r\n self.weatherTypeHorizontal.addWidget(self.weatherTypeSearchLabel)\r\n self.weatherTypeComboBox = QtWidgets.QComboBox(self.verticalLayoutWidget_6)\r\n font = QtGui.QFont()\r\n font.setFamily(\"微软雅黑 Light\")\r\n font.setPointSize(12)\r\n self.weatherTypeComboBox.setFont(font)\r\n self.weatherTypeComboBox.setObjectName(\"weatherTypeComboBox\")\r\n self.weatherTypeComboBox.addItem(\"\")\r\n self.weatherTypeComboBox.addItem(\"\")\r\n self.weatherTypeHorizontal.addWidget(self.weatherTypeComboBox)\r\n self.weatherTypeVertical.addLayout(self.weatherTypeHorizontal)\r\n spacerItem3 = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)\r\n self.weatherTypeVertical.addItem(spacerItem3)\r\n self.stackedWidget.addWidget(self.weatherTypePage)\r\n self.detailInformationPage = QtWidgets.QWidget()\r\n self.detailInformationPage.setObjectName(\"detailInformationPage\")\r\n self.verticalLayoutWidget_4 = QtWidgets.QWidget(self.detailInformationPage)\r\n self.verticalLayoutWidget_4.setGeometry(QtCore.QRect(0, 0, 431, 398))\r\n self.verticalLayoutWidget_4.setObjectName(\"verticalLayoutWidget_4\")\r\n self.detailInformationVertical1 = QtWidgets.QVBoxLayout(self.verticalLayoutWidget_4)\r\n self.detailInformationVertical1.setContentsMargins(0, 0, 0, 0)\r\n self.detailInformationVertical1.setObjectName(\"detailInformationVertical1\")\r\n self.amapSmallProgramLabel = QtWidgets.QLabel(self.verticalLayoutWidget_4)\r\n font = QtGui.QFont()\r\n font.setFamily(\"微软雅黑 Light\")\r\n font.setPointSize(16)\r\n self.amapSmallProgramLabel.setFont(font)\r\n self.amapSmallProgramLabel.setAlignment(QtCore.Qt.AlignCenter)\r\n self.amapSmallProgramLabel.setObjectName(\"amapSmallProgramLabel\")\r\n self.detailInformationVertical1.addWidget(self.amapSmallProgramLabel)\r\n self.detailInformationLine1 = QtWidgets.QFrame(self.verticalLayoutWidget_4)\r\n 
self.detailInformationLine1.setFrameShape(QtWidgets.QFrame.HLine)\r\n self.detailInformationLine1.setFrameShadow(QtWidgets.QFrame.Sunken)\r\n self.detailInformationLine1.setObjectName(\"detailInformationLine1\")\r\n self.detailInformationVertical1.addWidget(self.detailInformationLine1)\r\n self.developerLabel = QtWidgets.QLabel(self.verticalLayoutWidget_4)\r\n font = QtGui.QFont()\r\n font.setFamily(\"微软雅黑 Light\")\r\n font.setPointSize(12)\r\n self.developerLabel.setFont(font)\r\n self.developerLabel.setObjectName(\"developerLabel\")\r\n self.detailInformationVertical1.addWidget(self.developerLabel)\r\n self.detailInformationLine3 = QtWidgets.QFrame(self.verticalLayoutWidget_4)\r\n self.detailInformationLine3.setFrameShape(QtWidgets.QFrame.HLine)\r\n self.detailInformationLine3.setFrameShadow(QtWidgets.QFrame.Sunken)\r\n self.detailInformationLine3.setObjectName(\"detailInformationLine3\")\r\n self.detailInformationVertical1.addWidget(self.detailInformationLine3)\r\n self.feedbackLabel = QtWidgets.QLabel(self.verticalLayoutWidget_4)\r\n font = QtGui.QFont()\r\n font.setFamily(\"微软雅黑 Light\")\r\n font.setPointSize(12)\r\n self.feedbackLabel.setFont(font)\r\n self.feedbackLabel.setObjectName(\"feedbackLabel\")\r\n self.detailInformationVertical1.addWidget(self.feedbackLabel)\r\n self.detailInformationLine2 = QtWidgets.QFrame(self.verticalLayoutWidget_4)\r\n self.detailInformationLine2.setFrameShape(QtWidgets.QFrame.HLine)\r\n self.detailInformationLine2.setFrameShadow(QtWidgets.QFrame.Sunken)\r\n self.detailInformationLine2.setObjectName(\"detailInformationLine2\")\r\n self.detailInformationVertical1.addWidget(self.detailInformationLine2)\r\n self.detailInformationLabel = QtWidgets.QLabel(self.verticalLayoutWidget_4)\r\n font = QtGui.QFont()\r\n font.setFamily(\"微软雅黑 Light\")\r\n font.setPointSize(12)\r\n self.detailInformationLabel.setFont(font)\r\n self.detailInformationLabel.setWordWrap(True)\r\n 
self.detailInformationLabel.setObjectName(\"detailInformationLabel\")\r\n self.detailInformationVertical1.addWidget(self.detailInformationLabel)\r\n spacerItem4 = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)\r\n self.detailInformationVertical1.addItem(spacerItem4)\r\n self.stackedWidget.addWidget(self.detailInformationPage)\r\n self.referencePage = QtWidgets.QWidget()\r\n self.referencePage.setObjectName(\"referencePage\")\r\n self.verticalLayoutWidget_5 = QtWidgets.QWidget(self.referencePage)\r\n self.verticalLayoutWidget_5.setGeometry(QtCore.QRect(0, 0, 450, 391))\r\n self.verticalLayoutWidget_5.setObjectName(\"verticalLayoutWidget_5\")\r\n self.referenceVertical = QtWidgets.QVBoxLayout(self.verticalLayoutWidget_5)\r\n self.referenceVertical.setContentsMargins(0, 30, 0, 0)\r\n self.referenceVertical.setObjectName(\"referenceVertical\")\r\n self.referenceLabel = QtWidgets.QLabel(self.verticalLayoutWidget_5)\r\n font = QtGui.QFont()\r\n font.setFamily(\"微软雅黑 Light\")\r\n font.setPointSize(16)\r\n self.referenceLabel.setFont(font)\r\n self.referenceLabel.setAlignment(QtCore.Qt.AlignCenter)\r\n self.referenceLabel.setObjectName(\"referenceLabel\")\r\n self.referenceLabel.setMargin(10)\r\n self.referenceVertical.addWidget(self.referenceLabel)\r\n self.amapWebSupportLabel = QtWidgets.QLabel(self.verticalLayoutWidget_5)\r\n font = QtGui.QFont()\r\n font.setFamily(\"微软雅黑 Light\")\r\n font.setPointSize(12)\r\n self.amapWebSupportLabel.setFont(font)\r\n self.amapWebSupportLabel.setStyleSheet(\"\")\r\n self.amapWebSupportLabel.setOpenExternalLinks(True)\r\n self.amapWebSupportLabel.setObjectName(\"amapWebSupportLabel\")\r\n self.amapWebSupportLabel.setMargin(10)\r\n self.referenceVertical.addWidget(self.amapWebSupportLabel)\r\n self.baiduMapWebSupportLabel = QtWidgets.QLabel(self.verticalLayoutWidget_5)\r\n font = QtGui.QFont()\r\n font.setFamily(\"微软雅黑 Light\")\r\n font.setPointSize(12)\r\n 
self.baiduMapWebSupportLabel.setFont(font)\r\n self.baiduMapWebSupportLabel.setOpenExternalLinks(True)\r\n self.baiduMapWebSupportLabel.setMargin(10)\r\n self.baiduMapWebSupportLabel.setObjectName(\"baiduMapWebSupportLabel\")\r\n self.referenceVertical.addWidget(self.baiduMapWebSupportLabel)\r\n self.GitHubWebSupportLabel = QtWidgets.QLabel(self.verticalLayoutWidget_5)\r\n font = QtGui.QFont()\r\n font.setFamily(\"微软雅黑 Light\")\r\n font.setPointSize(12)\r\n self.GitHubWebSupportLabel.setFont(font)\r\n self.GitHubWebSupportLabel.setOpenExternalLinks(True)\r\n self.GitHubWebSupportLabel.setObjectName(\"GitHubWebSupportLabel\")\r\n self.GitHubWebSupportLabel.setMargin(10)\r\n self.referenceVertical.addWidget(self.GitHubWebSupportLabel)\r\n self.IssueLabel = QtWidgets.QLabel(self.verticalLayoutWidget_5)\r\n font = QtGui.QFont()\r\n font.setFamily(\"微软雅黑 Light\")\r\n font.setPointSize(12)\r\n self.IssueLabel.setFont(font)\r\n self.IssueLabel.setObjectName(\"IssueLabel\")\r\n self.IssueLabel.setMargin(10)\r\n self.referenceVertical.addWidget(self.IssueLabel)\r\n spacerItem5 = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)\r\n self.referenceVertical.addItem(spacerItem5)\r\n self.stackedWidget.addWidget(self.referencePage)\r\n self.settingsUiHorizontal1.addWidget(self.stackedWidget)\r\n self.settingsUiHorizontal1.setStretch(0, 1)\r\n self.settingsUiHorizontal1.setStretch(2, 3)\r\n self.stackedWidget.raise_()\r\n self.settingsUiLine.raise_()\r\n SettingsUIMainWindow.setCentralWidget(self.settingsUiWidget)\r\n\r\n self.retranslateUi(SettingsUIMainWindow)\r\n self.itemListWidget.setCurrentRow(0)\r\n self.stackedWidget.setCurrentIndex(0)\r\n QtCore.QMetaObject.connectSlotsByName(SettingsUIMainWindow)\r\n\r\n def retranslateUi(self, SettingsUIMainWindow):\r\n _translate = QtCore.QCoreApplication.translate\r\n SettingsUIMainWindow.setWindowTitle(_translate(\"SettingsUIMainWindow\", \"设置\"))\r\n 
SettingsUIMainWindow.setWindowFlags(\r\n QtCore.Qt.WindowMinimizeButtonHint | QtCore.Qt.WindowCloseButtonHint | QtCore.Qt.MSWindowsFixedSizeDialogHint)\r\n SettingsUIMainWindow.setWindowIcon(QIcon(\":/SettingsLogo.png\"))\r\n SettingsUIMainWindow.setFixedSize(SettingsUIMainWindow.width(), SettingsUIMainWindow.height())\r\n self.settingsLabel.setText(_translate(\"SettingsUIMainWindow\", \"设置\"))\r\n __sortingEnabled = self.itemListWidget.isSortingEnabled()\r\n self.itemListWidget.setSortingEnabled(False)\r\n item = self.itemListWidget.item(0)\r\n item.setText(_translate(\"SettingsUIMainWindow\", \" 个人中心\"))\r\n item = self.itemListWidget.item(1)\r\n item.setText(_translate(\"SettingsUIMainWindow\", \" 静态地图\"))\r\n item = self.itemListWidget.item(2)\r\n item.setText(_translate(\"SettingsUIMainWindow\", \" 天气类型\"))\r\n item = self.itemListWidget.item(3)\r\n item.setText(_translate(\"SettingsUIMainWindow\", \" 详细说明\"))\r\n item = self.itemListWidget.item(4)\r\n item.setText(_translate(\"SettingsUIMainWindow\", \" 参考及引用\"))\r\n self.itemListWidget.setSortingEnabled(__sortingEnabled)\r\n self.userNameLabel.setText(_translate(\"SettingsUIMainWindow\", \"用户名称\"))\r\n self.logoutButton.setText(_translate(\"SettingsUIMainWindow\", \"退出登录\"))\r\n self.scaleLabel.setText(_translate(\"SettingsUIMainWindow\", \"图片清晰度\"))\r\n self.trafficLabel.setText(_translate(\"SettingsUIMainWindow\", \"交通路况标识\"))\r\n self.scaleComboBox.setItemText(0, _translate(\"SettingsUIMainWindow\", \"普通图\"))\r\n self.scaleComboBox.setItemText(1, _translate(\"SettingsUIMainWindow\", \"高清图\"))\r\n self.zoomLabel.setText(_translate(\"SettingsUIMainWindow\", \"地图缩放级别\"))\r\n self.staticMapsLabel.setText(_translate(\"SettingsUIMainWindow\", \"静态地图\"))\r\n self.zoomComboBox.setItemText(0, _translate(\"SettingsUIMainWindow\", \"1\"))\r\n self.zoomComboBox.setItemText(1, _translate(\"SettingsUIMainWindow\", \"2\"))\r\n self.zoomComboBox.setItemText(2, _translate(\"SettingsUIMainWindow\", \"3\"))\r\n 
self.zoomComboBox.setItemText(3, _translate(\"SettingsUIMainWindow\", \"4\"))\r\n self.zoomComboBox.setItemText(4, _translate(\"SettingsUIMainWindow\", \"5\"))\r\n self.zoomComboBox.setItemText(5, _translate(\"SettingsUIMainWindow\", \"6\"))\r\n self.zoomComboBox.setItemText(6, _translate(\"SettingsUIMainWindow\", \"7\"))\r\n self.zoomComboBox.setItemText(7, _translate(\"SettingsUIMainWindow\", \"8\"))\r\n self.zoomComboBox.setItemText(8, _translate(\"SettingsUIMainWindow\", \"9\"))\r\n self.zoomComboBox.setItemText(9, _translate(\"SettingsUIMainWindow\", \"10\"))\r\n self.zoomComboBox.setItemText(10, _translate(\"SettingsUIMainWindow\", \"11\"))\r\n self.zoomComboBox.setItemText(11, _translate(\"SettingsUIMainWindow\", \"12\"))\r\n self.zoomComboBox.setItemText(12, _translate(\"SettingsUIMainWindow\", \"13\"))\r\n self.zoomComboBox.setItemText(13, _translate(\"SettingsUIMainWindow\", \"14\"))\r\n self.zoomComboBox.setItemText(14, _translate(\"SettingsUIMainWindow\", \"15\"))\r\n self.zoomComboBox.setItemText(15, _translate(\"SettingsUIMainWindow\", \"16\"))\r\n self.zoomComboBox.setItemText(16, _translate(\"SettingsUIMainWindow\", \"17\"))\r\n self.zoomComboBox.setItemText(17, _translate(\"SettingsUIMainWindow\", \"18\"))\r\n self.zoomComboBox.setItemText(18, _translate(\"SettingsUIMainWindow\", \"19\"))\r\n self.zoomComboBox.setItemText(19, _translate(\"SettingsUIMainWindow\", \"20\"))\r\n self.trafficComboBox.setItemText(0, _translate(\"SettingsUIMainWindow\", \"不展示\"))\r\n self.trafficComboBox.setItemText(1, _translate(\"SettingsUIMainWindow\", \"展示\"))\r\n self.weatherTypeLabel.setText(_translate(\"SettingsUIMainWindow\", \"天气类型\"))\r\n self.weatherTypeSearchLabel.setText(_translate(\"SettingsUIMainWindow\", \"查询的天气类型\"))\r\n self.weatherTypeComboBox.setItemText(0, _translate(\"SettingsUIMainWindow\", \"实况天气\"))\r\n self.weatherTypeComboBox.setItemText(1, _translate(\"SettingsUIMainWindow\", \"预测天气\"))\r\n 
self.amapSmallProgramLabel.setText(_translate(\"SettingsUIMainWindow\", \"高德地图小程序\"))\r\n self.developerLabel.setText(_translate(\"SettingsUIMainWindow\", \"开发 \\n\"\r\n \"开发者:高怡飞\\n\"\r\n \"Logo图片库:Windows Fluent UI Photo Library\\n\"\r\n \" 小爱同学\\n\"\r\n \" 高德地图 \"))\r\n self.feedbackLabel.setText(_translate(\"SettingsUIMainWindow\", \"反馈\\n\"\r\n \"电子邮箱:363301617@qq.com\"))\r\n self.detailInformationLabel.setText(_translate(\"SettingsUIMainWindow\", \"说明\\n\"\r\n \"这是一个基于高德地图Web API的第三方客户端,应用所有数据来源均来自高德地图。\\n\"\r\n \"由于高德地图关于实时路况的API暂未开放,所以使用百度地图的实时路况API数据来实现这一功能。\\n\"\r\n \"本程序仅供学习交流编程技术使用。如果侵犯您的合法权益,请及时联系本人以第一时间删除。\"))\r\n self.referenceLabel.setText(_translate(\"SettingsUIMainWindow\", \"参考及引用\"))\r\n self.amapWebSupportLabel.setText(_translate(\"SettingsUIMainWindow\",\r\n \"<html><head/><body><p><a href=\\\"https://mobile.amap.com/\\\"><span style=\\\" color:#000000;\\\">高德地图官网</span></a></p></body></html>\"))\r\n self.baiduMapWebSupportLabel.setText(_translate(\"SettingsUIMainWindow\",\r\n \"<html><head/><body><p><a href=\\\"https://map.baidu.com\\\"><span style=\\\" text-decoration: underline; color:#000000;\\\">百度地图官网</span></a></p></body></html>\"))\r\n self.GitHubWebSupportLabel.setText(_translate(\"SettingsUIMainWindow\",\r\n \"<html><head/><body><p><a href=\\\"https://github.com/\\\"><span style=\\\" text-decoration: underline; color:#000000;\\\">GitHub 仓库</span></a></p></body></html>\"))\r\n self.IssueLabel.setText(_translate(\"SettingsUIMainWindow\", \"如果发现任何Bug,请在GitHub仓库中创建一个新Issue\"))\r\n\r\n\r\nif __name__ == \"__main__\":\r\n QtCore.QCoreApplication.setAttribute(QtCore.Qt.AA_EnableHighDpiScaling)\r\n app = QApplication(sys.argv)\r\n form = QMainWindow()\r\n w = Ui_SettingsUI()\r\n w.setupUi(form)\r\n form.show()\r\n sys.exit(app.exec_())\r\n" }, { "alpha_fraction": 0.48447203636169434, "alphanum_fraction": 0.4906832277774811, "avg_line_length": 33, "blob_id": "34950e0080461aac49f6796307c47bce3b59c649", "content_id": 
"e4a42f4242e209846404633563f40f94cd92c188", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 833, "license_type": "permissive", "max_line_length": 105, "num_lines": 23, "path": "/SelfExpection/StringFormatException.py", "repo_name": "Gaoyifei1011/AmapProgram", "src_encoding": "UTF-8", "text": "import inspect\r\n\r\nfrom logrecord.WriteLog import WriteLog\r\n\r\n\r\nclass StringFormatException(BaseException):\r\n \"\"\"this is user's Exception for check the length of name \"\"\"\r\n\r\n def __init__(self):\r\n pass\r\n\r\n def error_reason(self):\r\n # 写入日志\r\n writeLog = WriteLog()\r\n class_name = self.__class__.__name__\r\n function_name = inspect.stack()[0][3]\r\n log_filename = writeLog.create_filename(class_name=class_name)\r\n\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=1,\r\n context='Function name:{0} - request_information:{1}'.format(function_name,\r\n '您输入的数据格式异常')\r\n )\r\n" }, { "alpha_fraction": 0.3040187954902649, "alphanum_fraction": 0.3121739625930786, "avg_line_length": 63.1244010925293, "blob_id": "5d5e7975589078a1c68ef75b0d69e436143b003d", "content_id": "8f28811b0fd162b643dc71754e2d0d2f5b88ae28", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 42865, "license_type": "permissive", "max_line_length": 131, "num_lines": 627, "path": "/AmapFunctions/WeatherInformation.py", "repo_name": "Gaoyifei1011/AmapProgram", "src_encoding": "UTF-8", "text": "# -*- coding:utf-8 -*-\r\n# 导入的库\r\nimport datetime\r\nimport inspect\r\nimport json\r\nimport time\r\n\r\nimport requests\r\n\r\nfrom SelfExpection.CustomExpection import CustomExpection\r\nfrom SelfExpection.OfficialException import OfficialException\r\nfrom logrecord.WriteLog import WriteLog\r\n\r\n\r\nclass WeatherInformation:\r\n \"\"\"\r\n Class:天气查询\r\n 天气查询是一个简单的HTTP接口,根据用户输入的adcode,查询目标区域当前/未来的天气情况。\r\n \"\"\"\r\n\r\n def __init__(self):\r\n self.city = 
None\r\n self.extensions = None\r\n self.json_decode = None\r\n self.output = None\r\n\r\n # 写入日志\r\n writeLog = WriteLog()\r\n class_name = self.__class__.__name__\r\n log_filename = writeLog.create_filename(class_name=class_name)\r\n writeLog.write_to_log(file_name=log_filename, log_level=1, context='Class name:{0} start'.format(class_name))\r\n\r\n # 获取高德地图数据API的钥匙\r\n APIkey = '<请自己输入自己申请的API Key>'\r\n\r\n def get_weather_information(self, city: str,\r\n **kwargs\r\n ) -> dict:\r\n \"\"\"\r\n 函数:获取天气查询数据。\\n\r\n Args:\r\n city:城市编码,必填。输入城市的adcode,adcode信息可参考城市编码表\r\n kwargs:\r\n extensions:气象类型,选填,默认base。可选值:base/all,base:返回实况天气,all:返回预报天气\r\n output:返回格式,可选,默认JSON格式。可选值:JSON,XML。\r\n Returns:返回获得的json格式数据或错误信息\r\n \"\"\"\r\n self.city = city\r\n\r\n if 'extensions' in kwargs:\r\n self.extensions = kwargs['extensions']\r\n if 'output' in kwargs:\r\n self.output = kwargs['output']\r\n\r\n # 写入日志\r\n writeLog = WriteLog()\r\n class_name = self.__class__.__name__\r\n function_name = inspect.stack()[0][3]\r\n log_filename = writeLog.create_filename(class_name=class_name)\r\n\r\n # 传入参数\r\n parameters = {'key': self.APIkey,\r\n 'city': self.city,\r\n }\r\n\r\n if self.extensions is not None:\r\n parameters.update(extensions=self.extensions)\r\n if self.output is not None:\r\n parameters.update(output=self.output)\r\n\r\n # 获取数据\r\n try:\r\n request_information = requests.get(\"https://restapi.amap.com/v3/weather/weatherInfo?parameters\",\r\n params=parameters)\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=1,\r\n context='Function name:{0} - request_information:{1}'.format(function_name,\r\n request_information)\r\n )\r\n request_information.close() # 关闭访问\r\n request_information.raise_for_status() # 如果响应状态码不是 200,就主动抛出异常\r\n # 返回格式化后的JSON数据\r\n json_decode = json.loads(request_information.text)\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=6,\r\n context='Function name:{0} - 
Weather information data successful get.'.format(\r\n function_name)\r\n )\r\n return json_decode\r\n\r\n except requests.exceptions.ConnectionError as e:\r\n time.sleep(1)\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=5,\r\n context='Function name:{0} - {1} has occured.'.format(function_name,\r\n e.__class__.__name__)\r\n )\r\n # 异常信息\r\n error_connection = 'ConnectionError -- please wait 3 seconds'\r\n error_connection_dict = {'status': '2',\r\n 'info': 'requests.exceptions.ConnectionError',\r\n 'detail_information': requests.exceptions.ConnectionError,\r\n 'error_prompt': error_connection\r\n }\r\n return error_connection_dict\r\n\r\n except requests.exceptions.ChunkedEncodingError as e:\r\n time.sleep(1)\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=5,\r\n context='Function name:{0} - {1} has occured.'.format(function_name,\r\n e.__class__.__name__)\r\n )\r\n # 异常信息\r\n error_chuck_encoding = 'ChunkedEncodingError -- please wait 3 seconds'\r\n error_chuck_encoding_dict = {'status': '2',\r\n 'info': 'HTTPError',\r\n 'detail_information': requests.exceptions.ChunkedEncodingError,\r\n 'error_prompt': error_chuck_encoding\r\n }\r\n return error_chuck_encoding_dict\r\n\r\n except Exception as e:\r\n time.sleep(1)\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=5,\r\n context='Function name:{0} - {1} has occured.'.format(function_name,\r\n e.__class__.__name__)\r\n )\r\n # 异常信息\r\n error_information = 'Unfortunately -- An Unknown Error Happened, Please wait 3 seconds'\r\n error_information_dict = {'status': '2',\r\n 'info': 'HTTPError',\r\n 'detail_information': requests.exceptions.ChunkedEncodingError,\r\n 'error_prompt': error_information\r\n }\r\n return error_information_dict\r\n\r\n def parse_weather_information(self, json_decode: dict,\r\n extensions: str\r\n ) -> list:\r\n \"\"\"\r\n 函数:解析天气查询数据。\\n\r\n Args:\r\n 
json_decode:get_weather_information()方法从网络中获取的数据\r\n extensions:获取数据的类型\r\n Returns:\r\n 返回获取到的天气信息\r\n \"\"\"\r\n\r\n # TODO:优化代码,递归创建目录 os.mkdirs()\r\n self.extensions = extensions\r\n self.json_decode = json_decode\r\n\r\n # 输出结果\r\n resultContext = []\r\n\r\n # 写入日志\r\n writeLog = WriteLog()\r\n class_name = self.__class__.__name__\r\n function_name = inspect.stack()[0][3]\r\n log_filename = writeLog.create_filename(class_name=class_name)\r\n\r\n try:\r\n if self.json_decode['status'] == '0':\r\n # 官方文档异常\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=3,\r\n context='Function name:{0} - status:{1}'.format(function_name,\r\n self.json_decode['status'])\r\n )\r\n raise OfficialException\r\n\r\n elif self.json_decode['status'] == '2':\r\n # 自定义异常\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=3,\r\n context='Function name:{0} - status:{1}'.format(function_name,\r\n self.json_decode['status'])\r\n )\r\n raise CustomExpection\r\n\r\n elif self.json_decode['status'] == '1':\r\n if self.json_decode['infocode'] == \"10000\": # 请求数据成功的状态码\r\n\r\n # 查询实况天气/预报天气\r\n week_dict = {\"1\": \"一\", \"2\": \"二\", \"3\": \"三\", \"4\": \"四\", \"5\": \"五\", \"6\": \"六\", \"7\": \"日\"}\r\n if self.extensions == 'base':\r\n lives = self.json_decode['lives']\r\n if lives:\r\n # 实况天气\r\n for live in lives:\r\n # 查询的数据包括省份名,城市名,天气现象,实时气温,凤翔描述,风力级别,空气湿度,数据发布的时间\r\n province = live['province']\r\n city = live['city']\r\n weather = live['weather']\r\n temperature = live['temperature']\r\n winddirection = live['winddirection']\r\n windpower = live['windpower']\r\n tempwindpower = []\r\n humidity = live['humidity']\r\n reporttime = live['reporttime']\r\n datetime_reporttime = datetime.datetime.strptime(reporttime, '%Y-%m-%d %H:%M:%S')\r\n formatted_reporttime = datetime_reporttime.strftime('%Y年%m月%d日%H时%M分%S秒')\r\n\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=3,\r\n 
context='Function name:{0} - province:{1}'.format(function_name,\r\n province)\r\n )\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=3,\r\n context='Function name:{0} - city:{1}'.format(function_name,\r\n city)\r\n )\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=3,\r\n context='Function name:{0} - weather:{1}'.format(function_name,\r\n weather)\r\n )\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=3,\r\n context='Function name:{0} - temperature:{1}'.format(\r\n function_name,\r\n temperature)\r\n )\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=3,\r\n context='Function name:{0} - winddirection:{1}'.format(\r\n function_name,\r\n winddirection)\r\n )\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=3,\r\n context='Function name:{0} - windpower:{1}'.format(function_name,\r\n windpower)\r\n ) # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=3,\r\n context='Function name:{0} - tempwindpower:{1}'.format(\r\n function_name,\r\n tempwindpower)\r\n ) # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=3,\r\n context='Function name:{0} - humidity:{1}'.format(function_name,\r\n humidity)\r\n ) # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=3,\r\n context='Function name:{0} - reporttime:{1}'.format(function_name,\r\n reporttime)\r\n )\r\n\r\n # 天气信息——风力\r\n for item in windpower:\r\n tempwindpower.append(item)\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=3,\r\n context='Function name:{0} - tempwindpower:{1}'.format(\r\n function_name,\r\n tempwindpower)\r\n )\r\n\r\n resultContext.append(\"您查询{0}{1}的天气情况如下所示:\".format(province, city))\r\n if tempwindpower[0] == \"<\":\r\n resultContext.append(\r\n \"今日天气{0},温度{1}度,{2}风小于{3}级,空气湿度{4}\".format(weather, temperature, winddirection,\r\n tempwindpower[1], humidity))\r\n elif tempwindpower[0] == \"≤\":\r\n 
resultContext.append(\r\n \"今日天气{0},温度{1}度,{2}风小于等于{3}级,空气湿度{4}\".format(weather, temperature,\r\n winddirection,\r\n tempwindpower[1], humidity))\r\n else:\r\n resultContext.append(\r\n \"今日天气{0},温度{1}度,{2}风{3}级,空气湿度{4}\".format(weather, temperature, winddirection,\r\n windpower,\r\n humidity))\r\n resultContext.append(\"天气数据已于{0}更新\".format(formatted_reporttime))\r\n else:\r\n context = \"暂未查到天气信息,换个词进行搜索吧\"\r\n resultContext.append(context)\r\n\r\n elif self.extensions == 'all':\r\n # 预测天气\r\n forecasts = self.json_decode['forecasts']\r\n if forecasts:\r\n for forecast in forecasts:\r\n province = forecast['province']\r\n city = forecast['city']\r\n reporttime = forecast['reporttime']\r\n datetime_reporttime = datetime.datetime.strptime(reporttime, '%Y-%m-%d %H:%M:%S')\r\n formatted_reporttime = datetime_reporttime.strftime('%Y年%m月%d日%H时%M分%S秒')\r\n casts = forecast['casts']\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=3,\r\n context='Function name:{0} - province:{1}'.format(function_name,\r\n province)\r\n )\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=3,\r\n context='Function name:{0} - city:{1}'.format(function_name,\r\n city)\r\n )\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=3,\r\n context='Function name:{0} - report time:{1}'.format(\r\n function_name,\r\n reporttime)\r\n )\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=3,\r\n context='Function name:{0} - datetime report time:{1}'.format(\r\n function_name,\r\n datetime_reporttime)\r\n )\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=3,\r\n context='Function name:{0} - format report time:{1}'.format(\r\n function_name,\r\n formatted_reporttime)\r\n )\r\n\r\n # 预测数据\r\n for item, cast in enumerate(casts):\r\n date = cast['date']\r\n datetime_date = datetime.datetime.strptime(date, '%Y-%m-%d')\r\n formatted_date = datetime_date.strftime('%Y年%m月%d日')\r\n week = cast['week']\r\n dayweather = 
cast['dayweather']\r\n nightweather = cast['nightweather']\r\n daytemp = cast['daytemp']\r\n nighttemp = cast['nighttemp']\r\n daywind = cast['daywind']\r\n nightwind = cast['nightwind']\r\n daypower = cast['daypower']\r\n tempdaywindpower = []\r\n nightpower = cast['nightpower']\r\n tempnightwindpower = []\r\n\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=3,\r\n context='Function name:{0} - date:{1}'.format(function_name,\r\n date)\r\n )\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=3,\r\n context='Function name:{0} - date time:{1}'.format(\r\n function_name,\r\n datetime_date)\r\n )\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=3,\r\n context='Function name:{0} - format date:{1}'.format(\r\n function_name,\r\n formatted_date)\r\n )\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=3,\r\n context='Function name:{0} - week:{1}'.format(\r\n function_name,\r\n week)\r\n )\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=3,\r\n context='Function name:{0} - dayweather:{1}'.format(\r\n function_name,\r\n dayweather)\r\n )\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=3,\r\n context='Function name:{0} - night weather:{1}'.format(\r\n function_name,\r\n nightweather)\r\n )\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=3,\r\n context='Function name:{0} - day temp:{1}'.format(\r\n function_name,\r\n daytemp)\r\n ) # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=3,\r\n context='Function name:{0} - night temp:{1}'.format(\r\n function_name,\r\n nighttemp)\r\n ) # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=3,\r\n context='Function name:{0} - day wind:{1}'.format(\r\n function_name,\r\n daywind)\r\n )\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=3,\r\n context='Function name:{0} - night wind:{1}'.format(\r\n function_name,\r\n nightwind)\r\n 
)\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=3,\r\n context='Function name:{0} - day power:{1}'.format(\r\n function_name,\r\n daypower)\r\n )\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=3,\r\n context='Function name:{0} - night power:{1}'.format(\r\n function_name,\r\n nightpower)\r\n )\r\n\r\n for sub_item in daypower:\r\n tempdaywindpower.append(sub_item)\r\n for sub_item in nightpower:\r\n tempnightwindpower.append(sub_item)\r\n\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=3,\r\n context='Function name:{0} - tempdaywindpower:{1}'.format(\r\n function_name,\r\n tempdaywindpower)\r\n )\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=3,\r\n context='Function name:{0} - tempnightwindpower:{1}'.format(\r\n function_name,\r\n tempnightwindpower)\r\n )\r\n\r\n # 今日天气\r\n if item == 0:\r\n resultContext.append(\"=========================================\")\r\n resultContext.append(\"您查询{0}{1}的今天天气情况如下所示:\".format(province, city))\r\n resultContext.append(\"今天是{0},星期{1}\".format(formatted_date, week_dict[week]))\r\n if tempdaywindpower[0] == \"<\":\r\n resultContext.append(\r\n \"今天白天{0},温度{1}度,{2}风小于{3}级\".format(dayweather, daytemp, daywind,\r\n tempdaywindpower[1]))\r\n elif tempdaywindpower[0] == \"≤\":\r\n resultContext.append(\r\n \"今天白天{0},温度{1}度,{2}风小于等于{3}级\".format(dayweather, daytemp, daywind,\r\n tempdaywindpower[1]))\r\n else:\r\n resultContext.append(\r\n \"今天白天{0},温度{1}度,{2}风{3}级\".format(dayweather, daytemp, daywind,\r\n tempdaywindpower[0]))\r\n if tempnightwindpower[0] == \"<\":\r\n resultContext.append(\r\n \"今天夜间{0},温度{1}度,{2}风小于{3}级\".format(nightweather, nighttemp, nightwind,\r\n tempnightwindpower[1]))\r\n elif tempdaywindpower[0] == \"≤\":\r\n resultContext.append(\r\n \"今天夜间{0},温度{1}度,{2}风小于等于{3}级\".format(nightweather, nighttemp, nightwind,\r\n tempnightwindpower[1]))\r\n else:\r\n resultContext.append(\r\n \"今天夜间{0},温度{1}度,{2}风{3}级\".format(nightweather, nighttemp, 
nightwind,\r\n nightpower))\r\n\r\n # 明天天气\r\n elif item == 1:\r\n resultContext.append(\"=========================================\")\r\n resultContext.append(\"您查询{0}{1}的明天天气情况如下所示:\".format(province, city))\r\n resultContext.append(\"明天是{0},星期{1}\".format(formatted_date, week_dict[week]))\r\n if tempdaywindpower[0] == \"<\":\r\n resultContext.append(\r\n \"明天白天{0},温度{1}度,{2}风小于{3}级\".format(dayweather, daytemp, daywind,\r\n tempdaywindpower[1]))\r\n elif tempdaywindpower[0] == \"≤\":\r\n resultContext.append(\r\n \"明天白天{0},温度{1}度,{2}风小于等于{3}级\".format(dayweather, daytemp, daywind,\r\n tempdaywindpower[1]))\r\n else:\r\n resultContext.append(\r\n \"明天白天{0},温度{1}度,{2}风{3}级\".format(dayweather, daytemp, daywind,\r\n daypower))\r\n if tempnightwindpower[0] == \"<\":\r\n resultContext.append(\r\n \"明天夜间{0},温度{1}度,{2}风小于{3}级\".format(nightweather, nighttemp, nightwind,\r\n tempnightwindpower[1]))\r\n elif tempdaywindpower[0] == \"≤\":\r\n resultContext.append(\r\n \"明天夜间{0},温度{1}度,{2}风小于等于{3}级\".format(nightweather, nighttemp, nightwind,\r\n tempnightwindpower[1]))\r\n else:\r\n resultContext.append(\r\n \"明天夜间{0},温度{1}度,{2}风{3}级\".format(nightweather, nighttemp, nightwind,\r\n nightpower))\r\n\r\n # 后天天气\r\n elif item == 2:\r\n resultContext.append(\"=========================================\")\r\n resultContext.append(\"您查询{0}{1}的后天情况如下所示:\".format(province, city))\r\n resultContext.append(\"后天是{0},星期{1}\".format(formatted_date, week_dict[week]))\r\n if tempdaywindpower[0] == \"<\":\r\n resultContext.append(\r\n \"后天白天{0},温度{1}度,{2}风小于{3}级\".format(dayweather, daytemp, daywind,\r\n tempdaywindpower[1]))\r\n elif tempdaywindpower[0] == \"≤\":\r\n resultContext.append(\r\n \"后天白天{0},温度{1}度,{2}风小于等于{3}级\".format(dayweather, daytemp, daywind,\r\n tempdaywindpower[1]))\r\n else:\r\n resultContext.append(\r\n \"后天白天{0},温度{1}度,{2}风{3}级\".format(dayweather, daytemp, daywind,\r\n daypower))\r\n if tempnightwindpower[0] == \"<\":\r\n resultContext.append(\r\n 
\"后天夜间{0},温度{1}度,{2}风小于{3}级\".format(nightweather, nighttemp, nightwind,\r\n tempnightwindpower[1]))\r\n elif tempdaywindpower[0] == \"≤\":\r\n resultContext.append(\r\n \"后天夜间{0},温度{1}度,{2}风小于等于{3}级\".format(nightweather, nighttemp, nightwind,\r\n tempnightwindpower[1]))\r\n else:\r\n resultContext.append(\r\n \"后天夜间{0},温度{1}度,{2}风{3}级\".format(nightweather, nighttemp, nightwind,\r\n nightpower))\r\n\r\n # 大后天天气\r\n elif item == 3:\r\n resultContext.append(\"=========================================\")\r\n resultContext.append(\"您查询{0}{1}的大后天情况如下所示:\".format(province, city))\r\n resultContext.append(\"大后天是{0},星期{1}\".format(formatted_date, week_dict[week]))\r\n if tempdaywindpower[0] == \"<\":\r\n resultContext.append(\r\n \"大后天白天{0},温度{1}度,{2}风小于{3}级\".format(dayweather, daytemp, daywind,\r\n tempdaywindpower[1]))\r\n elif tempdaywindpower[0] == \"≤\":\r\n resultContext.append(\r\n \"大后天白天{0},温度{1}度,{2}风小于等于{3}级\".format(dayweather, daytemp, daywind,\r\n tempdaywindpower[1]))\r\n else:\r\n resultContext.append(\r\n \"大后天白天{0},温度{1}度,{2}风{3}级\".format(dayweather, daytemp, daywind,\r\n daypower))\r\n if tempnightwindpower[0] == \"<\":\r\n resultContext.append(\r\n \"大后天夜间{0},温度{1}度,{2}风小于{3}级\".format(nightweather, nighttemp, nightwind,\r\n tempnightwindpower[1]))\r\n elif tempdaywindpower[0] == \"≤\":\r\n resultContext.append(\r\n \"大后天夜间{0},温度{1}度,{2}风小于等于{3}级\".format(nightweather, nighttemp,\r\n nightwind,\r\n tempnightwindpower[1]))\r\n else:\r\n resultContext.append(\r\n \"大后天夜间{0},温度{1}度,{2}风{3}级\".format(nightweather, nighttemp, nightwind,\r\n nightpower))\r\n\r\n # 异常数据\r\n else:\r\n resultContext.append(\"天气数据异常\")\r\n resultContext.append(\"=========================================\")\r\n resultContext.append(\"天气数据已于{0}更新\".format(formatted_reporttime))\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=3,\r\n context='Function name:{0} - weather information print successfully.'.format(\r\n function_name)\r\n )\r\n\r\n # 查询暂无结果\r\n 
else:\r\n context = \"暂未查到天气信息,换个词进行搜索吧\"\r\n resultContext.append(context)\r\n return resultContext\r\n\r\n except OfficialException as officialException:\r\n # 获得的错误信息\r\n errcode, errorInfo, solution = officialException.get_error_info(self.json_decode)\r\n # 打印到日志文件中\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=3,\r\n context='Function name:{0} - errcode:{1}'.format(function_name,\r\n errcode)\r\n )\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=3,\r\n context='Function name:{0} - errorInfo:{1}'.format(function_name,\r\n errorInfo)\r\n )\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=3,\r\n context='Function name:{0} - solution:{1}'.format(function_name,\r\n solution)\r\n )\r\n\r\n resultContext.append(errorInfo)\r\n context = \"天气信息查询失败,换个词进行搜索吧\"\r\n resultContext.append(context)\r\n return resultContext\r\n\r\n except CustomExpection as customException:\r\n info, detail_information, error_prompt = customException.get_error_info(self.json_decode)\r\n # 打印到日志文件中\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=3,\r\n context='Function name:{0} - info:{1}'.format(function_name,\r\n info)\r\n )\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=3,\r\n context='Function name:{0} - detail_information:{1}'.format(function_name,\r\n detail_information)\r\n )\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=3,\r\n context='error_prompt:{0}'.format(error_prompt)\r\n )\r\n\r\n context = \"天气信息查询失败,换个词进行搜索吧\"\r\n resultContext.append(context)\r\n return resultContext\r\n" }, { "alpha_fraction": 0.6371992826461792, "alphanum_fraction": 0.6652292609214783, "avg_line_length": 37.33928680419922, "blob_id": "d43b6f3b9a0c4e531c24ec9bf2eb6237d76b3c41", "content_id": "797012da4e1ad003cef23760649a0ce58deae8ea", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 10892, 
"license_type": "permissive", "max_line_length": 120, "num_lines": 224, "path": "/FundamentalFunctions/MiddleShanxiAreaDataVisualization.py", "repo_name": "Gaoyifei1011/AmapProgram", "src_encoding": "UTF-8", "text": "import matplotlib.pyplot as plt\r\nimport numpy as np\r\nimport pandas as pd\r\n\r\n\"\"\"\r\nCopy from Jupyter Notebook\r\n\"\"\"\r\n# TODO: In the future version will insert into the 山西省道路信息分析系统 page.\r\n\r\n# 设置字体,否则中文会显示异常\r\nplt.rcParams['font.sans-serif'] = ['Microsoft YaHei']\r\nplt.rcParams['axes.unicode_minus'] = False\r\nplt.rcParams['figure.figsize'] = (22.0, 14.0)\r\nplt.title(\"晋中地区各城市道路通行情况\")\r\n\r\n# 使用pandas读取excel文件\r\ndf_taiyuan = pd.read_excel(r'F:/01.XLS', sheet_name='太原市')\r\ndf_jinzhong = pd.read_excel(r'F:/01.XLS', sheet_name='晋中市')\r\ndf_lvliang = pd.read_excel(r'F:/01.XLS', sheet_name='吕梁市')\r\ndf_yangquan = pd.read_excel(r'F:/01.XLS', sheet_name='阳泉市')\r\n\r\n# 设置子图默认间距\r\nplt.subplots_adjust(hspace=0.5)\r\n# 太原市数据可视化\r\nplt.subplot(2, 2, 1)\r\n# 添加条形图的标题\r\nplt.title('太原市各道路通行情况', fontsize=16)\r\nplt.ylabel(\"拥堵路段信息统计(单位:出现次数)\", fontsize=16)\r\nplt.xlabel(\"道路名称\", fontsize=16)\r\n\r\n# 获取指定道路名称的拥堵评价信息\r\ntaiyuan_road_name = df_taiyuan.groupby(['道路名称']).agg({'道路名称': 'count'}).to_dict()\r\ntaiyuan_road_cong = df_taiyuan.groupby(['道路名称']).agg({'路段拥堵评价': 'count'}).to_dict()\r\n\r\n# 两个道路信息进行合并\r\ntaiyuan_combination = tuple(zip(taiyuan_road_cong['路段拥堵评价'].values(), taiyuan_road_name['道路名称'].values()))\r\n\r\n# 获取拥堵占比信息,道路通畅次数\r\ntaiyuan_cong_proportion = []\r\ntaiyuan_clear_road = []\r\n\r\nfor item in list(taiyuan_combination):\r\n # print(\"{:.2f}\".format(item[0]/item[1]))\r\n taiyuan_cong_proportion.append(float(\"{:.2f}\".format(item[0] / item[1])))\r\n taiyuan_clear_road.append(int(\"{0}\".format(item[1] - item[0])))\r\n\r\ntaiyuan_information = df_taiyuan.groupby(['道路名称']).agg({'道路名称': 'count', '路段拥堵评价': 'count'})\r\n# print(taiyuan_information)\r\n# print(type(taiyuan_information))\r\ntaiyuan_information['拥堵占比'] = 
taiyuan_cong_proportion\r\ntaiyuan_information['道路畅通评价'] = taiyuan_clear_road\r\n# print(taiyuan_information)\r\n# print(list(taiyuan_information['道路名称']))\r\n# print(list(taiyuan_information['路段拥堵评价']))\r\n# print(list(taiyuan_information['拥堵占比']))\r\n# print(taiyuan_information['道路畅通评价'])\r\n# 纵轴列表数据\r\ny = range(0, 101, 10)\r\n# print(y)\r\n\r\n# 太原市道路名称\r\ntaiyuan_road_name_list = ['东中环路', '五一路', '北中环街', '南中环街', '南内环街', '太榆路', '平阳路', '并州北路', '并州南路', '府东街', '建设北路', '建设南路',\r\n '滨河东路', '滨河西路', '西中环路', '迎泽大街', '长风街']\r\nplt.xticks(range(len(taiyuan_road_name_list)), taiyuan_road_name_list, rotation=70, fontsize=16)\r\n\r\nplt.yticks(range(0, 101, 10), fontsize=16)\r\n\r\nplt.bar(np.arange(17) - 0.3, height=list(taiyuan_road_name['道路名称'].values()), alpha=0.5, width=0.3, color='skyblue',\r\n edgecolor='red', label='道路获取次数')\r\nplt.bar(np.arange(17), height=list(taiyuan_information['道路畅通评价']), alpha=0.5, width=0.3, color='green',\r\n edgecolor='blue', label='道路畅通次数')\r\nplt.bar(np.arange(17) + 0.3, height=list(taiyuan_information['路段拥堵评价']), alpha=0.5, width=0.3, color='yellow',\r\n edgecolor='blue', label='道路拥堵次数')\r\n\r\n# 晋中市数据可视化\r\nplt.subplot(2, 2, 2)\r\n# 添加条形图的标题\r\nplt.title('晋中市各道路通行情况', fontsize=16)\r\nplt.ylabel(\"拥堵路段信息统计(单位:出现次数)\", fontsize=16)\r\nplt.xlabel(\"道路名称\", fontsize=16)\r\n\r\n# 获取指定道路名称的拥堵评价信息\r\njinzhong_road_name = df_jinzhong.groupby(['道路名称']).agg({'道路名称': 'count'}).to_dict()\r\njinzhong_road_cong = df_jinzhong.groupby(['道路名称']).agg({'路段拥堵评价': 'count'}).to_dict()\r\n\r\n# 两个道路信息进行合并\r\njinzhong_combination = tuple(zip(jinzhong_road_cong['路段拥堵评价'].values(), jinzhong_road_name['道路名称'].values()))\r\n\r\n# 获取拥堵占比信息,道路通畅次数\r\njinzhong_cong_proportion = []\r\njinzhong_clear_road = []\r\n\r\nfor item in list(jinzhong_combination):\r\n # print(\"{:.2f}\".format(item[0]/item[1]))\r\n jinzhong_cong_proportion.append(float(\"{:.2f}\".format(item[0] / item[1])))\r\n jinzhong_clear_road.append(int(\"{0}\".format(item[1] - 
item[0])))\r\n\r\njinzhong_information = df_jinzhong.groupby(['道路名称']).agg({'道路名称': 'count', '路段拥堵评价': 'count'})\r\n\r\n# print(type(jinzhong_information))\r\njinzhong_information['拥堵占比'] = jinzhong_cong_proportion\r\njinzhong_information['道路畅通评价'] = jinzhong_clear_road\r\n\r\n# print(list(jinzhong_information['道路名称']))\r\n# print(list(jinzhong_information['路段拥堵评价']))\r\n# print(list(jinzhong_information['拥堵占比']))\r\n# print(list(jinzhong_information['道路畅通评价']))\r\n# 纵轴列表数据\r\ny = range(0, 101, 10)\r\n# print(y)\r\n\r\n# 晋中市道路名称\r\njinzhong_road_name_list = ['中都路', '定阳路', '新建路', '汇通北路', '汇通南路', '汇通路', '蕴华街', '迎宾街', '锦纶路', '顺城街', '龙湖街']\r\nplt.xticks(range(len(jinzhong_road_name_list)), jinzhong_road_name_list, rotation=70, fontsize=16)\r\n\r\nplt.yticks(range(0, 101, 10), fontsize=16)\r\n\r\nplt.bar(np.arange(11) - 0.3, height=list(jinzhong_road_name['道路名称'].values()), alpha=0.5, width=0.3, color='skyblue',\r\n edgecolor='red', label='道路获取次数')\r\nplt.bar(np.arange(11), height=list(jinzhong_information['道路畅通评价']), alpha=0.5, width=0.3, color='green',\r\n edgecolor='blue', label='道路畅通次数')\r\nplt.bar(np.arange(11) + 0.3, height=list(jinzhong_information['路段拥堵评价']), alpha=0.5, width=0.3, color='yellow',\r\n edgecolor='blue', label='道路拥堵次数')\r\n\r\n# 阳泉市数据可视化\r\nplt.subplot(2, 2, 3)\r\n# 添加条形图的标题\r\nplt.title('阳泉市各道路通行情况', fontsize=16)\r\nplt.ylabel(\"拥堵路段信息统计(单位:出现次数)\", fontsize=16)\r\nplt.xlabel(\"道路名称\", fontsize=16)\r\n\r\n# 获取指定道路名称的拥堵评价信息\r\nyangquan_road_name = df_yangquan.groupby(['道路名称']).agg({'道路名称': 'count'}).to_dict()\r\nyangquan_road_cong = df_yangquan.groupby(['道路名称']).agg({'路段拥堵评价': 'count'}).to_dict()\r\n\r\n# 两个道路信息进行合并\r\nyangquan_combination = tuple(zip(yangquan_road_cong['路段拥堵评价'].values(), yangquan_road_name['道路名称'].values()))\r\n\r\n# 获取拥堵占比信息,道路通畅次数\r\nyangquan_cong_proportion = []\r\nyangquan_clear_road = []\r\n\r\nfor item in list(yangquan_combination):\r\n # print(\"{:.2f}\".format(item[0]/item[1]))\r\n 
yangquan_cong_proportion.append(float(\"{:.2f}\".format(item[0] / item[1])))\r\n yangquan_clear_road.append(int(\"{0}\".format(item[1] - item[0])))\r\n\r\nyangquan_information = df_yangquan.groupby(['道路名称']).agg({'道路名称': 'count', '路段拥堵评价': 'count'})\r\n\r\n# print(type(yangquan_information))\r\nyangquan_information['拥堵占比'] = yangquan_cong_proportion\r\nyangquan_information['道路畅通评价'] = yangquan_clear_road\r\n\r\n# print(list(yangquan_information['道路名称']))\r\n# print(list(yangquan_information['路段拥堵评价']))\r\n# print(list(yangquan_information['拥堵占比']))\r\n# print(list(yangquan_information['道路畅通评价']))\r\n# 纵轴列表数据\r\ny = range(0, 101, 10)\r\n# print(y)\r\n\r\n# 阳泉市道路名称\r\nyangquan_road_name_list = ['东环路', '南大街', '桃北东街', '桃北中街', '泉中路']\r\nplt.xticks(range(len(yangquan_road_name_list)), yangquan_road_name_list, rotation=70, fontsize=16)\r\n\r\nplt.yticks(range(0, 101, 10), fontsize=16)\r\n\r\nplt.bar(np.arange(5) - 0.3, height=list(yangquan_road_name['道路名称'].values()), alpha=0.5, width=0.3, color='skyblue',\r\n edgecolor='red', label='道路获取次数')\r\nplt.bar(np.arange(5), height=list(yangquan_information['道路畅通评价']), alpha=0.5, width=0.3, color='green',\r\n edgecolor='blue', label='道路畅通次数')\r\nplt.bar(np.arange(5) + 0.3, height=list(yangquan_information['路段拥堵评价']), alpha=0.5, width=0.3, color='yellow',\r\n edgecolor='blue', label='道路拥堵次数')\r\n\r\n# 吕梁市数据可视化\r\nplt.subplot(2, 2, 4)\r\n# 添加条形图的标题\r\nplt.title('吕梁市各道路通行情况', fontsize=16)\r\nplt.ylabel(\"拥堵路段信息统计(单位:出现次数)\", fontsize=16)\r\nplt.xlabel(\"道路名称\", fontsize=16)\r\n\r\n# 获取指定道路名称的拥堵评价信息\r\nlvliang_road_name = df_lvliang.groupby(['道路名称']).agg({'道路名称': 'count'}).to_dict()\r\nlvliang_road_cong = df_lvliang.groupby(['道路名称']).agg({'路段拥堵评价': 'count'}).to_dict()\r\n\r\n# 两个道路信息进行合并\r\nlvliang_combination = tuple(zip(lvliang_road_cong['路段拥堵评价'].values(), lvliang_road_name['道路名称'].values()))\r\n\r\n# 获取拥堵占比信息,道路通畅次数\r\nlvliang_cong_proportion = []\r\nlvliang_clear_road = []\r\n\r\nfor item in list(lvliang_combination):\r\n # 
print(\"{:.2f}\".format(item[0]/item[1]))\r\n lvliang_cong_proportion.append(float(\"{:.2f}\".format(item[0] / item[1])))\r\n lvliang_clear_road.append(int(\"{0}\".format(item[1] - item[0])))\r\n\r\nlvliang_information = df_lvliang.groupby(['道路名称']).agg({'道路名称': 'count', '路段拥堵评价': 'count'})\r\n\r\n# print(type(lvliang_information))\r\nlvliang_information['拥堵占比'] = lvliang_cong_proportion\r\nlvliang_information['道路畅通评价'] = lvliang_clear_road\r\n\r\n# print(list(lvliang_information['道路名称']))\r\n# print(list(lvliang_information['路段拥堵评价']))\r\n# print(list(lvliang_information['拥堵占比']))\r\n# print(list(lvliang_information['道路畅通评价']))\r\n# 纵轴列表数据\r\ny = range(0, 101, 10)\r\n# print(y)\r\n\r\n# 吕梁市道路名称\r\nlvliang_road_name_list = ['北川河西路', '吕梁大道', '滨河北东路', '滨河北中路', '滨河北西路', '龙凤北大街', '龙凤南大街']\r\nplt.xticks(range(len(lvliang_road_name_list)), lvliang_road_name_list, rotation=70, fontsize=16)\r\n\r\nplt.yticks(range(0, 101, 10), fontsize=16)\r\n\r\nplt.bar(np.arange(7) - 0.3, height=list(lvliang_road_name['道路名称'].values()), alpha=0.5, width=0.3, color='skyblue',\r\n edgecolor='red', label='道路获取次数')\r\nplt.bar(np.arange(7), height=list(lvliang_information['道路畅通评价']), alpha=0.5, width=0.3, color='green', edgecolor='blue',\r\n label='道路畅通次数')\r\nplt.bar(np.arange(7) + 0.3, height=list(lvliang_information['路段拥堵评价']), alpha=0.5, width=0.3, color='yellow',\r\n edgecolor='blue', label='道路拥堵次数')\r\nplt.savefig(r\"C:\\Users\\高怡飞\\Desktop\\01.png\", dpi=600)\r\n" }, { "alpha_fraction": 0.5575436949729919, "alphanum_fraction": 0.5601897239685059, "avg_line_length": 43.774742126464844, "blob_id": "13def1f91f4e1cf40a21001dadcfb730fc16461e", "content_id": "22c1a65aab55e457c254070dd824cf0079b208f3", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 72341, "license_type": "permissive", "max_line_length": 127, "num_lines": 1354, "path": "/MainWindow.py", "repo_name": "Gaoyifei1011/AmapProgram", "src_encoding": "UTF-8", "text": 
"import urllib.request\r\n\r\nfrom PIL import Image\r\nfrom PIL.ImageQt import ImageQt\r\nfrom PyQt5 import QtWidgets, QtGui, QtChart\r\nfrom PyQt5.QtCore import Qt\r\nfrom PyQt5.QtGui import QPixmap\r\nfrom PyQt5.QtWidgets import QGraphicsPixmapItem, QGraphicsScene\r\n\r\nfrom FundamentalFunctions.AdministrativeDistrictOperation import AdministrativeDistrictOperation\r\nfrom FundamentalFunctions.BusRoutePlanningOperation import BusRoutePlanningOperation\r\nfrom FundamentalFunctions.DriveRoutePlanningOperation import DriveRoutePlanningOperation\r\nfrom FundamentalFunctions.GetTrafficData import GetTrafficData\r\nfrom FundamentalFunctions.IPLocationOperation import IPLocationOperation\r\nfrom FundamentalFunctions.RideRoutePlanningOperation import RideRoutePlanningOperation\r\nfrom FundamentalFunctions.StaticMapsOperation import StaticMapsOperation\r\nfrom FundamentalFunctions.TrafficInformationExecuteOperation import TrafficInformationReadOperation\r\nfrom FundamentalFunctions.TrafficSituationOperation import TrafficSituationOperation\r\nfrom FundamentalFunctions.WalkingRoutePlanningOperation import WalkingRoutePlanningOperation\r\nfrom FundamentalFunctions.WeatherOperation import WeatherOperation\r\nfrom Resources.Icon.Icon import *\r\nfrom SettingsMainWindow import SettingsMainWindow\r\nfrom Window.MainUI import Ui_AmapMainUI\r\nfrom Window.MessageBoxUI import SelfMessageBox\r\nfrom logrecord.WriteLog import WriteLog\r\n\r\n\r\nclass MainWindow(QtWidgets.QMainWindow, Ui_AmapMainUI):\r\n \"\"\"\r\n 函数:主窗口界面函数MainWindow\r\n\r\n \"\"\"\r\n\r\n def __init__(self, parent=None):\r\n \"\"\"\r\n 函数:主窗口界面组件初始化\r\n Args:\r\n parent:arent作为构造函数的最后一个参数被传入,但通常情况下不必显示去指定parent对象。因为当调用局管理器时,部局管理器会自动处理这种parent-child关系。\r\n \"\"\"\r\n\r\n # 写入日志\r\n writeLog = WriteLog()\r\n class_name = self.__class__.__name__\r\n log_filename = writeLog.create_filename(class_name=class_name)\r\n writeLog.write_to_log(file_name=log_filename, log_level=1, context='Class name:{0} 
start'.format(class_name))\r\n\r\n # 对继承自父类的属性进行初始化\r\n super(MainWindow, self).__init__()\r\n self.setupUi(self)\r\n\r\n # 侧边栏选择条目触发器\r\n self.basicFunctionListWidget.itemClicked.connect(self.basic_function_list_widget_clicked)\r\n self.searchServiceListWidget.itemClicked.connect(self.search_service_list_widget_clicked)\r\n self.advancedFunctionListWidget.itemClicked.connect(self.advanced_function_list_widget_clicked)\r\n self.otherOptionsListWidget.itemClicked.connect(self.other_options_list_widget_clicked)\r\n\r\n # 步行路径规划查询页面\r\n self.walkingDepartureAddress = None\r\n self.walkingDestinationAddress = None\r\n # 步行路径出发点输入框内容变化监听器,步行路径终点输入框内容变化监听器\r\n self.walkingDepartureLineEdit.textChanged[str].connect(self.walking_departure_line_edit_text_changed)\r\n self.walkingDestinationLineEdit.textChanged[str].connect(self.walking_destination_line_edit_text_changed)\r\n # 步行路径出发点回车键(Enter)监听器,步行路径终点回车键(Enter)监听器\r\n self.walkingDepartureLineEdit.returnPressed.connect(\r\n lambda: self.get_walking_route_planning_event_handler(self.walkingDepartureAddress,\r\n self.walkingDestinationAddress))\r\n self.walkingDestinationLineEdit.returnPressed.connect(\r\n lambda: self.get_walking_route_planning_event_handler(self.walkingDepartureAddress,\r\n self.walkingDestinationAddress))\r\n # 步行路径规划查询按钮点击触发器\r\n self.walkingSearchButton.clicked.connect(\r\n lambda: self.get_walking_route_planning_event_handler(self.walkingDepartureAddress,\r\n self.walkingDestinationAddress))\r\n\r\n # 公交路径规划查询页面\r\n self.busDepartureAddress = None\r\n self.busDestinationAddress = None\r\n # 公交路径出发点输入框内容变化监听器,公交路径终点输入框内容变化监听器\r\n self.busDepartureLineEdit.textChanged[str].connect(self.bus_departure_line_edit_text_changed)\r\n self.busDestinationLineEdit.textChanged[str].connect(self.bus_destination_line_edit_text_changed)\r\n # 公交路径出发点回车键(Enter)监听器,公交路径终点回车键(Enter)监听器\r\n self.busDepartureLineEdit.returnPressed.connect(\r\n lambda: 
self.get_bus_route_planning_event_handler(self.busDepartureAddress,\r\n self.busDestinationAddress))\r\n self.busDestinationLineEdit.returnPressed.connect(\r\n lambda: self.get_bus_route_planning_event_handler(self.busDepartureAddress,\r\n self.busDestinationAddress))\r\n # 公交路径规划查询按钮点击触发器\r\n self.busSearchButton.clicked.connect(\r\n lambda: self.get_bus_route_planning_event_handler(self.busDepartureAddress,\r\n self.busDestinationAddress))\r\n\r\n # 驾驶路径规划查询页面\r\n self.driveDepartureAddress = None\r\n self.driveDestinationAddress = None\r\n # 驾驶路径出发点输入框内容变化监听器,驾驶路径终点输入框内容变化监听器\r\n self.driveDepartureLineEdit.textChanged[str].connect(self.drive_departure_line_edit_text_changed)\r\n self.driveDestinationLineEdit.textChanged[str].connect(self.drive_destination_line_edit_text_changed)\r\n # 驾驶路径出发点回车键(Enter)监听器,驾驶路径终点回车键(Enter)监听器\r\n self.driveDepartureLineEdit.returnPressed.connect(\r\n lambda: self.get_drive_route_planning_event_handler(self.driveDepartureAddress,\r\n self.driveDestinationAddress))\r\n self.driveDestinationLineEdit.returnPressed.connect(\r\n lambda: self.get_drive_route_planning_event_handler(self.driveDepartureAddress,\r\n self.driveDestinationAddress))\r\n # 驾驶路径规划查询按钮点击触发器\r\n self.driveSearchButton.clicked.connect(\r\n lambda: self.get_drive_route_planning_event_handler(self.driveDepartureAddress,\r\n self.driveDestinationAddress))\r\n\r\n # 骑行路径规划查询页面\r\n self.rideDepartureAddress = None\r\n self.rideDestinationAddress = None\r\n # 骑行路径出发点输入框内容变化监听器,骑行路径终点输入框内容变化监听器\r\n self.rideDepartureLineEdit.textChanged[str].connect(self.ride_departure_line_edit_text_changed)\r\n self.rideDestinationLineEdit.textChanged[str].connect(self.ride_destination_line_edit_text_changed)\r\n # 骑行路径出发点回车键(Enter)监听器,骑行路径终点回车键(Enter)监听器\r\n self.rideDepartureLineEdit.returnPressed.connect(\r\n lambda: self.get_ride_route_planning_event_handler(self.rideDepartureAddress,\r\n self.rideDestinationAddress))\r\n self.rideDestinationLineEdit.returnPressed.connect(\r\n 
lambda: self.get_ride_route_planning_event_handler(self.rideDepartureAddress,\r\n self.rideDestinationAddress))\r\n # 骑行路径规划查询按钮点击触发器\r\n self.rideSearchButton.clicked.connect(\r\n lambda: self.get_ride_route_planning_event_handler(self.rideDepartureAddress,\r\n self.rideDestinationAddress))\r\n\r\n # 静态地图查询页面\r\n self.staticMapsPosition = None\r\n # 静态地图地点查询框内容变化监听器\r\n self.staticMapsSearchLineEdit.textChanged[str].connect(self.static_maps_search_line_edit_text_changed)\r\n # 静态地图地点查询框回车键(Enter)监听器\r\n self.staticMapsSearchLineEdit.returnPressed.connect(\r\n lambda: self.get_static_maps_event_handler(self.staticMapsPosition)\r\n )\r\n # 静态地图查询按钮点击触发器\r\n self.staticMapsSearchButton.clicked.connect(\r\n lambda: self.get_static_maps_event_handler(self.staticMapsPosition)\r\n )\r\n\r\n # IP地址查询界面\r\n self.ip = None\r\n # IP信息输入框内容变化监听器\r\n self.IPLocationLineEdit.textChanged[str].connect(self.ip_location_line_edit_text_changed)\r\n # IP信息输入框回车键(Enter)监听器\r\n self.IPLocationLineEdit.returnPressed.connect(lambda: self.get_ip_location_event_handler(self.ip))\r\n # IP信息查询按钮点击触发器\r\n self.IPLocationSearchButton.clicked.connect(lambda: self.get_ip_location_event_handler(self.ip))\r\n # 获取当前网络IP地址按钮点击触发器\r\n self.IPLocationGetLocalNetWorkButton.clicked.connect(self.get_ip_location_from_host)\r\n\r\n # 行政区域查询页面\r\n self.administrativeInformation = None\r\n # 省份列表\r\n provinceList = ['请选择省份',\r\n '北京市', '天津市', '河北省', '山西省', '内蒙古自治区',\r\n '辽宁省', '吉林省', '黑龙江省',\r\n '上海市', '江苏省', '浙江省', '安徽省', '福建省 ', '江西省', '山东省',\r\n '河南省', '湖北省', '湖南省', '广东省', '广西壮族自治区', '海南省',\r\n '重庆市', '四川省', '贵州省', '云南省', '西藏自治区',\r\n '陕西省', '甘肃省', '青海省', '宁夏回族自治区', '新疆维吾尔自治区',\r\n '台湾省', '香港特别行政区', '澳门特别行政区']\r\n # 市区列表\r\n cityList = ['请选择城市']\r\n # 县城列表\r\n countyList = ['请选择区/县']\r\n\r\n # 省份列表初始化\r\n for province in provinceList:\r\n self.provinceComboBox.addItem(province)\r\n # 市区列表初始化\r\n self.cityComboBox.setEnabled(False)\r\n for city in cityList:\r\n self.cityComboBox.addItem(city)\r\n # 
县城列表初始化\r\n self.countyComboBox.setEnabled(False)\r\n for county in countyList:\r\n self.countyComboBox.addItem(county)\r\n\r\n # 省份下拉选择框条目变化事件监听器\r\n self.provinceComboBox.currentIndexChanged.connect(self.province_index_changed_event_handler)\r\n # 市区下拉选择框条目变化事件监听器\r\n self.cityComboBox.currentIndexChanged.connect(self.city_index_changed_event_handler)\r\n # 行政区域查询按钮点击触发器\r\n self.administrativeSearchButton.clicked.connect(self.administrative_search_button_event_handler)\r\n\r\n # 天气查询界面\r\n self.city = None\r\n # 天气信息查询输入框内容变化监听器\r\n self.weatherSearchLineEdit.textChanged[str].connect(self.weather_search_line_edit_changed)\r\n # 天气信息查询输入框回车键(Enter)监听器\r\n self.weatherSearchLineEdit.returnPressed.connect(lambda: self.get_weather_information_event_handler(self.city))\r\n # 天气信息查询按钮点击触发器\r\n self.weatherSearchButton.clicked.connect(lambda: self.get_weather_information_event_handler(self.city))\r\n\r\n # 交通态势界面\r\n # 省份列表\r\n trafficProvinceList = ['请选择省份',\r\n '北京市', '天津市', '河北省', '山西省', '内蒙古自治区',\r\n '辽宁省', '吉林省', '黑龙江省',\r\n '上海市', '江苏省', '浙江省', '安徽省', '福建省 ', '江西省', '山东省',\r\n '河南省', '湖北省', '湖南省', '广东省', '广西壮族自治区', '海南省',\r\n '重庆市', '四川省', '贵州省', '云南省', '西藏自治区',\r\n '陕西省', '甘肃省', '青海省', '宁夏回族自治区', '新疆维吾尔自治区']\r\n # 市区列表\r\n trafficCityList = ['请选择城市']\r\n # 交通态势查询方式下拉选择框条目变化事件监听器\r\n self.trafficRoadRealSituationComboBox.currentIndexChanged.connect(\r\n self.traffic_situation_index_changed_event_handler)\r\n # 省份列表初始化\r\n for province in trafficProvinceList:\r\n self.trafficRealRoadProvinceComboBox.addItem(province)\r\n # 市区列表初始化\r\n self.trafficRealRoadCityComboBox.setEnabled(False)\r\n for city in trafficCityList:\r\n self.trafficRealRoadCityComboBox.addItem(city)\r\n # 交通态势省份下拉选择框条目变化事件监听器\r\n self.trafficRealRoadProvinceComboBox.currentIndexChanged.connect(\r\n self.traffic_province_index_changed_event_handler)\r\n\r\n # 交通态势(道路实时路况)\r\n self.roadName = None\r\n # 交通态势(道路实时路况)输入框内容变化监听器\r\n 
self.trafficSituationRealRoadSearchRoadName.textChanged.connect(self.traffic_situation_real_road_text_changed)\r\n # 交通态势(道路实时路况)输入框回车键(Enter)监听器\r\n self.trafficSituationRealRoadSearchRoadName.returnPressed.connect(\r\n lambda: self.get_traffic_real_road_event_handler(self.roadName))\r\n # 交通态势(道路实时路况)信息查询按钮点击触发器\r\n self.trafficRealRoadSearchButton.clicked.connect(\r\n lambda: self.get_traffic_real_road_event_handler(self.roadName))\r\n\r\n # 交通态势分析系统界面\r\n RoadNameList = {\"太原市\": [\"迎泽大街\", \"建设南路\", \"建设北路\", \"太榆路\", \"滨河东路\", \"滨河西路\", \"并州北路\", \"并州南路\", \"南内环街\",\r\n \"五一路\", \"府东街\", \"长风街\", \"平阳路\", \"南中环街\", \"东中环路\", \"北中环街\", \"西中环路\"],\r\n \"大同市\": [\"魏都大道\", \"御河西路\", \"御河东路\", \"云中路\", \"文兴路\", \"同煤快线\", \"北都街\", \"南环路\", \"迎宾街\"],\r\n \"阳泉市\": [\"桃北东街\", \"桃北中街\", \"泉中路\", \"南大街\", \"东环路\"],\r\n \"长治市\": [\"英雄南路\", \"英雄中路\", \"英雄北路\", \"太行东街\", \"太行西街\"],\r\n \"晋城市\": [\"泽州路\", \"泽州南路\", \"中原东街\", \"中原西街\", \"凤台东街\", \"凤台西街\"],\r\n \"朔州市\": [\"民福东街\", \"民福西街\", \"张辽南路\", \"张辽北路\", \"开发南路\", \"开发北路\", \"文远路\"],\r\n \"忻州市\": [\"和平东街\", \"和平西街\", \"七一南路\", \"七一北路\", \"慕山南路\", \"慕山北路\", \"雁门西大道\", \"建设南路\", \"建设北路\"],\r\n \"吕梁市\": [\"龙凤南大街\", \"龙凤北大街\", \"北川河西路\", \"滨河北西路\", \"滨河北中路\", \"滨河北东路\", \"吕梁大道\"],\r\n \"晋中市\": [\"汇通北路\", \"汇通路\", \"汇通南路\", \"龙湖大街\", \"迎宾街\", \"顺城街\", \"中都路\", \"新建路\", \"定阳路\", \"锦纶路\", \"蕴华街\"],\r\n \"临汾市\": [\"滨河西路\", \"滨河路\", \"鼓楼南大街\", \"鼓楼北大街\"],\r\n \"运城市\": [\"解放南路\", \"解放北路\", \"中银南路\", \"中银北路\", \"机场路\", \"工农东街\", \"人民北路\", \"学苑路\"]\r\n }\r\n\r\n # 这里有一些异常发生,将在未来的某一个版本进行修复\r\n # There are some exceptions occurring here that will be fixed in a future release\r\n # # 交通态势(矩形区域实时路况)\r\n # self.positionBottomLeft = None\r\n # self.positionTopRight = None\r\n # # 交通态势(矩形区域实时路况)输入框(矩形区域左下角)内容变化监听器\r\n # self.trafficRectangleRoadSearchPositionLineEdit1.textChanged.connect(\r\n # self.trafficRectangleBottomLeftRoadTextChanged)\r\n # # 交通态势(矩形区域实时路况)输入框(矩形区域右上角)内容变化监听器\r\n # 
self.trafficRectangleRoadSearchPositionLineEdit2.textChanged.connect(\r\n # self.trafficRectangleTopRightRoadTextChanged)\r\n # # 交通态势(矩形区域实时路况)输入框(矩形区域左下角)回车键(Enter)监听器\r\n # self.trafficRectangleRoadSearchPositionLineEdit1.returnPressed.connect(\r\n # lambda: self.getTrafficPolygonRoadEventHandler(self.positionBottomLeft, self.positionTopRight))\r\n # # 交通态势(矩形区域实时路况)输入框(矩形区域左下角)回车键(Enter)监听器\r\n # self.trafficRectangleRoadSearchPositionLineEdit2.returnPressed.connect(\r\n # lambda: self.getTrafficPolygonRoadEventHandler(self.positionBottomLeft, self.positionTopRight))\r\n # # 交通态势(矩形区域实时路况)查询按钮点击触发器\r\n # self.trafficRectangleRoadLevelSearchButton.clicked.connect(\r\n # lambda: self.getTrafficPolygonRoadEventHandler(self.positionBottomLeft, self.positionTopRight))\r\n\r\n # 交通信息分析系统信息开始获取\r\n self.flagStart = False\r\n self.trafficSituationAnalysisSystemStartButton.clicked.connect(\r\n lambda: self.analysis_system_start_event_handler(RoadNameList))\r\n # 交通信息分析系统信息停止获取\r\n self.trafficSituationAnalysisSystemStopButton.clicked.connect(self.analysis_system_stop_event_handler)\r\n # 交通信息分析系统信息面板查看\r\n self.trafficSituationAnalysisSystemViewButton.clicked.connect(self.analysis_system_view_event_handler)\r\n\r\n # 侧边栏选择条目事件处理01\r\n def basic_function_list_widget_clicked(self):\r\n # 获取当前列表部件中所有选中项的一个列表\r\n selectedItems = self.basicFunctionListWidget.selectedItems()\r\n # 设置当前列表部件选中项为None\r\n selectItem = None\r\n # 清除选中的项\r\n self.searchServiceListWidget.clearSelection()\r\n self.advancedFunctionListWidget.clearSelection()\r\n # 获取当前选中项的名称\r\n for item in selectedItems:\r\n selectItem = item.text()\r\n if selectItem == \"路径规划\":\r\n self.amapProgramStackedWidget.setCurrentIndex(0)\r\n elif selectItem == \"静态地图\":\r\n self.amapProgramStackedWidget.setCurrentIndex(2)\r\n\r\n # 侧边栏选择条目事件处理02\r\n def search_service_list_widget_clicked(self):\r\n # 获取当前列表部件中所有选中项的一个列表\r\n selectedItems = self.searchServiceListWidget.selectedItems()\r\n # 设置当前列表部件选中项为None\r\n selectItem = 
None\r\n # 清除选中的项\r\n self.basicFunctionListWidget.clearSelection()\r\n self.advancedFunctionListWidget.clearSelection()\r\n # 获取当前选中项的名称\r\n for item in selectedItems:\r\n selectItem = item.text()\r\n if selectItem == \"IP地址查询\":\r\n self.amapProgramStackedWidget.setCurrentIndex(1)\r\n elif selectItem == \"行政区域查询\":\r\n self.amapProgramStackedWidget.setCurrentIndex(3)\r\n elif selectItem == \"天气查询\":\r\n self.amapProgramStackedWidget.setCurrentIndex(4)\r\n\r\n # 侧边栏选择条目事件处理03\r\n def advanced_function_list_widget_clicked(self):\r\n # 获取当前列表部件中所有选中项的一个列表\r\n selectedItems = self.advancedFunctionListWidget.selectedItems()\r\n # 设置当前列表部件选中项为None\r\n selectItem = None\r\n # 清除选中的项\r\n self.basicFunctionListWidget.clearSelection()\r\n self.searchServiceListWidget.clearSelection()\r\n # 获取当前选中项的名称\r\n for item in selectedItems:\r\n selectItem = item.text()\r\n if selectItem == \"交通态势\":\r\n self.amapProgramStackedWidget.setCurrentIndex(5)\r\n elif selectItem == \"交通态势分析系统\":\r\n self.amapProgramStackedWidget.setCurrentIndex(6)\r\n\r\n # 侧边栏选择条目事件处理04\r\n def other_options_list_widget_clicked(self):\r\n # 获取当前列表部件中所有选中项的一个列表\r\n selectedItems = self.otherOptionsListWidget.selectedItems()\r\n # 设置当前列表部件选中项为None\r\n selectItem = None\r\n # 获取当前选中项的名称\r\n for item in selectedItems:\r\n selectItem = item.text()\r\n if selectItem == \"设置\":\r\n self.settingsMainWindow = SettingsMainWindow()\r\n # 新建的窗口始终位于当前屏幕的最前面\r\n self.settingsMainWindow.setWindowFlags(Qt.WindowStaysOnTopHint)\r\n # 阻塞父类窗口不能点击\r\n self.settingsMainWindow.setWindowModality(Qt.ApplicationModal)\r\n self.settingsMainWindow.show()\r\n # 点击打开条目后清除选中的项\r\n self.otherOptionsListWidget.clearSelection()\r\n\r\n elif selectItem == \"关于\":\r\n self.settingsMainWindow = SettingsMainWindow()\r\n self.settingsMainWindow.itemListWidget.setCurrentRow(3)\r\n self.settingsMainWindow.stackedWidget.setCurrentIndex(3)\r\n # 新建的窗口始终位于当前屏幕的最前面\r\n self.settingsMainWindow.setWindowFlags(Qt.WindowStaysOnTopHint)\r\n # 
阻塞父类窗口不能点击\r\n self.settingsMainWindow.setWindowModality(Qt.ApplicationModal)\r\n self.settingsMainWindow.show()\r\n # 点击打开条目后清除选中的项\r\n self.otherOptionsListWidget.clearSelection()\r\n\r\n # 文本输入框内容变化事件处理器\r\n # 步行路径规划起点输入框内容变化事件处理器\r\n def walking_departure_line_edit_text_changed(self, text):\r\n self.text = text\r\n self.walkingDepartureAddress = self.text\r\n\r\n # 步行路径规划终点输入框内容变化事件处理器\r\n def walking_destination_line_edit_text_changed(self, text):\r\n self.text = text\r\n self.walkingDestinationAddress = self.text\r\n\r\n # 公交路径规划起点输入框内容变化事件处理器\r\n def bus_departure_line_edit_text_changed(self, text):\r\n self.text = text\r\n self.busDepartureAddress = self.text\r\n\r\n # 公交路径规划终点输入框内容变化事件处理器\r\n def bus_destination_line_edit_text_changed(self, text):\r\n self.text = text\r\n self.busDestinationAddress = self.text\r\n\r\n # 驾驶路径规划起点输入框内容变化事件处理器\r\n def drive_departure_line_edit_text_changed(self, text):\r\n self.text = text\r\n self.driveDepartureAddress = self.text\r\n\r\n # 驾驶路径规划终点输入框内容变化事件处理器\r\n def drive_destination_line_edit_text_changed(self, text):\r\n self.text = text\r\n self.driveDestinationAddress = self.text\r\n\r\n # 骑行路径规划起点输入框内容变化事件处理器\r\n def ride_departure_line_edit_text_changed(self, text):\r\n self.text = text\r\n self.rideDepartureAddress = self.text\r\n\r\n # 驾驶路径规划终点输入框内容变化事件处理器\r\n def ride_destination_line_edit_text_changed(self, text):\r\n self.text = text\r\n self.rideDestinationAddress = self.text\r\n\r\n def static_maps_search_line_edit_text_changed(self, text):\r\n self.text = text\r\n self.staticMapsPosition = self.text\r\n\r\n # IP地址查询输入框内容变化事件处理器\r\n def ip_location_line_edit_text_changed(self, text):\r\n self.text = text\r\n self.ip = self.text\r\n\r\n # 天气查询输入框内容变化事件处理器\r\n def weather_search_line_edit_changed(self, text):\r\n self.text = text\r\n self.city = self.text\r\n\r\n # 交通态势(道路实时路况)输入框内容变化事件处理器\r\n def traffic_situation_real_road_text_changed(self, text):\r\n self.text = text\r\n self.roadName = self.text\r\n\r\n # 
这里有一些异常发生,将在未来的某一个版本进行修复\r\n # There are some exceptions occurring here that will be fixed in a future release\r\n # # 交通态势(矩形区域实时路况)输入框(矩形区域左上角)内容变化监听器\r\n # def trafficRectangleBottomLeftRoadTextChanged(self, text):\r\n # self.text = text\r\n # self.positionBottomLeft = self.text\r\n #\r\n # # 交通态势(矩形区域实时路况)输入框(矩形区域右上角)内容变化监听器\r\n # def trafficRectangleTopRightRoadTextChanged(self, text):\r\n # self.text = text\r\n # self.positionTopRight = self.text\r\n\r\n # 步行路径规划查询按钮事件处理器\r\n def get_walking_route_planning_event_handler(self, walkingDepartureAddress, walkingDestinationAddress):\r\n self.walkingDepartureAddress = walkingDepartureAddress\r\n self.walkingDestinationAddress = walkingDestinationAddress\r\n\r\n # 步行路径规划实例化\r\n walkingRoutePlanningOperation = WalkingRoutePlanningOperation()\r\n walkingDepartureCheckedResult = walkingRoutePlanningOperation.check_walking_departure_information(\r\n self.walkingDepartureAddress)\r\n walkingDestinationCheckedResult = walkingRoutePlanningOperation.check_walking_destination_information(\r\n self.walkingDestinationAddress)\r\n\r\n if walkingDepartureCheckedResult == 2:\r\n # 步行路径规划起点输入框内容为空\r\n # 消息框初始化(自定义消息框)\r\n selfMessageBox = SelfMessageBox()\r\n # 自定义字体\r\n font = selfMessageBox.selfDefineFont()\r\n # 自定义消息等级\r\n level = selfMessageBox.messageLevel(1)\r\n # 消息框界面初始化\r\n selfMessageBox.initUI(self, font=font,\r\n level=level,\r\n title=\"步行路径规划输入提示\",\r\n information=\"请您输入出发点后再查询\",\r\n icon=\":/About.png\"\r\n )\r\n\r\n elif walkingDestinationCheckedResult == 2:\r\n # 步行路径规划终点输入框内容为空\r\n # 消息框初始化(自定义消息框)\r\n selfMessageBox = SelfMessageBox()\r\n # 自定义字体\r\n font = selfMessageBox.selfDefineFont()\r\n # 自定义消息等级\r\n level = selfMessageBox.messageLevel(1)\r\n # 消息框界面初始化\r\n selfMessageBox.initUI(self, font=font,\r\n level=level,\r\n title=\"步行路径规划输入提示\",\r\n information=\"请您输入终点后再查询\",\r\n icon=\":/About.png\"\r\n )\r\n\r\n elif walkingDepartureCheckedResult and walkingDestinationCheckedResult:\r\n # 
步行路径规划起点和终点输入框内容有信息\r\n walkingInformationList = walkingRoutePlanningOperation.get_walking_route_planning_information(\r\n self.walkingDepartureAddress,\r\n self.walkingDestinationAddress\r\n )\r\n # 在GUI窗口上显示获得的信息\r\n walkingInformation = '\\n'.join(walkingInformationList)\r\n self.walkingResultTextEdit.setText(walkingInformation)\r\n\r\n # 公交路径规划查询按钮事件处理器\r\n def get_bus_route_planning_event_handler(self, busDepartureAddress, busDestinationAddress):\r\n self.busDepartureAddress = busDepartureAddress\r\n self.busDestinationAddress = busDestinationAddress\r\n\r\n # 公交路径规划实例化\r\n busRoutePlanningOperation = BusRoutePlanningOperation()\r\n busDepartureCheckedResult = busRoutePlanningOperation.check_bus_departure_information(\r\n self.busDepartureAddress)\r\n busDestinationCheckedResult = busRoutePlanningOperation.check_bus_destination_information(\r\n self.busDestinationAddress)\r\n\r\n if busDepartureCheckedResult == 2:\r\n # 公交路径规划起点输入框内容为空\r\n # 消息框初始化(自定义消息框)\r\n selfMessageBox = SelfMessageBox()\r\n # 自定义字体\r\n font = selfMessageBox.selfDefineFont()\r\n # 自定义消息等级\r\n level = selfMessageBox.messageLevel(1)\r\n # 消息框界面初始化\r\n selfMessageBox.initUI(self, font=font,\r\n level=level,\r\n title=\"公交路径规划输入提示\",\r\n information=\"请您输入出发点后再查询\",\r\n icon=\":/About.png\"\r\n )\r\n\r\n elif busDestinationCheckedResult == 2:\r\n # 公交路径规划终点输入框内容为空\r\n # 消息框初始化(自定义消息框)\r\n selfMessageBox = SelfMessageBox()\r\n # 自定义字体\r\n font = selfMessageBox.selfDefineFont()\r\n # 自定义消息等级\r\n level = selfMessageBox.messageLevel(1)\r\n # 消息框界面初始化\r\n selfMessageBox.initUI(self, font=font,\r\n level=level,\r\n title=\"公交路径规划输入提示\",\r\n information=\"请您输入终点后再查询\",\r\n icon=\":/About.png\"\r\n )\r\n\r\n elif busDepartureCheckedResult and busDestinationCheckedResult:\r\n # 步行路径规划起点和终点输入框内容有信息\r\n busInformationList = busRoutePlanningOperation.get_bus_route_planning_information(\r\n self.busDepartureAddress,\r\n self.busDestinationAddress\r\n )\r\n # 在GUI窗口上显示获得的信息\r\n busInformation = 
'\\n'.join(busInformationList)\r\n self.busResultTextEdit.setText(busInformation)\r\n\r\n # 驾驶路径规划查询按钮事件处理器\r\n def get_drive_route_planning_event_handler(self, driveDepartureAddress, driveDestinationAddress):\r\n self.driveDepartureAddress = driveDepartureAddress\r\n self.driveDestinationAddress = driveDestinationAddress\r\n\r\n # 驾驶路径规划实例化\r\n driveRoutePlanningOperation = DriveRoutePlanningOperation()\r\n driveDepartureCheckedResult = driveRoutePlanningOperation.check_drive_departure_information(\r\n self.driveDepartureAddress)\r\n driveDestinationCheckedResult = driveRoutePlanningOperation.check_drive_destination_information(\r\n self.driveDestinationAddress)\r\n\r\n if driveDepartureCheckedResult == 2:\r\n # 驾驶路径规划起点输入框内容为空\r\n # 消息框初始化(自定义消息框)\r\n selfMessageBox = SelfMessageBox()\r\n # 自定义字体\r\n font = selfMessageBox.selfDefineFont()\r\n # 自定义消息等级\r\n level = selfMessageBox.messageLevel(1)\r\n # 消息框界面初始化\r\n selfMessageBox.initUI(self, font=font,\r\n level=level,\r\n title=\"驾驶路径规划输入提示\",\r\n information=\"请您输入出发点后再查询\",\r\n icon=\":/About.png\"\r\n )\r\n\r\n elif driveDestinationCheckedResult == 2:\r\n # 驾驶路径规划终点输入框内容为空\r\n # 消息框初始化(自定义消息框)\r\n selfMessageBox = SelfMessageBox()\r\n # 自定义字体\r\n font = selfMessageBox.selfDefineFont()\r\n # 自定义消息等级\r\n level = selfMessageBox.messageLevel(1)\r\n # 消息框界面初始化\r\n selfMessageBox.initUI(self, font=font,\r\n level=level,\r\n title=\"驾驶路径规划输入提示\",\r\n information=\"请您输入终点后再查询\",\r\n icon=\":/About.png\"\r\n )\r\n\r\n elif driveDepartureCheckedResult and driveDestinationCheckedResult:\r\n # 步行路径规划起点和终点输入框内容有信息\r\n driveInformationList = driveRoutePlanningOperation.get_drive_route_planning_information(\r\n self.driveDepartureAddress,\r\n self.driveDestinationAddress\r\n )\r\n # 在GUI窗口上显示获得的信息\r\n driveInformation = '\\n'.join(driveInformationList)\r\n self.driveResultTextEdit.setText(driveInformation)\r\n\r\n # 骑行路径规划查询按钮事件处理器\r\n def get_ride_route_planning_event_handler(self, rideDepartureAddress, 
rideDestinationAddress):\r\n self.rideDepartureAddress = rideDepartureAddress\r\n self.rideDestinationAddress = rideDestinationAddress\r\n\r\n # 骑行路径规划实例化\r\n rideRoutePlanningOperation = RideRoutePlanningOperation()\r\n rideDepartureCheckedResult = rideRoutePlanningOperation.check_ride_departure_information(\r\n self.rideDepartureAddress)\r\n rideDestinationCheckedResult = rideRoutePlanningOperation.check_ride_destination_information(\r\n self.rideDestinationAddress)\r\n\r\n if rideDepartureCheckedResult == 2:\r\n # 骑行路径规划起点输入框内容为空\r\n # 消息框初始化(自定义消息框)\r\n selfMessageBox = SelfMessageBox()\r\n # 自定义字体\r\n font = selfMessageBox.selfDefineFont()\r\n # 自定义消息等级\r\n level = selfMessageBox.messageLevel(1)\r\n # 消息框界面初始化\r\n selfMessageBox.initUI(self, font=font,\r\n level=level,\r\n title=\"骑行路径规划输入提示\",\r\n information=\"请您输入出发点后再查询\",\r\n icon=\":/About.png\"\r\n )\r\n\r\n elif rideDestinationCheckedResult == 2:\r\n # 骑行路径规划终点输入框内容为空\r\n # 消息框初始化(自定义消息框)\r\n selfMessageBox = SelfMessageBox()\r\n # 自定义字体\r\n font = selfMessageBox.selfDefineFont()\r\n # 自定义消息等级\r\n level = selfMessageBox.messageLevel(1)\r\n # 消息框界面初始化\r\n selfMessageBox.initUI(self, font=font,\r\n level=level,\r\n title=\"骑行路径规划输入提示\",\r\n information=\"请您输入终点后再查询\",\r\n icon=\":/About.png\"\r\n )\r\n\r\n elif rideDepartureCheckedResult and rideDestinationCheckedResult:\r\n # 骑行路径规划起点和终点输入框内容有信息\r\n rideInformationList = rideRoutePlanningOperation.get_ride_route_planning_information(\r\n self.rideDepartureAddress,\r\n self.rideDestinationAddress\r\n )\r\n rideInformation = '\\n'.join(rideInformationList)\r\n self.rideResultTextEdit.setText(rideInformation)\r\n\r\n # 静态地图查询按钮事件处理器\r\n def get_static_maps_event_handler(self, staticMapsPosition):\r\n self.staticMapsPosition = staticMapsPosition\r\n\r\n # 静态地图实例化\r\n staticMapsOperation = StaticMapsOperation()\r\n staticMapsCheckedResult = staticMapsOperation.check_static_maps_information(self.staticMapsPosition)\r\n\r\n if staticMapsCheckedResult == 2:\r\n 
# 静态地图地点输入框内容为空\r\n # 消息框初始化(自定义消息框)\r\n selfMessageBox = SelfMessageBox()\r\n # 自定义字体\r\n font = selfMessageBox.selfDefineFont()\r\n # 自定义消息等级\r\n level = selfMessageBox.messageLevel(1)\r\n # 消息框界面初始化\r\n selfMessageBox.initUI(self, font=font,\r\n level=level,\r\n title=\"静态地图输入提示\",\r\n information=\"请您输入地点后再查询\",\r\n icon=\":/About.png\"\r\n )\r\n\r\n else:\r\n # 静态地图实例初始化\r\n staticMapsInformation = staticMapsOperation.get_static_maps(staticMapsPosition=self.staticMapsPosition,\r\n zoom=15,\r\n size='351*236',\r\n scale=2,\r\n traffic=0\r\n )\r\n\r\n if staticMapsInformation == \"1\":\r\n # 静态地图查询内容存在错误\r\n # 消息框初始化(自定义消息框)\r\n selfMessageBox = SelfMessageBox()\r\n # 自定义字体\r\n font = selfMessageBox.selfDefineFont()\r\n # 自定义消息等级\r\n level = selfMessageBox.messageLevel(4)\r\n # 消息框界面初始化\r\n selfMessageBox.initUI(self, font=font,\r\n level=level,\r\n title=\"静态地图查询提示\",\r\n information=\"获取地图图片失败\",\r\n icon=\":/Error.png\"\r\n )\r\n\r\n elif staticMapsInformation == \"2\":\r\n # 静态地图查询内容存在错误\r\n # 消息框初始化(自定义消息框)\r\n selfMessageBox = SelfMessageBox()\r\n # 自定义字体\r\n font = selfMessageBox.selfDefineFont()\r\n # 自定义消息等级\r\n level = selfMessageBox.messageLevel(4)\r\n # 消息框界面初始化\r\n selfMessageBox.initUI(self, font=font,\r\n level=level,\r\n title=\"静态地图查询提示\",\r\n information=\"图片保存失败,请检查您的网络链接或是否有保存文件的权限\",\r\n icon=\":/Error.png\"\r\n )\r\n\r\n elif staticMapsInformation == \"3\":\r\n # 地理位置信息查询失败\r\n # 消息框初始化(自定义消息框)\r\n selfMessageBox = SelfMessageBox()\r\n # 自定义字体\r\n font = selfMessageBox.selfDefineFont()\r\n # 自定义消息等级\r\n level = selfMessageBox.messageLevel(4)\r\n # 消息框界面初始化\r\n selfMessageBox.initUI(self, font=font,\r\n level=level,\r\n title=\"静态地图查询提示\",\r\n information=\"您提供的地点信息查询失败,换个词进行搜索吧\",\r\n icon=\":/Error.png\"\r\n )\r\n\r\n else:\r\n # 显示图片\r\n img = Image.open(staticMapsInformation)\r\n frame = ImageQt(img)\r\n pixmap = QPixmap.fromImage(frame)\r\n self.item = QGraphicsPixmapItem(pixmap)\r\n self.scene = QGraphicsScene()\r\n 
self.scene.addItem(self.item)\r\n self.staticMapsPhotoView.setScene(self.scene)\r\n\r\n # IP地址查询按钮事件处理器\r\n def get_ip_location_event_handler(self, ip):\r\n self.ip = ip\r\n\r\n # IP地址实例初始化\r\n ipLocationOperation = IPLocationOperation()\r\n IPFormatCheckResult = ipLocationOperation.check_ip_formation(self.ip)\r\n\r\n if IPFormatCheckResult == 1:\r\n IPInformation = ipLocationOperation.get_ip_information(self.ip)\r\n if 'error_context' in IPInformation:\r\n self.IPLocationResultTextEdit.setText(IPInformation['error_context'])\r\n\r\n else:\r\n # 在GUI窗口上显示获得的信息\r\n ipInformation = '\\n'.join(IPInformation.values())\r\n self.IPLocationResultTextEdit.setText(ipInformation)\r\n\r\n elif IPFormatCheckResult == 2:\r\n selfMessageBox = SelfMessageBox()\r\n font = selfMessageBox.selfDefineFont()\r\n level = selfMessageBox.messageLevel(1)\r\n selfMessageBox.initUI(self, font=font,\r\n level=level,\r\n title=\"IP输入提示\",\r\n information=\"您输入的IP地址内容为空\",\r\n icon=\":/About.png\"\r\n )\r\n\r\n else:\r\n selfMessageBox = SelfMessageBox()\r\n font = selfMessageBox.selfDefineFont()\r\n level = selfMessageBox.messageLevel(3)\r\n selfMessageBox.initUI(self, font=font,\r\n level=level,\r\n title=\"IP输入提示\",\r\n information=\"您输入的IP地址格式有误\",\r\n icon=\":/Warning.png\"\r\n )\r\n\r\n def get_ip_location_from_host(self):\r\n # 获取本机IP\r\n ip = urllib.request.urlopen('http://ip.42.pl/raw').read()\r\n ip = str(ip).strip('b')\r\n\r\n # IP地址实例初始化\r\n ipLocationOperation = IPLocationOperation()\r\n IPInformation = ipLocationOperation.get_ip_information(eval(ip))\r\n if 'error_context' in IPInformation:\r\n self.IPLocationResultTextEdit.setText(IPInformation['error_context'])\r\n\r\n else:\r\n # 在GUI窗口上显示获得的信息\r\n ipInformation = '\\n'.join(IPInformation.values())\r\n self.IPLocationResultTextEdit.setText(ipInformation)\r\n\r\n # 省份下拉选择框条目变化事件处理器\r\n def province_index_changed_event_handler(self):\r\n administrativeDistrictOperation = AdministrativeDistrictOperation()\r\n # 未选择省份\r\n if 
self.provinceComboBox.currentIndex() == 0:\r\n self.cityComboBox.setEnabled(False)\r\n self.cityComboBox.setCurrentIndex(0)\r\n\r\n else:\r\n # 选择省份\r\n indexText = self.provinceComboBox.currentText()\r\n administrativeList = administrativeDistrictOperation.get_sub_district(indexText)\r\n administrativeListLength = len(administrativeList)\r\n # 市区列表为空\r\n if administrativeListLength == 0:\r\n self.cityComboBox.setEnabled(False)\r\n\r\n # 市区列表不为空\r\n elif administrativeListLength > 0:\r\n self.cityComboBox.clear()\r\n self.cityComboBox.addItem(\"请选择城市\")\r\n self.cityComboBox.addItems(administrativeList)\r\n self.cityComboBox.setEnabled(True)\r\n\r\n # 市区下拉选择框条目变化事件处理器\r\n def city_index_changed_event_handler(self):\r\n # 直筒子市\r\n no_county_or_district_city = ['东莞市', '中山市', '儋州市', '嘉峪关市']\r\n MunicipalityCity = ['北京市', '上海市', '天津市', '重庆市']\r\n administrativeDistrictOperation = AdministrativeDistrictOperation()\r\n # 未选择城市\r\n if self.cityComboBox.currentIndex() == 0:\r\n self.countyComboBox.setEnabled(False)\r\n self.countyComboBox.setCurrentIndex(0)\r\n\r\n # 已选择城市\r\n else:\r\n indexText = self.cityComboBox.currentText()\r\n # 直筒子市,不显示区县行政区域\r\n if indexText in no_county_or_district_city:\r\n self.countyComboBox.clear()\r\n self.countyComboBox.setEnabled(False)\r\n\r\n # 直辖市,不显示区县行政区域\r\n elif self.provinceComboBox.currentText() in MunicipalityCity:\r\n self.countyComboBox.clear()\r\n self.countyComboBox.setEnabled(False)\r\n\r\n # 不是直筒子市,显示行政区域\r\n else:\r\n administrativeList = administrativeDistrictOperation.get_sub_district(indexText)\r\n administrativeListLength = len(administrativeList)\r\n # 县/区列表为空\r\n if administrativeListLength == 0:\r\n self.countyComboBox.setEnabled(False)\r\n\r\n # 县/区列表不为空\r\n elif administrativeListLength > 0:\r\n self.countyComboBox.clear()\r\n self.countyComboBox.addItem(\"请选择区/县\")\r\n self.countyComboBox.addItems(administrativeList)\r\n self.countyComboBox.setEnabled(True)\r\n\r\n # 行政区域查询按钮点击事件处理器\r\n def 
administrative_search_button_event_handler(self):\r\n # 省份/城市/县/区名称\r\n provinceIndexText = self.provinceComboBox.currentText()\r\n cityIndexText = self.cityComboBox.currentText()\r\n countyIndexText = self.countyComboBox.currentText()\r\n\r\n # 下subdistrict级行政区域\r\n subdistrict = self.subDistrictNumComboBox.currentIndex()\r\n resultDistrict = ''\r\n\r\n if countyIndexText != \"请选择区/县\":\r\n resultDistrict = countyIndexText\r\n elif cityIndexText != \"请选择城市\":\r\n resultDistrict = cityIndexText\r\n elif provinceIndexText != \"请选择省份\":\r\n resultDistrict = provinceIndexText\r\n\r\n if resultDistrict == '':\r\n # 行政区域查询选择框未选择\r\n # 消息框初始化(自定义消息框)\r\n selfMessageBox = SelfMessageBox()\r\n # 自定义字体\r\n font = selfMessageBox.selfDefineFont()\r\n # 自定义消息等级\r\n level = selfMessageBox.messageLevel(1)\r\n # 消息框界面初始化\r\n selfMessageBox.initUI(self, font=font,\r\n level=level,\r\n title=\"行政区域查询提示\",\r\n information=\"请您选择省份、城市、区/县后再查询\",\r\n icon=\":/About.png\"\r\n )\r\n\r\n else:\r\n administrativeDistrictOperation = AdministrativeDistrictOperation()\r\n administrativeDistrictList = administrativeDistrictOperation.get_all_district_information(resultDistrict,\r\n subdistrict)\r\n administrativeDistrictInformation = '\\n'.join(administrativeDistrictList)\r\n self.administrativeResultTextEdit.setText(administrativeDistrictInformation)\r\n\r\n # 天气查询按钮事件处理器\r\n def get_weather_information_event_handler(self, city):\r\n self.city = city\r\n weatherOperation = WeatherOperation()\r\n weatherFormatCheckResult = weatherOperation.check_weather_information(self.city)\r\n\r\n if weatherFormatCheckResult == 1:\r\n # 天气查询实例初始化\r\n weatherInformationList = weatherOperation.get_weather_information(city=self.city, weatherType='base')\r\n weatherInformation = '\\n'.join(weatherInformationList)\r\n self.weatherResultTextEdit.setText(weatherInformation)\r\n\r\n elif weatherFormatCheckResult == 2:\r\n # 天气信息输入框内容为空\r\n # 消息框初始化(自定义消息框)\r\n selfMessageBox = SelfMessageBox()\r\n # 自定义字体\r\n font 
= selfMessageBox.selfDefineFont()\r\n # 自定义消息等级\r\n level = selfMessageBox.messageLevel(1)\r\n # 消息框界面初始化\r\n selfMessageBox.initUI(self, font=font,\r\n level=level,\r\n title=\"天气输入提示\",\r\n information=\"请您输入城市后再查询\",\r\n icon=\":/About.png\"\r\n )\r\n\r\n def traffic_situation_index_changed_event_handler(self):\r\n # 获取当前页面索引\r\n trafficSituationPage = self.trafficRoadRealSituationComboBox.currentIndex()\r\n if trafficSituationPage == 0:\r\n self.trafficSituationStackedWidget.setCurrentIndex(0)\r\n elif trafficSituationPage == 1:\r\n self.trafficSituationStackedWidget.setCurrentIndex(1)\r\n elif trafficSituationPage == 2:\r\n self.trafficSituationStackedWidget.setCurrentIndex(2)\r\n elif trafficSituationPage == 3:\r\n self.trafficSituationStackedWidget.setCurrentIndex(3)\r\n\r\n # 交通态势省份下拉选择框条目变化事件处理器\r\n def traffic_province_index_changed_event_handler(self):\r\n administrativeDistrictOperation = AdministrativeDistrictOperation()\r\n\r\n # 未选择省份\r\n if self.trafficRealRoadProvinceComboBox.currentIndex() == 0:\r\n self.trafficRealRoadCityComboBox.setEnabled(False)\r\n self.trafficRealRoadCityComboBox.setCurrentIndex(0)\r\n\r\n else:\r\n # 选择省份\r\n indexText = self.trafficRealRoadProvinceComboBox.currentText()\r\n administrativeList = administrativeDistrictOperation.get_sub_district(indexText)\r\n administrativeListLength = len(administrativeList)\r\n # 市区列表为空\r\n if administrativeListLength == 0:\r\n self.trafficRealRoadCityComboBox.setEnabled(False)\r\n\r\n # 市区列表不为空\r\n elif administrativeListLength > 0:\r\n self.trafficRealRoadCityComboBox.clear()\r\n self.trafficRealRoadCityComboBox.addItem(\"请选择城市\")\r\n self.trafficRealRoadCityComboBox.addItems(administrativeList)\r\n self.trafficRealRoadCityComboBox.setEnabled(True)\r\n\r\n def get_traffic_real_road_event_handler(self, roadName):\r\n self.roadName = roadName\r\n\r\n # 交通信息实例初始化\r\n trafficSituationOperation = TrafficSituationOperation()\r\n trafficSituationRealRoadCheckResult = 
trafficSituationOperation.check_real_road_information(self.roadName)\r\n\r\n if trafficSituationRealRoadCheckResult == 2:\r\n # 交通态势(道路实时路况)信息输入框内容为空\r\n # 消息框初始化(自定义消息框)\r\n selfMessageBox = SelfMessageBox()\r\n # 自定义字体\r\n font = selfMessageBox.selfDefineFont()\r\n # 自定义消息等级\r\n level = selfMessageBox.messageLevel(1)\r\n # 消息框界面初始化\r\n selfMessageBox.initUI(self, font=font,\r\n level=level,\r\n title=\"交通态势查询提示\",\r\n information=\"请您输入道路名称后再查询\",\r\n icon=\":/About.png\"\r\n )\r\n\r\n elif self.trafficRealRoadCityComboBox.currentIndex() == 0:\r\n # 交通态势(道路实时路况)下拉选择框未选择城市进行查询\r\n # 消息框初始化(自定义消息框)\r\n selfMessageBox = SelfMessageBox()\r\n # 自定义字体\r\n font = selfMessageBox.selfDefineFont()\r\n # 自定义消息等级\r\n level = selfMessageBox.messageLevel(1)\r\n # 消息框界面初始化\r\n selfMessageBox.initUI(self, font=font,\r\n level=level,\r\n title=\"交通态势查询提示\",\r\n information=\"请您选择城市后再查询\",\r\n icon=\":/About.png\"\r\n )\r\n\r\n else:\r\n # 获取选中的城市\r\n city = self.trafficRealRoadCityComboBox.currentText()\r\n\r\n # 获取交通态势(实时路况)图片信息\r\n position = str(city) + str(self.roadName)\r\n staticMapsOperation = StaticMapsOperation()\r\n staticMapsInformation = staticMapsOperation.get_static_maps(staticMapsPosition=position,\r\n zoom=11,\r\n size='224*193',\r\n scale=2,\r\n traffic=1\r\n )\r\n\r\n if staticMapsInformation not in [\"1\", \"2\", \"3\"]:\r\n # 显示图片\r\n img = Image.open(staticMapsInformation)\r\n frame = ImageQt(img)\r\n pixmap = QPixmap.fromImage(frame)\r\n self.item = QGraphicsPixmapItem(pixmap)\r\n self.scene = QGraphicsScene()\r\n self.scene.addItem(self.item)\r\n self.trafficRealReadRoadPhotoView.setScene(self.scene)\r\n\r\n # 获取描述信息\r\n if self.trafficRealRoadProvinceComboBox.currentText() == '北京市':\r\n city = '北京市'\r\n elif self.trafficRealRoadProvinceComboBox.currentText() == '上海市':\r\n city = '上海市'\r\n elif self.trafficRealRoadProvinceComboBox.currentText() == '天津市':\r\n city = '天津市'\r\n elif self.trafficRealRoadProvinceComboBox.currentText() == '重庆市':\r\n city = 
'重庆市'\r\n\r\n # 显示道路路况实时信息\r\n trafficSituationRealRoadList = trafficSituationOperation.get_traffic_situation_real_road_information(city,\r\n self.roadName)\r\n trafficSituationRealRoadInformation = '\\n'.join(trafficSituationRealRoadList)\r\n self.trafficSituationRealRoadTextEdit.setText(trafficSituationRealRoadInformation)\r\n\r\n # 设置按钮\r\n\r\n # 这里有一些异常发生,将在未来的某一个版本进行修复\r\n # There are some exceptions occurring here that will be fixed in a future release\r\n # def getTrafficPolygonRoadEventHandler(self, positionBottomLeft, positionTopRight):\r\n # self.positionBottomLeft = positionBottomLeft\r\n # self.positionTopRight = positionTopRight\r\n #\r\n # roadLevelIndex = self.trafficRectangleRoadLevelComboBox.currentIndex()\r\n #\r\n # # 交通态势初始化\r\n # trafficSituationOperation = TrafficSituationOperation()\r\n # positionBottomLeftCheckedResult = trafficSituationOperation.checkRectanglePositionInformation(\r\n # self.positionBottomLeft)\r\n # positionTopRightCheckedResult = trafficSituationOperation.checkRectanglePositionInformation(\r\n # self.positionTopRight)\r\n # if positionBottomLeftCheckedResult == 2:\r\n # # 交通态势(矩形区域实时路况)信息输入框(左下角区域)内容为空\r\n # # 消息框初始化(自定义消息框)\r\n # selfMessageBox = SelfMessageBox()\r\n # # 自定义字体\r\n # font = selfMessageBox.selfDefineFont()\r\n # # 自定义消息等级\r\n # level = selfMessageBox.messageLevel(1)\r\n # # 消息框界面初始化\r\n # selfMessageBox.initUI(self, font=font,\r\n # level=level,\r\n # title=\"交通态势查询提示\",\r\n # information=\"请您输入具体位置信息后再查询\",\r\n # icon=\":/About.png\"\r\n # )\r\n #\r\n # elif positionTopRightCheckedResult == 2:\r\n # # 交通态势(矩形区域实时路况)信息输入框(右上角区域)内容为空\r\n # # 消息框初始化(自定义消息框)\r\n # selfMessageBox = SelfMessageBox()\r\n # # 自定义字体\r\n # font = selfMessageBox.selfDefineFont()\r\n # # 自定义消息等级\r\n # level = selfMessageBox.messageLevel(1)\r\n # # 消息框界面初始化\r\n # selfMessageBox.initUI(self, font=font,\r\n # level=level,\r\n # title=\"交通态势查询提示\",\r\n # information=\"请您输入具体位置信息后再查询\",\r\n # icon=\":/About.png\"\r\n # )\r\n #\r\n # elif 
positionBottomLeftCheckedResult and positionTopRightCheckedResult:\r\n # # 中文地理位置名称转换为高德地图地理位置名称\r\n # geographicPositionBottomLeft = trafficSituationOperation.getGeographicCodingPosition(\r\n # self.positionBottomLeft)\r\n # geographicPositionTopRight = trafficSituationOperation.getGeographicCodingPosition(self.positionTopRight)\r\n #\r\n # if geographicPositionBottomLeft == \"1\" or geographicPositionTopRight == \"1\":\r\n # # 步行路径规划起点输入框内容为空\r\n # # 消息框初始化(自定义消息框)\r\n # selfMessageBox = SelfMessageBox()\r\n # # 自定义字体\r\n # font = selfMessageBox.selfDefineFont()\r\n # # 自定义消息等级\r\n # level = selfMessageBox.messageLevel(1)\r\n # # 消息框界面初始化\r\n # selfMessageBox.initUI(self, font=font,\r\n # level=level,\r\n # title=\"交通态势查询提示\",\r\n # information=\"您提供的地点信息查询失败,换个词进行搜索吧\",\r\n # icon=\":/About.png\"\r\n # )\r\n # else:\r\n # # 计算中心点\r\n # # 数据处理\r\n # # 将高德地图字符串格式数据转换成计算格式的数据\r\n # BottomLeft = geographicPositionBottomLeft.split(',')\r\n # TopRight = geographicPositionTopRight.split(',')\r\n # # 矩形区域左下角坐标元素\r\n # BottomLeftList = []\r\n # # 矩形区域右上角坐标元素\r\n # TopRightList = []\r\n # # 将字符串数据转换成float浮点数格式数据\r\n # for item in BottomLeft:\r\n # item = float(item)\r\n # BottomLeftList.append(item)\r\n # for item in TopRight:\r\n # item = float(item)\r\n # TopRightList.append(item)\r\n # # 将列表数据转换成元组数据,以进行计算\r\n # BottomLeftList = tuple(BottomLeftList)\r\n # TopRightList = tuple(TopRightList)\r\n # geographicPositionList = [BottomLeftList, TopRightList]\r\n # # 获取中心点\r\n # geographicPositionCenter = trafficSituationOperation.getCenterGeographicPosition(geographicPositionList)\r\n # # 将中心点的元组数据转换成字符串数据,并保留小数点后六位\r\n # geographicPositionCenterList = []\r\n # for item in geographicPositionCenter:\r\n # item = '{:.6f}'.format(item)\r\n # geographicPositionCenterList.append(item)\r\n # geographicPositionCenter = ','.join(geographicPositionCenterList)\r\n #\r\n # # 获取描述信息\r\n # trafficSituationRectangleRoadList = 
trafficSituationOperation.getTrafficSituationRectangleRoadInformation(\r\n # geographicPositionBottomLeft, geographicPositionTopRight, roadLevelIndex)\r\n # trafficSituationRectangleInformation = ''\r\n # for item in trafficSituationRectangleRoadList:\r\n # trafficSituationRectangleInformation = trafficSituationRectangleInformation + item + '\\n'\r\n # self.trafficRectangleRoadTextEdit.setText(trafficSituationRectangleInformation)\r\n #\r\n # # 获取中心点对应的图片信息\r\n # staticMapsOperation = StaticMapsOperation()\r\n # staticMapsInformation = staticMapsOperation.getStaticMapsbyLocation(\r\n # staticMapsPosition=geographicPositionCenter,\r\n # zoom=12,\r\n # size='224*193',\r\n # scale=2,\r\n # traffic=1\r\n # )\r\n #\r\n # # 显示图片\r\n # if staticMapsInformation not in [\"1\", \"2\", \"3\"]:\r\n # img = Image.open(staticMapsInformation)\r\n # frame = ImageQt(img)\r\n # pixmap = QPixmap.fromImage(frame)\r\n # self.item = QGraphicsPixmapItem(pixmap)\r\n # self.scene = QGraphicsScene()\r\n # self.scene.addItem(self.item)\r\n # self.trafficRectangleReadRoadPhotoView.setScene(self.scene)\r\n\r\n # 交通信息分析系统信息开始获取按钮事件处理器\r\n def analysis_system_start_event_handler(self, RoadNameList):\r\n self.RoadNameList = RoadNameList\r\n\r\n # 按钮已经按下,不要再重复按键\r\n if self.flagStart:\r\n # 消息框初始化(自定义消息框)\r\n selfMessageBox = SelfMessageBox()\r\n # 自定义字体\r\n font = selfMessageBox.selfDefineFont()\r\n # 自定义消息等级\r\n level = selfMessageBox.messageLevel(1)\r\n # 消息框界面初始化\r\n selfMessageBox.initUI(self, font=font,\r\n level=level,\r\n title=\"交通态势系统提示\",\r\n information=\"程序已经在运行中\",\r\n icon=\":/About.png\"\r\n )\r\n\r\n else:\r\n # 设置按钮按下状态\r\n self.flagStart = True\r\n # 获取当前显示的内容\r\n currentContext = self.trafficSituationAnalysisSystemTextEdit.toPlainText()\r\n currentContextList = currentContext.split(\"===================\")\r\n # 清空显示的内容\r\n self.trafficSituationAnalysisSystemTextEdit.clear()\r\n # 内容为空,直接显示结果\r\n if currentContext == '':\r\n 
self.trafficSituationAnalysisSystemTextEdit.setText(\"数据采集情况\\n程序在后台获取数据中\\n\")\r\n # 之前存在数据\r\n elif currentContextList[0]:\r\n currentContextList[0] = \"数据采集情况\\n程序在后台获取数据中\\n\"\r\n currentContext = '==================='.join(currentContextList)\r\n self.trafficSituationAnalysisSystemTextEdit.setText(currentContext)\r\n else:\r\n self.trafficSituationAnalysisSystemTextEdit.setText(\r\n \"数据采集情况\\n程序在后台获取数据中\\n===================\\n{0}\".format(currentContext))\r\n self.thread = GetTrafficData(self.RoadNameList)\r\n self.thread.signal.connect(self.callback) # 连接回调函数,接收结果\r\n self.thread.start()\r\n\r\n def analysis_system_stop_event_handler(self):\r\n if self.flagStart:\r\n self.thread.stop()\r\n # 获取当前显示的内容\r\n currentContext = self.trafficSituationAnalysisSystemTextEdit.toPlainText()\r\n currentContextList = currentContext.split(\"===================\")\r\n # 清空显示的内容\r\n self.trafficSituationAnalysisSystemTextEdit.clear()\r\n # 内容为空,直接显示结果\r\n if currentContext == '':\r\n self.trafficSituationAnalysisSystemTextEdit.setText(\"数据采集情况\\n数据获取完成\\n\")\r\n # 之前存在数据\r\n elif currentContextList[0]:\r\n currentContextList[0] = \"数据采集情况\\n数据获取完成\\n\"\r\n currentContext = '==================='.join(currentContextList)\r\n self.trafficSituationAnalysisSystemTextEdit.setText(currentContext)\r\n else:\r\n self.trafficSituationAnalysisSystemTextEdit.setText(\r\n \"数据采集情况\\n数据获取完成\\n===================\\n{0}\".format(currentContext))\r\n # 取消按钮按下状态\r\n self.flagStart = False\r\n\r\n else:\r\n # 消息框初始化(自定义消息框)\r\n selfMessageBox = SelfMessageBox()\r\n # 自定义字体\r\n font = selfMessageBox.selfDefineFont()\r\n # 自定义消息等级\r\n level = selfMessageBox.messageLevel(1)\r\n # 消息框界面初始化\r\n selfMessageBox.initUI(self, font=font,\r\n level=level,\r\n title=\"交通态势系统提示\",\r\n information=\"程序尚未运行\",\r\n icon=\":/About.png\"\r\n )\r\n\r\n # 回调函数\r\n def callback(self, msg):\r\n self.msg = msg\r\n\r\n # 写入日志\r\n writeLog = WriteLog()\r\n class_name = self.__class__.__name__\r\n log_filename = 
writeLog.create_filename(class_name=class_name)\r\n\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=1,\r\n context='Traffic Data batch Operation Successfully get'\r\n )\r\n # 文件中输出回调信息\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=1,\r\n context=self.msg\r\n )\r\n\r\n # 交通态势信息查看事件处理器\r\n def analysis_system_view_event_handler(self):\r\n # 获取下拉框选择的城市信息\r\n currentCity = self.trafficSituationAnalysisSystemCityComboBox.currentText()\r\n\r\n if currentCity == '请选择城市':\r\n # 消息框初始化(自定义消息框)\r\n selfMessageBox = SelfMessageBox()\r\n # 自定义字体\r\n font = selfMessageBox.selfDefineFont()\r\n # 自定义消息等级\r\n level = selfMessageBox.messageLevel(1)\r\n # 消息框界面初始化\r\n selfMessageBox.initUI(self, font=font,\r\n level=level,\r\n title=\"交通态势系统提示\",\r\n information=\"请选择具体的城市再查询\",\r\n icon=\":/About.png\"\r\n )\r\n\r\n else:\r\n # 读取Excel具体内容\r\n trafficInformationReadOperation = TrafficInformationReadOperation()\r\n # 读取excel具体信息\r\n result = trafficInformationReadOperation.read_excel_xls(currentCity)\r\n\r\n wholeDataLength = int(result['wholeDataLength'])\r\n effectiveDataLength = int(result['effectiveDataLength'])\r\n percentContext = result['percentContext']\r\n\r\n # 数据颜色填充\r\n data = {\"拥堵\": (effectiveDataLength, QtGui.QColor(\"#FFFF7F\"), QtGui.QColor(\"#E74856\")),\r\n \"畅通\": (wholeDataLength - effectiveDataLength, QtGui.QColor(\"#7FBF7F\"), QtGui.QColor(\"#0078D4\"))\r\n }\r\n\r\n series = QtChart.QPieSeries()\r\n series.setPieSize(0.7)\r\n\r\n # 设置字体样式\r\n font = QtGui.QFont()\r\n font.setFamily(\"微软雅黑 Light\")\r\n font.setPointSize(14)\r\n\r\n for name, (value, color, borderColor) in data.items():\r\n sliceItem = series.append(name, value)\r\n sliceItem.setBorderColor(borderColor)\r\n sliceItem.setLabel(\"{0}情况\".format(name))\r\n sliceItem.setLabelFont(font)\r\n sliceItem.setLabelVisible(True)\r\n sliceItem.setBrush(color)\r\n\r\n # 创建图表\r\n chart = QtChart.QChart()\r\n 
chart.setBackgroundVisible(False)\r\n chart.addSeries(series)\r\n # 设置标题\r\n chart.setFont(font)\r\n chart.setTitle(\"山西省{0}的道路通行情况通行数据统计\".format(currentCity))\r\n chart.setTitleFont(font)\r\n # 设置标签\r\n chart.legend().setAlignment(QtCore.Qt.AlignBottom)\r\n chart.legend().setFont(font)\r\n\r\n self.trafficSituationAnalysisSystemPhoto.setChart(chart)\r\n\r\n # 设置文字模块\r\n currentContext = self.trafficSituationAnalysisSystemTextEdit.toPlainText()\r\n currentContextList = currentContext.split(\"===================\")\r\n currentContextListLength = len(currentContextList)\r\n # 清空之前的内容\r\n self.trafficSituationAnalysisSystemTextEdit.clear()\r\n # 内容为空,直接显示结果\r\n if currentContext == '':\r\n self.trafficSituationAnalysisSystemTextEdit.setText(\"道路通行分析\\n{0}\".format(percentContext))\r\n # 之前存在数据\r\n elif currentContextListLength > 1:\r\n currentContextList[1] = \"道路通行分析\\n\" + percentContext\r\n currentContext = '==================='.join(currentContextList)\r\n self.trafficSituationAnalysisSystemTextEdit.setText(currentContext)\r\n # 内容不为空,同时打印前面的内容(追加显示)\r\n else:\r\n self.trafficSituationAnalysisSystemTextEdit.append(\r\n \"{0}===================\\n{1}\".format(currentContext, percentContext))\r\n" }, { "alpha_fraction": 0.44499585032463074, "alphanum_fraction": 0.4612627625465393, "avg_line_length": 34.27000045776367, "blob_id": "26a60f0816f034755ff49c87892dc932b5b8f71c", "content_id": "2067097e40a75855ff9f0659d054e8857cfe07b9", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3867, "license_type": "permissive", "max_line_length": 123, "num_lines": 100, "path": "/FundamentalFunctions/IPLocationOperation.py", "repo_name": "Gaoyifei1011/AmapProgram", "src_encoding": "UTF-8", "text": "import inspect\r\nimport re\r\n\r\nfrom AmapFunctions.IPLocation import IPLocation\r\nfrom logrecord.WriteLog import WriteLog\r\n\r\n\r\nclass IPLocationOperation:\r\n \"\"\"\r\n Class:IP地址查询操作\r\n \"\"\"\r\n def 
__init__(self):\r\n self.ip = None\r\n\r\n # 写入日志\r\n writeLog = WriteLog()\r\n class_name = self.__class__.__name__\r\n log_filename = writeLog.create_filename(class_name=class_name)\r\n writeLog.write_to_log(file_name=log_filename, log_level=1, context='Class name:{0} start'.format(class_name))\r\n\r\n def check_ip_formation(self, ip: str\r\n ) -> int:\r\n \"\"\"\r\n 函数:检测用户提供的IP格式是否符合规范要求\r\n Args:\r\n ip: 用户输入的IP地址\r\n Returns:\r\n 检测类型识别码\r\n \"\"\"\r\n\r\n self.ip = ip\r\n\r\n # 写入日志\r\n writeLog = WriteLog()\r\n class_name = self.__class__.__name__\r\n function_name = inspect.stack()[0][3]\r\n log_filename = writeLog.create_filename(class_name=class_name)\r\n\r\n checkedResult = self.ip is None or self.ip == ''\r\n\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=1,\r\n context='Function name:{0} - ip check result 1 :{1}'.format(function_name,\r\n checkedResult)\r\n )\r\n\r\n # 用户输入的IP地址为空\r\n if checkedResult:\r\n return 2\r\n # 匹配 0.0.0.0-255.255.255.255的表达式书写方法\r\n pattern = re.compile(r'(([1-9]?\\d|1\\d\\d|2[0-4]\\d|25[0-5])\\.){3}([1-9]?\\d|1\\d\\d|2[0-4]\\d|25[0-5])')\r\n IPCheckResult = bool(pattern.match(self.ip))\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=1,\r\n context='Function name:{0} - ip check result 2:{1}'.format(function_name,\r\n IPCheckResult)\r\n )\r\n\r\n return IPCheckResult\r\n\r\n def get_ip_information(self, ip: str\r\n ) -> dict:\r\n \"\"\"\r\n 函数:获取IP地址对应的具体信息\r\n Args:\r\n ip:IP地址\r\n Returns:\r\n 返回获取到IP地址对应的具体信息\r\n \"\"\"\r\n\r\n self.ip = ip\r\n\r\n # 写入日志\r\n writeLog = WriteLog()\r\n class_name = self.__class__.__name__\r\n function_name = inspect.stack()[0][3]\r\n log_filename = writeLog.create_filename(class_name=class_name)\r\n\r\n ipLocation = IPLocation()\r\n # 获取的IP原信息(未解析)\r\n resultIPLocation = ipLocation.get_ip_location(self.ip, input_type=4)\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n 
log_level=1,\r\n context='Function name:{0} - resultIPLocation:{1}'.format(function_name,\r\n resultIPLocation)\r\n )\r\n\r\n # 解析后的IP地址信息\r\n resultIPDetailInformation = ipLocation.parse_ip_location(resultIPLocation)\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=1,\r\n context='Function name:{0} - resultIPDetailInformation:{1}'.format(function_name,\r\n resultIPDetailInformation)\r\n )\r\n\r\n return resultIPDetailInformation\r\n" }, { "alpha_fraction": 0.5985915660858154, "alphanum_fraction": 0.6048052906990051, "avg_line_length": 28.174999237060547, "blob_id": "bc92baeb41c6a568f12afa366482db8791875a14", "content_id": "6b177c38ef8bab72d35a5a7a6177a78de1a68f9f", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2874, "license_type": "permissive", "max_line_length": 101, "num_lines": 80, "path": "/SettingsMainWindow.py", "repo_name": "Gaoyifei1011/AmapProgram", "src_encoding": "UTF-8", "text": "import sys\r\n\r\nfrom PyQt5 import QtWidgets\r\n\r\nfrom Window.SettingsUI import Ui_SettingsUI\r\n\r\n\r\n# from LoginMainWindow import LoginMainWindow\r\n# from MainWindow import MainWindow\r\n\r\n\r\nclass SettingsMainWindow(QtWidgets.QMainWindow, Ui_SettingsUI):\r\n \"\"\"\r\n 函数:设置窗口界面函数LoginMainWindow\r\n \"\"\"\r\n\r\n def __init__(self, parent=None):\r\n \"\"\"\r\n 函数:设置窗口界面组件初始化\r\n Args:\r\n parent:arent作为构造函数的最后一个参数被传入,但通常情况下不必显示去指定parent对象。因为当调用局管理器时,部局管理器会自动处理这种parent-child关系。\r\n \"\"\"\r\n\r\n # 对继承自父类的属性进行初始化\r\n super(SettingsMainWindow, self).__init__()\r\n self.setupUi(self)\r\n\r\n # 侧边栏选择条目触发器\r\n self.itemListWidget.itemClicked.connect(self.item_list_widget_clicked)\r\n\r\n # 个人中心退出按钮事件触发器\r\n self.logoutButton.clicked.connect(self.logout_button_clicked)\r\n\r\n # 侧边栏选择条目事件处理01\r\n def item_list_widget_clicked(self):\r\n # 获取当前列表部件中所有选中项的一个列表\r\n selectedItems = self.itemListWidget.selectedItems()\r\n # 设置当前列表部件选中项为None\r\n selectItem = None\r\n # 
# 清除选中的项\r\n # self.itemListWidget.clearSelection()\r\n # 获取当前选中项的名称\r\n for item in selectedItems:\r\n selectItem = item.text().lstrip()\r\n if selectItem == \"个人中心\":\r\n self.itemListWidget.setCurrentRow(0)\r\n self.stackedWidget.setCurrentIndex(0)\r\n elif selectItem == \"静态地图\":\r\n self.itemListWidget.setCurrentRow(1)\r\n self.stackedWidget.setCurrentIndex(1)\r\n elif selectItem == \"天气类型\":\r\n self.itemListWidget.setCurrentRow(2)\r\n self.stackedWidget.setCurrentIndex(2)\r\n elif selectItem == \"详细说明\":\r\n self.itemListWidget.setCurrentRow(3)\r\n self.stackedWidget.setCurrentIndex(3)\r\n elif selectItem == \"参考及引用\":\r\n self.itemListWidget.setCurrentRow(4)\r\n self.stackedWidget.setCurrentIndex(4)\r\n\r\n # 个人中心退出按钮事件触发器\r\n def logout_button_clicked(self):\r\n #\r\n # self.loginMainWindow = LoginMainWindow()\r\n # self.mainWindow = MainWindow()\r\n # # 设置窗口关闭\r\n # self.close()\r\n # # 主窗口关闭\r\n # self.mainWindow.close()\r\n # # 打开登录窗口\r\n # time.sleep(0.5)\r\n # self.loginMainWindow.show()\r\n pass\r\n\r\n\r\nif __name__ == '__main__':\r\n app = QtWidgets.QApplication(sys.argv)\r\n loginWindow = SettingsMainWindow()\r\n loginWindow.show()\r\n sys.exit(app.exec_())\r\n" }, { "alpha_fraction": 0.526755154132843, "alphanum_fraction": 0.5334974527359009, "avg_line_length": 35.829959869384766, "blob_id": "22595dbca27cc74f408b03ae08759584b0a21cb4", "content_id": "66004676ce6d4aa3a938fe4fe5a153a353195519", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 10422, "license_type": "permissive", "max_line_length": 137, "num_lines": 247, "path": "/FundamentalFunctions/TrafficSituationOperation.py", "repo_name": "Gaoyifei1011/AmapProgram", "src_encoding": "UTF-8", "text": "import inspect\r\n\r\n# from math import cos, sin, atan2, sqrt, radians, degrees\r\n# from AmapFunctions.GeographicCoding import GeographicCoding\r\nfrom AmapFunctions.TrafficSituationByBaiduMap import 
TrafficSituationByBaiduMap\r\nfrom logrecord.WriteLog import WriteLog\r\n\r\n\r\nclass TrafficSituationOperation:\r\n \"\"\"\r\n Class:交通信息执具体操作\r\n \"\"\"\r\n def __init__(self):\r\n self.city = None\r\n self.roadName = None\r\n self.position = None\r\n self.bounds = None\r\n self.geographicLocations = None\r\n self.geographicPositionBottomLeft = None\r\n self.geographicPositionTopRight = None\r\n\r\n # 写入日志\r\n writeLog = WriteLog()\r\n class_name = self.__class__.__name__\r\n log_filename = writeLog.create_filename(class_name=class_name)\r\n writeLog.write_to_log(file_name=log_filename, log_level=1, context='Class name:{0} start'.format(class_name))\r\n\r\n def check_real_road_information(self, roadName: str\r\n ) -> int:\r\n \"\"\"\r\n 函数:检测用户提供的道路名称是否符合规范要求\r\n Args:\r\n roadName: 用户输入的道路名称\r\n Returns:\r\n 检测类型识别码\r\n \"\"\"\r\n\r\n self.roadName = roadName\r\n\r\n # 写入日志\r\n writeLog = WriteLog()\r\n class_name = self.__class__.__name__\r\n function_name = inspect.stack()[0][3]\r\n log_filename = writeLog.create_filename(class_name=class_name)\r\n\r\n checkedResult = self.roadName is None or self.roadName == ''\r\n\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=1,\r\n context='Function name:{0} - traffic real road check result:{1}'.format(function_name,\r\n checkedResult)\r\n )\r\n\r\n if checkedResult:\r\n return 2\r\n # TODO:\r\n # 使用python正则表达式验证用户名格式\r\n # 此处检测格式错误返回false\r\n else:\r\n return True\r\n\r\n # 这里有一些异常发生,将在未来的某一个版本进行修复\r\n # There are some exceptions occurring here that will be fixed in a future release\r\n # def checkRectanglePositionInformation(self, position: str):\r\n # \"\"\"\r\n # 函数:检测用户提供的地理位置是否符合规范要求\r\n # Args:\r\n # position: 用户输入的地理位置\r\n # Returns:\r\n # 检测类型识别码\r\n # \"\"\"\r\n #\r\n # self.position = position\r\n #\r\n # if self.position is None or self.position == '':\r\n # return 2\r\n # # TODO:\r\n # # 使用python正则表达式验证用户名格式\r\n # # 此处检测格式错误返回false\r\n # else:\r\n # return 
True\r\n\r\n def check_rectangle_road_information(self, bounds: str\r\n ) -> int:\r\n \"\"\"\r\n 函数:检测用户提供的矩形区域的位置是否符合规范要求\r\n Args:\r\n bounds: 矩形区域的地理位置\r\n Returns:\r\n 检测类型识别码\r\n \"\"\"\r\n\r\n self.bounds = bounds\r\n\r\n if self.bounds is None or self.bounds == '':\r\n return 2\r\n # TODO:\r\n # 使用python正则表达式bounds格式是否正确\r\n # 此处检测格式错误返回false\r\n else:\r\n return True\r\n\r\n def get_traffic_situation_real_road_information(self, city: str,\r\n roadName: str\r\n ) -> list:\r\n \"\"\"\r\n 函数:获取输入的道路名称对应的具体路况信息\r\n Args:\r\n city:城市名称\r\n roadName:道路名称\r\n Returns:\r\n 返回输入的道路名称对应的具体路况信息\r\n \"\"\"\r\n\r\n # TODO:未来版本将返回数据从str升级为dict\r\n self.city = city\r\n self.roadName = roadName\r\n\r\n # 写入日志\r\n writeLog = WriteLog()\r\n class_name = self.__class__.__name__\r\n function_name = inspect.stack()[0][3]\r\n log_filename = writeLog.create_filename(class_name=class_name)\r\n\r\n trafficSituation = TrafficSituationByBaiduMap()\r\n # 获取到的交通态势原信息(未解析)\r\n resultTrafficRealRoadInformation = trafficSituation.get_traffic_situation_by_road(city=self.city,\r\n road_name=roadName)\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=1,\r\n context='Function name:{0} - resultTrafficRealRoadInformation:{1}'.format(function_name,\r\n resultTrafficRealRoadInformation)\r\n )\r\n\r\n # 对获取的数据进行解析\r\n resultTrafficRealRoadDetailInformation = trafficSituation.parse_traffic_situation(\r\n resultTrafficRealRoadInformation)\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=1,\r\n context='Function name:{0} - resultTrafficRealRoadDetailInformation:{1}'.format(\r\n function_name,\r\n resultTrafficRealRoadDetailInformation)\r\n )\r\n\r\n return resultTrafficRealRoadDetailInformation\r\n\r\n # def getCenterGeographicPosition(self, geographicLocations: list):\r\n # \"\"\"\r\n # 函数:获取多边形地理坐标的中心点\r\n # Args:\r\n # geographicLocations:用户输入的多个地理位置\r\n # Returns:\r\n # 中心点对应的地理位置\r\n # \"\"\"\r\n #\r\n # 
self.geographicLocations = geographicLocations\r\n #\r\n # x = 0\r\n # y = 0\r\n # z = 0\r\n # length = len(self.geographicLocations)\r\n #\r\n # for lon, lat in self.geographicLocations:\r\n # lon = radians(float(lon))\r\n # lat = radians(float(lat))\r\n # x += cos(lat) * cos(lon)\r\n # y += cos(lat) * sin(lon)\r\n # z += sin(lat)\r\n #\r\n # x = float(x / length)\r\n # y = float(y / length)\r\n # z = float(z / length)\r\n #\r\n # return degrees(atan2(y, x)), degrees(atan2(z, sqrt(x * x + y * y)))\r\n #\r\n # def getGeographicCodingPosition(self, position: str\r\n # ) -> str:\r\n # \"\"\"\r\n # 函数:获取中文名称地点对应的地理位置信息(高德地图)\r\n # Args:\r\n # position: 中文位置名称\r\n # Returns: 高德地图对应地点地理位置信息\r\n # \"\"\"\r\n #\r\n # self.position = position\r\n #\r\n # geographicCoding = GeographicCoding()\r\n # positionJsonDecode = geographicCoding.get_geographic_coding(address=self.position,\r\n # city='')\r\n # parsePositionInformation = geographicCoding.parse_geographic_coding(positionJsonDecode)\r\n #\r\n # # 地理位置编码\r\n # if 'error_context' not in parsePositionInformation:\r\n # resultPositionGeographicCoding = parsePositionInformation['geographic_position']\r\n # return resultPositionGeographicCoding\r\n #\r\n # else:\r\n # return \"1\"\r\n\r\n # 这里有一些异常发生,将在未来的某一个版本进行修复\r\n # There are some exceptions occurring here that will be fixed in a future release\r\n # def getTrafficSituationRectangleRoadInformation(self, geographicPositionBottomLeft: str,\r\n # geographicPositionTopRight: str,\r\n # roadGrade: int\r\n # ) -> list:\r\n # \"\"\"\r\n # 函数:获取输入的矩形区域的地理位置对应的具体路况信息\r\n # Args:\r\n # geographicPositionBottomLeft:矩形区域的左下角地理位置\r\n # geographicPositionTopRight:矩形区域的右上角地理位置\r\n # roadGrade:道路等级\r\n # Returns:\r\n # 返回输入的矩形区域的地理位置对应的具体路况信息\r\n # \"\"\"\r\n #\r\n # self.geographicPositionBottomLeft = geographicPositionBottomLeft\r\n # self.geographicPositionTopRight = geographicPositionTopRight\r\n # self.roadGrade = roadGrade\r\n #\r\n # geographicPositionList = 
[self.geographicPositionBottomLeft, self.geographicPositionTopRight]\r\n # reversedGeographicPositionList = []\r\n #\r\n # comparingPositionPositionBottomLeft = self.geographicPositionBottomLeft.split(',')\r\n # comparingPositionPositionTopRight = self.geographicPositionTopRight.split(',')\r\n #\r\n # if eval(comparingPositionPositionBottomLeft[0]) > eval(comparingPositionPositionTopRight[0]):\r\n # geographicPositionList = list(reversed(geographicPositionList))\r\n #\r\n # for item in geographicPositionList:\r\n # reverseList = item.split(',')\r\n # reversedList = list(reversed(reverseList))\r\n # reversedGeographicPositionList.append(','.join(reversedList))\r\n #\r\n # autonaviBounds = ';'.join(reversedGeographicPositionList)\r\n # # autonaviBounds = \"39.912078,116.464303;39.918276,116.475442\"\r\n #\r\n # # 使用百度地图API进行矩形区域查询\r\n # trafficSituation = TrafficSituationByBaiduMap()\r\n # # 获取到的交通态势原信息(未解析)\r\n # resultTrafficRectangleRoadInformation = trafficSituation.get_traffic_situation_by_rectangle(\r\n # bounds=autonaviBounds, road_grade=self.roadGrade, coord_type_input=\"gcj02\")\r\n # resultTrafficRectangleRoadDetailInformation = trafficSituation.parse_traffic_situation(\r\n # resultTrafficRectangleRoadInformation)\r\n # return resultTrafficRectangleRoadDetailInformation\r\n" }, { "alpha_fraction": 0.44492843747138977, "alphanum_fraction": 0.4511512219905853, "avg_line_length": 48.21875, "blob_id": "c5cc29b0edb9d697473714c1be8fc18a07f2b5e7", "content_id": "52bd30a43d5d07e679573e35bd09e34c7685b0c3", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1805, "license_type": "permissive", "max_line_length": 93, "num_lines": 32, "path": "/running/runInputPrompt.py", "repo_name": "Gaoyifei1011/AmapProgram", "src_encoding": "UTF-8", "text": "from AmapFunctions.InputPrompt import *\r\n\r\nif __name__ == '__main__':\r\n\r\n datatype_dict = {'1': 'all', '2': 'poi', '3': 'bus', '4': 'busline', '': 
'all'}\r\n\r\n keywords = input(\"请输入您要搜索的关键字:\")\r\n input_type = input(\"请输入您要查询POI类型(可选):\")\r\n location = input(\"请输入您要查询的经纬度值(可选):\")\r\n city = input(\"请输入您要查询的城市(可选):\")\r\n data_type = input(\"请输入您要获取的数据类型,默认、1或2为全部,3为公交站点,4为公交线路:\")\r\n\r\n inputprompt = InputPrompt()\r\n # 空值,默认为全部\r\n if datatype_dict[data_type] == '':\r\n # print(\"1\")\r\n # print(datatype_dict[data_type])\r\n result_input_prompt = inputprompt.get_input_prompt(keywords=keywords,\r\n input_type=input_type,\r\n location=location,\r\n city=city,\r\n datatype=datatype_dict[data_type])\r\n inputprompt.parse_input_prompt(result_input_prompt, datatype_dict[data_type])\r\n else:\r\n # print(\"2\")\r\n # print(datatype_dict[data_type])\r\n result_input_prompt = inputprompt.get_input_prompt(keywords=keywords,\r\n input_type=input_type,\r\n location=location,\r\n city=city,\r\n datatype=datatype_dict[data_type])\r\n inputprompt.parse_input_prompt(result_input_prompt, datatype_dict[data_type])\r\n" }, { "alpha_fraction": 0.3941541910171509, "alphanum_fraction": 0.40043842792510986, "avg_line_length": 45.35293960571289, "blob_id": "536ddfba5ddb0ada772be9c79834a152624498b5", "content_id": "79b561571e190381835eb4a3a01038316ef6f067", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 14533, "license_type": "permissive", "max_line_length": 117, "num_lines": 289, "path": "/AmapFunctions/IPLocation.py", "repo_name": "Gaoyifei1011/AmapProgram", "src_encoding": "UTF-8", "text": "# -*- coding:utf-8 -*-\r\n# 导入的库\r\nimport inspect\r\nimport json\r\nimport time\r\n\r\nimport requests\r\n\r\nfrom SelfExpection.CustomExpection import CustomExpection\r\nfrom SelfExpection.OfficialException import OfficialException\r\nfrom logrecord.WriteLog import WriteLog\r\n\r\n\r\nclass IPLocation:\r\n \"\"\"\r\n Class:IP定位\r\n IP定位是一个简单的HTTP接口,根据用户输入的IP地址,能够快速的帮用户定位IP的所在位置。\r\n \"\"\"\r\n\r\n def __init__(self) -> None:\r\n self.input_type = None\r\n self.ip = 
None\r\n self.json_decode = None\r\n\r\n # 写入日志\r\n writeLog = WriteLog()\r\n class_name = self.__class__.__name__\r\n log_filename = writeLog.create_filename(class_name=class_name)\r\n writeLog.write_to_log(file_name=log_filename, log_level=1, context='Class name:{0} start'.format(class_name))\r\n\r\n # 获取高德地图数据API的钥匙\r\n APIkey = '<请自己输入自己申请的API Key>'\r\n\r\n def get_ip_location(self, ip: str,\r\n input_type: int\r\n ) -> dict:\r\n \"\"\"\r\n 函数:IP地址查询数据。\\n\r\n Args:\r\n ip:需要搜索的IP地址(仅支持国内),必填。若用户不填写IP,则取客户http之中的请求来进行定位\r\n input_type:IP类型,可选值4:ipv4,6:ipv6,必填。\r\n Returns:返回获得的json格式数据或错误信息\r\n \"\"\"\r\n\r\n self.ip = ip\r\n self.input_type = input_type\r\n\r\n # 写入日志\r\n writeLog = WriteLog()\r\n class_name = self.__class__.__name__\r\n function_name = inspect.stack()[0][3]\r\n log_filename = writeLog.create_filename(class_name=class_name)\r\n\r\n # 传入参数\r\n parameters = {'key': self.APIkey,\r\n 'ip': self.ip,\r\n 'type': self.input_type\r\n }\r\n\r\n # 获取数据\r\n try:\r\n request_information = requests.get(\"https://restapi.amap.com/v5/ip?parameters\",\r\n params=parameters)\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=1,\r\n context='Function name:{0} - request_information:{1}'.format(function_name,\r\n request_information)\r\n )\r\n request_information.close() # 关闭访问\r\n request_information.raise_for_status() # 如果响应状态码不是 200,就主动抛出异常\r\n # 返回格式化后的JSON数据\r\n json_decode = json.loads(request_information.text)\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=6,\r\n context='Function name:{0} - IP Location data successful get.'.format(\r\n function_name)\r\n )\r\n return json_decode\r\n\r\n except requests.exceptions.ConnectionError as e:\r\n time.sleep(1)\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=5,\r\n context='Function name:{0} - {1} has occured.'.format(function_name,\r\n e.__class__.__name__)\r\n )\r\n # 异常信息\r\n error_connection 
= 'ConnectionError -- please wait 3 seconds'\r\n error_connection_dict = {'status': '2',\r\n 'info': 'requests.exceptions.ConnectionError',\r\n 'detail_information': requests.exceptions.ConnectionError,\r\n 'error_prompt': error_connection\r\n }\r\n return error_connection_dict\r\n\r\n except requests.exceptions.ChunkedEncodingError as e:\r\n time.sleep(1)\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=5,\r\n context='Function name:{0} - {1} has occured.'.format(function_name,\r\n e.__class__.__name__\r\n )\r\n )\r\n # 异常信息\r\n error_chuck_encoding = 'ChunkedEncodingError -- please wait 3 seconds'\r\n error_chuck_encoding_dict = {'status': '2',\r\n 'info': 'HTTPError',\r\n 'detail_information': requests.exceptions.ChunkedEncodingError,\r\n 'error_prompt': error_chuck_encoding\r\n }\r\n return error_chuck_encoding_dict\r\n\r\n except Exception as e:\r\n time.sleep(1)\r\n error_information = 'Unfortunately -- An unknown Error Happened, Please wait 3 seconds'\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=5,\r\n context='Function name:{0} - {1} has occured.'.format(function_name,\r\n e.__class__.__name__)\r\n )\r\n # 异常信息\r\n error_information_dict = {'status': '2',\r\n 'info': 'HTTPError',\r\n 'detail_information': requests.exceptions.ChunkedEncodingError,\r\n 'error_prompt': error_information\r\n }\r\n return error_information_dict\r\n\r\n def parse_ip_location(self, json_decode: dict\r\n ) -> dict:\r\n \"\"\"\r\n 函数:解析IP地址查询数据。\r\n Args:\r\n json_decode:get_ip_location()方法从网络中获取的数据\r\n Returns:\r\n 返回获取到的IP地址信息\r\n \"\"\"\r\n\r\n self.json_decode = json_decode\r\n\r\n # 输出结果\r\n resultContext = {}\r\n\r\n # 写入日志\r\n writeLog = WriteLog()\r\n class_name = self.__class__.__name__\r\n function_name = inspect.stack()[0][3]\r\n log_filename = writeLog.create_filename(class_name=class_name)\r\n\r\n cityList = ['北京市', '上海市', '天津市', '重庆市']\r\n\r\n try:\r\n if self.json_decode['info'] != 
'OK':\r\n # 官方文档异常\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=3,\r\n context='Function name:{0} - status:{1}'.format(function_name,\r\n self.json_decode['status'])\r\n )\r\n raise OfficialException\r\n\r\n elif self.json_decode['status'] == '2':\r\n # 自定义异常\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=3,\r\n context='Function name:{0} - status:{1}'.format(function_name,\r\n self.json_decode['status'])\r\n )\r\n raise CustomExpection\r\n\r\n elif self.json_decode['status'] == '0':\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=6,\r\n context='Function name:{0} - status:{1}'.format(function_name,\r\n self.json_decode['status'])\r\n )\r\n\r\n # 国家、省份、城市、区县\r\n country = self.json_decode['country']\r\n province = self.json_decode['province']\r\n city = self.json_decode['city']\r\n district = self.json_decode['district']\r\n\r\n # 运营商、IP地址\r\n isp = self.json_decode['isp']\r\n ip = self.json_decode['ip']\r\n\r\n # 解析IP地址信息\r\n if province:\r\n for item in cityList:\r\n # 省份为直辖市\r\n if item == province:\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=1,\r\n context='Function name:{0} - province:{1}'.format(\r\n function_name,\r\n province)\r\n )\r\n\r\n netWorkInformation = \"您当前的网络所位于的地区是{0}{1}\".format(city, district)\r\n ispInformation = \"您当前使用的网络提供的运营商是{0}{1}\".format(country, isp)\r\n ipInformation = \"您查询的IP地址是{0}\".format(ip)\r\n\r\n resultContext['netWorkInformation'] = netWorkInformation\r\n resultContext['ispInformation'] = ispInformation\r\n resultContext['ipInformation'] = ipInformation\r\n\r\n return resultContext\r\n\r\n # 其他地区\r\n else:\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=1,\r\n context='Function name:{0} - province:{1}'.format(function_name,\r\n province)\r\n )\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=1,\r\n 
context='Function name:{0} - city:{1}'.format(function_name,\r\n city)\r\n )\r\n\r\n netWorkInformation = \"您当前的网络所处于的地区是{0}{1}{2}\".format(province, city, district)\r\n ispInformation = \"您当前使用的网络提供的运营商是{0}{1}\".format(country, isp)\r\n ipInformation = \"您查询的IP地址是{0}\".format(ip)\r\n\r\n resultContext['netWorkInformation'] = netWorkInformation\r\n resultContext['ispInformation'] = ispInformation\r\n resultContext['ipInformation'] = ipInformation\r\n\r\n return resultContext\r\n\r\n except OfficialException as officialException:\r\n # 获得的错误信息\r\n errcode, errorInfo, solution = officialException.get_error_info(json_decode)\r\n # 打印到日志文件中\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=3,\r\n context='Function name:{0} - errcode:{1}'.format(function_name,\r\n errcode)\r\n )\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=3,\r\n context='Function name:{0} - errorInfo:{1}'.format(function_name,\r\n errorInfo)\r\n )\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=3,\r\n context='Function name:{0} - solution:{1}'.format(function_name,\r\n solution)\r\n )\r\n\r\n context = \"IP地址信息查询失败,换个地址进行搜索吧\"\r\n resultContext['error_context'] = context\r\n return resultContext\r\n\r\n except CustomExpection as customException:\r\n info, detail_information, error_prompt = customException.get_error_info(json_decode)\r\n # 打印到日志文件中\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=3,\r\n context='Function name:{0} - info:{1}'.format(function_name,\r\n info)\r\n )\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=3,\r\n context='Function name:{0} - detail_information:{1}'.format(function_name,\r\n detail_information)\r\n )\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=3,\r\n context='error_prompt:{0}'.format(error_prompt)\r\n )\r\n\r\n context = \"IP地址信息查询失败,换个地址进行搜索吧\"\r\n resultContext['error_context'] = context\r\n return resultContext\r\n" 
}, { "alpha_fraction": 0.7867063283920288, "alphanum_fraction": 0.8105158805847168, "avg_line_length": 37.769229888916016, "blob_id": "ba6d45cf98840d4f2cb942eee88abbe5d14dd803", "content_id": "0dbc8531b884305f18da42927fd1dfd81681cece", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 5096, "license_type": "permissive", "max_line_length": 156, "num_lines": 78, "path": "/README.md", "repo_name": "Gaoyifei1011/AmapProgram", "src_encoding": "UTF-8", "text": "<div align=center><img alt=\"AmapProgram Logo\" src=\"https://github.com/Gaoyifei1011/AmapProgram/blob/main/Resources/Icon/RoutePlanningLogo.png\"/></div>\n\n# <p align=\"center\">AmapProgram</p>\n<p align=\"center\">使用高德地图Web API制作的小程序</p>\n\n## 关于(About)\n\n**AmapProgram**项目是一个基于高德地图Web API实现的一个可视化的小程序。前端使用Python PyQt5框架实现,后台基于高德地图的Web API。已经在Windows 10平台上良好运行。<br>\n\n这个项目是为了完成我的毕业设计所用的。且由于我是一个初学者,Python学习的深度并没有那么深厚,所以代码中可能存在一些混乱,命名规则没有遵守工程化,请大家多多包涵。\n\n## 介绍(Introduce)\n\n### 程序使用(Program USES)\n\n#### 程序打开方式(Program Opening Ways)\n目前提供了已经编译好的预览版的直接运行文件,当然你也可以自己克隆代码使用Python运行<br>\n1.下载并运行[Release](https://github.com/Gaoyifei1011/AmapProgram/releases)目录里面已经打包好的的AmapProgram.exe。<br>\n2.使用python运行源代码里面的main.py,需要自己在[高德开放平台](https://lbs.amap.com/)和[百度地图开放平台](https://lbsyun.baidu.com/)申请自己所需要的Key,找到代码中的APIKey属性和sk属性对应的值进行替换,具体申请步骤请自行查询。<br>\n在AmapProgram目录下打开cmd或powershell,输入以下内容(确保电脑已经安装Python3和必备的第三方库):\n```bash\npython main.py\n```\n\n#### 程序主界面(Program Main Interface)\n运行这个主程序后需要进行登录(未来设计是会进行链接数据库的,但是由于时间的问题,没有进行数据库的开发),以及设计应用的时候没有添加注册用户的模块,所以复制完文件后默认使用root用户进行登录,进入到主界面。<br>\n<div align=center><img alt=\"LoginMainWindow\" src=\"https://github.com/Gaoyifei1011/AmapProgram/blob/main/ScreenShots/LoginMainWindow.png\"/></div>\n<p align=\"center\">登录界面窗口显示</p><br>\n\n<div align=center><img alt=\"MainWindow\" src=\"https://github.com/Gaoyifei1011/AmapProgram/blob/main/ScreenShots/MainWindow.png\"/></div>\n<p align=\"center\">主界面窗口显示</p>\n\n### 
程序开发运行环境(Program Development and Runtime Environment)\n\n#### 开发环境(Development Environment)\nWindows 10 Build 21382(Dev Channel) / Windows 10 Build 19041(Stable Channel)<br>\nJetbarins Pycharm 2021.1 + (Python3.9 / Python 3.8 / Python 3.7)<br>\n默认推荐使用Windows 10 19041和Python3.9版本。\n\n#### 第三方库(Third-party Libraries)\n源代码运行程序前必须要安装Python的第三方库依赖(安装第三方库建议使用国内的镜像源,用以加快速度)<br>\n```bash\npip install apscheduler\npip install inspect\npip install loguru\npip install matplotlib\npip install numpy\npip install pandas\npip install pathlib\npip install pillow\npip install PyQt5\npip install pyqtchart\npip install requests\npip install urllib.request\npip install xlrd\npip install xlutils\npip install xlwt\n```\n这些第三方库是我目前已经了解到的必须会使用到的第三方库,可能有一部分库没有使用到,建议根据运行程序出现错误的原因网上搜索对应的第三方库的安装,就可以顺利运行了。\n\n#### 其他说明(Other Instructions)\n\n1.应用所有数据来源均来自高德地图。<br>\n2.原来的开发进度是使用高德地图的API来实现所有功能的,但是由于高德地图关于实时路况的API没有开放。所以暂时使用百度地图的实时路况的API数据来实现这一功能。\n\n## 参考及引用(Reference & Quotation)\n1.开发参考:[高德地图Web API文档](https://lbs.amap.com/api/webservice/summary/),[百度地图Web API文档](https://lbsyun.baidu.com/index.php?title=webapi)。<br>\n2.图片库参考:Windows Fluent UI Photo Library、小爱同学、高德地图。\n\n## 注意(Attention)\n\n1.由于在构建应用之初没有对应用的登录进行一个良好的设计,需要将Release中的AmapAccount压缩包里面的Account文件夹复制到%localAppdata%目录下。<br>\n2.导入后用户名和密码都是root就可以进入主界面了。<br>\n3.由于在构建项目中仅仅对一部分内容进行了日志记录和异常处理,所以可能在运行过程中会发生闪退现象。<br>\n4.我本人要进行考研的复习,这个问题有时间以后在进行处理吧,如果发现任何Bug,请在Github仓库中创建一个新的[Issue](https://github.com/Gaoyifei1011/AmapProgram/issues),我会尽可能使用我的闲余时间进行回答的。<br>\n5.未来有时间会再制作一个英文的Readme.md的。<br>\n6.本项目目前仅用于学习和交流使用,如果涉及到商业用途发生一切问题,本人不承担任何责任,如果涉及到侵权的内容,请尽快联系我本人,我会尽快删除。<br>\n7.有喜欢这个项目的其他童鞋欢迎点击一下star,感谢你们的支持与认可。<br>\n" }, { "alpha_fraction": 0.5241864323616028, "alphanum_fraction": 0.5281442403793335, "avg_line_length": 26.78481101989746, "blob_id": "a4e8e0954928ae3b702071eaad46e8663180be9d", "content_id": "706fd3467369e46bdb26656232677302a77ac8cf", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", 
"length_bytes": 2470, "license_type": "permissive", "max_line_length": 125, "num_lines": 79, "path": "/Window/MessageBoxUI.py", "repo_name": "Gaoyifei1011/AmapProgram", "src_encoding": "UTF-8", "text": "from typing import Any\r\n\r\nfrom PyQt5.QtGui import QFont, QIcon\r\nfrom PyQt5.QtWidgets import QMessageBox\r\n\r\n\r\nclass SelfMessageBox:\r\n \"\"\"\r\n Class:自定义QMessageBox(修改)\r\n \"\"\"\r\n def __init__(self):\r\n super(SelfMessageBox, self).__init__()\r\n\r\n def selfDefineFont(self, fontName: str = \"微软雅黑 Light\",\r\n fontSize: int = 12\r\n ) -> QFont():\r\n \"\"\"\r\n 函数:自定义字体样式\r\n Args:\r\n fontName: 字体名称\r\n fontSize: 字体大小\r\n Returns:\r\n font: 需要的字体类型\r\n \"\"\"\r\n\r\n self.fontName = fontName\r\n self.fontSize = fontSize\r\n\r\n font = QFont()\r\n # 设置字体类型\r\n font.setFamily(self.fontName)\r\n # 设置字体大小\r\n font.setPointSize(self.fontSize)\r\n return font\r\n\r\n def messageLevel(self, level: int\r\n ) -> 'QMessageBox.Icon':\r\n \"\"\"\r\n 函数:对话框消息等级\r\n Args:\r\n level: 消息等级\r\n Returns:\r\n QMessageBox对应的消息等级\r\n \"\"\"\r\n\r\n self.level = level\r\n\r\n if self.level == 1:\r\n return QMessageBox.Information\r\n elif self.level == 2:\r\n return QMessageBox.Question\r\n elif self.level == 3:\r\n return QMessageBox.Warning\r\n elif self.level == 4:\r\n return QMessageBox.Critical\r\n elif self.level == 5:\r\n return QMessageBox.About\r\n\r\n def initUI(self, MainWindow,\r\n **parameters: [str, Any]\r\n ) -> None:\r\n \"\"\"\r\n 函数:窗口界面初始化\r\n Args:\r\n MainWindow:主窗口界面实例\r\n **parameters:消息框对应的其他参数\r\n \"\"\"\r\n\r\n self.MainWindow = MainWindow\r\n self.parameters = parameters\r\n\r\n if self.parameters['level'] and self.parameters['title'] and self.parameters['information']:\r\n self.messageBox = QMessageBox(self.parameters['level'], self.parameters['title'], self.parameters['information'])\r\n if self.parameters['icon']:\r\n self.messageBox.setWindowIcon(QIcon(self.parameters['icon']))\r\n if self.parameters['font']:\r\n 
self.messageBox.setFont(self.parameters['font'])\r\n self.messageBox.addButton(self.MainWindow.tr(\"确定\"), QMessageBox.YesRole)\r\n self.messageBox.exec_()\r\n" }, { "alpha_fraction": 0.3897198736667633, "alphanum_fraction": 0.39627963304519653, "avg_line_length": 48.0095100402832, "blob_id": "f6c7b6da5a0d8cc09e2f9cc8ba0f40bba8bd017d", "content_id": "ce348970688385f43caa2da6afef7a2e7ea9bf77", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 34088, "license_type": "permissive", "max_line_length": 140, "num_lines": 631, "path": "/AmapFunctions/AdministrativeDistrictEnquiry.py", "repo_name": "Gaoyifei1011/AmapProgram", "src_encoding": "UTF-8", "text": "# -*- coding:utf-8 -*-\r\n# 导入的库\r\nimport inspect\r\nimport json\r\nimport time\r\nfrom typing import Any\r\n\r\nimport requests\r\n\r\nfrom SelfExpection.CustomExpection import CustomExpection\r\nfrom SelfExpection.OfficialException import OfficialException\r\nfrom logrecord.WriteLog import WriteLog\r\n\r\n\r\nclass AdministrativeDistrictEnquiry:\r\n \"\"\"\r\n Class:行政区域查询\r\n 行政区域查询是一类简单的HTTP接口,根据用户输入的搜索条件可以帮助用户快速的查找特定的行政区域信息。\r\n \"\"\"\r\n\r\n def __init__(self) -> None:\r\n self.district = None\r\n self.district_level = None\r\n self.extensions = None\r\n self.filter = None\r\n self.global_sub_district_value = None\r\n self.json_decode = None\r\n self.keywords = None\r\n self.offset = None\r\n self.output = None\r\n self.page = None\r\n self.sub_district = None\r\n self.sub_district_value = None\r\n self.global_sub_district_value = None\r\n\r\n # 写入日志\r\n writeLog = WriteLog()\r\n class_name = self.__class__.__name__\r\n log_filename = writeLog.create_filename(class_name=class_name)\r\n writeLog.write_to_log(file_name=log_filename, log_level=1, context='Class name:{0} start'.format(class_name))\r\n\r\n # 获取高德地图数据API的钥匙\r\n APIkey = '<请自己输入自己申请的API Key>'\r\n\r\n def get_administrative_district(self, keywords: str,\r\n sub_district: int,\r\n **kwargs: 
dict[str, Any]\r\n ) -> dict:\r\n \"\"\"\r\n 函数:行政区域查询数据。\\n\r\n Args:\r\n keywords:查询关键字,可选。规则:只支持单个关键词语搜索关键词支持:行政区名称、citycode、adcode。例如,在subdistrict=2,搜索省份(例如山东),能够显示市(例如济南),区(例如历下区)。adcode信息可参考城市编码表获取\r\n sub_district:子级行政区,可选。规则:设置显示下级行政区级数(行政区级别包括:国家、省/直辖市、市、区/县、乡镇/街道多级数据)。可选值:0、1、2、3等数字,并以此类推\r\n 0:不返回下级行政区;1:返回下一级行政区;2:返回下两级行政区;3:返回下三级行政区。\r\n 需要在此特殊说明,目前部分城市和省直辖县因为没有区县的概念,故在市级下方直接显示街道。例如:广东-东莞、海南-文昌市\r\n kwargs:\r\n page:需要第几页数据,可选。最外层的districts最多会返回20个数据,若超过限制,请用page请求下一页数据。例如page=2;page=3。默认page=1\r\n offset:最外层返回数据个数,可选。\r\n extensions:返回结果控制,可选。此项控制行政区信息中返回行政区边界坐标点; 可选值:base、all;base:不返回行政区边界坐标点;all:只返回当前查询district的边界值,不返回子节点的边界值;\r\n 目前不能返回乡镇/街道级别的边界值。\r\n filter:根据区划过滤,可选。按照指定行政区划进行过滤,填入后则只返回该省/直辖市信息。填入adcode,为了保证数据的正确,强烈建议填入此参数\r\n output:返回数据格式类型,可选。可选值:JSON,XML。\r\n \"\"\"\r\n\r\n self.keywords = keywords\r\n self.sub_district = sub_district\r\n\r\n if 'extensions' in kwargs:\r\n self.extensions = kwargs['extensions']\r\n if 'filter' in kwargs:\r\n self.filter = kwargs['filter']\r\n if 'output' in kwargs:\r\n self.output = kwargs['output']\r\n if 'offset' in kwargs:\r\n self.offset = kwargs['offset']\r\n if 'page' in kwargs:\r\n self.page = kwargs['page']\r\n\r\n # 写入日志\r\n writeLog = WriteLog()\r\n class_name = self.__class__.__name__\r\n function_name = inspect.stack()[0][3]\r\n log_filename = writeLog.create_filename(class_name=class_name)\r\n\r\n # 传入参数\r\n parameters = {'key': self.APIkey,\r\n 'keywords': self.keywords,\r\n 'subdistrict': self.sub_district,\r\n }\r\n\r\n if self.extensions is not None:\r\n parameters.update(extensions=self.extensions)\r\n if self.filter is not None:\r\n parameters.update(filter=self.filter)\r\n if self.output is not None:\r\n parameters.update(output=self.output)\r\n if self.offset is not None:\r\n parameters.update(offset=self.offset)\r\n if self.page is not None:\r\n parameters.update(page=self.page)\r\n\r\n # 获取数据\r\n try:\r\n request_information = 
requests.get(\"https://restapi.amap.com/v3/config/district?parameters\",\r\n params=parameters)\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=1,\r\n context='Function name:{0} - request_information:{1}'.format(function_name,\r\n request_information)\r\n )\r\n request_information.close() # 关闭访问\r\n request_information.raise_for_status() # 如果响应状态码不是 200,就主动抛出异常\r\n # 返回格式化后的JSON数据\r\n json_decode = json.loads(request_information.text)\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=6,\r\n context='Function name:{0} - Administrative district data successful get.'.format(\r\n function_name)\r\n )\r\n return json_decode\r\n\r\n except requests.exceptions.ConnectionError as e:\r\n time.sleep(1)\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=5,\r\n context='Function name:{0} - {1} has occured.'.format(function_name,\r\n e.__class__.__name__)\r\n )\r\n\r\n # 异常信息\r\n error_connection = 'ConnectionError -- please wait 3 seconds'\r\n error_connection_dict = {'status': '2',\r\n 'info': 'requests.exceptions.ConnectionError',\r\n 'detail_information': requests.exceptions.ConnectionError,\r\n 'error_prompt': error_connection\r\n }\r\n\r\n return error_connection_dict\r\n\r\n except requests.exceptions.ChunkedEncodingError as e:\r\n time.sleep(1)\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=5,\r\n context='Function name:{0} - {1} has occured.'.format(function_name,\r\n e.__class__.__name__\r\n )\r\n )\r\n\r\n # 异常信息\r\n error_chuck_encoding = 'ChunkedEncodingError -- please wait 3 seconds'\r\n error_chuck_encoding_dict = {'status': '2',\r\n 'info': 'HTTPError',\r\n 'detail_information': requests.exceptions.ChunkedEncodingError,\r\n 'error_prompt': error_chuck_encoding\r\n }\r\n\r\n return error_chuck_encoding_dict\r\n\r\n except Exception as e:\r\n time.sleep(1)\r\n error_information = 'Unfortunately -- An 
unknown Error Happened, Please wait 3 seconds'\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=5,\r\n context='Function name:{0} - {1} has occured.'.format(function_name,\r\n e.__class__.__name__)\r\n )\r\n\r\n # 异常信息\r\n error_information_dict = {'status': '2',\r\n 'info': 'HTTPError',\r\n 'detail_information': requests.exceptions.ChunkedEncodingError,\r\n 'error_prompt': error_information\r\n }\r\n\r\n return error_information_dict\r\n\r\n def parse_administrative_district(self, json_decode: dict,\r\n sub_district: int\r\n ) -> list:\r\n \"\"\"\r\n 函数:解析行政区域查询数据。\r\n Args:\r\n json_decode:get_administrative_district()方法从网络中获取的数据\r\n sub_district:返回的下几级行政区域的标志\r\n \"\"\"\r\n\r\n # TODO:未来版本将返回数据从list升级为dict\r\n self.json_decode = json_decode\r\n self.sub_district = sub_district\r\n\r\n # 输出结果\r\n resultContext = []\r\n\r\n # 写入日志\r\n writeLog = WriteLog()\r\n class_name = self.__class__.__name__\r\n function_name = inspect.stack()[0][3]\r\n log_filename = writeLog.create_filename(class_name=class_name)\r\n\r\n try:\r\n if self.json_decode['status'] == '0':\r\n # 官方文档异常\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=3,\r\n context='Function name:{0} - status:{1}'.format(function_name,\r\n self.json_decode['status'])\r\n )\r\n raise OfficialException\r\n\r\n elif self.json_decode['status'] == '2':\r\n # 自定义异常\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=3,\r\n context='Function name:{0} - status:{1}'.format(function_name,\r\n self.json_decode['status'])\r\n )\r\n raise CustomExpection\r\n\r\n elif self.json_decode['status'] == '1':\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=6,\r\n context='Function name:{0} - status:{1}'.format(function_name,\r\n self.json_decode['status'])\r\n )\r\n if self.json_decode['infocode'] == \"10000\": # 请求数据成功的状态码\r\n # only for debugging\r\n 
writeLog.write_to_log(file_name=log_filename,\r\n log_level=6,\r\n context='Function name:{0} - infocode:{1}'.format(function_name,\r\n self.json_decode[\r\n 'infocode'])\r\n )\r\n district_level = {'country': '国',\r\n 'province': '省',\r\n 'city': '市',\r\n 'district': '区/县级市/县',\r\n 'street': '街道/镇/乡'\r\n }\r\n\r\n # 请求结果\r\n keywords_count = self.json_decode['count']\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=1,\r\n context='Function name:{0} - keywords count:{1}'.format(function_name,\r\n keywords_count)\r\n )\r\n resultContext.append(\"根据您提供的关键字已为您查找到{0}个结果\".format(keywords_count))\r\n\r\n # 行政区域数目\r\n districts = self.json_decode['districts']\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=6,\r\n context='Function name:{0} - districts acquired successfully'.format(\r\n function_name)\r\n )\r\n\r\n # 输出行政区信息\r\n sub_district_value = self.sub_district\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=1,\r\n context='Function name:{0} - sub_district_value:{1}'.format(function_name,\r\n sub_district_value)\r\n )\r\n\r\n global_sub = self.sub_district\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename, log_level=1,\r\n context='Function name:{0} - global_sub:{1}'.format(function_name,\r\n global_sub)\r\n )\r\n\r\n if districts and sub_district_value >= 0: # 里面的信息不为空\r\n for district in districts:\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=1,\r\n context='Function name:{0} - {1}'.format(function_name,\r\n self.print_subdistrict.__name__\r\n )\r\n )\r\n context = self.print_subdistrict(district, sub_district_value - 1, district_level,\r\n global_sub)\r\n resultContext.extend(context)\r\n\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=6,\r\n context='Function name:{0} - print district successful run.'.format(function_name)\r\n )\r\n 
return resultContext\r\n\r\n except OfficialException as officialException:\r\n # 获得的错误信息\r\n errcode, errorInfo, solution = officialException.get_error_info(self.json_decode)\r\n # 打印到日志文件中\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=3,\r\n context='Function name:{0} - errcode:{1}'.format(function_name,\r\n errcode)\r\n )\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=3,\r\n context='Function name:{0} - errorInfo:{1}'.format(function_name,\r\n errorInfo)\r\n )\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=3,\r\n context='Function name:{0} - solution:{1}'.format(function_name,\r\n solution)\r\n )\r\n\r\n resultContext.append(errorInfo)\r\n context = \"行政区域信息查询失败,换个词进行搜索吧\"\r\n resultContext.append(context)\r\n return resultContext\r\n\r\n except CustomExpection as customException:\r\n info, detail_information, error_prompt = customException.get_error_info(self.json_decode)\r\n # 打印到日志文件中\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=3,\r\n context='Function name:{0} - info:{1}'.format(function_name,\r\n info)\r\n )\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=3,\r\n context='Function name:{0} - detail_information:{1}'.format(function_name,\r\n detail_information)\r\n )\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=3,\r\n context='error_prompt:{0}'.format(error_prompt)\r\n )\r\n\r\n context = \"行政区域信息查询失败,换个词进行搜索吧\"\r\n resultContext.append(context)\r\n return resultContext\r\n\r\n def print_subdistrict(self, district: dict,\r\n sub_district_value: int,\r\n district_level: dict,\r\n global_sub_district_value: int\r\n ) -> list:\r\n \"\"\"\r\n 函数:打印查询的行政区域\r\n Args:\r\n district: 传入的关键字查询对应的行政区域\r\n sub_district_value:代表当前下一级行政区域的位置\r\n district_level:行政区域级别\r\n global_sub_district_value:传入全局查询的行政区域\r\n \"\"\"\r\n\r\n # TODO:未来版本由于数据量巨大,将其放入子线程中进行,防止卡父GUI进程\r\n # TODO:未来版本将返回数据从list升级为dict\r\n self.district = 
district\r\n self.district_level = district_level\r\n self.global_sub_district_value = global_sub_district_value\r\n self.sub_district_value = sub_district_value\r\n\r\n # 输出结果\r\n resultContext = []\r\n\r\n # 写入日志\r\n writeLog = WriteLog()\r\n class_name = self.__class__.__name__\r\n function_name = inspect.stack()[0][3]\r\n log_filename = writeLog.create_filename(class_name=class_name)\r\n\r\n name = self.district['name']\r\n level = self.district_level[self.district['level']]\r\n\r\n # 当前行政区域\r\n subtraction = global_sub_district_value - sub_district_value - 1\r\n\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=1,\r\n context='Function name:{0} - global:{1}'.format(function_name,\r\n str(self.global_sub_district_value))\r\n )\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=1,\r\n context='Function name:{0} - sub_district_value:{1}'.format(function_name,\r\n sub_district_value)\r\n )\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=1,\r\n context='Function name:{0} - subtraction:{1}'.format(function_name,\r\n str(subtraction))\r\n )\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=6,\r\n context='Function name:{0} - district search successfully'.format(function_name)\r\n )\r\n\r\n # 同级行政区域\r\n if subtraction == 0:\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=1,\r\n context='Function name:{0} - context:{1} - {2} - {3}'.format(function_name,\r\n subtraction, name,\r\n level)\r\n )\r\n resultContext.append(\"您提供的关键字查询名为“{0}”的行政区级别为“{1}”\".format(name, level))\r\n\r\n # 下一级行政区域\r\n elif subtraction == 1:\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=1,\r\n context='Function name:{0} - context:{1} - {2} - {3}'.format(function_name,\r\n subtraction,\r\n name,\r\n level)\r\n )\r\n resultContext.append(\"您查询的关键字的下一级行政区名为“{0}”的行政区级别为“{1}”\".format(name, level))\r\n\r\n # 下二级行政区域\r\n elif subtraction 
== 2:\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=1,\r\n context='Function name:{0} - context:{1} - {2} - {3}'.format(function_name,\r\n subtraction,\r\n name,\r\n level)\r\n )\r\n resultContext.append(\"您查询的关键字的下二级行政区名为“{0}”的行政区级别为“{1}”\".format(name, level))\r\n\r\n # 下三级行政区域\r\n elif subtraction == 3:\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=1,\r\n context='Function name:{0} - context:{1} - {2} - {3}'.format(function_name,\r\n subtraction,\r\n name,\r\n level\r\n )\r\n )\r\n resultContext.append(\"您查询的关键字的下三级行政区名为“{0}”的行政区级别为“{1}”\".format(name, level))\r\n\r\n else:\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=3,\r\n context='Function name:{0} - Query Failed'.format(function_name)\r\n )\r\n resultContext.append(\"查询错误\")\r\n\r\n # 条件成立,继续搜索下一级行政区\r\n sub_districts = self.district['districts']\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=1,\r\n context='Function name:{0} - search sub districts'.format(function_name)\r\n )\r\n\r\n # 行政区域结果数目\r\n len_sub_districts = len(self.district['districts'])\r\n\r\n if len_sub_districts > 0:\r\n resultContext.append(\"该行政区域包括{0}个结果\".format(len_sub_districts))\r\n\r\n if sub_districts and self.sub_district_value >= 0:\r\n for sub_district in sub_districts:\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=1,\r\n context='Function name:{0} - {1}'.format(function_name,\r\n self.print_subdistrict.__name__)\r\n )\r\n context = self.print_subdistrict(sub_district, self.sub_district_value - 1, self.district_level,\r\n self.global_sub_district_value)\r\n resultContext.extend(context)\r\n\r\n return resultContext\r\n\r\n def get_sub_administrative_district(self, json_decode\r\n ) -> list:\r\n \"\"\"\r\n 函数:解析行政区域下一级数据。\r\n Args:\r\n json_decode:get_administrative_district()方法从网络中获取的数据\r\n \"\"\"\r\n\r\n # 
TODO:未来版本将返回数据从list升级为dict\r\n self.json_decode = json_decode\r\n\r\n # 输出结果\r\n resultContext = []\r\n\r\n # 写入日志\r\n writeLog = WriteLog()\r\n class_name = self.__class__.__name__\r\n function_name = inspect.stack()[0][3]\r\n log_filename = writeLog.create_filename(class_name=class_name)\r\n\r\n try:\r\n if self.json_decode['status'] == '0':\r\n # 官方文档异常\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=3,\r\n context='Function name:{0} - status:{1}'.format(function_name,\r\n self.json_decode['status'])\r\n )\r\n raise OfficialException\r\n\r\n elif self.json_decode['status'] == '2':\r\n # 自定义异常\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=3,\r\n context='Function name:{0} - status:{1}'.format(function_name,\r\n self.json_decode['status'])\r\n )\r\n raise CustomExpection\r\n\r\n elif self.json_decode['status'] == '1':\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=6,\r\n context='Function name:{0} - status:{1}'.format(function_name,\r\n self.json_decode['status'])\r\n )\r\n if self.json_decode['infocode'] == \"10000\": # 请求数据成功的状态码\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=6,\r\n context='Function name:{0} - infocode:{1}'.format(function_name,\r\n self.json_decode[\r\n 'infocode'])\r\n )\r\n\r\n # 请求结果\r\n keywords_count = self.json_decode['count']\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=1,\r\n context='Function name:{0} - keywords count:{1}'.format(function_name,\r\n keywords_count)\r\n )\r\n\r\n # 行政区域数目\r\n districts = self.json_decode['districts']\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=6,\r\n context='Function name:{0} - districts acquired successfully'.format(\r\n function_name)\r\n )\r\n\r\n # 输出行政区信息\r\n if districts: # 里面的信息不为空\r\n for district in districts:\r\n # 下一级行政区域列表\r\n 
sub_districts = district['districts']\r\n sub_districts.sort(key=lambda x: x['adcode'])\r\n for subdistrict in sub_districts:\r\n resultContext.append(subdistrict['name'])\r\n\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=6,\r\n context='Function name:{0} - print district successful run.'.format(function_name)\r\n )\r\n return resultContext\r\n\r\n except OfficialException as officialException:\r\n # 获得的错误信息\r\n errcode, errorInfo, solution = officialException.get_error_info(self.json_decode)\r\n # 打印到日志文件中\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=3,\r\n context='Function name:{0} - errcode:{1}'.format(function_name,\r\n errcode)\r\n )\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=3,\r\n context='Function name:{0} - errorInfo:{1}'.format(function_name,\r\n errorInfo)\r\n )\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=3,\r\n context='Function name:{0} - solution:{1}'.format(function_name,\r\n solution)\r\n )\r\n\r\n resultContext.append(errorInfo)\r\n context = \"行政区域信息查询失败,换个词进行搜索吧\"\r\n resultContext.append(context)\r\n return resultContext\r\n\r\n except CustomExpection as customException:\r\n info, detail_information, error_prompt = customException.get_error_info(self.json_decode)\r\n # 打印到日志文件中\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=3,\r\n context='Function name:{0} - info:{1}'.format(function_name,\r\n info)\r\n )\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=3,\r\n context='Function name:{0} - detail_information:{1}'.format(function_name,\r\n detail_information)\r\n )\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=3,\r\n context='error_prompt:{0}'.format(error_prompt)\r\n )\r\n\r\n context = \"行政区域信息查询失败,换个词进行搜索吧\"\r\n resultContext.append(context)\r\n return resultContext\r\n" }, { "alpha_fraction": 0.6439114212989807, "alphanum_fraction": 
0.6488314867019653, "avg_line_length": 35.8139533996582, "blob_id": "cba10b02974b7d1b413e6fa2df627146472554f4", "content_id": "1cc1fb2ecf2509b9ab5ac9f9a5014c73d4b1c05f", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2126, "license_type": "permissive", "max_line_length": 120, "num_lines": 43, "path": "/running/runSearchPOI.py", "repo_name": "Gaoyifei1011/AmapProgram", "src_encoding": "UTF-8", "text": "from AmapFunctions.SearchPOI import *\r\n\r\n# 关键字搜索\r\n# keywords = input(\"请输入您要查询的关键字(可选):\")\r\n# types = input(\"请输入您要查询的类型(可选):\")\r\n# city = input(\"请输入您要查询关键字对应的城市(可选):\")\r\n\r\n# 周边搜索\r\n# location = input(\"请输入您要查询的中心点坐标:\")\r\n# keywords = input(\"请输入您要查询的关键字(可选):\")\r\n# types = input(\"请输入您要查询的类型(可选):\")\r\n# city = input(\"请输入您要查询关键字对应的城市(可选):\")\r\n# context = input(\"请输入您要查询距离中心点坐标的半径,默认为3000(可选):\")\r\n# if context == '':\r\n# radius = 1000\r\n# else:\r\n# radius = int(context)\r\n\r\n# 多边形搜索\r\n# polygon = input(\"请输入您要查询经纬度坐标对:\")\r\n# keywords = input(\"请输入您要查询的关键字(可选):\")\r\n# types = input(\"请输入您要查询的类型(可选):\")\r\n\r\nsearch_poi = SearchPOI()\r\n\r\n# 获取数据——关键字搜索\r\n# result_search_poi = search_poi.get_search_poi_by_keywords(keywords=keywords,types=types,city=city,extensions='all')\r\n# search_poi.parse_search_poi(result_search_poi,keywords=keywords,extensions='all')\r\n\r\n# 获取数据——周边搜索\r\n# result_search_poi = search_poi.get_search_poi_by_arounds(location=location, keywords=keywords, types=types, city=city,\r\n# radius=radius, extensions='all')\r\n# search_poi.parse_search_poi(result_search_poi, keywords=keywords, extensions='all')\r\n\r\n# 获取数据——多边形搜索\r\n# result_search_poi = search_poi.get_search_poi_by_polygon(polygon=polygon, keywords=keywords, types=types,\r\n# extensions='all')\r\n# search_poi.parse_search_poi(result_search_poi, keywords=keywords, extensions='all')\r\n\r\n# 获取数据——ID查询\r\npoi_id = input(\"请输入您的兴趣点ID:\")\r\nresult_search_poi = 
search_poi.get_search_poi_by_id(poi_id=poi_id)\r\nsearch_poi.parse_search_poi(result_search_poi, extensions='all')\r\n" }, { "alpha_fraction": 0.4850083291530609, "alphanum_fraction": 0.4894503057003021, "avg_line_length": 34.75510025024414, "blob_id": "b7c35205fcf027a1a7bfee257b3d1e8537c02df3", "content_id": "1c5173beee76e3861ad37e60c89db7d19ee6d406", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3924, "license_type": "permissive", "max_line_length": 133, "num_lines": 98, "path": "/FundamentalFunctions/WeatherOperation.py", "repo_name": "Gaoyifei1011/AmapProgram", "src_encoding": "UTF-8", "text": "import inspect\r\n\r\nfrom AmapFunctions.WeatherInformation import WeatherInformation\r\nfrom logrecord.WriteLog import WriteLog\r\n\r\n\r\nclass WeatherOperation:\r\n \"\"\"\r\n Class:天气信息操作\r\n \"\"\"\r\n def __init__(self):\r\n self.city = None\r\n self.weatherInformation = None\r\n self.weatherType = None\r\n\r\n # 写入日志\r\n writeLog = WriteLog()\r\n class_name = self.__class__.__name__\r\n log_filename = writeLog.create_filename(class_name=class_name)\r\n writeLog.write_to_log(file_name=log_filename, log_level=1, context='Class name:{0} start'.format(class_name))\r\n\r\n def check_weather_information(self, city: str) -> int:\r\n \"\"\"\r\n 函数:检测用户提供的天气信息是否符合规范要求\r\n Args:\r\n city:用户输入的城市\r\n Returns:\r\n 检测类型识别码\r\n \"\"\"\r\n self.city = city\r\n\r\n # 写入日志\r\n writeLog = WriteLog()\r\n class_name = self.__class__.__name__\r\n function_name = inspect.stack()[0][3]\r\n log_filename = writeLog.create_filename(class_name=class_name)\r\n\r\n # 检测结果\r\n checkedResult = self.city is None or self.city == ''\r\n\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=1,\r\n context='Function name:{0} - weather check result:{1}'.format(function_name,\r\n checkedResult)\r\n )\r\n\r\n if checkedResult:\r\n return 2\r\n # TODO:\r\n # 使用python正则表达式验证用户名格式\r\n # 
此处检测格式错误返回false\r\n else:\r\n return True\r\n\r\n def get_weather_information(self, city: str,\r\n weatherType: str\r\n ) -> list:\r\n \"\"\"\r\n 函数:获取输入城市对应的天气信息\r\n Args:\r\n city:城市名称\r\n weatherType:查询的天气类型\r\n Returns:\r\n 返回获取到输入城市对应的天气具体信息\r\n \"\"\"\r\n\r\n # TODO:未来版本将返回数据从str升级为dict\r\n self.city = city\r\n self.weatherType = weatherType\r\n\r\n # 写入日志\r\n writeLog = WriteLog()\r\n class_name = self.__class__.__name__\r\n function_name = inspect.stack()[0][3]\r\n log_filename = writeLog.create_filename(class_name=class_name)\r\n\r\n weatherInformation = WeatherInformation()\r\n # 获取的天气原信息(未解析)\r\n resultWeatherInformation = weatherInformation.get_weather_information(self.city, extensions=self.weatherType)\r\n\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=1,\r\n context='Function name:{0} - resultWeatherInformation:{1}'.format(function_name,\r\n resultWeatherInformation)\r\n )\r\n # 解析后的天气信息\r\n resultWeatherDetailInformation = weatherInformation.parse_weather_information(resultWeatherInformation,\r\n self.weatherType)\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=1,\r\n context='Function name:{0} - resultWeatherDetailInformation:{1}'.format(function_name,\r\n resultWeatherDetailInformation)\r\n )\r\n\r\n return resultWeatherDetailInformation\r\n" }, { "alpha_fraction": 0.5560283660888672, "alphanum_fraction": 0.5574468374252319, "avg_line_length": 30.045454025268555, "blob_id": "2fd79c76bac213b6b315a9ee569e8c27e86a99d0", "content_id": "642bee296610917af7bf231133e0404c5ab810a0", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 797, "license_type": "permissive", "max_line_length": 71, "num_lines": 22, "path": "/SelfExpection/CustomExpection.py", "repo_name": "Gaoyifei1011/AmapProgram", "src_encoding": "UTF-8", "text": "#_*_coding=UTF-8_*_\r\nclass CustomExpection(Exception):\r\n def __init__(self)->None:\r\n 
super(CustomExpection,self).__init__()\r\n self.error_info = None\r\n\r\n def get_error_info(self, error_info: dict) -> tuple[str, str, str]:\r\n \"\"\"\r\n 函数:返回自定义异常的错误信息\r\n Args:\r\n error_info:自定义异常的内容\r\n Returns:\r\n info:自定义异常信息\r\n detail_information:具体的异常信息内容\r\n error_prompt:异常发生的提示\r\n \"\"\"\r\n self.error_info = error_info\r\n info = error_info['info']\r\n detail_information = error_info['detail_information']\r\n error_prompt = error_info['error_prompt']\r\n\r\n return info, detail_information, error_prompt\r\n" }, { "alpha_fraction": 0.5202711224555969, "alphanum_fraction": 0.5529057383537292, "avg_line_length": 42.261112213134766, "blob_id": "a6cd0afeca8e3e01a93aecfc8763b3ecb9da87af", "content_id": "818ee0ff97f262390628f6271affd8b45db04b2c", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 11329, "license_type": "permissive", "max_line_length": 191, "num_lines": 180, "path": "/SelfExpection/OfficialException.py", "repo_name": "Gaoyifei1011/AmapProgram", "src_encoding": "UTF-8", "text": "class OfficialException(BaseException):\r\n def __init__(self) -> None:\r\n self.error_info = None\r\n\r\n def get_error_info(self, error_info: dict) -> tuple[str, str, str]:\r\n \"\"\"\r\n 函数:返回错误信息\r\n Args:\r\n error_info:获得的json格式数据文档\r\n Returns:\r\n errcode:info返回值\r\n errorInfo:状态描述\r\n solution:问题排查策略\r\n \"\"\"\r\n self.error_info = error_info\r\n\r\n infocode = error_info['infocode']\r\n if infocode == '100001':\r\n errcode = 'INVALID_USER_KEY'\r\n errorInfo = 'key不正确或过期'\r\n solution = '开发者发起请求时,传入的key不正确或者过期。'\r\n # 写入日志文件\r\n elif infocode == '10002':\r\n # 写入日志文件\r\n errcode = 'SERVICE_NOT_AVAILABLE'\r\n errorInfo = '没有权限使用相应的服务或者请求接口的路径拼写错误。'\r\n solution = '1.开发者没有权限使用相应的服务,例如:开发者申请了WEB定位功能的key,却使用该key访问逆地理编码功能时,就会返回该错误。反之亦然。\\n2.开发者请求接口的路径拼写错误。例如:正确的https://restapi.amap.com/v3/ip在程序中被拼装写了https://restapi.amap.com/vv3/ip\"。'\r\n elif infocode == '10003':\r\n errcode = 
'DAILY_QUERY_OVER_LIMIT'\r\n errorInfo = '访问已超出日访问量'\r\n solution = '开发者的日访问量超限,被系统自动封停,第二天0:00会自动解封。'\r\n elif infocode == '10004':\r\n errcode = 'ACCESS_TOO_FREQUENT'\r\n errorInfo = '单位时间内访问过于频繁'\r\n solution = '开发者的单位时间内(1分钟)访问量超限,被系统自动封停,下一分钟自动解封。'\r\n elif infocode == '10005':\r\n errcode = 'INVALID_USER_IP'\r\n errorInfo = 'IP白名单出错,发送请求的服务器IP不在IP白名单内。'\r\n solution = '开发者在LBS官网控制台设置的IP白名单不正确。白名单中未添加对应服务器的出口IP。可到\"控制台>配置\" 中设定IP白名单。'\r\n elif infocode == '10006':\r\n errcode = 'INVALID_USER_DOMAIN'\r\n errorInfo = '绑定域名无效'\r\n solution = '开发者绑定的域名无效,需要在官网控制台重新设置。'\r\n elif infocode == '10007':\r\n errcode = 'INVALID_USER_SIGNATURE'\r\n errorInfo = '数字签名未通过验证'\r\n solution = '开发者签名未通过开发者在key控制台中,开启了“数字签名”功能,但没有按照指定算法生成“数字签名”。'\r\n elif infocode == '10008':\r\n errcode = 'INVALID_USER_SCODE'\r\n errorInfo = 'MD5安全码未通过验证'\r\n solution = '需要开发者判定key绑定的SHA1,package是否与sdk包里的一致。'\r\n elif infocode == '10009':\r\n errcode = 'USERKEY_PLAT_NOMATCH'\r\n errorInfo = '请求key与绑定平台不符'\r\n solution = '请求中使用的key与绑定平台不符,例如:开发者申请的是js api的key,却用来调web服务接口。'\r\n elif infocode == '10010':\r\n errcode = 'IP_QUERY_OVER_LIMIT'\r\n errorInfo = 'IP访问超限'\r\n solution = '未设定IP白名单的开发者使用key发起请求,从单个IP向服务器发送的请求次数超出限制,被系统自动封停。'\r\n elif infocode == '10011':\r\n errcode = 'NOT_SUPPORT_HTTPS'\r\n errorInfo = '服务不支持https请求'\r\n solution = '服务不支持https请求,请联系提供商。'\r\n elif infocode == '10012':\r\n errcode = 'INSUFFICIENT_PRIVILEGES'\r\n errorInfo = '权限不足,服务请求被拒绝'\r\n solution = '由于不具备请求该服务的权限,所以服务被拒绝。'\r\n elif infocode == '10013':\r\n errcode = 'USER_KEY_RECYCLED'\r\n errorInfo = 'Key被删除'\r\n solution = '开发者删除了key,key被删除后无法正常使用。'\r\n elif infocode == '10014':\r\n errcode = 'QPS_HAS_EXCEEDED_THE_LIMIT'\r\n errorInfo = '云图服务QPS超限'\r\n solution = 'QPS超出限制,超出部分的请求被拒绝。限流阈值内的请求依旧会正常返回。'\r\n elif infocode == '10015':\r\n errcode = 'GATEWAY_TIMEOUT'\r\n errorInfo = '受单机QPS限流限制'\r\n solution = '受单机QPS限流限制时出现该问题,建议降低请求的QPS。'\r\n elif infocode == '10016':\r\n errcode = 'SERVER_IS_BUSY'\r\n errorInfo = 
'服务器负载过高'\r\n solution = '服务器负载过高,请稍后再试。'\r\n elif infocode == '10017':\r\n errcode = 'RESOURCE_UNAVAILABLE'\r\n errorInfo = '所请求的资源不可用'\r\n solution = '所请求的资源不可用。'\r\n elif infocode == '10019':\r\n errcode = 'CQPS_HAS_EXCEEDED_THE_LIMIT'\r\n errorInfo = '使用的某个服务总QPS超限'\r\n solution = 'QPS超出限制,超出部分的请求被拒绝。限流阈值内的请求依旧会正常返回。'\r\n elif infocode == '10020':\r\n errcode = 'CKQPS_HAS_EXCEEDED_THE_LIMIT'\r\n errorInfo = '某个Key使用某个服务接口QPS超出限制'\r\n solution = 'QPS超出限制,超出部分的请求被拒绝。限流阈值内的请求依旧会正常返回。'\r\n elif infocode == '10021':\r\n errcode = 'CUQPS_HAS_EXCEEDED_THE_LIMIT '\r\n errorInfo = '账号使用某个服务接口QPS超出限制'\r\n solution = 'QPS超出限制,超出部分的请求被拒绝。限流阈值内的请求依旧会正常返回。'\r\n elif infocode == '10026':\r\n errcode = 'INVALID_REQUEST'\r\n errorInfo = '账号处于被封禁状态'\r\n solution = '由于违规行为账号被封禁不可用,如有异议请登录控制台提交工单进行申诉。'\r\n elif infocode == '10029':\r\n errcode = 'ABROAD_DAILY_QUERY_OVER_LIMIT'\r\n errorInfo = '某个Key的QPS超出限制'\r\n solution = 'QPS超出限制,超出部分的请求被拒绝。限流阈值内的请求依旧会正常返回。'\r\n elif infocode == '10044':\r\n errcode = 'USER_DAILY_QUERY_OVER_LIMIT'\r\n errorInfo = '账号维度日调用量超出限制'\r\n solution = '账号维度日调用量超出限制,超出部分的请求被拒绝。限流阈值内的请求依旧会正常返回。'\r\n elif infocode == '10045':\r\n errcode = 'USER_ABROAD_DAILY_QUERY_OVER_LIMIT'\r\n errorInfo = '账号维度海外服务日调用量超出限制'\r\n solution = '账号维度海外服务接口日调用量超出限制,超出部分的请求被拒绝。限流阈值内的请求依旧会正常返回。'\r\n elif infocode == '20000':\r\n errcode = 'INVALID_PARAMS'\r\n errorInfo = '请求参数非法'\r\n solution = '请求参数的值没有按照规范要求填写。例如,某参数值域范围为[1,3],开发者误填了’4’。'\r\n elif infocode == '20001':\r\n errcode = 'MISSING_REQUIRED_PARAMS'\r\n errorInfo = '缺少必填参数'\r\n solution = '缺少接口中要求的必填参数。'\r\n elif infocode == '20002':\r\n errcode = 'ILLEGAL_REQUEST'\r\n errorInfo = '请求协议非法'\r\n solution = '请求协议非法。比如某接口仅支持get请求,结果用了POST方式'\r\n elif infocode == '20003':\r\n errcode = 'UNKNOWN_ERROR'\r\n errorInfo = '其他未知错误'\r\n solution = '其他未知错误'\r\n elif infocode == '20011':\r\n errcode = 'INSUFFICIENT_ABROAD_PRIVILEGES'\r\n errorInfo = '查询坐标或规划点(包括起点、终点、途经点)在海外,但没有海外地图权限'\r\n solution = 
'使用逆地理编码接口、输入提示接口、周边搜索接口、路径规划接口时可能出现该问题,规划点(包括起点、终点、途经点)不在中国陆地范围内。'\r\n elif infocode == '20012':\r\n errcode = 'ILLEGAL_CONTENT'\r\n errorInfo = '查询信息存在非法内容'\r\n solution = '使用搜索接口时可能出现该问题,通常是由于查询内容非法导致。'\r\n elif infocode == '20800':\r\n errcode = 'OUT_OF_SERVICE'\r\n errorInfo = '规划点(包括起点、终点、途经点)不在中国陆地范围内'\r\n solution = '使用路径规划服务接口时可能出现该问题,规划点(包括起点、终点、途经点)不在中国陆地范围内。'\r\n elif infocode == '20801':\r\n errcode = 'NO_ROADS_NEARBY'\r\n errorInfo = '划点(起点、终点、途经点)附近搜不到路'\r\n solution = '使用路径规划服务接口时可能出现该问题,划点(起点、终点、途经点)附近搜不到路。'\r\n elif infocode == '20802':\r\n errcode = 'ROUTE_FAIL'\r\n errorInfo = '路线计算失败,通常是由于道路连通关系导致'\r\n solution = '使用路径规划服务接口时可能出现该问题,路线计算失败,通常是由于道路连通关系导致。'\r\n elif infocode == '20803':\r\n errcode = 'OVER_DIRECTION_RANGE'\r\n errorInfo = '起点终点距离过长'\r\n solution = '使用路径规划服务接口时可能出现该问题,路线计算失败,通常是由于道路起点和终点距离过长导致。'\r\n elif infocode == '30001' or '30002' or '30003' or '32000' or '32001' or '32002' or '32003' or '32200' or '32201' or '32202' or '32203':\r\n errcode = 'ENGINE_RESPONSE_DATA_ERROR'\r\n errorInfo = '服务响应失败'\r\n solution = '出现3开头的错误码,建议先检查传入参数是否正确。'\r\n elif infocode == '40000':\r\n errcode = 'QUOTA_PLAN_RUN_OUT'\r\n errorInfo = '余额耗尽'\r\n solution = '所购买服务的余额耗尽,无法继续使用服务'\r\n elif infocode == '40001':\r\n errcode = 'GEOFENCE_MAX_COUNT_REACHED'\r\n errorInfo = '围栏个数达到上限'\r\n solution = 'Key可创建的地理围栏的数量,已达上限。'\r\n elif infocode == '40002':\r\n errcode = 'SERVICE_EXPIRED'\r\n errorInfo = '购买服务到期'\r\n solution = '所购买的服务期限已到,无法继续使用。'\r\n elif infocode == '40003':\r\n errcode = 'ABROAD_QUOTA_PLAN_RUN_OUT'\r\n errorInfo = '海外服务余额耗尽'\r\n solution = '所购买服务的海外余额耗尽,无法继续使用服务'\r\n else:\r\n errcode = 'UNKNOWN_ERROR'\r\n errorInfo = '其他未知错误'\r\n solution = '其他未知错误'\r\n return errcode, errorInfo, solution\r\n" }, { "alpha_fraction": 0.4010942876338959, "alphanum_fraction": 0.41228431463241577, "avg_line_length": 48.8199348449707, "blob_id": "06c0d6525c17299a68447cd6cb30f537ac44b057", "content_id": "36ee53752b088f072e4304af04b1ba05c1daa88b", 
"detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 66409, "license_type": "permissive", "max_line_length": 140, "num_lines": 1194, "path": "/AmapFunctions/SearchPOI.py", "repo_name": "Gaoyifei1011/AmapProgram", "src_encoding": "UTF-8", "text": "# -*- coding:utf-8 -*-\r\n# 导入的库\r\nimport inspect\r\nimport json\r\nimport os\r\nimport time\r\nimport urllib.request\r\nfrom http.client import IncompleteRead, RemoteDisconnected\r\nfrom typing import Any\r\nfrom urllib.error import HTTPError, URLError\r\n\r\nimport requests\r\nfrom PIL import Image\r\n\r\nfrom SelfExpection import CustomExpection\r\nfrom SelfExpection.OfficialException import OfficialException\r\nfrom logrecord.WriteLog import WriteLog\r\n\r\n\r\nclass SearchPOI:\r\n \"\"\"\r\n Class:搜索POI\r\n 搜索服务API是一类简单的HTTP接口,提供多种查询POI信息的能力,其中包括关键字搜索、周边搜索、多边形搜索、ID查询四种筛选机制。\r\n \"\"\"\r\n\r\n def __init__(self):\r\n self.city = None\r\n self.cityLimit = None\r\n self.children = None\r\n self.extensions = None\r\n self.filename = None\r\n self.json_decode = None\r\n self.keyword = None\r\n self.keywords = None\r\n self.location = None\r\n self.num_retries = None\r\n self.offset = None\r\n self.output = None\r\n self.page = None\r\n self.poi = None\r\n self.polygon = None\r\n self.poi_id = None\r\n self.radius = None\r\n self.sortRule = None\r\n self.suggestion = None\r\n self.sug_address = None\r\n self.types = None\r\n self.url = None\r\n\r\n writeLog = WriteLog()\r\n class_name = self.__class__.__name__\r\n log_filename = writeLog.create_filename(class_name=class_name)\r\n writeLog.write_to_log(file_name=log_filename, log_level=1, context='Class name:{0} start'.format(class_name))\r\n\r\n # 获取高德地图数据API的钥匙\r\n APIkey = '<请自己输入自己申请的API Key>'\r\n open_time = 0\r\n\r\n def get_search_poi_by_keywords(self, keywords: str,\r\n types: str,\r\n **kwargs: dict[str, Any]\r\n ) -> dict:\r\n \"\"\"\r\n 函数:关键字搜索。\\n\r\n Args:\r\n 
keywords:查询关键字,必填(keywords和types两者至少必选其一)。规则: 多个关键字用“|”分割,若不指定city,并且搜索的为泛词(例如“美食”)的情况下,返回的内容为城市列表以及此城市内有多少结\r\n 果符合要求。\r\n types:查询POI类型,必填(keywords和types两者至少必选其一)。可选值:分类代码 或 汉字(若用汉字,请严格按照附件之中的汉字填写)\r\n 分类代码由六位数字组成,一共分为三个部分,前两个数字代表大类;中间两个数字代表中类;最后两个数字代表小类。\r\n 若指定了某个大类,则所属的中类、小类都会被显示。\r\n 例如:010000为汽车服务(大类),010100为加油站(中类),010101为中国石化(小类),010900为汽车租赁(中类),010901为汽车租赁还车(小类)\r\n 当指定010000,则010100等中类、010101等小类都会被包含,当指定010900,则010901等小类都会被包含。\r\n 若不指定city,返回的内容为城市列表以及此城市内有多少结果符合要求。\r\n 当您的keywords和types都是空时,默认指定types为120000(商务住宅)&150000(交通设施服务)\r\n kwargs:\r\n city:查询城市,可选。可选值:城市中文、中文全拼、citycode、adcode。如:北京/beijing/010/110000。\r\n 填入此参数后,会尽量优先返回此城市数据,但是不一定仅局限此城市结果,若仅需要某个城市数据请调用citylimit参数。:在深圳市搜天安门,返回北京天安门结果。\r\n citylimit:仅返回指定城市数据,可选,默认为False。可选值:true/false\r\n children:是否按照层级展示子POI数据,可选,默认0。可选值:children=1,当为0的时候,子POI都会显示。当为1的时候,子POI会归类到父POI之中。仅在extensions=all的时候生效\r\n offset:每页记录数据,可选,默认20。强烈建议不超过25,若超过25可能造成访问报错\r\n page:当前页数,可选,默认1。最大翻页数100\r\n extensions:返回结果控制,可选,默认base。此项默认返回基本地址信息;取值为all返回地址信息、附近POI、道路以及道路交叉口信息。\r\n output:返回数据格式类型,可选,默认JSON格式。可选值:JSON,XML。\r\n Returns:返回获得的json格式数据或错误信息\r\n \"\"\"\r\n\r\n self.keywords = keywords\r\n self.types = types\r\n\r\n if 'city' in kwargs:\r\n self.city = kwargs['city']\r\n if 'cityLimit' in kwargs:\r\n self.cityLimit = kwargs['cityLimit']\r\n if 'children' in kwargs:\r\n self.children = kwargs['children']\r\n if 'extensions' in kwargs:\r\n self.extensions = kwargs['extensions']\r\n if 'offset' in kwargs:\r\n self.offset = kwargs['offset']\r\n if 'output' in kwargs:\r\n self.output = kwargs['output']\r\n if 'page' in kwargs:\r\n self.page = kwargs['page']\r\n\r\n # 写入日志\r\n writeLog = WriteLog()\r\n class_name = self.__class__.__name__\r\n function_name = inspect.stack()[0][3]\r\n log_filename = writeLog.create_filename(class_name=class_name)\r\n\r\n # 传入参数\r\n parameters = {'key': SearchPOI.APIkey,\r\n 'keywords': self.keywords,\r\n 'types': self.types,\r\n }\r\n\r\n if self.city is not None:\r\n 
parameters.update(city=self.city)\r\n if self.cityLimit is not None:\r\n parameters.update(citylimit=self.cityLimit)\r\n if self.children is not None:\r\n parameters.update(children=self.children)\r\n if self.extensions is not None:\r\n parameters.update(extensions=self.extensions)\r\n if self.offset is not None:\r\n parameters.update(offset=self.offset)\r\n if self.output is not None:\r\n parameters.update(output=self.output)\r\n if self.page is not None:\r\n parameters.update(page=self.page)\r\n\r\n # 获取数据\r\n try:\r\n request_information = requests.get(\"https://restapi.amap.com/v3/place/text?parameters\",\r\n params=parameters)\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=1,\r\n context='Function name:{0} - request_information:{1}'.format(function_name,\r\n request_information)\r\n )\r\n request_information.close() # 关闭访问\r\n request_information.raise_for_status() # 如果响应状态码不是 200,就主动抛出异常\r\n # 返回格式化后的JSON数据\r\n json_decode = json.loads(request_information.text)\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=6,\r\n context='Function name:{0} - Keywords search POI data successful get.'.format(\r\n function_name)\r\n )\r\n return json_decode\r\n\r\n except requests.exceptions.ConnectionError as e:\r\n time.sleep(1)\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=5,\r\n context='Function name:{0} - {1} has occured.'.format(function_name,\r\n e.__class__.__name__)\r\n )\r\n # 异常信息\r\n error_connection = 'ConnectionError -- please wait 3 seconds'\r\n error_connection_dict = {'status': '2',\r\n 'info': 'requests.exceptions.ConnectionError',\r\n 'detail_information': requests.exceptions.ConnectionError,\r\n 'error_prompt': error_connection\r\n }\r\n return error_connection_dict\r\n\r\n except requests.exceptions.ChunkedEncodingError as e:\r\n time.sleep(1)\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n 
log_level=5,\r\n context='Function name:{0} - {1} has occured.'.format(function_name,\r\n e.__class__.__name__\r\n )\r\n )\r\n # 异常信息\r\n error_chuck_encoding = 'ChunkedEncodingError -- please wait 3 seconds'\r\n error_chuck_encoding_dict = {'status': '2',\r\n 'info': 'HTTPError',\r\n 'detail_information': requests.exceptions.ChunkedEncodingError,\r\n 'error_prompt': error_chuck_encoding\r\n }\r\n return error_chuck_encoding_dict\r\n\r\n except Exception as e:\r\n time.sleep(1)\r\n error_information = 'Unfortunately -- An Unknown Error Happened, Please wait 3 seconds'\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=5,\r\n context='Function name:{0} - {1} has occured.'.format(function_name,\r\n e.__class__.__name__)\r\n )\r\n # 异常信息\r\n error_information_dict = {'status': '2',\r\n 'info': 'HTTPError',\r\n 'detail_information': requests.exceptions.ChunkedEncodingError,\r\n 'error_prompt': error_information\r\n }\r\n return error_information_dict\r\n\r\n def get_search_poi_by_around(self, location: str,\r\n **kwargs: dict[str, Any]\r\n ) -> dict:\r\n \"\"\"\r\n 函数:周边搜索。\\n\r\n Args:\r\n location:中心点坐标,必填。规则: 经度和纬度用\",\"分割,经度在前,纬度在后,经纬度小数点后不得超过6位\r\n kwargs:\r\n keywords:查询关键字,可选。规则: 多个关键字用“|”分割\r\n types:查询POI类型,可选。多个类型用“|”分割;可选值:分类代码 或 汉字 (若用汉字,请严格按照附件之中的汉字填写)。分类代码由六位数字组成,一共分为三个部分,前两个数字代表大类;中间两个数字代表中类;最后\r\n 两个数字代表小类。若指定了某个大类,则所属的中类、小类都会被显示。\r\n 例如:010000为汽车服务(大类),010100为加油站(中类),010101为中国石化(小类),010900为汽车租赁(中类),010901为汽车租赁还车(小类)\r\n 当指定010000,则010100等中类、010101等小类都会被包含。当指定010900,则010901等小类都会被包含\r\n 当keywords和types均为空的时候,默认指定types为050000(餐饮服务)、070000(生活服务)、120000(商务住宅)\r\n city:查询城市,可选,默认全国范围内搜索。可选值:城市中文、中文全拼、citycode、adcode。如:北京/beijing/010/110000\r\n 当用户指定的经纬度和city出现冲突,若范围内有用户指定city的数据,则返回相关数据,否则返回为空。\r\n 如:经纬度指定石家庄,而city却指定天津,若搜索范围内有天津的数据则返回相关数据,否则返回为空。\r\n radius:查询半径,可选,默认3000。取值范围:0-50000。规则:大于50000按默认值,单位:米\r\n sortRule:排序规则,可选,默认distance。规定返回结果的排序规则。按距离排序:distance;综合排序:weight\r\n 
offset:每页记录数据,可选,默认25。强烈建议不超过25,若超过25可能造成访问报错\r\n page:当前页数,可选,默认1。最大翻页数100\r\n extensions:返回结果控制,可选,默认base。此项默认返回基本地址信息;取值为all返回地址信息、附近POI、道路以及道路交叉口信息。\r\n output:返回数据格式类型,可选,默认JSON格式。可选值:JSON,XML\r\n Returns:返回获得的json格式数据或错误信息\r\n \"\"\"\r\n\r\n self.location = location\r\n\r\n if 'city' in kwargs:\r\n self.city = kwargs['city']\r\n if 'extensions' in kwargs:\r\n self.extensions = kwargs['extensions']\r\n if 'keywords' in kwargs:\r\n self.keywords = kwargs['keywords']\r\n if 'offset' in kwargs:\r\n self.offset = kwargs['offset']\r\n if 'output' in kwargs:\r\n self.output = kwargs['output']\r\n if 'page' in kwargs:\r\n self.page = kwargs['page']\r\n if 'radius' in kwargs:\r\n self.radius = kwargs['radius']\r\n if 'sortRule' in kwargs:\r\n self.sortRule = kwargs['sortRule']\r\n if 'types' in kwargs:\r\n self.types = kwargs['types']\r\n\r\n # 写入日志\r\n writeLog = WriteLog()\r\n class_name = self.__class__.__name__\r\n function_name = inspect.stack()[0][3]\r\n log_filename = writeLog.create_filename(class_name=class_name)\r\n\r\n # 传入参数\r\n parameters = {'key': SearchPOI.APIkey,\r\n 'location': self.location\r\n }\r\n\r\n if self.city is not None:\r\n parameters.update(city=self.city)\r\n if self.extensions is not None:\r\n parameters.update(extensions=self.extensions)\r\n if self.keywords is not None:\r\n parameters.update(keywords=self.keywords)\r\n if self.offset is not None:\r\n parameters.update(output=self.output)\r\n if self.page is not None:\r\n parameters.update(page=self.page)\r\n if self.radius is not None:\r\n parameters.update(radius=self.radius)\r\n if self.sortRule is not None:\r\n parameters.update(sortrule=self.sortRule)\r\n if self.types is not None:\r\n parameters.update(types=self.types)\r\n\r\n # 获取数据\r\n try:\r\n request_information = requests.get(\"https://restapi.amap.com/v3/place/around?parameters\",\r\n params=parameters)\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=1,\r\n context='Function name:{0} 
- request_information:{1}'.format(function_name,\r\n request_information)\r\n )\r\n request_information.close() # 关闭访问\r\n request_information.raise_for_status() # 如果响应状态码不是 200,就主动抛出异常\r\n json_decode = json.loads(request_information.text)\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=6,\r\n context='Function name:{0} - Around search POI data successful get.'.format(\r\n function_name)\r\n )\r\n return json_decode\r\n\r\n except requests.exceptions.ConnectionError as e:\r\n time.sleep(1)\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=5,\r\n context='Function name:{0} - {1} has occured.'.format(function_name,\r\n e.__class__.__name__)\r\n )\r\n # 异常信息\r\n error_connection = 'ConnectionError -- please wait 3 seconds'\r\n error_connection_dict = {'status': '2',\r\n 'info': 'requests.exceptions.ConnectionError',\r\n 'detail_information': requests.exceptions.ConnectionError,\r\n 'error_prompt': error_connection\r\n }\r\n return error_connection_dict\r\n\r\n except requests.exceptions.ChunkedEncodingError as e:\r\n time.sleep(1)\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=5,\r\n context='Function name:{0} - {1} has occured.'.format(function_name,\r\n e.__class__.__name__\r\n )\r\n )\r\n # 异常信息\r\n error_chuck_encoding = 'ChunkedEncodingError -- please wait 3 seconds'\r\n error_chuck_encoding_dict = {'status': '2',\r\n 'info': 'HTTPError',\r\n 'detail_information': requests.exceptions.ChunkedEncodingError,\r\n 'error_prompt': error_chuck_encoding\r\n }\r\n return error_chuck_encoding_dict\r\n\r\n except Exception as e:\r\n time.sleep(1)\r\n error_information = 'Unfortunately -- An Unknown Error Happened, Please wait 3 seconds'\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=5,\r\n context='Function name:{0} - {1} has occured.'.format(function_name,\r\n e.__class__.__name__)\r\n )\r\n # 异常信息\r\n 
error_information_dict = {'status': '2',\r\n 'info': 'HTTPError',\r\n 'detail_information': requests.exceptions.ChunkedEncodingError,\r\n 'error_prompt': error_information\r\n }\r\n return error_information_dict\r\n\r\n def get_search_poi_by_polygon(self, polygon: str,\r\n **kwargs\r\n ) -> dict:\r\n \"\"\"\r\n 函数:多边形搜索。\\n\r\n Args:\r\n polygon:经纬度坐标对,必填。规则:经度和纬度用\",\"分割,经度在前,纬度在后,坐标对用\"|\"分割。经纬度小数点后不得超过6位。多边形为矩形时,可传入左上右下两顶点坐标对;其他情况下首尾坐标对需相同。\r\n kwargs:\r\n keywords:查询关键字,可选。规则: 多个关键字用“|”分割\r\n types:查询POI类型,可选。多个类型用“|”分割;可选值:分类代码 或 汉字 (若用汉字,请严格按照附件之中的汉字填写)。分类代码由六位数字组成,一共分为三个部分,前两个数字代表大类;中间两个数字代表中类;最后\r\n 两个数字代表小类。若指定了某个大类,则所属的中类、小类都会被显示。\r\n 例如:010000为汽车服务(大类),010100为加油站(中类),010101为中国石化(小类),010900为汽车租赁(中类),010901为汽车租赁还车(小类)\r\n 当指定010000,则010100等中类、010101等小类都会被包含。当指定010900,则010901等小类都会被包含\r\n 当keywords和types均为空的时候,默认指定types为050000(餐饮服务)、070000(生活服务)、120000(商务住宅)\r\n offset:每页记录数据,可选,默认20。强烈建议不超过25,若超过25可能造成访问报错\r\n page:当前页数,可选,默认1。最大翻页数100\r\n extensions:返回结果控制,可选,默认base。此项默认返回基本地址信息;取值为all返回地址信息、附近POI、道路以及道路交叉口信息。\r\n output:返回数据格式类型,可选,默认JSON格式。可选值:JSON,XML\r\n Returns:返回获得的json格式数据或错误信息\r\n \"\"\"\r\n\r\n self.polygon = polygon\r\n\r\n if 'extensions' in kwargs:\r\n self.extensions = kwargs['extensions']\r\n if 'keywords' in kwargs:\r\n self.keywords = kwargs['keywords']\r\n if 'offset' in kwargs:\r\n self.offset = kwargs['offset']\r\n if 'output' in kwargs:\r\n self.output = kwargs['output']\r\n if 'page' in kwargs:\r\n self.page = kwargs['page']\r\n if 'types' in kwargs:\r\n self.types = kwargs['types']\r\n\r\n # 写入日志\r\n writeLog = WriteLog()\r\n class_name = self.__class__.__name__\r\n function_name = inspect.stack()[0][3]\r\n log_filename = writeLog.create_filename(class_name=class_name)\r\n\r\n # 传入参数\r\n parameters = {'key': self.APIkey,\r\n 'polygon': self.polygon\r\n }\r\n\r\n if self.extensions is not None:\r\n parameters.update(extensions=self.extensions)\r\n if self.keywords is not None:\r\n parameters.update(keywords=self.keywords)\r\n if 
self.offset is not None:\r\n parameters.update(offset=self.offset)\r\n if self.output is not None:\r\n parameters.update(output=self.output)\r\n if self.page is not None:\r\n parameters.update(page=self.page)\r\n if self.types is not None:\r\n parameters.update(types=self.types)\r\n\r\n # 获取数据\r\n try:\r\n request_information = requests.get(\"https://restapi.amap.com/v3/place/polygon?parameters\",\r\n params=parameters)\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=1,\r\n context='Function name:{0} - request_information:{1}'.format(function_name,\r\n request_information)\r\n )\r\n request_information.close() # 关闭访问\r\n request_information.raise_for_status() # 如果响应状态码不是 200,就主动抛出异常\r\n # 返回格式化后的JSON数据\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=6,\r\n context='Function name:{0} - Polygon search POI data successful get.'.format(\r\n function_name)\r\n )\r\n json_decode = json.loads(request_information.text)\r\n return json_decode\r\n\r\n except requests.exceptions.ConnectionError as e:\r\n time.sleep(1)\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=5,\r\n context='Function name:{0} - {1} has occured.'.format(function_name,\r\n e.__class__.__name__)\r\n )\r\n # 异常信息\r\n error_connection = 'ConnectionError -- please wait 3 seconds'\r\n error_connection_dict = {'status': '2',\r\n 'info': 'requests.exceptions.ConnectionError',\r\n 'detail_information': requests.exceptions.ConnectionError,\r\n 'error_prompt': error_connection\r\n }\r\n return error_connection_dict\r\n\r\n except requests.exceptions.ChunkedEncodingError as e:\r\n time.sleep(1)\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=5,\r\n context='Function name:{0} - {1} has occured.'.format(function_name,\r\n e.__class__.__name__\r\n )\r\n )\r\n # 异常信息\r\n error_chuck_encoding = 'ChunkedEncodingError -- please wait 3 seconds'\r\n error_chuck_encoding_dict = {'status': 
'2',\r\n 'info': 'HTTPError',\r\n 'detail_information': requests.exceptions.ChunkedEncodingError,\r\n 'error_prompt': error_chuck_encoding\r\n }\r\n return error_chuck_encoding_dict\r\n\r\n except Exception as e:\r\n time.sleep(1)\r\n error_information = 'Unfortunately -- An Unknown Error Happened, Please wait 3 seconds'\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=5,\r\n context='Function name:{0} - {1} has occured.'.format(function_name,\r\n e.__class__.__name__)\r\n )\r\n # 异常信息\r\n error_information_dict = {'status': '2',\r\n 'info': 'HTTPError',\r\n 'detail_information': requests.exceptions.ChunkedEncodingError,\r\n 'error_prompt': error_information\r\n }\r\n return error_information_dict\r\n\r\n def get_search_poi_by_id(self, poi_id: str,\r\n ) -> dict:\r\n \"\"\"\r\n 函数:ID查询。\\n\r\n Args:\r\n poi_id: 兴趣点ID,必填。兴趣点的唯一标识ID\r\n Returns:返回获得的json格式数据或错误信息\r\n \"\"\"\r\n\r\n self.poi_id = poi_id\r\n\r\n # 写入日志\r\n writeLog = WriteLog()\r\n class_name = self.__class__.__name__\r\n function_name = inspect.stack()[0][3]\r\n log_filename = writeLog.create_filename(class_name=class_name)\r\n\r\n # 传入参数\r\n parameters = {'key': SearchPOI.APIkey,\r\n 'id': self.poi_id\r\n }\r\n\r\n # 获取数据\r\n try:\r\n request_information = requests.get(\"https://restapi.amap.com/v3/place/detail?parameters\",\r\n params=parameters)\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=1,\r\n context='Function name:{0} - request_information:{1}'.format(function_name,\r\n request_information)\r\n )\r\n request_information.close() # 关闭访问\r\n request_information.raise_for_status() # 如果响应状态码不是 200,就主动抛出异常\r\n # 返回格式化后的JSON数据\r\n json_decode = json.loads(request_information.text)\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=6,\r\n context='Function name:{0} - ID search POI data successful get.'.format(\r\n function_name)\r\n )\r\n\r\n return json_decode\r\n\r\n except 
requests.exceptions.ConnectionError as e:\r\n time.sleep(1)\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=5,\r\n context='Function name:{0} - {1} has occured.'.format(function_name,\r\n e.__class__.__name__)\r\n )\r\n # 异常信息\r\n error_connection = 'ConnectionError -- please wait 3 seconds'\r\n error_connection_dict = {'status': '2',\r\n 'info': 'requests.exceptions.ConnectionError',\r\n 'detail_information': requests.exceptions.ConnectionError,\r\n 'error_prompt': error_connection\r\n }\r\n return error_connection_dict\r\n\r\n except requests.exceptions.ChunkedEncodingError as e:\r\n time.sleep(1)\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=5,\r\n context='Function name:{0} - {1} has occured.'.format(function_name,\r\n e.__class__.__name__\r\n )\r\n )\r\n # 异常信息\r\n error_chuck_encoding = 'ChunkedEncodingError -- please wait 3 seconds'\r\n error_chuck_encoding_dict = {'status': '2',\r\n 'info': 'HTTPError',\r\n 'detail_information': requests.exceptions.ChunkedEncodingError,\r\n 'error_prompt': error_chuck_encoding\r\n }\r\n return error_chuck_encoding_dict\r\n\r\n except Exception as e:\r\n time.sleep(1)\r\n error_information = 'Unfortunately -- An Unknown Error Happened, Please wait 3 seconds'\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=5,\r\n context='Function name:{0} - {1} has occured.'.format(function_name,\r\n e.__class__.__name__)\r\n )\r\n # 异常信息\r\n error_information_dict = {'status': '2',\r\n 'info': 'HTTPError',\r\n 'detail_information': requests.exceptions.ChunkedEncodingError,\r\n 'error_prompt': error_information\r\n }\r\n return error_information_dict\r\n\r\n def parse_search_poi(self, json_decode: dict,\r\n extensions: str,\r\n keywords: str = ''\r\n ) -> None:\r\n \"\"\"\r\n 函数:解析IP地址查询数据。\r\n Args:\r\n json_decode:get_ip_location()方法从网络中获取的数据\r\n keywords:查询的关键字\r\n extensions:获取的数据类型\r\n \"\"\"\r\n\r\n 
self.extensions = extensions\r\n self.json_decode = json_decode\r\n self.keywords = keywords\r\n\r\n # 写入日志\r\n writeLog = WriteLog()\r\n class_name = self.__class__.__name__\r\n function_name = inspect.stack()[0][3]\r\n log_filename = writeLog.create_filename(class_name=class_name)\r\n\r\n try:\r\n if self.json_decode['status'] == '0':\r\n # 官方文档异常\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=3,\r\n context='Function name:{0} - status:{1}'.format(function_name,\r\n self.json_decode['status'])\r\n )\r\n raise OfficialException\r\n\r\n elif self.json_decode['status'] == '2':\r\n # 自定义异常\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=3,\r\n context='Function name:{0} - status:{1}'.format(function_name,\r\n self.json_decode['status'])\r\n )\r\n raise CustomExpection\r\n\r\n elif self.json_decode['status'] == '1':\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=6,\r\n context='Function name:{0} - status:{1}'.format(function_name,\r\n self.json_decode['status'])\r\n )\r\n\r\n if self.json_decode['infocode'] == \"10000\": # 请求数据成功的状态码\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=6,\r\n context='Function name:{0} - infocode:{1}'.format(function_name,\r\n self.json_decode['infocode'])\r\n )\r\n # 搜索方案的数目\r\n search_count = self.json_decode['count']\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=1,\r\n context='Function name:{0} - search count:{1}'.format(function_name,\r\n search_count)\r\n )\r\n\r\n # 城市建议列表\r\n if 'suggestion' in self.json_decode:\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=6,\r\n context='Function name:{0} - Suggestion data get successfully.'.format(\r\n function_name)\r\n )\r\n\r\n if self.json_decode['suggestion']['cities'] or self.json_decode['suggestion']['keywords']:\r\n # only for debugging\r\n 
writeLog.write_to_log(file_name=log_filename,\r\n log_level=6,\r\n context='Function name:{0} - Suggestion cities or keywords data get successfully.'.format(\r\n function_name)\r\n )\r\n suggestions = self.json_decode['suggestion']\r\n for suggestion in suggestions:\r\n self.print_suggestion(suggestion, self.keywords)\r\n\r\n # 建议地址结果\r\n if 'sug_address' in self.json_decode:\r\n sug_addresses = self.json_decode['sug_address']\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=6,\r\n context='Function name:{0} - Suggestion address get successfully.'.format(\r\n function_name)\r\n )\r\n for sug_address in sug_addresses:\r\n self.print_sug_address(sug_address, self.keywords)\r\n\r\n # 搜索POI信息列表\r\n if self.json_decode['pois']:\r\n pois = self.json_decode['pois']\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=6,\r\n context='Function name:{0} - POI data get successfully'.format(\r\n function_name,\r\n pois)\r\n )\r\n for poi in pois:\r\n self.print_poi(poi, extensions=self.extensions)\r\n\r\n except OfficialException as officialException:\r\n # 获得的错误信息\r\n errcode, errorInfo, solution = officialException.get_error_info(self.json_decode)\r\n # 打印到日志文件中\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=3,\r\n context='Function name:{0} - errcode:{1}'.format(function_name,\r\n errcode)\r\n )\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=3,\r\n context='Function name:{0} - errorInfo:{1}'.format(function_name,\r\n errorInfo)\r\n )\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=3,\r\n context='Function name:{0} - solution:{1}'.format(function_name,\r\n solution)\r\n )\r\n\r\n except CustomExpection as customException:\r\n info, detail_information, error_prompt = customException.get_error_info(self.json_decode)\r\n # 打印到日志文件中\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=3,\r\n 
context='Function name:{0} - info:{1}'.format(function_name,\r\n info)\r\n )\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=3,\r\n context='Function name:{0} - detail_information:{1}'.format(function_name,\r\n detail_information)\r\n )\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=3,\r\n context='error_prompt:{0}'.format(error_prompt)\r\n )\r\n\r\n def print_suggestion(self, suggestion: dict,\r\n keyword: str\r\n ) -> None:\r\n \"\"\"\r\n 函数:输出城市建议列表中的信息\r\n Args:\r\n suggestion: 城市建议列表,当搜索的文本关键字在限定城市中没有返回时会返回建议城市列表\r\n keyword:查询的关键字\r\n \"\"\"\r\n\r\n self.keyword = keyword\r\n self.suggestion = suggestion\r\n\r\n # 写入日志\r\n writeLog = WriteLog()\r\n class_name = self.__class__.__name__\r\n function_name = inspect.stack()[0][3]\r\n log_filename = writeLog.create_filename(class_name=class_name)\r\n\r\n keywords = self.suggestion['keywords']\r\n cities = self.suggestion['cities']\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=1,\r\n context='Function name:{0} - keywords:{1}'.format(function_name,\r\n keywords)\r\n )\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=1,\r\n context='Function name:{0} - cities data get successfully'.format(function_name)\r\n )\r\n print(\"您输入的关键字暂未查到具体信息,根据您提供的关键字已搜索到关键字对应的城市列表\")\r\n for city in cities:\r\n # 城市名称,该城市包含此关键字的个数,该城市的citycode,该城市的adcode\r\n name = city['name']\r\n num = city['num']\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=1,\r\n context='Function name:{0} - city name:{1}'.format(function_name,\r\n name)\r\n )\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=1,\r\n context='Function name:{0} - num:{1}'.format(function_name,\r\n num)\r\n )\r\n print(\"包含该关键字“{0}”城市有“{1}”,其中包含{2}条结果\".format(self.keyword, name, num))\r\n\r\n def print_sug_address(self, sug_address: dict,\r\n keyword: str = ''\r\n ) -> None:\r\n \"\"\"\r\n 函数:建议地址结果,当搜索结果并非是POI(是地址时),且没有搜索到POI时返回\r\n 
Args:\r\n sug_address:建议地址结果,当搜索结果并非是POI(是地址时),且没有搜索到POI时返回\r\n keyword:查询的关键字\r\n \"\"\"\r\n self.keyword = keyword\r\n self.sug_address = sug_address\r\n\r\n # 写入日志\r\n writeLog = WriteLog()\r\n class_name = self.__class__.__name__\r\n function_name = inspect.stack()[0][3]\r\n log_filename = writeLog.create_filename(class_name=class_name)\r\n\r\n name = sug_address['name']\r\n address = sug_address['address']\r\n country = sug_address['country']\r\n pname = sug_address['pname']\r\n cityname = sug_address['cityname']\r\n adname = sug_address['adname']\r\n # 暂未查询到相关信息,故目前不使用\r\n district = sug_address['district']\r\n street = sug_address['street']\r\n\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=1,\r\n context='Function name:{0} - address name:{1}'.format(function_name,\r\n name)\r\n )\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=1,\r\n context='Function name:{0} - address:{1}'.format(function_name,\r\n address)\r\n )\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=1,\r\n context='Function name:{0} - country:{1}'.format(function_name,\r\n country)\r\n )\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=1,\r\n context='Function name:{0} - pname:{1}'.format(function_name,\r\n pname)\r\n )\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=1,\r\n context='Function name:{0} - city name:{1}'.format(function_name,\r\n cityname)\r\n )\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=1,\r\n context='Function name:{0} - adname:{1}'.format(function_name,\r\n adname)\r\n )\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=1,\r\n context='Function name:{0} - street:{1}'.format(function_name,\r\n street)\r\n )\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=1,\r\n context='Function name:{0} - district:{1}'.format(function_name,\r\n district)\r\n )\r\n print(\"您输入的关键字暂未查到具体信息,根据您提供的关键字已搜索到关键字对应的建议地址结果\")\r\n 
print(\"查询的该关键字“{0}”的建议地址结果的名称为{1},在{2}{3}省{4}市\".format(self.keyword, name, country, pname, cityname))\r\n print(\"详细的地址描述是{0},所属区域为{1}{2}\".format(address, adname, street))\r\n\r\n def print_poi(self, poi: dict,\r\n extensions: str\r\n ) -> None:\r\n \"\"\"\r\n 函数:搜索POI信息列表\r\n Args:\r\n poi:POI信息\r\n extensions:查询的poi类型\r\n \"\"\"\r\n\r\n self.extensions = extensions\r\n self.poi = poi\r\n\r\n # 写入日志\r\n writeLog = WriteLog()\r\n class_name = self.__class__.__name__\r\n function_name = inspect.stack()[0][3]\r\n log_filename = writeLog.create_filename(class_name=class_name)\r\n\r\n name = self.poi['name']\r\n poi_type = self.poi['type']\r\n typecode = self.poi['typecode']\r\n biz_type = self.poi['biz_type']\r\n address = self.poi['address']\r\n distance = self.poi['distance']\r\n tel = self.poi['tel']\r\n pname = self.poi['pname']\r\n cityname = self.poi['cityname']\r\n adname = self.poi['adname']\r\n alias = self.poi['alias']\r\n business_area = self.poi['business_area']\r\n\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=1,\r\n context='Function name:{0} - name:{1}'.format(function_name,\r\n name)\r\n )\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=1,\r\n context='Function name:{0} - poi type:{1}'.format(function_name,\r\n poi_type)\r\n )\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=1,\r\n context='Function name:{0} - type code:{1}'.format(function_name,\r\n typecode)\r\n )\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=1,\r\n context='Function name:{0} - biztype:{1}'.format(function_name,\r\n biz_type)\r\n )\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=1,\r\n context='Function name:{0} - address:{1}'.format(function_name,\r\n address)\r\n )\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=1,\r\n context='Function name:{0} - distance:{1}'.format(function_name,\r\n distance)\r\n )\r\n writeLog.write_to_log(file_name=log_filename,\r\n 
log_level=1,\r\n context='Function name:{0} - tel:{1}'.format(function_name,\r\n tel)\r\n )\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=1,\r\n context='Function name:{0} - province name:{1}'.format(function_name,\r\n pname)\r\n )\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=1,\r\n context='Function name:{0} - city name:{1}'.format(function_name,\r\n cityname)\r\n )\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=1,\r\n context='Function name:{0} - adname:{1}'.format(function_name,\r\n adname)\r\n )\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=1,\r\n context='Function name:{0} - alias:{1}'.format(function_name,\r\n alias)\r\n )\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=1,\r\n context='Function name:{0} - business area:{1}'.format(function_name,\r\n business_area)\r\n )\r\n\r\n print(\"=========================================================\")\r\n print(\"您要查询的关键字名称为{0},对应的地址是{1}{2}{3}{4}\\n\".format(name, pname, cityname, adname, address), end='')\r\n if tel:\r\n print(\",电话号码:{0}\".format(tel))\r\n print(\"所查询关键字的类型是{0}\".format(poi_type), end='')\r\n if distance:\r\n print(\"离中心点距离大约{0}米\".format(distance))\r\n if alias:\r\n print(\"它的另外的名称包括{0}\".format(alias), end='')\r\n if business_area:\r\n print(\"所属商圈是{0}\".format(business_area))\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=6,\r\n context='Function name:{0} - POI data successfully print.'.format(function_name)\r\n )\r\n\r\n # extensions为all返回\r\n if self.extensions == \"all\":\r\n postcode = self.poi['postcode']\r\n website = self.poi['website']\r\n email = self.poi['email']\r\n pcode = self.poi['pcode']\r\n adcode = self.poi['adcode']\r\n tag = self.poi['tag']\r\n indoor_map = self.poi['indoor_map']\r\n indoor_data = self.poi['indoor_data']\r\n biz_ext = self.poi['biz_ext']\r\n photos = self.poi['photos']\r\n\r\n # only for debugging\r\n 
writeLog.write_to_log(file_name=log_filename,\r\n log_level=1,\r\n context='Function name:{0} - post code:{1}'.format(function_name,\r\n postcode)\r\n )\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=1,\r\n context='Function name:{0} - website:{1}'.format(function_name,\r\n website)\r\n )\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=1,\r\n context='Function name:{0} - email:{1}'.format(function_name,\r\n email)\r\n )\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=1,\r\n context='Function name:{0} - pcode:{1}'.format(function_name,\r\n pcode)\r\n )\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=1,\r\n context='Function name:{0} - adcode:{1}'.format(function_name,\r\n adcode)\r\n )\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=1,\r\n context='Function name:{0} - tag:{1}'.format(function_name,\r\n tag)\r\n )\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=1,\r\n context='Function name:{0} - indoor map:{1}'.format(function_name,\r\n indoor_map)\r\n )\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=1,\r\n context='Function name:{0} - indoor data:{1}'.format(function_name,\r\n indoor_data)\r\n )\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=1,\r\n context='Function name:{0} - biz ext:{1}'.format(function_name,\r\n biz_ext)\r\n )\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=6,\r\n context='Function name:{0} - photos data successfully get.'.format(function_name)\r\n )\r\n\r\n if website:\r\n print(\",对应的网页地址是{0}\\n\".format(website), end='')\r\n if email:\r\n print(\",邮箱地址是{0}\".format(email))\r\n if photos:\r\n for item, photo in enumerate(photos):\r\n if photo['title']:\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=1,\r\n context='Function name:{0} - photo title:{1}'.format(function_name,\r\n photo['title'])\r\n )\r\n print(\"{0}\".format(photo['title']))\r\n # 图片保存的位置\r\n 
temp_directory = os.getenv('TEMP')\r\n # Photo目录文件夹\r\n list_photo = [temp_directory, 'Photo']\r\n photo_directory = '\\\\'.join(list_photo)\r\n if not os.path.exists(photo_directory):\r\n os.mkdir(photo_directory)\r\n # 子目录(各个名称对应的图片)\r\n list_directory = [temp_directory, 'Photo\\\\{0}'.format(name)]\r\n directory = '\\\\'.join(list_directory)\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=1,\r\n context='Function name:{0} - directory:{1}'.format(function_name,\r\n directory)\r\n )\r\n if not os.path.exists(directory):\r\n os.mkdir(directory)\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=1,\r\n context='Function name:{0} - directory created.'.format(function_name)\r\n )\r\n list_filename = [temp_directory, 'Photo\\\\{0}\\\\{0}{1}.jpg'.format(name, item)]\r\n filename = '\\\\'.join(list_filename)\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=1,\r\n context='Function name:{0} - filename:{1}'.format(function_name,\r\n filename)\r\n )\r\n # 保存图片\r\n self.save_photo(photo['url'], filename=filename)\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=1,\r\n context='Function name:{0} - photo saved.'.format(function_name)\r\n )\r\n # 显示图片,只显示前8张照片\r\n if SearchPOI.open_time < 8:\r\n image = Image.open(filename)\r\n image.show()\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=1,\r\n context='Function name:{0} - open time:{1}'.format(function_name,\r\n SearchPOI.open_time)\r\n )\r\n print(\"已打开图片\" + str(filename)) # TODO:保存到日志文件中\r\n SearchPOI.open_time = SearchPOI.open_time + 1\r\n if SearchPOI.open_time >= 8:\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=1,\r\n context='Function name:{0} - 由于用户指定和本地计算机网络资源的限制,不打开之后从网络下载的图片,请手动打开Windows资源管理器对应的目录查看'.format(\r\n function_name)\r\n )\r\n\r\n def save_photo(self, url: str,\r\n 
filename: str,\r\n num_retries: int = 3\r\n ) -> None:\r\n \"\"\"\r\n 函数:将网页url对应的图片保存到本地目录下\r\n Args:\r\n url:网页图片连接\r\n filename:保存到本地图片的位置\r\n num_retries:重连次数\r\n \"\"\"\r\n\r\n self.filename = filename\r\n self.num_retries = num_retries\r\n self.url = url\r\n\r\n # 写入日志\r\n writeLog = WriteLog()\r\n class_name = self.__class__.__name__\r\n function_name = inspect.stack()[0][3]\r\n log_filename = writeLog.create_filename(class_name=class_name)\r\n\r\n img_src = url\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=1,\r\n context='Function name:{0} - img_src:{1}'.format(function_name,\r\n img_src)\r\n )\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=1,\r\n context='Function name:{0} - filename:{1}'.format(function_name,\r\n self.filename)\r\n )\r\n\r\n header = {\r\n 'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) \\\r\n AppleWebKit/537.36 (KHTML, like Gecko) \\\r\n Chrome/35.0.1916.114 Safari/537.36',\r\n 'Cookie': 'AspxAutoDetectCookieSupport=1'\r\n }\r\n # Request类可以使用给定的header访问URL\r\n result = urllib.request.Request(url=img_src, headers=header)\r\n\r\n try:\r\n response = urllib.request.urlopen(result) # 得到访问的网址\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=6,\r\n context='Function name:{0} - response successfully'.format(function_name)\r\n )\r\n with open(self.filename, 'wb') as file:\r\n content = response.read() # 获得图片\r\n file.write(content) # 保存图片\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=6,\r\n context='Function name:{0} - picture saved successfully'.format(function_name)\r\n )\r\n except HTTPError as e: # HTTP响应异常处理\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=3,\r\n context='Function name:{0} - error reason:{1}'.format(function_name,\r\n e.reason)\r\n )\r\n except URLError as e: # 一定要放到HTTPError之后,因为它包含了前者\r\n # only for debugging\r\n 
writeLog.write_to_log(file_name=log_filename,\r\n log_level=3,\r\n context='Function name:{0} - error reason:{1}'.format(function_name,\r\n e.reason)\r\n )\r\n except IncompleteRead or RemoteDisconnected:\r\n if self.num_retries == 0: # 重连机制\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=1,\r\n context='Function name:{0} - retries time:{1}'.format(function_name,\r\n self.num_retries)\r\n )\r\n return\r\n else:\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=1,\r\n context='Function name:{0} - retries time:{1}'.format(function_name,\r\n self.num_retries)\r\n )\r\n self.save_photo(url, self.filename, self.num_retries - 1)\r\n" }, { "alpha_fraction": 0.4085465669631958, "alphanum_fraction": 0.4149220287799835, "avg_line_length": 45.375511169433594, "blob_id": "8bcdfb5e65e73f0389456435325d88d07b57e0e8", "content_id": "8b2e931daea39c12068d3c69ee2191069d9123e2", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 12287, "license_type": "permissive", "max_line_length": 117, "num_lines": 245, "path": "/AmapFunctions/LocationTransformation.py", "repo_name": "Gaoyifei1011/AmapProgram", "src_encoding": "UTF-8", "text": "# -*- coding:utf-8 -*-\r\n# 导入的库\r\nimport inspect\r\nimport json\r\nimport time\r\nfrom typing import Any\r\n\r\nimport requests\r\n\r\nfrom SelfExpection.CustomExpection import CustomExpection\r\nfrom SelfExpection.OfficialException import OfficialException\r\nfrom logrecord.WriteLog import WriteLog\r\n\r\n\r\nclass LocationTransformation:\r\n \"\"\"\r\n Class:坐标转换(高德地图坐标转换为百度地图)\r\n 坐标转换是一类简单的HTTP接口,能够将用户输入的非高德坐标(GPS坐标、mapBar坐标、baidu坐标)转换成高德坐标\r\n \"\"\"\r\n\r\n def __init__(self) -> None:\r\n self.coordsys = None\r\n self.json_decode = None\r\n self.locations = None\r\n\r\n # 写入日志\r\n writeLog = WriteLog()\r\n class_name = self.__class__.__name__\r\n log_filename = 
writeLog.create_filename(class_name=class_name)\r\n writeLog.write_to_log(file_name=log_filename, log_level=1, context='Class name:{0} start'.format(class_name))\r\n\r\n # 获取高德地图数据API的钥匙\r\n APIkey = '<请自己输入自己申请的API Key>'\r\n\r\n def get_location_transform(self, locations: str,\r\n **kwargs: dict[str, Any]\r\n ) -> dict:\r\n \"\"\"\r\n 函数:获取转换格式后的地理位置数据\r\n Args:\r\n locations:坐标点,经度和纬度用“,”分割,经度在前,纬度在后,经纬度小数点后不得超过6位。多个坐标对之间用“|”进行分隔最多支持40对坐标\r\n kwargs:\r\n coordsys:原坐标系,可选值:gps;mapBar;baidu;autonavi(不进行转换)。默认autonavi\r\n Returns:转换格式后的地理位置数据\r\n \"\"\"\r\n\r\n self.locations = locations\r\n\r\n if 'coordsys' in kwargs:\r\n self.coordsys = kwargs['coordsys']\r\n\r\n # 写入日志\r\n writeLog = WriteLog()\r\n class_name = self.__class__.__name__\r\n function_name = inspect.stack()[0][3]\r\n log_filename = writeLog.create_filename(class_name=class_name)\r\n\r\n # 传入参数\r\n parameters = {'key': self.APIkey,\r\n 'locations': self.locations,\r\n }\r\n\r\n if self.coordsys is not None:\r\n parameters.update(coordsys=self.coordsys)\r\n\r\n # 获取数据\r\n try:\r\n request_information = requests.get(\"https://restapi.amap.com/v3/assistant/coordinate/convert?parameters\",\r\n params=parameters)\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=1,\r\n context='Function name:{0} - request_information:{1}'.format(function_name,\r\n request_information)\r\n )\r\n request_information.close() # 关闭访问\r\n request_information.raise_for_status() # 如果响应状态码不是 200,就主动抛出异常\r\n # 返回格式化后的JSON数据\r\n json_decode = json.loads(request_information.text)\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=6,\r\n context='Function name:{0} - IP Location data successful get.'.format(\r\n function_name)\r\n )\r\n return json_decode\r\n\r\n except requests.exceptions.ConnectionError as e:\r\n time.sleep(1)\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=5,\r\n context='Function name:{0} - {1} has 
occured.'.format(function_name,\r\n e.__class__.__name__)\r\n )\r\n # 异常信息\r\n error_connection = 'ConnectionError -- please wait 3 seconds'\r\n error_connection_dict = {'status': '2',\r\n 'info': 'requests.exceptions.ConnectionError',\r\n 'detail_information': requests.exceptions.ConnectionError,\r\n 'error_prompt': error_connection\r\n }\r\n return error_connection_dict\r\n\r\n except requests.exceptions.ChunkedEncodingError as e:\r\n time.sleep(1)\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=5,\r\n context='Function name:{0} - {1} has occured.'.format(function_name,\r\n e.__class__.__name__\r\n )\r\n )\r\n # 异常信息\r\n error_chuck_encoding = 'ChunkedEncodingError -- please wait 3 seconds'\r\n error_chuck_encoding_dict = {'status': '2',\r\n 'info': 'HTTPError',\r\n 'detail_information': requests.exceptions.ChunkedEncodingError,\r\n 'error_prompt': error_chuck_encoding\r\n }\r\n return error_chuck_encoding_dict\r\n\r\n except Exception as e:\r\n time.sleep(1)\r\n error_information = 'Unfortunately -- An unknown Error Happened, Please wait 3 seconds'\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=5,\r\n context='Function name:{0} - {1} has occured.'.format(function_name,\r\n e.__class__.__name__)\r\n )\r\n # 异常信息\r\n error_information_dict = {'status': '2',\r\n 'info': 'HTTPError',\r\n 'detail_information': requests.exceptions.ChunkedEncodingError,\r\n 'error_prompt': error_information\r\n }\r\n return error_information_dict\r\n\r\n def parse_location_transform(self, json_decode: dict\r\n ) -> list:\r\n \"\"\"\r\n 函数:解析转换格式后的地理位置数据。\r\n Args:\r\n json_decode:get_ip_location()方法从网络中获取的数据\r\n Returns:\r\n 返回获取到的IP地址信息\r\n \"\"\"\r\n\r\n self.json_decode = json_decode\r\n\r\n # 输出结果\r\n resultContext = []\r\n\r\n # 写入日志\r\n writeLog = WriteLog()\r\n class_name = self.__class__.__name__\r\n function_name = inspect.stack()[0][3]\r\n log_filename = 
writeLog.create_filename(class_name=class_name)\r\n\r\n try:\r\n if self.json_decode['status'] == '0':\r\n # 官方文档异常\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=3,\r\n context='Function name:{0} - status:{1}'.format(function_name,\r\n self.json_decode['status'])\r\n )\r\n raise OfficialException\r\n\r\n elif self.json_decode['status'] == '2':\r\n # 自定义异常\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=3,\r\n context='Function name:{0} - status:{1}'.format(function_name,\r\n self.json_decode['status'])\r\n )\r\n raise CustomExpection\r\n\r\n elif self.json_decode['status'] == '1':\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=6,\r\n context='Function name:{0} - status:{1}'.format(function_name,\r\n self.json_decode['status'])\r\n )\r\n if self.json_decode['infocode'] == \"10000\": # 请求数据成功的状态码\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=6,\r\n context='Function name:{0} - infocode:{1}'.format(function_name,\r\n self.json_decode[\r\n 'infocode'])\r\n )\r\n locations = self.json_decode['locations']\r\n resultContext.append(locations)\r\n return resultContext\r\n\r\n except OfficialException as officialException:\r\n # 获得的错误信息\r\n errcode, errorInfo, solution = officialException.get_error_info(self.json_decode)\r\n # 打印到日志文件中\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=3,\r\n context='Function name:{0} - errcode:{1}'.format(function_name,\r\n errcode)\r\n )\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=3,\r\n context='Function name:{0} - errorInfo:{1}'.format(function_name,\r\n errorInfo)\r\n )\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=3,\r\n context='Function name:{0} - solution:{1}'.format(function_name,\r\n solution)\r\n )\r\n context = \"Error\"\r\n resultContext.append(context)\r\n return resultContext\r\n\r\n 
except CustomExpection as customException:\r\n info, detail_information, error_prompt = customException.get_error_info(self.json_decode)\r\n # 打印到日志文件中\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=3,\r\n context='Function name:{0} - info:{1}'.format(function_name,\r\n info)\r\n )\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=3,\r\n context='Function name:{0} - detail_information:{1}'.format(function_name,\r\n detail_information)\r\n )\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=3,\r\n context='error_prompt:{0}'.format(error_prompt)\r\n )\r\n context = \"Error\"\r\n resultContext.append(context)\r\n return resultContext\r\n" }, { "alpha_fraction": 0.6666666865348816, "alphanum_fraction": 0.670634925365448, "avg_line_length": 20.909090042114258, "blob_id": "193bb04d1cf84e8b62ddc22dd03f76d437b28bd0", "content_id": "68c6f1b34b6e4cfeab1a63860b03607b490cb0e0", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 252, "license_type": "permissive", "max_line_length": 43, "num_lines": 11, "path": "/main.py", "repo_name": "Gaoyifei1011/AmapProgram", "src_encoding": "UTF-8", "text": "import sys\r\n\r\nfrom PyQt5 import QtWidgets\r\n\r\nfrom LoginMainWindow import LoginMainWindow\r\n\r\nif __name__ == '__main__':\r\n app = QtWidgets.QApplication(sys.argv)\r\n loginWindow = LoginMainWindow()\r\n loginWindow.show()\r\n sys.exit(app.exec_())\r\n" }, { "alpha_fraction": 0.44516050815582275, "alphanum_fraction": 0.4499027132987976, "avg_line_length": 32.41004180908203, "blob_id": "58061cd0d784a1e2b6037be60aea76567ef044dc", "content_id": "8eb6c14147625ca448340fe1913fbdc31ae1a7f8", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8798, "license_type": "permissive", "max_line_length": 117, "num_lines": 239, "path": "/FundamentalFunctions/AccountOperation.py", "repo_name": 
"Gaoyifei1011/AmapProgram", "src_encoding": "UTF-8", "text": "import inspect\r\nimport os\r\nimport pathlib\r\n\r\nfrom logrecord.WriteLog import WriteLog\r\n\r\n\r\nclass AccountOperation:\r\n \"\"\"\r\n Class:账户登录操作\r\n \"\"\"\r\n\r\n def __init__(self):\r\n self.directory = None\r\n self.file = None\r\n self.passwordText = None\r\n self.userInformationFile = None\r\n self.userNameText = None\r\n\r\n # 写入日志\r\n writeLog = WriteLog()\r\n class_name = self.__class__.__name__\r\n log_filename = writeLog.create_filename(class_name=class_name)\r\n writeLog.write_to_log(file_name=log_filename, log_level=1, context='Class name:{0} start'.format(class_name))\r\n\r\n def check_user_name(self, userNameText: str\r\n ) -> int:\r\n \"\"\"\r\n 函数:检测用户提供的账号是否符合规范要求\r\n Args:\r\n userNameText: 用户输入账号\r\n Returns:\r\n 检测类型识别码\r\n \"\"\"\r\n\r\n self.userNameText = userNameText\r\n\r\n # 写入日志\r\n writeLog = WriteLog()\r\n class_name = self.__class__.__name__\r\n function_name = inspect.stack()[0][3]\r\n log_filename = writeLog.create_filename(class_name=class_name)\r\n\r\n # 检查结果\r\n checkedResult = self.userNameText is None or self.userNameText == ''\r\n\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=1,\r\n context='Function name:{0} - user name login result:{1}'.format(function_name,\r\n checkedResult)\r\n )\r\n\r\n if checkedResult:\r\n return 2\r\n # TODO:\r\n # 使用python正则表达式验证用户名格式\r\n # 此处检测格式错误返回false\r\n else:\r\n return True\r\n\r\n def check_password(self, passwordText: str\r\n ) -> int:\r\n \"\"\"\r\n 函数:检测用户提供的账号是否符合规范要求\r\n Args:\r\n passwordText: 用户输入的密码\r\n Returns:\r\n 检测类型识别码\r\n \"\"\"\r\n\r\n self.passwordText = passwordText\r\n\r\n # 写入日志\r\n writeLog = WriteLog()\r\n class_name = self.__class__.__name__\r\n function_name = inspect.stack()[0][3]\r\n log_filename = writeLog.create_filename(class_name=class_name)\r\n\r\n # 检查结果\r\n checkedResult = self.passwordText is None or self.passwordText == ''\r\n\r\n # only for 
debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=1,\r\n context='Function name:{0} - password login result:{1}'.format(function_name,\r\n checkedResult)\r\n )\r\n\r\n if checkedResult:\r\n return 2\r\n # TODO:\r\n # 使用python正则表达式验证密码格式\r\n # 此处检测格式错误返回false\r\n else:\r\n return True\r\n\r\n def create_directory(self, directory: str\r\n ) -> None:\r\n \"\"\"\r\n 函数:创建记录用户账号及密码的目录\r\n Args:\r\n directory: 系统指定的目录\r\n \"\"\"\r\n\r\n self.directory = directory\r\n\r\n # 写入日志\r\n writeLog = WriteLog()\r\n class_name = self.__class__.__name__\r\n function_name = inspect.stack()[0][3]\r\n log_filename = writeLog.create_filename(class_name=class_name)\r\n\r\n try:\r\n # 指定的目录不存在,创建\r\n if os.path.exists(self.directory):\r\n if os.path.isfile(self.directory):\r\n os.remove(self.directory)\r\n os.mkdir(self.directory)\r\n\r\n except FileNotFoundError:\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=6,\r\n context='Function name:{0} - The system could not find the specified path.'.format(\r\n function_name)\r\n )\r\n\r\n except Exception:\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=6,\r\n context='Function name:{0} - Unknown Error.'.format(\r\n function_name)\r\n )\r\n\r\n def create_file(self, file: str\r\n ) -> None:\r\n \"\"\"\r\n 函数:创建记录用户账号及密码的文件\r\n Args:\r\n file: 系统指定的文件\r\n \"\"\"\r\n\r\n self.file = file\r\n\r\n # 写入日志\r\n writeLog = WriteLog()\r\n class_name = self.__class__.__name__\r\n function_name = inspect.stack()[0][3]\r\n log_filename = writeLog.create_filename(class_name=class_name)\r\n\r\n try:\r\n # 指定的目录不存在,创建\r\n if os.path.exists(self.file):\r\n if os.path.isdir(self.file):\r\n os.removedirs(self.file)\r\n pathlib.Path(self.file).touch()\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=6,\r\n context='Function name:{0} - File {0} successfully created.'.format(\r\n self.file)\r\n )\r\n\r\n except 
FileNotFoundError:\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=6,\r\n context='Function name:{0} - File does not exist.'.format(\r\n function_name)\r\n )\r\n\r\n except LookupError:\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=6,\r\n context='Function name:{0} - Error specifying encoding.'.format(\r\n function_name)\r\n )\r\n\r\n except UnicodeDecodeError:\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=6,\r\n context='Function name:{0} - Decoding error while reading file.'.format(\r\n function_name)\r\n )\r\n\r\n except Exception:\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=6,\r\n context='Function name:{0} - Unknown Error.'.format(\r\n function_name)\r\n )\r\n\r\n def login(self, userInformationFile: str,\r\n userNameText: str,\r\n passwordText: str\r\n ) -> bool:\r\n \"\"\"\r\n 函数:进行账户登录,验证登录的账号及密码是否正确\r\n Args:\r\n userInformationFile: 记录用户账号及密码的文件\r\n userNameText: 用户输入的账号\r\n passwordText: 用户输入的密码\r\n Returns:\r\n 检测账号和密码是否正确的识别码\r\n \"\"\"\r\n\r\n self.userInformationFile = userInformationFile\r\n self.userNameText = userNameText\r\n self.passwordText = passwordText\r\n\r\n # 写入日志\r\n writeLog = WriteLog()\r\n class_name = self.__class__.__name__\r\n function_name = inspect.stack()[0][3]\r\n log_filename = writeLog.create_filename(class_name=class_name)\r\n\r\n flag = False\r\n with open(self.userInformationFile, 'r', encoding='utf-8') as file:\r\n lines = file.readlines()\r\n for line in lines:\r\n line = line.strip(\"\\n\")\r\n userName, password = line.split(',')\r\n if self.userNameText == userName and self.passwordText == password:\r\n flag = True\r\n break\r\n\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=6,\r\n context='Function name:{0} - login checked result \"{1}\".'.format(\r\n function_name, flag)\r\n )\r\n\r\n return flag\r\n" }, { 
"alpha_fraction": 0.5172709226608276, "alphanum_fraction": 0.5194757580757141, "avg_line_length": 36.876190185546875, "blob_id": "47b2b13732892f3e0e2b827e9688a61996044bb5", "content_id": "0c1ba28fccbe76fbef15bc25216ab280a642239c", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9490, "license_type": "permissive", "max_line_length": 120, "num_lines": 210, "path": "/LoginMainWindow.py", "repo_name": "Gaoyifei1011/AmapProgram", "src_encoding": "UTF-8", "text": "import inspect\r\nimport os.path\r\nfrom typing import Any\r\n\r\nfrom PyQt5 import QtWidgets\r\n\r\nfrom FundamentalFunctions.AccountOperation import AccountOperation\r\nfrom MainWindow import MainWindow\r\nfrom Window.MessageBoxUI import SelfMessageBox\r\nfrom Window.loginUI import Ui_AmapLoginUI\r\nfrom logrecord.WriteLog import WriteLog\r\nfrom Resources.Icon.Icon import *\r\n\r\n\r\nclass LoginMainWindow(QtWidgets.QMainWindow, Ui_AmapLoginUI):\r\n \"\"\"\r\n 函数:主窗口界面函数LoginMainWindow\r\n \"\"\"\r\n\r\n def __init__(self, parent=None):\r\n \"\"\"\r\n 函数:登录窗口界面组件初始化\r\n Args:\r\n parent:arent作为构造函数的最后一个参数被传入,但通常情况下不必显示去指定parent对象。因为当调用局管理器时,部局管理器会自动处理这种parent-child关系。\r\n \"\"\"\r\n\r\n # 对继承自父类的属性进行初始化\r\n super(LoginMainWindow, self).__init__()\r\n self.setupUi(self)\r\n\r\n # 写入日志\r\n writeLog = WriteLog()\r\n class_name = self.__class__.__name__\r\n log_filename = writeLog.create_filename(class_name=class_name)\r\n writeLog.write_to_log(file_name=log_filename, log_level=1, context='Class name:{0} start'.format(class_name))\r\n\r\n # 用户账号名称和密码初始化\r\n self.userNameText = None\r\n self.passwordText = None\r\n\r\n # 用户账号密码输入框内容变化监听器\r\n self.userNameLineEdit.textChanged[str].connect(self.userNameLineEditTextChanged)\r\n self.passwordLineEdit.textChanged[str].connect(self.passwordLineEditTextChanged)\r\n\r\n # 用户账号密码输入框按下回车键(Enter)监听器\r\n self.userNameLineEdit.returnPressed.connect(\r\n lambda: self.loginEventHandler(self.userNameText, 
self.passwordText))\r\n self.passwordLineEdit.returnPressed.connect(\r\n lambda: self.loginEventHandler(self.userNameText, self.passwordText))\r\n # 登录按钮点击触发器\r\n self.loginInButton.clicked.connect(lambda: self.loginEventHandler(self.userNameText, self.passwordText))\r\n\r\n def userNameLineEditTextChanged(self, text: Any\r\n ) -> None:\r\n \"\"\"\r\n 函数:事件监听器:检测userNameLineEdit窗口文字发生变化时做出相应的操作\r\n Args:\r\n text: 从userNameLineEdit中即时获取用户的输入文本\r\n \"\"\"\r\n self.userNameText = text\r\n\r\n def passwordLineEditTextChanged(self, text):\r\n \"\"\"\r\n 函数:事件监听器:检测passwordLineEdit窗口文字发生变化时做出相应的操作\r\n Args:\r\n text: 从passwordLineEdit中即时获取用户的输入文本\r\n \"\"\"\r\n self.passwordText = text\r\n\r\n def loginEventHandler(self, userNameText: str,\r\n passwordText: str\r\n ) -> None:\r\n \"\"\"\r\n 函数:事件处理器:检测用户的登录操作\r\n Args:\r\n userNameText:用户的登录内容\r\n passwordText:用户的密码内容\r\n \"\"\"\r\n\r\n self.userNameText = userNameText\r\n self.passwordText = passwordText\r\n\r\n # 写入日志\r\n writeLog = WriteLog()\r\n class_name = self.__class__.__name__\r\n function_name = inspect.stack()[0][3]\r\n log_filename = writeLog.create_filename(class_name=class_name)\r\n\r\n # 用户操作实例化\r\n accountOperation = AccountOperation()\r\n # 检测用户登录账号和密码内容的格式\r\n userNameCheckedResult = accountOperation.check_user_name(self.userNameText)\r\n passwordCheckedResult = accountOperation.check_password(self.passwordText)\r\n\r\n if userNameCheckedResult == 2:\r\n # 账号输入框内容为空\r\n # 消息框初始化(自定义消息框)\r\n selfMessageBox = SelfMessageBox()\r\n # 自定义字体\r\n font = selfMessageBox.selfDefineFont()\r\n # 自定义消息等级\r\n level = selfMessageBox.messageLevel(1)\r\n # 消息框界面初始化\r\n selfMessageBox.initUI(self, font=font,\r\n level=level,\r\n title=\"账号输入提示\",\r\n information=\"请您输入账号后再登录\",\r\n icon=\":/About.png\"\r\n )\r\n\r\n elif userNameCheckedResult == 0:\r\n # 账号输入框中账号格式不正确\r\n # 账号输入框内容为空\r\n # 消息框初始化(自定义消息框)\r\n selfMessageBox = SelfMessageBox()\r\n # 自定义字体\r\n font = selfMessageBox.selfDefineFont()\r\n # 自定义消息等级\r\n level 
= selfMessageBox.messageLevel(3)\r\n # 消息框界面初始化\r\n selfMessageBox.initUI(self, font=font,\r\n level=level,\r\n title=\"账号输入提示\",\r\n information=\"您输入的账号格式不正确\",\r\n icon=\":/Warning.png\"\r\n )\r\n\r\n elif passwordCheckedResult == 2:\r\n # 密码输入框内容为空\r\n selfMessageBox = SelfMessageBox()\r\n font = selfMessageBox.selfDefineFont()\r\n level = selfMessageBox.messageLevel(1)\r\n selfMessageBox.initUI(self, font=font,\r\n level=level,\r\n title=\"密码输入提示\",\r\n information=\"请您输入密码后再登录\",\r\n icon=\":/About.png\"\r\n )\r\n\r\n elif passwordCheckedResult == 0:\r\n # 密码输入框中账号格式不正确\r\n # 账号输入框内容为空\r\n # 消息框初始化(自定义消息框)\r\n selfMessageBox = SelfMessageBox()\r\n # 自定义消息等级\r\n font = selfMessageBox.selfDefineFont()\r\n # 自定义消息等级\r\n level = selfMessageBox.messageLevel(3)\r\n # 消息框界面初始化\r\n selfMessageBox.initUI(self, font=font,\r\n level=level,\r\n title=\"密码输入提示\",\r\n information=\"您输入的密码格式不正确\",\r\n icon=\":/Warning.png\"\r\n )\r\n\r\n elif userNameCheckedResult and passwordCheckedResult:\r\n # 用户账号密码格式正确,尝试登录\r\n # 用户信息存储根目录(目录不存在则创建)\r\n\r\n # TODO:优化路径创建,采用递归方式创建\r\n # 文件保存路径\r\n local_appdata_directory = os.getenv('LOCALAPPDATA')\r\n\r\n # 根目录\r\n temp_directory = '\\\\'.join([local_appdata_directory, 'AmapProgram'])\r\n # 目录不存在,创建\r\n if not os.path.exists(temp_directory):\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=6,\r\n context='Function name:{0} - directory successfully created'.format(function_name)\r\n )\r\n os.mkdir(temp_directory)\r\n\r\n # 账户数据目录\r\n account_directory = '\\\\'.join([temp_directory, 'Account'])\r\n # 目录不存在,创建\r\n if not os.path.exists(account_directory):\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=6,\r\n context='Function name:{0} - directory successfully created'.format(function_name)\r\n )\r\n accountOperation.create_directory(account_directory)\r\n\r\n userInformationDirectory = os.path.join(account_directory, 'UserInformation')\r\n 
accountOperation.create_directory(userInformationDirectory)\r\n\r\n # 用户信息文件(文件不存在则创建)\r\n userInformationFile = os.path.join(userInformationDirectory, 'userinformation')\r\n accountOperation.create_file(userInformationFile)\r\n\r\n # 进行登录操作\r\n checkResult = accountOperation.login(userInformationFile, self.userNameText, self.passwordText)\r\n if checkResult:\r\n self.hide() # 隐藏登录界面\r\n self.mainWindow = MainWindow()\r\n self.mainWindow.show()\r\n else:\r\n # 登录失败,弹出对话框\r\n selfMessageBox = SelfMessageBox()\r\n font = selfMessageBox.selfDefineFont()\r\n level = selfMessageBox.messageLevel(3)\r\n selfMessageBox.initUI(self, font=font,\r\n level=level,\r\n title=\"账号输入信息\",\r\n information=\"您输入的账号或密码有误\",\r\n icon=\":/Warning.png\"\r\n )\r\n" }, { "alpha_fraction": 0.5409985780715942, "alphanum_fraction": 0.5446486473083496, "avg_line_length": 40.61666488647461, "blob_id": "73df8034c5a3f8468127e1f68ddc857150021ad5", "content_id": "9e12f5c484b4a89e0b241cbcd9db6d14587ea7d9", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8313, "license_type": "permissive", "max_line_length": 138, "num_lines": 180, "path": "/FundamentalFunctions/WalkingRoutePlanningOperation.py", "repo_name": "Gaoyifei1011/AmapProgram", "src_encoding": "UTF-8", "text": "import inspect\r\n\r\nfrom AmapFunctions.GeographicCoding import GeographicCoding\r\nfrom AmapFunctions.RoutePlanning import RoutePlanning\r\nfrom logrecord.WriteLog import WriteLog\r\n\r\n\r\nclass WalkingRoutePlanningOperation:\r\n \"\"\"\r\n Class:步行路径规划操作\r\n \"\"\"\r\n def __init__(self):\r\n self.walkingDepartureAddress = None\r\n self.walkingDestinationAddress = None\r\n self.walkingDepartureCity = None\r\n self.walkingDestinationCity = None\r\n\r\n # 写入日志\r\n writeLog = WriteLog()\r\n class_name = self.__class__.__name__\r\n log_filename = writeLog.create_filename(class_name=class_name)\r\n writeLog.write_to_log(file_name=log_filename, log_level=1, context='Class 
name:{0} start'.format(class_name))\r\n\r\n def check_walking_departure_information(self, walkingDepartureAddress: str,\r\n ) -> int:\r\n \"\"\"\r\n 函数:检测用户提供的步行路径出发点是否符合规范要求\r\n Args:\r\n walkingDepartureAddress: 用户输入的出发点\r\n Returns:\r\n 检测类型识别码\r\n \"\"\"\r\n\r\n self.walkingDepartureAddress = walkingDepartureAddress\r\n\r\n # 写入日志\r\n writeLog = WriteLog()\r\n class_name = self.__class__.__name__\r\n function_name = inspect.stack()[0][3]\r\n log_filename = writeLog.create_filename(class_name=class_name)\r\n\r\n # 检测结果\r\n checkedResult = self.walkingDepartureAddress is None or self.walkingDepartureAddress == ''\r\n\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=1,\r\n context='Function name:{0} - walking departure address check result:{1}'.format(\r\n function_name,\r\n checkedResult)\r\n )\r\n\r\n if checkedResult:\r\n return 2\r\n # TODO:\r\n # 使用python正则表达式验证用户名格式\r\n # 此处检测格式错误返回false\r\n else:\r\n return True\r\n\r\n def check_walking_destination_information(self, walkingDestinationAddress: str\r\n ) -> int:\r\n \"\"\"\r\n 函数:检测用户提供的步行路径终点是否符合规范要求\r\n Args:\r\n walkingDestinationAddress: 用户输入的终点\r\n Returns:\r\n 检测类型识别码\r\n \"\"\"\r\n\r\n # 写入日志\r\n writeLog = WriteLog()\r\n class_name = self.__class__.__name__\r\n function_name = inspect.stack()[0][3]\r\n log_filename = writeLog.create_filename(class_name=class_name)\r\n\r\n self.walkingDestinationAddress = walkingDestinationAddress\r\n\r\n # 检测结果\r\n checkedResult = self.walkingDestinationAddress is None or self.walkingDestinationAddress == ''\r\n\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=1,\r\n context='Function name:{0} - walking destination address check result:{1}'.format(\r\n function_name,\r\n checkedResult)\r\n )\r\n\r\n if checkedResult:\r\n return 2\r\n # TODO:\r\n # 检测用户提供的步行路径出发点是否符合规范要求\r\n # 此处检测格式错误返回false\r\n else:\r\n return True\r\n\r\n def get_walking_route_planning_information(self, 
walkingDepartureAddress: str,\r\n walkingDestinationAddress: str,\r\n walkingDepartureCity: str = '',\r\n walkingDestinationCity: str = ''\r\n ) -> list:\r\n \"\"\"\r\n 函数:获取步行路径规划的具体信息\r\n Args:\r\n walkingDepartureAddress: 用户输入的出发点\r\n walkingDestinationAddress: 用户输入的终点\r\n walkingDepartureCity: 用户输入的出发点对应的城市\r\n walkingDestinationCity: 用户输入的终点对应的城市\r\n Returns:\r\n 返回获取的步行路径规划对应的具体信息\r\n \"\"\"\r\n\r\n # TODO:未来版本将返回数据从str升级为dict\r\n self.walkingDepartureAddress = walkingDepartureAddress\r\n self.walkingDestinationAddress = walkingDestinationAddress\r\n # 在以后的版本中添加\r\n self.walkingDepartureCity = walkingDepartureCity\r\n self.walkingDestinationCity = walkingDestinationCity\r\n\r\n # 写入日志\r\n writeLog = WriteLog()\r\n class_name = self.__class__.__name__\r\n function_name = inspect.stack()[0][3]\r\n log_filename = writeLog.create_filename(class_name=class_name)\r\n\r\n # 步行路径规划\r\n geographicCoding = GeographicCoding()\r\n # 获取起点终点对应的初始编码信息\r\n # TODO:优化city参数\r\n walkingDepartureJsonDecode = geographicCoding.get_geographic_coding(address=self.walkingDepartureAddress,\r\n city='')\r\n walkingDestinationJsonDecode = geographicCoding.get_geographic_coding(address=self.walkingDestinationAddress,\r\n city='')\r\n\r\n parseWalkingDepartureInformation = geographicCoding.parse_geographic_coding(walkingDepartureJsonDecode)\r\n parseWalkingDestinationInformation = geographicCoding.parse_geographic_coding(walkingDestinationJsonDecode)\r\n\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=1,\r\n context='Function name:{0} - walking departure information:{1}'.format(function_name,\r\n parseWalkingDepartureInformation)\r\n )\r\n\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=1,\r\n context='Function name:{0} - walking destination information:{1}'.format(function_name,\r\n parseWalkingDestinationInformation)\r\n )\r\n\r\n # 起点位置编码\r\n if 'error_context' not in parseWalkingDepartureInformation:\r\n 
resultDepartureGeographicCoding = parseWalkingDepartureInformation['geographic_position']\r\n else:\r\n return [parseWalkingDepartureInformation['error_context']]\r\n\r\n # 终点位置编码\r\n if 'error_context' not in parseWalkingDestinationInformation:\r\n resultDestinationGeographicCoding = parseWalkingDestinationInformation['geographic_position']\r\n else:\r\n return [parseWalkingDestinationInformation['error_context']]\r\n\r\n routePlanning = RoutePlanning()\r\n walkingRoutePlanning = routePlanning.get_walking_route_planning(origin=resultDepartureGeographicCoding,\r\n destination=resultDestinationGeographicCoding)\r\n\r\n # 解析路径规划信息\r\n resultWalkingRoutePlanning = routePlanning.parse_walking_route_planning(walkingRoutePlanning)\r\n promptInformation = \"从{0}到{1}的步行导航信息如下所示\".format(self.walkingDepartureAddress, self.walkingDestinationAddress)\r\n resultWalkingRoutePlanning.insert(0, promptInformation)\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=1,\r\n context='Function name:{0} - result walking route planning:{1}'.format(function_name,\r\n resultWalkingRoutePlanning)\r\n )\r\n\r\n return resultWalkingRoutePlanning\r\n" }, { "alpha_fraction": 0.39633840322494507, "alphanum_fraction": 0.4059401750564575, "avg_line_length": 50.41484832763672, "blob_id": "e722535a9216c79946f417243f756300d54e8eed", "content_id": "c17da96b0fb69d7fbdf73c4321193b3832415388", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 105616, "license_type": "permissive", "max_line_length": 196, "num_lines": 1832, "path": "/AmapFunctions/RoutePlanning.py", "repo_name": "Gaoyifei1011/AmapProgram", "src_encoding": "UTF-8", "text": "# -*- coding:utf-8 -*-\r\n# 导入的库\r\nimport datetime\r\nimport inspect\r\nimport json\r\nimport time\r\n\r\nimport requests\r\n\r\nfrom AmapFunctions.GeographicCoding import GeographicCoding # 导入地理编码模块\r\nfrom SelfExpection import CustomExpection\r\nfrom 
SelfExpection.OfficialException import OfficialException\r\nfrom logrecord.WriteLog import WriteLog\r\n\r\n\r\nclass RoutePlanning:\r\n \"\"\"\r\n Class:路径规划\r\n 无需展现地图的场景下,进行线路查询,如以线路结果页形式展现换乘方案\r\n 根据返回线路数据,自行开发线路导航\r\n \"\"\"\r\n\r\n def __init__(self) -> None:\r\n self.avoidPolygons = None\r\n self.avoidRoad = None\r\n self.batch = None\r\n self.busLine = None\r\n self.bus_entrance = None\r\n self.bus_exit = None\r\n self.bus_time = None\r\n self.city = None\r\n self.cityd = None\r\n self.carType = None\r\n self.date = None\r\n self.destination = None\r\n self.destinationId = None\r\n self.destinationType = None\r\n self.extensions = None\r\n self.ferry = None\r\n self.json_decode = None\r\n self.nightFlag = None\r\n self.noSteps = None\r\n self.number = None\r\n self.origin = None\r\n self.originId = None\r\n self.originType = None\r\n self.output = None\r\n self.province = None\r\n self.railway = None\r\n self.roadAggregation = None\r\n self.segment = None\r\n self.strategy = None\r\n self.waypoints = None\r\n\r\n # 写入日志\r\n writeLog = WriteLog()\r\n class_name = self.__class__.__name__\r\n log_filename = writeLog.create_filename(class_name=class_name)\r\n writeLog.write_to_log(file_name=log_filename, log_level=1, context='Class name:{0} start'.format(class_name))\r\n\r\n # 获取高德地图数据API的钥匙\r\n APIkey = '<请自己输入自己申请的API Key>'\r\n\r\n def get_walking_route_planning(self, origin: str,\r\n destination: str,\r\n **kwargs\r\n ) -> dict:\r\n \"\"\"\r\n 函数:获取步行路径规划数据。\\n\r\n Args:\r\n origin:出发点,必填。规则: lon,lat(经度,纬度), “,”分割,如117.500244, 40.417801。经纬度小数点不超过6位。\r\n destination:目的地,必填。规则: lon,lat(经度,纬度), “,”分割,如117.500244, 40.417801。经纬度小数点不超过6位。\r\n kwargs:\r\n output:返回数据格式类型,可选,默认JSON格式。可选值:JSON,XML\r\n Returns:返回获得的json格式数据或错误信息\r\n \"\"\"\r\n\r\n self.destination = destination\r\n self.origin = origin\r\n\r\n if 'output' in kwargs:\r\n self.output = kwargs['output']\r\n\r\n # 写入日志\r\n writeLog = WriteLog()\r\n class_name = self.__class__.__name__\r\n function_name = 
inspect.stack()[0][3]\r\n log_filename = writeLog.create_filename(class_name=class_name)\r\n\r\n # 传入参数\r\n parameters = {'key': self.APIkey,\r\n 'origin': self.origin,\r\n 'destination': self.destination,\r\n }\r\n\r\n if self.output is not None:\r\n parameters.update(output=self.output)\r\n\r\n # 获取数据\r\n try:\r\n request_information = requests.get(\"https://restapi.amap.com/v3/direction/walking?parameters\",\r\n params=parameters)\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=1,\r\n context='Function name:{0} - request_information:{1}'.format(function_name,\r\n request_information)\r\n )\r\n request_information.close() # 关闭访问\r\n request_information.raise_for_status() # 如果响应状态码不是 200,就主动抛出异常\r\n # 返回格式化后的JSON数据\r\n json_decode = json.loads(request_information.text)\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=6,\r\n context='Function name:{0} - Walking route Planning data successful get.'.format(\r\n function_name)\r\n )\r\n return json_decode\r\n\r\n except requests.exceptions.ConnectionError as e:\r\n time.sleep(1)\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=5,\r\n context='Function name:{0} - {1} has occured.'.format(function_name,\r\n e.__class__.__name__)\r\n )\r\n # 异常信息\r\n error_connection = 'ConnectionError -- please wait 3 seconds'\r\n error_connection_dict = {'status': '2',\r\n 'info': 'requests.exceptions.ConnectionError',\r\n 'detail_information': requests.exceptions.ConnectionError,\r\n 'error_prompt': error_connection\r\n }\r\n return error_connection_dict\r\n\r\n except requests.exceptions.ChunkedEncodingError as e:\r\n time.sleep(1)\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=5,\r\n context='Function name:{0} - {1} has occured.'.format(function_name,\r\n e.__class__.__name__)\r\n )\r\n # 异常信息\r\n error_chuck_encoding = 'ChunkedEncodingError -- please wait 3 seconds'\r\n 
error_chuck_encoding_dict = {'status': '2',\r\n 'info': 'HTTPError',\r\n 'detail_information': requests.exceptions.ChunkedEncodingError,\r\n 'error_prompt': error_chuck_encoding\r\n }\r\n return error_chuck_encoding_dict\r\n\r\n except Exception as e:\r\n time.sleep(1)\r\n error_information = 'Unfortunately -- An unknown Error Happened, Please wait 3 seconds'\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=5,\r\n context='Function name:{0} - {1} has occured.'.format(function_name,\r\n e.__class__.__name__)\r\n )\r\n # 异常信息\r\n error_information_dict = {'status': '2',\r\n 'info': 'HTTPError',\r\n 'detail_information': requests.exceptions.ChunkedEncodingError,\r\n 'error_prompt': error_information\r\n }\r\n return error_information_dict\r\n\r\n def parse_walking_route_planning(self, json_decode: dict\r\n ) -> list:\r\n \"\"\"\r\n 函数:解析步行路径规划数据。\\n\r\n Args:\r\n json_decode:get_walking_route_planning()方法从网络中获取到的数据\r\n \"\"\"\r\n\r\n # TODO:未来版本将返回数据从list升级为dict\r\n self.json_decode = json_decode\r\n\r\n # 输出结果\r\n resultContext = []\r\n\r\n # 写入日志\r\n writeLog = WriteLog()\r\n class_name = self.__class__.__name__\r\n function_name = inspect.stack()[0][3]\r\n log_filename = writeLog.create_filename(class_name=class_name)\r\n\r\n try:\r\n if self.json_decode['status'] == '0':\r\n # 官方文档异常\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=3,\r\n context='Function name:{0} - status:{1}'.format(function_name,\r\n self.json_decode['status'])\r\n )\r\n raise OfficialException\r\n\r\n elif self.json_decode['status'] == '2':\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=3,\r\n context='Function name:{0} - status:{1}'.format(function_name,\r\n self.json_decode['status'])\r\n )\r\n # 自定义异常\r\n raise CustomExpection\r\n\r\n elif self.json_decode['status'] == '1':\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=6,\r\n 
context='Function name:{0} - status:{1}'.format(function_name,\r\n self.json_decode['status'])\r\n )\r\n\r\n if self.json_decode['infocode'] == \"10000\": # 请求数据成功的状态码\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=6,\r\n context='Function name:{0} - infocode:{1}'.format(function_name,\r\n self.json_decode[\r\n 'infocode'])\r\n )\r\n # 步行方案路线信息列表\r\n paths = self.json_decode['route']['paths']\r\n len_paths = len(paths)\r\n resultContext.append(\"已为您智能生成{0}种步行方案\".format(len_paths))\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=1,\r\n context='Function name:{0} - path length:{1}'.format(function_name,\r\n len_paths)\r\n )\r\n\r\n # 步行方案\r\n for path in paths:\r\n # 步行距离\r\n distance = path['distance']\r\n resultContext.append(\"本次步行规划步行的长度为{0:.0f}米\".format(int(distance)))\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=1,\r\n context='Function name:{0} - distance:{1}'.format(function_name,\r\n distance)\r\n )\r\n\r\n # 步行时长\r\n duration = str(datetime.timedelta(seconds=int(path['duration']))).split(\":\")\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=1,\r\n context='Function name:{0} - duration:{1}'.format(function_name,\r\n duration)\r\n )\r\n if duration[1] == '0': # 在60秒以内\r\n resultContext.append(\"本次步行规划步行的时长为{0}秒\".format(duration[2]))\r\n elif duration[0] == '0': # 在一小时以内\r\n resultContext.append(\"本次步行规划步行的时长为{0}分{1}秒\".format(duration[1], duration[2]))\r\n else:\r\n if duration[1] == '0':\r\n resultContext.append(\"本次步行规划步行的时长为{0}时{1}秒\".format(duration[0], duration[2]))\r\n else:\r\n resultContext.append(\"本次步行规划步行的时长为{0}时{1}分{2}秒\".format(duration[0], duration[1],\r\n duration[2]))\r\n\r\n # 步行结果列表\r\n steps = path['steps']\r\n len_steps = len(steps)\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=1,\r\n context='Function name:{0} - step 
length:{1}'.format(function_name,\r\n len_steps)\r\n )\r\n resultContext.append(\"本次路径规划共分为{0}步\".format(len_steps))\r\n\r\n for item, step in enumerate(steps):\r\n # 路段步行指示\r\n instruction = step['instruction']\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=1,\r\n context='Function name:{0} - instruction:{1}'.format(function_name,\r\n instruction)\r\n )\r\n resultContext.append(\"第{0}步:{1}\".format(item + 1, instruction))\r\n resultContext.append(\"步行导航结束\")\r\n return resultContext\r\n\r\n except OfficialException as officialException:\r\n # 获得的错误信息\r\n errcode, errorInfo, solution = officialException.get_error_info(self.json_decode)\r\n # 打印到日志文件中\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=3,\r\n context='Function name:{0} - errcode:{1}'.format(function_name,\r\n errcode)\r\n )\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=3,\r\n context='Function name:{0} - errorInfo:{1}'.format(function_name,\r\n errorInfo)\r\n )\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=3,\r\n context='Function name:{0} - solution:{1}'.format(function_name,\r\n solution)\r\n )\r\n # 异常信息\r\n resultContext.append(errorInfo)\r\n context = \"步行导航查询失败,换个词进行搜索吧\"\r\n resultContext.append(context)\r\n return resultContext\r\n\r\n except CustomExpection as customException:\r\n info, detail_information, error_prompt = customException.get_error_info(self.json_decode)\r\n # 打印到日志文件中\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=3,\r\n context='Function name:{0} - info:{1}'.format(function_name,\r\n info)\r\n )\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=3,\r\n context='Function name:{0} - detail_information:{1}'.format(function_name,\r\n detail_information)\r\n )\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=3,\r\n context='error_prompt:{0}'.format(error_prompt)\r\n )\r\n # 异常信息\r\n context = 
\"步行导航查询失败,换个词进行搜索吧\"\r\n resultContext.append(context)\r\n return resultContext\r\n\r\n def get_bus_route_planning(self, origin: str,\r\n destination: str,\r\n city: str,\r\n **kwargs\r\n ) -> dict:\r\n \"\"\"\r\n 函数:获取公交路径规划数据。\\n\r\n Args:\r\n origin:出发点,必填。规则: lon,lat(经度,纬度), “,”分割,如117.500244, 40.417801。经纬度小数点不超过6位。\r\n destination:目的地,必填。规则: lon,lat(经度,纬度), “,”分割,如117.500244, 40.417801。经纬度小数点不超过6位。\r\n city:城市/跨城规划时的起点城市,必填。目前支持市内公交换乘/跨城公交的起点城市。可选值:城市名称/citycode。\r\n kwargs:\r\n cityd:跨城公交规划时的终点城市,可选( 跨城必填 )。跨城公交规划必填参数。可选值:城市名称/citycode。\r\n extensions:返回结果详略,可选,默认base。可选值:base(default)/all。base:返回基本信息;all:返回全部信息。\r\n strategy:公交换乘策略,可选。可选值:0:最快捷模式;1:最经济模式;2:最少换乘模式;3:最少步行模式;5:不乘地铁模式。\r\n nightFlag:是否计算夜班车,可选。可选值:0:不计算夜班车;1:计算夜班车。\r\n date:出发日期,可选根据出发时间和日期,筛选可乘坐的公交路线,格式示例:date=2014-3-19。在无需设置预计出发时间时,请不要在请求之中携带此参数。\r\n bus_time:出发时间,可选。根据出发时间和日期,筛选可乘坐的公交路线,格式示例:time=22:34。在无需设置预计出发时间时,请不要在请求之中携带此参数。\r\n output:返回数据格式类型,可选,默认JSON格式。可选值:JSON,XML。\r\n Returns:返回获得的json格式数据或错误信息\r\n \"\"\"\r\n\r\n self.city = city\r\n self.destination = destination\r\n self.origin = origin\r\n\r\n if 'bus_time' in kwargs:\r\n self.bus_time = kwargs['bus_time']\r\n if 'cityd' in kwargs:\r\n self.cityd = kwargs['cityd']\r\n else:\r\n self.cityd = ''\r\n if 'date' in kwargs:\r\n self.date = kwargs['date']\r\n if 'extensions' in kwargs:\r\n self.extensions = kwargs['extensions']\r\n if 'nightFlag' in kwargs:\r\n self.nightFlag = kwargs['nightFlag']\r\n if 'output' in kwargs:\r\n self.output = kwargs['output']\r\n if 'strategy' in kwargs:\r\n self.strategy = kwargs['strategy']\r\n else:\r\n self.strategy = 10\r\n\r\n # 传入参数\r\n parameters = {'key': self.APIkey,\r\n 'origin': self.origin,\r\n 'destination': self.destination,\r\n 'city': self.city,\r\n 'cityd': self.cityd,\r\n }\r\n\r\n if self.bus_time is not None:\r\n parameters.update(bus_time=self.bus_time)\r\n if self.cityd is not None:\r\n parameters.update(cityd=self.cityd)\r\n if self.date is not None:\r\n 
parameters.update(date=self.date)\r\n if self.extensions is not None:\r\n parameters.update(nightflag=self.nightFlag)\r\n if self.output is not None:\r\n parameters.update(output=self.output)\r\n if self.strategy is not None:\r\n parameters.update(strategy=self.strategy)\r\n\r\n # 写入日志\r\n writeLog = WriteLog()\r\n class_name = self.__class__.__name__\r\n function_name = inspect.stack()[0][3]\r\n log_filename = writeLog.create_filename(class_name=class_name)\r\n\r\n # 获取数据\r\n try:\r\n request_information = requests.get(\"https://restapi.amap.com/v3/direction/transit/integrated?parameters\",\r\n params=parameters)\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=1,\r\n context='Function name:{0} - request_information:{1}'.format(function_name,\r\n request_information)\r\n )\r\n request_information.close() # 关闭访问\r\n request_information.raise_for_status() # 如果响应状态码不是 200,就主动抛出异常\r\n # 返回格式化后的JSON数据\r\n json_decode = json.loads(request_information.text)\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=6,\r\n context='Function name:{0} - Bus route Planning data successful get.'.format(\r\n function_name)\r\n )\r\n return json_decode\r\n\r\n except requests.exceptions.ConnectionError as e:\r\n time.sleep(1)\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=5,\r\n context='Function name:{0} - {1} has occured.'.format(function_name,\r\n e.__class__.__name__)\r\n )\r\n # 异常信息\r\n error_connection = 'ConnectionError -- please wait 3 seconds'\r\n error_connection_dict = {'status': '2',\r\n 'info': 'requests.exceptions.ConnectionError',\r\n 'detail_information': requests.exceptions.ConnectionError,\r\n 'error_prompt': error_connection\r\n }\r\n return error_connection_dict\r\n\r\n except requests.exceptions.ChunkedEncodingError as e:\r\n time.sleep(1)\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=5,\r\n 
context='Function name:{0} - {1} has occured.'.format(function_name,\r\n e.__class__.__name__)\r\n )\r\n # 异常信息\r\n error_chuck_encoding = 'ChunkedEncodingError -- please wait 3 seconds'\r\n error_chuck_encoding_dict = {'status': '2',\r\n 'info': 'HTTPError',\r\n 'detail_information': requests.exceptions.ChunkedEncodingError,\r\n 'error_prompt': error_chuck_encoding\r\n }\r\n return error_chuck_encoding_dict\r\n\r\n except Exception as e:\r\n time.sleep(1)\r\n error_information = 'Unfortunately -- An Unknown Error Happened, Please wait 3 seconds'\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=5,\r\n context='Function name:{0} - {1} has occured.'.format(function_name,\r\n e.__class__.__name__)\r\n )\r\n # 异常信息\r\n error_information_dict = {'status': '2',\r\n 'info': 'HTTPError',\r\n 'detail_information': requests.exceptions.ChunkedEncodingError,\r\n 'error_prompt': error_information\r\n }\r\n return error_information_dict\r\n\r\n def parse_bus_route_planning(self, json_decode: dict, batch: bool = False\r\n ) -> list:\r\n \"\"\"\r\n 函数:解析公交路径规划数据。\\n\r\n Args:\r\n json_decode:get_bus_route_planning()方法从网络中获取到的数据\r\n batch:是否为多值查询\r\n \"\"\"\r\n\r\n # TODO:未来版本将返回数据从list升级为dict\r\n self.json_decode = json_decode\r\n self.batch = batch\r\n\r\n # 输出结果\r\n resultContext = []\r\n\r\n # 写入日志\r\n writeLog = WriteLog()\r\n class_name = self.__class__.__name__\r\n function_name = inspect.stack()[0][3]\r\n log_filename = writeLog.create_filename(class_name=class_name)\r\n\r\n try:\r\n if self.json_decode['status'] == '0':\r\n # 官方文档异常\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=3,\r\n context='Function name:{0} - status:{1}'.format(function_name,\r\n self.json_decode['status'])\r\n )\r\n raise OfficialException\r\n\r\n elif self.json_decode['status'] == '2':\r\n # 自定义异常\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=3,\r\n context='Function name:{0} 
- status:{1}'.format(function_name,\r\n self.json_decode['status'])\r\n )\r\n raise CustomExpection\r\n\r\n elif self.json_decode['status'] == '1':\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=6,\r\n context='Function name:{0} - status:{1}'.format(function_name,\r\n self.json_decode['status'])\r\n )\r\n\r\n if self.json_decode['infocode'] == \"10000\": # 请求数据成功的状态码\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=6,\r\n context='Function name:{0} - infocode:{1}'.format(function_name,\r\n self.json_decode[\r\n 'infocode'])\r\n )\r\n\r\n geographic_coding = GeographicCoding()\r\n # 起始位置数据\r\n origin = self.json_decode['route']['origin']\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=1,\r\n context='Function name:{0} - origin:{1}'.format(function_name,\r\n origin)\r\n )\r\n\r\n inverse_start_json_decode = geographic_coding.get_inverse_geographic_coding(\r\n location=origin,\r\n radius=100,\r\n roadLevel=1,\r\n extensions='base')\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=1,\r\n context='Function name:{0} - inverse_start_json_decode get successfully.'.format(\r\n function_name)\r\n )\r\n\r\n start_bus_station_information = geographic_coding.parse_inverse_geographic_coding(\r\n inverse_json_decode=inverse_start_json_decode, flag_batch=self.batch)\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=1,\r\n context='Function name:{0} - start_bus_station_information:{1}'.format(\r\n function_name,\r\n start_bus_station_information)\r\n )\r\n\r\n # 终点位置数据\r\n destination = self.json_decode['route']['destination']\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=1,\r\n context='Function name:{0} - destination:{1}'.format(function_name,\r\n destination)\r\n )\r\n\r\n inverse_end_json_decode = 
geographic_coding.get_inverse_geographic_coding(\r\n location=destination,\r\n radius=100,\r\n roadLevel=1,\r\n extensions='base')\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=1,\r\n context='Function name:{0} - inverse_end_json_decode get successfully.'.format(\r\n function_name)\r\n )\r\n\r\n terminal_bus_station_information = geographic_coding.parse_inverse_geographic_coding(\r\n inverse_json_decode=inverse_end_json_decode, flag_batch=self.batch)\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=1,\r\n context='Function name:{0} - terminal_bus_station_information:{1}'.format(\r\n function_name,\r\n terminal_bus_station_information)\r\n )\r\n\r\n distance = int(self.json_decode['route']['distance']) / 1000\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=1,\r\n context='Function name:{0} - distance:{1}'.format(function_name,\r\n distance)\r\n )\r\n\r\n taxi_cost = self.json_decode['route']['taxi_cost']\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=1,\r\n context='Function name:{0} - taxi_cost:{1}'.format(function_name,\r\n taxi_cost)\r\n )\r\n\r\n resultContext.append(\"您好,从{0}到{1}的公交路程规划如下所示:\".format(start_bus_station_information,\r\n terminal_bus_station_information))\r\n resultContext.append(\"此次路线规划的起点到终点的距离为{0:.2f}公里\".format(distance))\r\n resultContext.append(\"若您采用打车方案,打车费用预计为{0}元\".format(taxi_cost))\r\n\r\n # 换乘方案\r\n transits = self.json_decode['route']['transits']\r\n len_transits = self.json_decode[\"count\"]\r\n resultContext.append(\"已为您智能生成如下{0}种换乘方案。\".format(len_transits))\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=1,\r\n context='Function name:{0} - transits length:{1}'.format(function_name,\r\n len_transits)\r\n )\r\n\r\n # 详细内容\r\n for item, transit in enumerate(transits):\r\n # only for debugging\r\n 
writeLog.write_to_log(file_name=log_filename,\r\n log_level=1,\r\n context='Function name:{0} - item:{1}'.format(function_name,\r\n item)\r\n )\r\n transit_cost = 0\r\n if transit['cost']:\r\n transit_cost = float(transit['cost'])\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=1,\r\n context='Function name:{0} - transit_cost:{1}'.format(function_name,\r\n transit_cost)\r\n )\r\n transit_duration = str(datetime.timedelta(seconds=int(transit['duration']))).split(\":\")\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=1,\r\n context='Function name:{0} - transit_duration:{1}'.format(function_name,\r\n destination)\r\n )\r\n transit_walking_distance = transit['walking_distance']\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=1,\r\n context='Function name:{0} - transit_walking_distance:{1}'.format(\r\n function_name,\r\n transit_walking_distance)\r\n )\r\n transit_nightflag = transit['nightflag'] # 夜间乘车标志\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=1,\r\n context='Function name:{0} - transit_nightflag:{1}'.format(function_name,\r\n transit_nightflag)\r\n )\r\n\r\n resultContext.append(\"=========================================\")\r\n\r\n # 方案换乘花费\r\n resultContext.append(\"方案{0}的换乘价格为{1:.0f}元\".format(item + 1, transit_cost))\r\n\r\n # 方案换乘时间\r\n if transit_duration[1] == '0': # 在60秒以内\r\n resultContext.append(\"此换乘方案预期时间为{0}秒\".format(transit_duration[2]))\r\n elif transit_duration[0] == '0': # 在一小时以内\r\n resultContext.append(\r\n \"此公交乘坐方案预计花费的时间为{0}分{1}秒\".format(transit_duration[1], transit_duration[2]))\r\n else:\r\n if transit_duration[1] == '0':\r\n resultContext.append(\r\n \"此换乘方案预期时间为{0}时{1}秒\".format(transit_duration[0], transit_duration[2]))\r\n else:\r\n resultContext.append(\r\n \"此换乘方案预期时间为{0}时{1}分{2}秒\".format(transit_duration[0], transit_duration[1],\r\n transit_duration[2]))\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=6,\r\n 
context='Function name:{0} - transit successfully executed'.format(\r\n function_name)\r\n )\r\n\r\n # 此方案总步行距离\r\n resultContext.append(\"方案{0}的总步行距离为{1}米\".format(item + 1, transit_walking_distance))\r\n\r\n # 换乘路段列表\r\n segments = transit['segments']\r\n len_segments = len(segments)\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=1,\r\n context='Function name:{0} - segments length:{1}'.format(function_name,\r\n len_segments)\r\n )\r\n\r\n resultContext.append(\"此方案需要{0}次中转\".format(len_segments - 1))\r\n\r\n for segment in segments:\r\n context = self.print_bus_segments(segment)\r\n resultContext.extend(context)\r\n return resultContext\r\n\r\n except OfficialException as officialException:\r\n # 获得的错误信息\r\n errcode, errorInfo, solution = officialException.get_error_info(self.json_decode)\r\n # 打印到日志文件中\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=3,\r\n context='Function name:{0} - errcode:{1}'.format(function_name,\r\n errcode)\r\n )\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=3,\r\n context='Function name:{0} - errorInfo:{1}'.format(function_name,\r\n errorInfo)\r\n )\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=3,\r\n context='Function name:{0} - solution:{1}'.format(function_name,\r\n solution)\r\n )\r\n\r\n resultContext.append(errorInfo)\r\n context = \"公交导航查询失败,换个词进行搜索吧\"\r\n resultContext.append(context)\r\n return resultContext\r\n\r\n except CustomExpection as customException:\r\n info, detail_information, error_prompt = customException.get_error_info(self.json_decode)\r\n # 打印到日志文件中\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=3,\r\n context='Function name:{0} - info:{1}'.format(function_name,\r\n info)\r\n )\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=3,\r\n context='Function name:{0} - detail_information:{1}'.format(function_name,\r\n detail_information)\r\n )\r\n 
writeLog.write_to_log(file_name=log_filename,
                                  log_level=3,
                                  context='error_prompt:{0}'.format(error_prompt)
                                  )

            # fallback message shown to the user when the walking-navigation query fails
            context = "步行导航查询失败,换个词进行搜索吧"
            resultContext.append(context)
            return resultContext

    def print_bus_segments(self, segment: dict
                           ) -> list:
        """
        Print the navigation information of one transfer segment of a transit route.

        Args:
            segment: one concrete transfer segment taken from the transit segment list
        """

        # TODO: upgrade the return data from list to dict in a future version
        self.segment = segment

        # collected output lines
        resultContext = []

        # logging setup
        writeLog = WriteLog()
        class_name = self.__class__.__name__
        function_name = inspect.stack()[0][3]
        log_filename = writeLog.create_filename(class_name=class_name)

        # 'taxi' field — placeholder, only logged, never rendered to the user
        if self.segment['taxi']:
            # only for debugging
            writeLog.write_to_log(file_name=log_filename,
                                  log_level=1,
                                  context='Function name:{0} - taxi data:{1}'.format(function_name,
                                                                                     self.segment['taxi'])
                                  )

        # walking navigation information for this segment
        if self.segment['walking']:
            resultContext.append("==============================")
            resultContext.append("请步行走到离您将要乘坐的公交站或地铁口")
            walking = self.segment['walking']
            # walking distance and walking duration
            walking_distance = walking['distance']
            walking_duration = walking['duration']
            # only for debugging
            writeLog.write_to_log(file_name=log_filename,
                                  log_level=1,
                                  context='Function name:{0} - walking_distance:{1}'.format(function_name,
                                                                                            walking_distance)
                                  )
            writeLog.write_to_log(file_name=log_filename,
                                  log_level=1,
                                  context='Function name:{0} - walking duration:{1}'.format(function_name,
                                                                                            walking_duration)
                                  )
            # route planning summary
            resultContext.append("步行距离长度{0}米,预计行走{1}秒".format(walking_distance, walking_duration))
            steps = walking['steps']
            len_steps = len(steps)
            # only for debugging
            writeLog.write_to_log(file_name=log_filename,
                                  log_level=1,
                                  context='Function name:{0} - steps length:{1}'.format(function_name,
                                                                                        len_steps)
                                  )

            # concrete walking steps
            for sub_item, step in enumerate(steps):
                # textual instruction for this step, e.g. a turn-by-turn direction
                instruction = step['instruction']
                
# only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=1,\r\n context='Function name:{0} - instruction:{1}'.format(function_name,\r\n instruction)\r\n )\r\n resultContext.append(\"第{0}步:{1}\".format(sub_item + 1, instruction))\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=1,\r\n context='Function name:{0} - step action:{1}'.format(function_name,\r\n step['assistant_action'])\r\n )\r\n\r\n # 步行到达终点\r\n if step['assistant_action']:\r\n assistant_action = step['assistant_action']\r\n resultContext.append(\"步行已到达目的地:{0}\".format(assistant_action))\r\n resultContext.append(\"步行导航结束\")\r\n\r\n if self.segment['bus']: # 此路段公交导航信息\r\n resultContext.append(\"================================\")\r\n buslines = self.segment['bus']['buslines']\r\n bus_entrance = self.segment['entrance']\r\n bus_exit = self.segment['exit']\r\n # 公交路线\r\n for busline in buslines:\r\n context = self.parse_buslines(busline, bus_entrance, bus_exit)\r\n resultContext.extend(context)\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=6,\r\n context='Function name:{0} - buslines data successfully get'.format(function_name)\r\n )\r\n\r\n if self.segment['railway']['via_stops']: # 此路段乘坐火车的信息\r\n resultContext.append(\"=============================\")\r\n railway = self.segment['railway']\r\n context = self.parse_railway_lines(railway)\r\n resultContext.extend(context)\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=6,\r\n context='Function name:{0} - railway data successfully get'.format(function_name,\r\n railway)\r\n )\r\n return resultContext\r\n\r\n def parse_buslines(self, busline: dict,\r\n bus_entrance=None,\r\n bus_exit=None\r\n ) -> list:\r\n \"\"\"\r\n 函数:解析公交数据\r\n Args:\r\n busline:此路段公交导航信息列表\r\n bus_entrance:地铁入口,只在地铁路段有值\r\n bus_exit:地铁出口,只在地铁路段有值\r\n \"\"\"\r\n\r\n self.busLine = busline\r\n self.bus_entrance = bus_entrance\r\n 
self.bus_exit = bus_exit

        # collected output lines
        resultContext = []

        # logging setup
        writeLog = WriteLog()
        class_name = self.__class__.__name__
        function_name = inspect.stack()[0][3]
        log_filename = writeLog.create_filename(class_name=class_name)

        # Bus line name, boarding-stop info, alighting-stop info, line type, travel
        # distance, expected travel time, first/last bus time, number of via stops,
        # and the list of via stops are extracted from the busline dict below.
        if self.bus_exit is None:
            self.bus_exit = {}
        # only for debugging
        # NOTE(review): many log format strings in this section repeat {0}
        # (e.g. 'Function name:{0} - bus exit:{0}') — the second placeholder should
        # be {1}, so the value argument is currently never logged.
        writeLog.write_to_log(file_name=log_filename,
                              log_level=1,
                              context='Function name:{0} - bus exit:{0}'.format(function_name,
                                                                                self.bus_exit)
                              )

        if self.bus_entrance is None:
            self.bus_entrance = {}
        # only for debugging
        writeLog.write_to_log(file_name=log_filename,
                              log_level=1,
                              context='Function name:{0} - bus entrance:{0}'.format(function_name,
                                                                                    self.bus_entrance)
                              )

        name = self.busLine['name']
        departure_stop = self.busLine['departure_stop']['name']
        arrival_stop = self.busLine['arrival_stop']['name']
        busline_type = self.busLine['type']
        busline_distance = int(self.busLine['distance'])
        # ['H', 'MM', 'SS'] produced by str(timedelta).split(":")
        busline_duration = str(datetime.timedelta(seconds=int(self.busLine['duration']))).split(":")
        start_time = self.busLine['start_time']
        end_time = self.busLine['end_time']
        via_num = self.busLine['via_num']
        via_stops = self.busLine['via_stops']

        # only for debugging
        writeLog.write_to_log(file_name=log_filename,
                              log_level=1,
                              context='Function name:{0} - name:{0}'.format(function_name,
                                                                            name)
                              )
        writeLog.write_to_log(file_name=log_filename,
                              log_level=1,
                              context='Function name:{0} - departure stop:{0}'.format(function_name,
                                                                                      departure_stop)
                              )
        writeLog.write_to_log(file_name=log_filename,
                              log_level=1,
                              context='Function name:{0} - arrival stop:{0}'.format(function_name,
                                                                                    arrival_stop)
                              )
        writeLog.write_to_log(file_name=log_filename,
                              log_level=1,
                              context='Function name:{0} - busline type:{0}'.format(function_name,
                                                                                   busline_type)
                              )
        writeLog.write_to_log(file_name=log_filename,
                              log_level=1,
                              context='Function name:{0} - busline distance:{0}'.format(function_name,
                                                                                        busline_distance)
                              )
        writeLog.write_to_log(file_name=log_filename,
                              log_level=1,
                              context='Function name:{0} - bus duration:{0}'.format(function_name,
                                                                                    busline_duration)
                              )
        writeLog.write_to_log(file_name=log_filename,
                              log_level=1,
                              context='Function name:{0} - start time:{0}'.format(function_name,
                                                                                  start_time)
                              )
        writeLog.write_to_log(file_name=log_filename,
                              log_level=1,
                              context='Function name:{0} - end time:{0}'.format(function_name,
                                                                                end_time)
                              )
        writeLog.write_to_log(file_name=log_filename,
                              log_level=1,
                              context='Function name:{0} - via num:{0}'.format(function_name,
                                                                               via_num)
                              )

        # subway line route
        if busline_type == '地铁线路':
            resultContext.append("您将要乘坐的地铁路线为{0}".format(name))
            resultContext.append(
                "该地铁路线的发车时间为{0}时{1}分,末班车时间为{2}时{3}分,请提前规划好您的时间,以免错过末班车。".format(start_time[0:2], start_time[2:4],
                                                                       end_time[0:2], end_time[2:4]))
            resultContext.append("您在该段地铁乘坐路线长度的预计为{0}公里,".format(busline_distance / 1000))
            # TODO:
            # Need to combine

            # riding time, branched on the H/MM/SS fields of busline_duration
            if busline_duration[1] == '0':  # under 60 seconds
                resultContext.append("预计乘坐{0}秒".format(busline_duration[2]))
            elif busline_duration[0] == '0':  # under one hour
                resultContext.append("预计乘坐{0}分{1}秒".format(busline_duration[1], busline_duration[2]))
            else:
                if busline_duration[1] == '0':
                    resultContext.append("预计乘坐{0}时{1}秒".format(busline_duration[0], busline_duration[2]))
                else:
                    # NOTE(review): '{0}{0}时' repeats the hour placeholder, so the hour
                    # value is printed twice in the user-visible text — likely a typo
                    # for '{0}时{1}分{2}秒' (the sibling bus branch uses that form).
                    resultContext.append("预计乘坐{0}{0}时{1}分{2}秒".format(busline_duration[0], busline_duration[1],
                                                                      busline_duration[2]))

            resultContext.append("从起点站{0}出发,终点站{1}下车".format(departure_stop, arrival_stop))
            resultContext.append("乘坐路段经过{0}个地铁站".format(via_num))

            # route details
            # departure station (with subway entrance name when available)
            if self.bus_entrance:
                
resultContext.append(\"起点站:{0} —— {1}\".format(departure_stop, self.bus_entrance['name']))\r\n else:\r\n resultContext.append(\"起点站:{0}\".format(departure_stop))\r\n\r\n for item, via_stop in enumerate(via_stops):\r\n resultContext.append(\"途径站{0}:{1}\".format(item + 1, via_stop['name']))\r\n\r\n # 终点站\r\n if self.bus_exit:\r\n resultContext.append(\"终点站:{0} —— {1}\".format(arrival_stop, self.bus_exit['name']))\r\n else:\r\n resultContext.append(\"终点站:{0}\".format(arrival_stop))\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=6,\r\n context='Function name:{0} - subway data successfully print'.format(function_name)\r\n )\r\n\r\n # 普通公交路线\r\n elif busline_type == '普通公交线路':\r\n resultContext.append(\"您将要乘坐的公交路线为{0}\".format(name))\r\n if start_time:\r\n resultContext.append(\r\n \"该公交路线的发车时间为{0}时{1}分,末班车时间为{2}时{3}分,请提前规划好您的行程,以免错过末班车\".format(start_time[0:2], start_time[2:4],\r\n end_time[0:2], end_time[2:4]))\r\n else:\r\n resultContext.append(\"该路公交暂无运营时间信息,请注意留意当地路牌指示\")\r\n resultContext.append(\"您在该段公交乘坐路线长度的预计为{0:.2f}公里,\".format(busline_distance / 1000))\r\n # TODO:Need to combine\r\n\r\n # 公交乘坐时间\r\n if busline_duration[1] == '0': # 在60秒以内\r\n resultContext.append(\"预计乘坐{0}秒\".format(busline_duration[2]))\r\n elif busline_duration[0] == '0': # 在一小时以内\r\n resultContext.append(\"预计乘坐{0}分{1}秒\".format(busline_duration[1], busline_duration[2]))\r\n else:\r\n if busline_duration[1] == '0':\r\n resultContext.append(\"预计乘坐{0}时{1}秒\".format(busline_duration[0], busline_duration[2]))\r\n else:\r\n resultContext.append(\"预计乘坐{0}时{1}分{2}秒\".format(busline_duration[0], busline_duration[1],\r\n busline_duration[2]))\r\n\r\n resultContext.append(\"从起点站{0}出发\".format(departure_stop))\r\n resultContext.append(\"乘坐路段经过{0}个公交站\".format(via_num))\r\n for item, via_stop in enumerate(via_stops):\r\n resultContext.append(\"途径站{0}:{1}\".format(item + 1, via_stop['name']))\r\n resultContext.append(\"终点站{0}下车\".format(arrival_stop))\r\n # only 
for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=6,\r\n context='Function name:{0} - bus data successfully print'.format(function_name)\r\n )\r\n\r\n return resultContext\r\n\r\n def parse_railway_lines(self, railway: dict\r\n ) -> list:\r\n \"\"\"\r\n 函数:解析火车路径规划数据\r\n Args:\r\n railway:乘坐火车的信息列表\r\n \"\"\"\r\n\r\n # TODO:未来版本将返回数据从list升级为dict\r\n self.railway = railway\r\n\r\n # 输出结果\r\n resultContext = []\r\n\r\n # 写入日志\r\n writeLog = WriteLog()\r\n class_name = self.__class__.__name__\r\n function_name = inspect.stack()[0][3]\r\n log_filename = writeLog.create_filename(class_name=class_name)\r\n\r\n railway_time = str(datetime.timedelta(seconds=int(self.railway['time']))).split(\":\")\r\n name = self.railway['name']\r\n trip = self.railway['trip']\r\n distance = int(self.railway['distance']) / 1000\r\n railway_type = self.railway['type']\r\n departure_stop = self.railway['departure_stop']\r\n arrival_stop = self.railway['arrival_stop']\r\n via_stops = self.railway['via_stop']\r\n spaces = self.railway['spaces']\r\n\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=1,\r\n context='Function name:{0} - railway time:{0}'.format(function_name,\r\n railway_time)\r\n )\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=1,\r\n context='Function name:{0} - name:{0}'.format(function_name,\r\n name)\r\n )\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=1,\r\n context='Function name:{0} - trip:{0}'.format(function_name,\r\n trip)\r\n )\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=1,\r\n context='Function name:{0} - distance:{0}'.format(function_name,\r\n distance)\r\n )\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=1,\r\n context='Function name:{0} - railway type:{0}'.format(function_name,\r\n railway_type)\r\n )\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=1,\r\n context='Function name:{0} - departure 
stop:{0}'.format(function_name,\r\n departure_stop)\r\n )\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=1,\r\n context='Function name:{0} - arrival stop:{0}'.format(function_name,\r\n arrival_stop)\r\n )\r\n\r\n # 动车信息\r\n if railway_type == \"D字头的动车火车\":\r\n resultContext.append(\"您将要乘坐的动车路线为{0}\".format(name))\r\n resultContext.append(\"动车车次为{0}\".format(trip))\r\n resultContext.append(\"您将在该动车上预计乘坐{0}小时,该动车预计运行{0}公里\".format(railway_time, distance))\r\n\r\n # 价格情况\r\n resultContext.append(\"您乘坐的该班次动车一等座票{0}元,二等座票{1}元,无座票{2}元\".format(spaces[0]['cost'], spaces[1]['cost'],\r\n spaces[2]['cost']))\r\n\r\n # 路线详情\r\n # 起始站信息\r\n start = departure_stop['start']\r\n departure_name = departure_stop['name']\r\n departure_time = departure_stop['time']\r\n\r\n # 终点站信息\r\n end = arrival_stop['end']\r\n arrival_name = arrival_stop['name']\r\n arrival_time = arrival_stop['time']\r\n\r\n if start:\r\n resultContext.append(\"您将从起点站{0}站出发,发车时间是{0}时{1}分,请您合理安排您的行程\".format(departure_name, departure_time[0:2],\r\n departure_time[2:4]))\r\n else:\r\n resultContext.append(\"您将从途径站{0}站出发,发车时间是{0}时{1}分,请您合理安排您的行程\".format(departure_name, departure_time[0:2],\r\n departure_time[2:4]))\r\n\r\n # 途径站信息\r\n if via_stops:\r\n for item, via_stop in enumerate(via_stops):\r\n via_stop_name = via_stop['name']\r\n via_stop_time = via_stop['time']\r\n via_stop_wait = via_stop['wait']\r\n resultContext.append(\"途径站{0}:{1}。进站时间:{2},停靠时间{3}分钟\".format(item + 1, via_stop_name, via_stop_time,\r\n via_stop_wait))\r\n\r\n # 终点站信息\r\n if end:\r\n resultContext.append(\r\n \"您将在{0}时{1}分到达动车的终点站{2}站\".format(arrival_time[0:2], arrival_time[2:4], arrival_name))\r\n else:\r\n resultContext.append(\r\n \"您将在{0}时{1}分到达本次旅程的终点站{2}站\".format(arrival_time[0:2], arrival_time[2:4], arrival_name))\r\n\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=6,\r\n context='Function name:{0} - railway data successfully print:{0}'.format(\r\n function_name)\r\n 
)\r\n\r\n return resultContext\r\n\r\n def get_drive_route_planning(self, origin: str,\r\n destination: str,\r\n **kwargs\r\n ) -> dict:\r\n \"\"\"\r\n 函数:获取驾车路径规划数据。\\n\r\n Args:\r\n origin:出发点,必填。经度在前,纬度在后,经度和纬度用\",\"分割,经纬度小数点后不得超过6位。格式为x1,y1|x2,y2|x3,y3。由于在实际使用过程中,存在定位飘点的情况。为了解决此类问题,允许传入多个起\r\n 点用于计算车头角度。最多允许传入3个坐标对,每对坐标之间距离必须超过2m。 虽然对每对坐标之间长度没有上限,但是如果超过4米会有概率性出现不准确的情况。使用三个点来判断距离和角度的有效性,如果两者都有\r\n 效,使用第一个点和最后一个点计算的角度设置抓路的角度,规划路径时以最后一个坐标对进行规划。\r\n destination:目的地,必填。经度在前,纬度在后,经度和纬度用\",\"分割,经纬度小数点后不得超过6位。\r\n kwargs:\r\n extensions:返回结果控制,必填,默认base。可选值:base/all。base:返回基本信息;all:返回全部信息。\r\n originId:出发点poiId,选填。当起点为POI时,建议填充此值。\r\n destinationId:目的地poiId,选填。当终点为POI时,建议填充此值。\r\n originType:起点的poi类别,选填。当用户知道起点POI的类别时候,建议填充此值。\r\n destinationType:终点的poi类别,选填。当用户知道终点POI的类别时候,建议填充此值。\r\n strategy:驾车选择策略,选填,默认10。返回结果会躲避拥堵,路程较短,尽量缩短时间,与高德地图的默认策略也就是不进行任何勾选一致。\r\n 下方10~20的策略,会返回多条路径规划结果。下方策略 0~9的策略,仅会返回一条路径规划结果。\r\n 下方策略返回多条路径规划结果\r\n 10,返回结果会躲避拥堵,路程较短,尽量缩短时间,与高德地图的默认策略也就是不进行任何勾选一致\r\n 11,返回三个结果包含:时间最短;距离最短;躲避拥堵 (由于有更优秀的算法,建议用10代替)\r\n 12,返回的结果考虑路况,尽量躲避拥堵而规划路径,与高德地图的“躲避拥堵”策略一致\r\n 13,返回的结果不走高速,与高德地图“不走高速”策略一致\r\n 14,返回的结果尽可能规划收费较低甚至免费的路径,与高德地图“避免收费”策略一致\r\n 15,返回的结果考虑路况,尽量躲避拥堵而规划路径,并且不走高速,与高德地图的“躲避拥堵&不走高速”策略一致\r\n 16,返回的结果尽量不走高速,并且尽量规划收费较低甚至免费的路径结果,与高德地图的“避免收费&不走高速”策略一致\r\n 17,返回路径规划结果会尽量的躲避拥堵,并且规划收费较低甚至免费的路径结果,与高德地图的“躲避拥堵&避免收费”策略一致\r\n 18,返回的结果尽量躲避拥堵,规划收费较低甚至免费的路径结果,并且尽量不走高速路,与高德地图的“避免拥堵&避免收费&不走高速”策略一致\r\n 19,返回的结果会优先选择高速路,与高德地图的“高速优先”策略一致\r\n 20,返回的结果会优先考虑高速路,并且会考虑路况躲避拥堵,与高德地图的“躲避拥堵&高速优先”策略一致\r\n 下方策略仅返回一条路径规划结果\r\n 0,速度优先,不考虑当时路况,此路线不一定距离最短\r\n 1,费用优先,不走收费路段,且耗时最少的路线\r\n 2,距离优先,不考虑路况,仅走距离最短的路线,但是可能存在穿越小路/小区的情况\r\n 3,速度优先,不走快速路,例如京通快速路(因为策略迭代,建议使用13)\r\n 4,躲避拥堵,但是可能会存在绕路的情况,耗时可能较长\r\n 5,多策略(同时使用速度优先、费用优先、距离优先三个策略计算路径)。\r\n 其中必须说明,就算使用三个策略算路,会根据路况不固定的返回一到三条路径规划信息。\r\n 6,速度优先,不走高速,但是不排除走其余收费路段\r\n 7,费用优先,不走高速且避免所有收费路段\r\n 8,躲避拥堵和收费,可能存在走高速的情况,并且考虑路况不走拥堵路线,但有可能存在绕路和时间较长\r\n 9,躲避拥堵和收费,不走高速\r\n 
waypoints:途经点,选填。经度和纬度用\",\"分割,经度在前,纬度在后,小数点后不超过6位,坐标点之间用\";\"分隔最大数目:16个坐标点。如果输入多个途径点,则按照用户输入的顺序进行路径规划。\r\n avoidPolygons:避让区域,选填。区域避让,支持32个避让区域,每个区域最多可有16个顶点经度和纬度用\",\"分割,经度在前,纬度在后,小数点后不超过6位,坐标点之间用\";\"分隔,区域之间用\"|\"分隔。如果是四边形则有四个坐标点,如果是五边形则有五个坐标点;同时传入避让区域及避让道路,仅支持避让道路;避让区域不能超过81平方公里,否则避让区域会失效。\r\n avoidRoad:避让道路名,选填。只支持一条避让道路。\r\n province:用汉字填入车牌省份缩写,用于判断是否限行,选填。例如:京\r\n number:填入除省份及标点之外,车牌的字母和数字(需大写),选填。用于判断限行相关,选填。例如:NH1N11,支持6位传统车牌和7位新能源车牌。\r\n carType:车辆类型,选填,默认普通汽车。0:普通汽车(默认值);1:纯电动车;2:插电混动车。\r\n ferry:在路径规划中,是否使用轮渡,选填,默认使用渡轮。0:使用渡轮(默认) ,1:不使用渡轮。\r\n roadAggregation:是否返回路径聚合信息,选填,默认false。false:不返回路径聚合信息,true:返回路径聚合信息,在steps上层增加roads做聚合\r\n noSteps:是否返回steps字段内容,选填,默认0。当取值为0时,steps字段内容正常返回;当取值为1时,steps字段内容为空。\r\n output:返回数据格式类型,选填,默认JSON格式。可选值:JSON,XML。\r\n Returns:返回获得的json格式数据或错误信息\r\n \"\"\"\r\n\r\n self.destination = destination\r\n self.origin = origin\r\n\r\n if 'avoidPolygons' in kwargs:\r\n self.avoidPolygons = kwargs['avoidPolygons']\r\n if 'avoidRoad' in kwargs:\r\n self.avoidRoad = kwargs['avoidRoad']\r\n if 'carType' in kwargs:\r\n self.carType = kwargs['carType']\r\n if 'destinationId' in kwargs:\r\n self.destinationId = kwargs['destinationId']\r\n if 'destinationType' in kwargs:\r\n self.destinationType = kwargs['destinationType']\r\n if 'extensions' in kwargs:\r\n self.extensions = kwargs['extensions']\r\n if 'ferry' in kwargs:\r\n self.ferry = kwargs['ferry']\r\n if 'noSteps' in kwargs:\r\n self.noSteps = kwargs['noSteps']\r\n if 'number' in kwargs:\r\n self.number = kwargs['number']\r\n if 'originId' in kwargs:\r\n self.originId = kwargs['originId']\r\n if 'originType' in kwargs:\r\n self.originType = kwargs['originType']\r\n if 'output' in kwargs:\r\n self.output = kwargs['output']\r\n if 'province' in kwargs:\r\n self.province = kwargs['province']\r\n if 'roadAggregation' in kwargs:\r\n self.roadAggregation = kwargs['roadAggregation']\r\n if 'strategy' in kwargs:\r\n self.strategy = kwargs['strategy']\r\n else:\r\n self.strategy = 10\r\n 
if 'waypoints' in kwargs:\r\n self.waypoints = kwargs['waypoints']\r\n\r\n # 写入日志\r\n writeLog = WriteLog()\r\n class_name = self.__class__.__name__\r\n function_name = inspect.stack()[0][3]\r\n log_filename = writeLog.create_filename(class_name=class_name)\r\n\r\n # 传入参数\r\n parameters = {'key': self.APIkey,\r\n 'origin': self.origin,\r\n 'destination': self.destination,\r\n }\r\n\r\n if self.avoidPolygons is not None:\r\n parameters.update(avoidpolygons=self.avoidPolygons)\r\n if self.avoidRoad is not None:\r\n parameters.update(avoidroad=self.avoidRoad)\r\n if self.carType is not None:\r\n parameters.update(cartype=self.carType)\r\n if self.destinationId is not None:\r\n parameters.update(destinationid=self.destinationId)\r\n if self.destinationType is not None:\r\n parameters.update(destinationtype=self.destinationType)\r\n if self.extensions is not None:\r\n parameters.update(extensions=self.extensions)\r\n if self.ferry is not None:\r\n parameters.update(ferry=self.ferry)\r\n if self.noSteps is not None:\r\n parameters.update(nosteps=self.noSteps)\r\n if self.number is not None:\r\n parameters.update(number=self.number)\r\n if self.originId is not None:\r\n parameters.update(originid=self.originId)\r\n if self.originType is not None:\r\n parameters.update(origintype=self.originType)\r\n if self.output is not None:\r\n parameters.update(output=self.output)\r\n if self.province is not None:\r\n parameters.update(province=self.province)\r\n if self.roadAggregation is not None:\r\n parameters.update(roadaggregation=self.roadAggregation)\r\n if self.strategy is not None:\r\n parameters.update(strategy=self.strategy)\r\n if self.waypoints is not None:\r\n parameters.update(waypoints=self.waypoints)\r\n\r\n # 获取数据\r\n try:\r\n request_information = requests.get(\"https://restapi.amap.com/v3/direction/driving?parameters\",\r\n params=parameters)\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=1,\r\n context='Function name:{0} 
- request_information:{1}'.format(function_name,\r\n request_information)\r\n )\r\n request_information.close() # 关闭访问\r\n request_information.raise_for_status() # 如果响应状态码不是 200,就主动抛出异常\r\n json_decode = json.loads(request_information.text)\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=6,\r\n context='Function name:{0} - Drive route data successful get.'.format(\r\n function_name)\r\n )\r\n return json_decode\r\n\r\n except requests.exceptions.ConnectionError as e:\r\n time.sleep(1)\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=5,\r\n context='Function name:{0} - {1} has occured.'.format(function_name,\r\n e.__class__.__name__)\r\n )\r\n # 异常信息\r\n error_connection = 'ConnectionError -- please wait 3 seconds'\r\n error_connection_dict = {'status': '2',\r\n 'info': 'requests.exceptions.ConnectionError',\r\n 'detail_information': requests.exceptions.ConnectionError,\r\n 'error_prompt': error_connection\r\n }\r\n return error_connection_dict\r\n\r\n except requests.exceptions.ChunkedEncodingError as e:\r\n time.sleep(1)\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=5,\r\n context='Function name:{0} - {1} has occured.'.format(function_name,\r\n e.__class__.__name__\r\n )\r\n )\r\n # 异常信息\r\n error_chuck_encoding = 'ChunkedEncodingError -- please wait 3 seconds'\r\n error_chuck_encoding_dict = {'status': '2',\r\n 'info': 'HTTPError',\r\n 'detail_information': requests.exceptions.ChunkedEncodingError,\r\n 'error_prompt': error_chuck_encoding\r\n }\r\n return error_chuck_encoding_dict\r\n\r\n except Exception as e:\r\n time.sleep(1)\r\n error_information = 'Unfortunately -- An Unknown Error Happened, Please wait 3 seconds'\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=5,\r\n context='Function name:{0} - {1} has occured.'.format(function_name,\r\n e.__class__.__name__)\r\n )\r\n # 异常信息\r\n 
error_information_dict = {'status': '2',\r\n 'info': 'HTTPError',\r\n 'detail_information': requests.exceptions.ChunkedEncodingError,\r\n 'error_prompt': error_information\r\n }\r\n return error_information_dict\r\n\r\n def parse_drive_route_planning(self, json_decode: dict\r\n ) -> list:\r\n \"\"\"\r\n 函数:解析驾驶路径规划数据。\\n\r\n Args:\r\n json_decode:get_drive_route_planning()方法从网络中获取的数据\r\n \"\"\"\r\n\r\n # TODO:未来版本将返回数据从list升级为dict\r\n self.json_decode = json_decode\r\n\r\n # 输出结果\r\n resultContext = []\r\n\r\n # 写入日志\r\n writeLog = WriteLog()\r\n class_name = self.__class__.__name__\r\n function_name = inspect.stack()[0][3]\r\n log_filename = writeLog.create_filename(class_name=class_name)\r\n\r\n try:\r\n if self.json_decode['status'] == '0':\r\n # 官方文档异常\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=3,\r\n context='Function name:{0} - status:{1}'.format(function_name,\r\n self.json_decode['status'])\r\n )\r\n raise OfficialException\r\n\r\n elif self.json_decode['status'] == '2':\r\n # 自定义异常\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=3,\r\n context='Function name:{0} - status:{1}'.format(function_name,\r\n self.json_decode['status'])\r\n )\r\n raise CustomExpection\r\n\r\n elif self.json_decode['status'] == '1':\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=6,\r\n context='Function name:{0} - status:{1}'.format(function_name,\r\n self.json_decode['status'])\r\n )\r\n if self.json_decode['infocode'] == \"10000\":\r\n # 请求数据成功的状态码\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=6,\r\n context='Function name:{0} - infocode:{1}'.format(function_name,\r\n self.json_decode[\r\n 'infocode'])\r\n )\r\n # 驾车路径规划方案数目\r\n drive_count = self.json_decode[\"count\"]\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=1,\r\n context='Function name:{0} - drive count:{1}'.format(function_name,\r\n 
drive_count)\r\n )\r\n resultContext.append(\"您选择的是驾驶规划,已为您智能生成如下{0}种出行方案\".format(drive_count))\r\n resultContext.append(\"=========================================================\")\r\n paths = self.json_decode['route']['paths']\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=6,\r\n context='Function name:{0} - paths data successfully parsed.'.format(\r\n function_name)\r\n )\r\n\r\n for item, path in enumerate(paths):\r\n strategy = path['strategy']\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=1,\r\n context='Function name:{0} - strategy:{1}'.format(function_name,\r\n strategy)\r\n )\r\n resultContext.append(\"方案{0}是{1}的方案\".format(item + 1, strategy))\r\n resultContext.append(\"出行规划如下所示:\")\r\n\r\n # 步数\r\n steps = path['steps']\r\n len_steps = len(steps)\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=1,\r\n context='Function name:{0} - steps length:{1}'.format(function_name,\r\n len_steps)\r\n )\r\n resultContext.append(\"出行方案{0}共{1}步\".format(item + 1, len_steps))\r\n\r\n # 路径规划收费情况\r\n tolls = path['tolls']\r\n toll_distance = path['toll_distance']\r\n traffic_lights = path['traffic_lights']\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=1,\r\n context='Function name:{0} - tolls:{1}'.format(function_name,\r\n tolls)\r\n )\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=1,\r\n context='Function name:{0} - toll_distance:{1}'.format(function_name,\r\n toll_distance)\r\n )\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=1,\r\n context='Function name:{0} - traffic_lights:{1}'.format(function_name,\r\n traffic_lights)\r\n )\r\n\r\n # 不计费路段\r\n if tolls == \"0\":\r\n resultContext.append(\"此次路线规划已使用免费无高速方案\")\r\n else:\r\n resultContext.append(\"此次路线规划已使用途径高速方案,预计收费{0}元,收费路段长度为{1:.1f}公里\".format(\r\n tolls, int(toll_distance) / 1000))\r\n\r\n # 路径红绿灯个数情况\r\n 
resultContext.append(\"此次导航中会经过{0}个红绿灯\".format(traffic_lights))\r\n\r\n # 详细路线内容\r\n for sub_item, step in enumerate(steps):\r\n information = step['instruction']\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=1,\r\n context='Function name:{0} - instruction:{1}'.format(function_name,\r\n information)\r\n )\r\n resultContext.append(\"第{0}步:{1}\".format(sub_item + 1, information))\r\n resultContext.append(\"导航结束\")\r\n resultContext.append(\"=========================================================\")\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=6,\r\n context='Function name:{0} - drive data successfully print.'.format(\r\n function_name)\r\n )\r\n return resultContext\r\n\r\n except OfficialException as officialException:\r\n # 获得的错误信息\r\n errcode, errorInfo, solution = officialException.get_error_info(self.json_decode)\r\n # 打印到日志文件中\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=3,\r\n context='Function name:{0} - errcode:{1}'.format(function_name,\r\n errcode)\r\n )\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=3,\r\n context='Function name:{0} - errorInfo:{1}'.format(function_name,\r\n errorInfo)\r\n )\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=3,\r\n context='Function name:{0} - solution:{1}'.format(function_name,\r\n solution)\r\n )\r\n\r\n # 异常信息\r\n resultContext.append(errorInfo)\r\n context = \"驾驶导航查询失败,换个词进行搜索吧\"\r\n resultContext.append(context)\r\n return resultContext\r\n\r\n except CustomExpection as customException:\r\n info, detail_information, error_prompt = customException.get_error_info(self.json_decode)\r\n # 打印到日志文件中\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=3,\r\n context='Function name:{0} - info:{1}'.format(function_name,\r\n info)\r\n )\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=3,\r\n context='Function 
name:{0} - detail_information:{1}'.format(function_name,\r\n detail_information)\r\n )\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=3,\r\n context='error_prompt:{0}'.format(error_prompt)\r\n )\r\n\r\n # 异常信息\r\n context = \"驾驶导航查询失败,换个词进行搜索吧\"\r\n resultContext.append(context)\r\n return resultContext\r\n\r\n def get_ride_route_planning(self, origin: str,\r\n destination: str\r\n ) -> dict:\r\n \"\"\"\r\n 函数:获取骑行路径规划数据。\\n\r\n Args:\r\n origin:出发点经纬度,必填。填入规则:X,Y,采用\",\"分隔,例如“ 117.500244, 40.417801 ”,小数点后不得超过6位\r\n destination:目的地经纬度,必填。填入规则:X,Y,采用\",\"分隔,例如“ 117.500244, 40.417801 ”,小数点后不得超过6位\r\n Returns:返回获得的json格式数据或错误信息\r\n \"\"\"\r\n\r\n self.destination = destination\r\n self.origin = origin\r\n\r\n # 写入日志\r\n writeLog = WriteLog()\r\n class_name = self.__class__.__name__\r\n function_name = inspect.stack()[0][3]\r\n log_filename = writeLog.create_filename(class_name=class_name)\r\n\r\n # 传入参数\r\n parameters = {'key': self.APIkey,\r\n 'origin': self.origin,\r\n 'destination': self.destination\r\n }\r\n\r\n # 获取数据\r\n try:\r\n request_information = requests.get(\"https://restapi.amap.com/v4/direction/bicycling?parameters\",\r\n params=parameters)\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=1,\r\n context='Function name:{0} - request_information:{1}'.format(function_name,\r\n request_information)\r\n )\r\n request_information.close() # 关闭访问\r\n request_information.raise_for_status() # 如果响应状态码不是 200,就主动抛出异常\r\n # 返回格式化后的JSON数据\r\n json_decode = json.loads(request_information.text)\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=6,\r\n context='Function name:{0} - Ride route data successful get.'.format(\r\n function_name)\r\n )\r\n return json_decode\r\n\r\n except requests.exceptions.ConnectionError as e:\r\n time.sleep(1)\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=5,\r\n context='Function name:{0} - {1} has 
occured.'.format(function_name,\r\n e.__class__.__name__)\r\n )\r\n # 异常信息\r\n error_connection = 'ConnectionError -- please wait 3 seconds'\r\n error_connection_dict = {'status': '2',\r\n 'info': 'requests.exceptions.ConnectionError',\r\n 'detail_information': requests.exceptions.ConnectionError,\r\n 'error_prompt': error_connection\r\n }\r\n return error_connection_dict\r\n\r\n except requests.exceptions.ChunkedEncodingError as e:\r\n time.sleep(1)\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=5,\r\n context='Function name:{0} - {1} has occured.'.format(function_name,\r\n e.__class__.__name__\r\n )\r\n )\r\n # 异常信息\r\n error_chuck_encoding = 'ChunkedEncodingError -- please wait 3 seconds'\r\n error_chuck_encoding_dict = {'status': '2',\r\n 'info': 'HTTPError',\r\n 'detail_information': requests.exceptions.ChunkedEncodingError,\r\n 'error_prompt': error_chuck_encoding\r\n }\r\n return error_chuck_encoding_dict\r\n\r\n except Exception as e:\r\n time.sleep(1)\r\n error_information = 'Unfortunately -- An Unknown Error Happened, Please wait 3 seconds'\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=5,\r\n context='Function name:{0} - {1} has occured.'.format(function_name,\r\n e.__class__.__name__)\r\n )\r\n # 异常信息\r\n error_information_dict = {'status': '2',\r\n 'info': 'HTTPError',\r\n 'detail_information': requests.exceptions.ChunkedEncodingError,\r\n 'error_prompt': error_information\r\n }\r\n return error_information_dict\r\n\r\n def parse_ride_route_planning(self, json_decode: dict\r\n ) -> list:\r\n \"\"\"\r\n 函数:解析骑行路径规划数据。\\n\r\n Args:\r\n json_decode:get_ride_route_planning()方法从网络中获取的数据\r\n \"\"\"\r\n\r\n # TODO:未来版本将返回数据从list升级为dict\r\n self.json_decode = json_decode\r\n\r\n # 输出结果\r\n resultContext = []\r\n\r\n # 写入日志\r\n writeLog = WriteLog()\r\n class_name = self.__class__.__name__\r\n function_name = inspect.stack()[0][3]\r\n log_filename = 
writeLog.create_filename(class_name=class_name)\r\n\r\n try:\r\n if 'errcode' not in self.json_decode:\r\n if self.json_decode['status'] == '0':\r\n # 官方文档异常\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=1,\r\n context='Function name:{0} - status:{1}'.format(function_name,\r\n self.json_decode['status'])\r\n )\r\n raise OfficialException\r\n\r\n elif self.json_decode['status'] == '2':\r\n # 自定义异常\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=1,\r\n context='Function name:{0} - status:{1}'.format(function_name,\r\n self.json_decode['status'])\r\n )\r\n raise CustomExpection\r\n\r\n elif self.json_decode['errcode'] == 0: # 请求数据成功的状态码\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=6,\r\n context='Function name:{0} - errcode:{1},not error'.format(function_name,\r\n self.json_decode[\r\n 'errcode'])\r\n )\r\n # 骑行方案列表信息\r\n paths = self.json_decode['data']['paths']\r\n len_paths = len(paths)\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=1,\r\n context='Function name:{0} - paths length:{1}'.format(function_name,\r\n len_paths)\r\n )\r\n resultContext.append(\"已为您智能生成{0}种骑行方案\".format(len_paths))\r\n\r\n for path in paths:\r\n steps = path['steps']\r\n len_steps = len(steps)\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=1,\r\n context='Function name:{0} - steps length:{1}'.format(function_name,\r\n len_steps)\r\n )\r\n resultContext.append(\"本次路径规划共分为{0}步\".format(len_steps))\r\n\r\n for sub_item, step in enumerate(steps):\r\n instruction = step['instruction']\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=1,\r\n context='Function name:{0} - instruction:{1}'.format(\r\n function_name,\r\n instruction)\r\n )\r\n resultContext.append(\"第{0}步:{1}\".format(sub_item + 1, instruction))\r\n resultContext.append(\"骑行导航结束\")\r\n 
return resultContext\r\n\r\n except OfficialException as officialException:\r\n # 获得的错误信息\r\n errcode, errorInfo, solution = officialException.get_error_info(self.json_decode)\r\n # 打印到日志文件中\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=3,\r\n context='Function name:{0} - errcode:{1}'.format(function_name,\r\n errcode)\r\n )\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=3,\r\n context='Function name:{0} - errorInfo:{1}'.format(function_name,\r\n errorInfo)\r\n )\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=3,\r\n context='Function name:{0} - solution:{1}'.format(function_name,\r\n solution)\r\n )\r\n # 异常信息\r\n resultContext.append(errorInfo)\r\n context = \"骑行导航查询失败,换个词进行搜索吧\"\r\n resultContext.append(context)\r\n return resultContext\r\n\r\n except CustomExpection as customException:\r\n info, detail_information, error_prompt = customException.get_error_info(self.json_decode)\r\n # 打印到日志文件中\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=3,\r\n context='Function name:{0} - info:{1}'.format(function_name,\r\n info)\r\n )\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=3,\r\n context='Function name:{0} - detail_information:{1}'.format(function_name,\r\n detail_information)\r\n )\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=3,\r\n context='error_prompt:{0}'.format(error_prompt)\r\n )\r\n\r\n # 异常信息\r\n context = \"骑行导航查询失败,换个词进行搜索吧\"\r\n resultContext.append(context)\r\n return resultContext\r\n" }, { "alpha_fraction": 0.6434822678565979, "alphanum_fraction": 0.6712344288825989, "avg_line_length": 36.94170379638672, "blob_id": "24f1de02834b02995b9c8f332134e12de5d102cf", "content_id": "4ec38f377a07aeb541bf481a70c91c9b2978824c", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 10646, "license_type": "permissive", "max_line_length": 120, "num_lines": 
223, "path": "/FundamentalFunctions/SouthShanxiAreaDataVisualization.py", "repo_name": "Gaoyifei1011/AmapProgram", "src_encoding": "UTF-8", "text": "import matplotlib.pyplot as plt\r\nimport numpy as np\r\nimport pandas as pd\r\n\r\n\"\"\"\r\nCopy from Jupyter Notebook\r\n\"\"\"\r\n# TODO: In the future version will insert into the 山西省道路信息分析系统 page.\r\n\r\n# 设置字体,否则中文会显示异常\r\nplt.rcParams['font.sans-serif'] = ['Microsoft YaHei']\r\nplt.rcParams['axes.unicode_minus'] = False\r\nplt.rcParams['figure.figsize'] = (22.0, 14.0)\r\nplt.title(\"晋南地区各城市道路通行情况\")\r\n\r\n# 使用pandas读取excel文件\r\ndf_changzhi = pd.read_excel(r'F:/01.XLS', sheet_name='长治市')\r\ndf_jincheng = pd.read_excel(r'F:/01.XLS', sheet_name='晋城市')\r\ndf_linfeng = pd.read_excel(r'F:/01.XLS', sheet_name='临汾市')\r\ndf_yuncheng = pd.read_excel(r'F:/01.XLS', sheet_name='运城市')\r\n\r\n# 设置子图默认间距\r\nplt.subplots_adjust(hspace=0.5)\r\n# 长治市数据可视化\r\nplt.subplot(2, 2, 1)\r\n# 添加条形图的标题\r\nplt.title('长治市各道路通行情况', fontsize=16)\r\nplt.ylabel(\"拥堵路段信息统计(单位:出现次数)\", fontsize=16)\r\nplt.xlabel(\"道路名称\", fontsize=16)\r\n\r\n# 获取指定道路名称的拥堵评价信息\r\nchangzhi_road_name = df_changzhi.groupby(['道路名称']).agg({'道路名称': 'count'}).to_dict()\r\nchangzhi_road_cong = df_changzhi.groupby(['道路名称']).agg({'路段拥堵评价': 'count'}).to_dict()\r\n\r\n# 两个道路信息进行合并\r\nchangzhi_combination = tuple(zip(changzhi_road_cong['路段拥堵评价'].values(), changzhi_road_name['道路名称'].values()))\r\n\r\n# 获取拥堵占比信息,道路通畅次数\r\nchangzhi_cong_proportion = []\r\nchangzhi_clear_road = []\r\n\r\nfor item in list(changzhi_combination):\r\n # print(\"{:.2f}\".format(item[0]/item[1]))\r\n changzhi_cong_proportion.append(float(\"{:.2f}\".format(item[0] / item[1])))\r\n changzhi_clear_road.append(int(\"{0}\".format(item[1] - item[0])))\r\n\r\nchangzhi_information = df_changzhi.groupby(['道路名称']).agg({'道路名称': 'count', '路段拥堵评价': 'count'})\r\n# print(changzhi_information)\r\n# print(type(changzhi_information))\r\nchangzhi_information['拥堵占比'] = 
changzhi_cong_proportion\r\nchangzhi_information['道路畅通评价'] = changzhi_clear_road\r\n# print(changzhi_information)\r\n# print(list(changzhi_information['道路名称']))\r\n# print(list(changzhi_information['路段拥堵评价']))\r\n# print(list(changzhi_information['拥堵占比']))\r\n# print(changzhi_information['道路畅通评价'])\r\n# 纵轴列表数据\r\ny = range(0, 101, 10)\r\n# print(y)\r\n\r\n# 长治市道路名称\r\nchangzhi_road_name_list = ['太行东街', '太行西街', '英雄中路', '英雄北路', '英雄南路']\r\nplt.xticks(range(len(changzhi_road_name_list)), changzhi_road_name_list, rotation=70, fontsize=16)\r\n\r\nplt.yticks(range(0, 101, 10), fontsize=16)\r\n\r\nplt.bar(np.arange(5) - 0.3, height=list(changzhi_road_name['道路名称'].values()), alpha=0.5, width=0.3, color='skyblue',\r\n edgecolor='red', label='道路获取次数')\r\nplt.bar(np.arange(5), height=list(changzhi_information['道路畅通评价']), alpha=0.5, width=0.3, color='green',\r\n edgecolor='blue', label='道路畅通次数')\r\nplt.bar(np.arange(5) + 0.3, height=list(changzhi_information['路段拥堵评价']), alpha=0.5, width=0.3, color='yellow',\r\n edgecolor='blue', label='道路拥堵次数')\r\n\r\n# 晋城市数据可视化\r\nplt.subplot(2, 2, 2)\r\n# 添加条形图的标题\r\nplt.title('晋城市各道路通行情况', fontsize=16)\r\nplt.ylabel(\"拥堵路段信息统计(单位:出现次数)\", fontsize=16)\r\nplt.xlabel(\"道路名称\", fontsize=16)\r\n\r\n# 获取指定道路名称的拥堵评价信息\r\njincheng_road_name = df_jincheng.groupby(['道路名称']).agg({'道路名称': 'count'}).to_dict()\r\njincheng_road_cong = df_jincheng.groupby(['道路名称']).agg({'路段拥堵评价': 'count'}).to_dict()\r\n\r\n# 两个道路信息进行合并\r\njincheng_combination = tuple(zip(jincheng_road_cong['路段拥堵评价'].values(), jincheng_road_name['道路名称'].values()))\r\n\r\n# 获取拥堵占比信息,道路通畅次数\r\njincheng_cong_proportion = []\r\njincheng_clear_road = []\r\n\r\nfor item in list(jincheng_combination):\r\n # print(\"{:.2f}\".format(item[0]/item[1]))\r\n jincheng_cong_proportion.append(float(\"{:.2f}\".format(item[0] / item[1])))\r\n jincheng_clear_road.append(int(\"{0}\".format(item[1] - item[0])))\r\n\r\njincheng_information = df_jincheng.groupby(['道路名称']).agg({'道路名称': 'count', '路段拥堵评价': 
'count'})\r\n\r\n# print(type(jincheng_information))\r\njincheng_information['拥堵占比'] = jincheng_cong_proportion\r\njincheng_information['道路畅通评价'] = jincheng_clear_road\r\n\r\n# print(list(jincheng_information['道路名称']))\r\n# print(list(jincheng_information['路段拥堵评价']))\r\n# print(list(jincheng_information['拥堵占比']))\r\n# print(list(jincheng_information['道路畅通评价']))\r\n# 纵轴列表数据\r\ny = range(0, 101, 10)\r\n# print(y)\r\n\r\n# 晋城市道路名称\r\njincheng_road_name_list = ['中原东街', '中原西街', '凤台东街', '凤台西街', '泽州南路', '泽州路']\r\nplt.xticks(range(len(jincheng_road_name_list)), jincheng_road_name_list, rotation=70, fontsize=16)\r\n\r\nplt.yticks(range(0, 101, 10), fontsize=16)\r\n\r\nplt.bar(np.arange(6) - 0.3, height=list(jincheng_road_name['道路名称'].values()), alpha=0.5, width=0.3, color='skyblue',\r\n edgecolor='red', label='道路获取次数')\r\nplt.bar(np.arange(6), height=list(jincheng_information['道路畅通评价']), alpha=0.5, width=0.3, color='green',\r\n edgecolor='blue', label='道路畅通次数')\r\nplt.bar(np.arange(6) + 0.3, height=list(jincheng_information['路段拥堵评价']), alpha=0.5, width=0.3, color='yellow',\r\n edgecolor='blue', label='道路拥堵次数')\r\n\r\n# 临汾市数据可视化\r\nplt.subplot(2, 2, 3)\r\n# 添加条形图的标题\r\nplt.title('临汾市各道路通行情况', fontsize=16)\r\nplt.ylabel(\"拥堵路段信息统计(单位:出现次数)\", fontsize=16)\r\nplt.xlabel(\"道路名称\", fontsize=16)\r\n\r\n# 获取指定道路名称的拥堵评价信息\r\nlinfeng_road_name = df_linfeng.groupby(['道路名称']).agg({'道路名称': 'count'}).to_dict()\r\nlinfeng_road_cong = df_linfeng.groupby(['道路名称']).agg({'路段拥堵评价': 'count'}).to_dict()\r\n\r\n# 两个道路信息进行合并\r\nlinfeng_combination = tuple(zip(linfeng_road_cong['路段拥堵评价'].values(), linfeng_road_name['道路名称'].values()))\r\n\r\n# 获取拥堵占比信息,道路通畅次数\r\nlinfeng_cong_proportion = []\r\nlinfeng_clear_road = []\r\n\r\nfor item in list(linfeng_combination):\r\n # print(\"{:.2f}\".format(item[0]/item[1]))\r\n linfeng_cong_proportion.append(float(\"{:.2f}\".format(item[0] / item[1])))\r\n linfeng_clear_road.append(int(\"{0}\".format(item[1] - item[0])))\r\n\r\nlinfeng_information = 
df_linfeng.groupby(['道路名称']).agg({'道路名称': 'count', '路段拥堵评价': 'count'})\r\n\r\n# print(type(linfeng_information))\r\nlinfeng_information['拥堵占比'] = linfeng_cong_proportion\r\nlinfeng_information['道路畅通评价'] = linfeng_clear_road\r\n\r\n# print(list(linfeng_information['道路名称']))\r\n# print(list(linfeng_information['路段拥堵评价']))\r\n# print(list(linfeng_information['拥堵占比']))\r\n# print(list(linfeng_information['道路畅通评价']))\r\n# 纵轴列表数据\r\ny = range(0, 101, 10)\r\n# print(y)\r\n\r\n# 临汾市道路名称\r\nlinfeng_road_name_list = ['滨河西路', '滨河路', '鼓楼北大街', '鼓楼南大街']\r\nplt.xticks(range(len(linfeng_road_name_list)), linfeng_road_name_list, rotation=70, fontsize=16)\r\n\r\nplt.yticks(range(0, 101, 10), fontsize=16)\r\n\r\nplt.bar(np.arange(4) - 0.3, height=list(linfeng_road_name['道路名称'].values()), alpha=0.5, width=0.3, color='skyblue',\r\n edgecolor='red', label='道路获取次数')\r\nplt.bar(np.arange(4), height=list(linfeng_information['道路畅通评价']), alpha=0.5, width=0.3, color='green', edgecolor='blue',\r\n label='道路畅通次数')\r\nplt.bar(np.arange(4) + 0.3, height=list(linfeng_information['路段拥堵评价']), alpha=0.5, width=0.3, color='yellow',\r\n edgecolor='blue', label='道路拥堵次数')\r\n\r\n# 运城市数据可视化\r\nplt.subplot(2, 2, 4)\r\n# 添加条形图的标题\r\nplt.title('运城市各道路通行情况', fontsize=16)\r\nplt.ylabel(\"拥堵路段信息统计(单位:出现次数)\", fontsize=16)\r\nplt.xlabel(\"道路名称\", fontsize=16)\r\n\r\n# 获取指定道路名称的拥堵评价信息\r\nyuncheng_road_name = df_yuncheng.groupby(['道路名称']).agg({'道路名称': 'count'}).to_dict()\r\nyuncheng_road_cong = df_yuncheng.groupby(['道路名称']).agg({'路段拥堵评价': 'count'}).to_dict()\r\n\r\n# 两个道路信息进行合并\r\nyuncheng_combination = tuple(zip(yuncheng_road_cong['路段拥堵评价'].values(), yuncheng_road_name['道路名称'].values()))\r\n\r\n# 获取拥堵占比信息,道路通畅次数\r\nyuncheng_cong_proportion = []\r\nyuncheng_clear_road = []\r\n\r\nfor item in list(yuncheng_combination):\r\n # print(\"{:.2f}\".format(item[0]/item[1]))\r\n yuncheng_cong_proportion.append(float(\"{:.2f}\".format(item[0] / item[1])))\r\n yuncheng_clear_road.append(int(\"{0}\".format(item[1] - 
item[0])))\r\n\r\nyuncheng_information = df_yuncheng.groupby(['道路名称']).agg({'道路名称': 'count', '路段拥堵评价': 'count'})\r\n\r\n# print(type(yuncheng_information))\r\nyuncheng_information['拥堵占比'] = yuncheng_cong_proportion\r\nyuncheng_information['道路畅通评价'] = yuncheng_clear_road\r\n\r\n# print(list(yuncheng_information['道路名称']))\r\n# print(list(yuncheng_information['路段拥堵评价']))\r\n# print(list(yuncheng_information['拥堵占比']))\r\n# print(list(yuncheng_information['道路畅通评价']))\r\n# 纵轴列表数据\r\ny = range(0, 101, 10)\r\n# print(y)\r\n\r\n# 运城市道路名称\r\nyuncheng_road_name_list = ['中银北路', '中银南路', '人民北路', '学苑路', '工农东街', '机场路', '解放北路', '解放南路']\r\nplt.xticks(range(len(yuncheng_road_name_list)), yuncheng_road_name_list, rotation=70, fontsize=16)\r\n\r\nplt.yticks(range(0, 101, 10), fontsize=16)\r\n\r\nplt.bar(np.arange(8) - 0.3, height=list(yuncheng_road_name['道路名称'].values()), alpha=0.5, width=0.3, color='skyblue',\r\n edgecolor='red', label='道路获取次数')\r\nplt.bar(np.arange(8), height=list(yuncheng_information['道路畅通评价']), alpha=0.5, width=0.3, color='green',\r\n edgecolor='blue', label='道路畅通次数')\r\nplt.bar(np.arange(8) + 0.3, height=list(yuncheng_information['路段拥堵评价']), alpha=0.5, width=0.3, color='yellow',\r\n edgecolor='blue', label='道路拥堵次数')\r\nplt.savefig(r\"C:\\Users\\高怡飞\\Desktop\\03.png\", dpi=600)\r\n" }, { "alpha_fraction": 0.4970286786556244, "alphanum_fraction": 0.501946747303009, "avg_line_length": 45.843135833740234, "blob_id": "f23aea87343111d462dc872376a674b5544adb3c", "content_id": "9d14d7e551bd263d9dccdc5a79dd114713704569", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 10212, "license_type": "permissive", "max_line_length": 125, "num_lines": 204, "path": "/FundamentalFunctions/AdministrativeDistrictOperation.py", "repo_name": "Gaoyifei1011/AmapProgram", "src_encoding": "UTF-8", "text": "import inspect\r\n\r\nfrom AmapFunctions.AdministrativeDistrictEnquiry import AdministrativeDistrictEnquiry\r\nfrom 
logrecord.WriteLog import WriteLog\r\n\r\n\r\nclass AdministrativeDistrictOperation:\r\n \"\"\"\r\n Class:行政区域查询操作\r\n \"\"\"\r\n def __init__(self):\r\n self.district = None\r\n self.subdistrict = None\r\n\r\n # 写入日志\r\n writeLog = WriteLog()\r\n class_name = self.__class__.__name__\r\n log_filename = writeLog.create_filename(class_name=class_name)\r\n writeLog.write_to_log(file_name=log_filename, log_level=1, context='Class name:{0} start'.format(class_name))\r\n\r\n def get_sub_district(self, district: str\r\n ) -> list:\r\n \"\"\"\r\n 函数:获取选择的行政区域对应的下级行政区域\r\n Args:\r\n district:根行政区域\r\n Returns:\r\n 返回获取到根行政区域对应的具体信息\r\n \"\"\"\r\n\r\n # TODO:未来版本将返回数据从list升级为dict\r\n self.district = district\r\n\r\n # 写入日志\r\n writeLog = WriteLog()\r\n class_name = self.__class__.__name__\r\n function_name = inspect.stack()[0][3]\r\n log_filename = writeLog.create_filename(class_name=class_name)\r\n\r\n administrativeDistrictEnquiry = AdministrativeDistrictEnquiry()\r\n\r\n # 北京市对应的行政区域\r\n if self.district == '北京市':\r\n resultAdministrativeDistrictEnquiry = administrativeDistrictEnquiry.get_administrative_district(\r\n keywords='北京城区',\r\n sub_district=1)\r\n resultAdministrativeList = administrativeDistrictEnquiry.get_sub_administrative_district(\r\n resultAdministrativeDistrictEnquiry)\r\n\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=1,\r\n context='Function name:{0} - resultAdministrativeDistrictEnquiry:{1}'.format(\r\n function_name,\r\n resultAdministrativeDistrictEnquiry)\r\n )\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=1,\r\n context='Function name:{0} - resultAdministrativeList:{1}'.format(function_name,\r\n resultAdministrativeList)\r\n )\r\n\r\n # 上海市对应的行政区域\r\n elif self.district == '上海市':\r\n resultAdministrativeDistrictEnquiry = administrativeDistrictEnquiry.get_administrative_district(\r\n keywords='上海城区',\r\n sub_district=1)\r\n resultAdministrativeList = 
administrativeDistrictEnquiry.get_sub_administrative_district(\r\n resultAdministrativeDistrictEnquiry)\r\n\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=1,\r\n context='Function name:{0} - resultAdministrativeDistrictEnquiry:{1}'.format(\r\n function_name,\r\n resultAdministrativeDistrictEnquiry)\r\n )\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=1,\r\n context='Function name:{0} - resultAdministrativeList:{1}'.format(function_name,\r\n resultAdministrativeList)\r\n )\r\n\r\n # 天津市对应的行政区域\r\n elif self.district == '天津市':\r\n resultAdministrativeDistrictEnquiry = administrativeDistrictEnquiry.get_administrative_district(\r\n keywords='天津城区',\r\n sub_district=1)\r\n resultAdministrativeList = administrativeDistrictEnquiry.get_sub_administrative_district(\r\n resultAdministrativeDistrictEnquiry)\r\n\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=1,\r\n context='Function name:{0} - resultAdministrativeDistrictEnquiry:{1}'.format(\r\n function_name,\r\n resultAdministrativeDistrictEnquiry)\r\n )\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=1,\r\n context='Function name:{0} - resultAdministrativeList:{1}'.format(function_name,\r\n resultAdministrativeList)\r\n )\r\n\r\n # 重庆市对应的行政区域\r\n elif self.district == '重庆市':\r\n resultAdministrativeList = []\r\n resultAdministrativeDistrictEnquiry = administrativeDistrictEnquiry.get_administrative_district(\r\n keywords='重庆城区',\r\n sub_district=1)\r\n resultAdministrativeList.extend(administrativeDistrictEnquiry.get_sub_administrative_district(\r\n resultAdministrativeDistrictEnquiry))\r\n resultAdministrativeDistrictEnquiry = administrativeDistrictEnquiry.get_administrative_district(\r\n keywords='重庆郊县',\r\n sub_district=1)\r\n resultAdministrativeList.extend(administrativeDistrictEnquiry.get_sub_administrative_district(\r\n 
resultAdministrativeDistrictEnquiry))\r\n\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=1,\r\n context='Function name:{0} - resultAdministrativeDistrictEnquiry:{1}'.format(\r\n function_name,\r\n resultAdministrativeDistrictEnquiry)\r\n )\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=1,\r\n context='Function name:{0} - resultAdministrativeList:{1}'.format(function_name,\r\n resultAdministrativeList)\r\n )\r\n\r\n # 普通地区对应的行政区域\r\n else:\r\n resultAdministrativeDistrictEnquiry = administrativeDistrictEnquiry.get_administrative_district(\r\n keywords=self.district,\r\n sub_district=1)\r\n resultAdministrativeList = administrativeDistrictEnquiry.get_sub_administrative_district(\r\n resultAdministrativeDistrictEnquiry)\r\n\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=1,\r\n context='Function name:{0} - resultAdministrativeDistrictEnquiry:{1}'.format(\r\n function_name,\r\n resultAdministrativeDistrictEnquiry)\r\n )\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=1,\r\n context='Function name:{0} - resultAdministrativeList:{1}'.format(function_name,\r\n resultAdministrativeList)\r\n )\r\n\r\n return resultAdministrativeList\r\n\r\n def get_all_district_information(self, district: str,\r\n subdistrict: int\r\n )->list:\r\n \"\"\"\r\n 函数:获取选择的行政区域对应的下级行政区域\r\n Args:\r\n district:根行政区域\r\n subdistrict:下级行政区级数\r\n Returns:\r\n 返回获取到根行政区域对应的具体信息\r\n \"\"\"\r\n\r\n # TODO:未来版本将返回数据从list升级为dict\r\n self.district = district\r\n self.subdistrict = subdistrict\r\n\r\n # 写入日志\r\n writeLog = WriteLog()\r\n class_name = self.__class__.__name__\r\n function_name = inspect.stack()[0][3]\r\n log_filename = writeLog.create_filename(class_name=class_name)\r\n\r\n # 获取内容\r\n administrativeDistrictEnquiry = AdministrativeDistrictEnquiry()\r\n resultAdministrativeDistrictEnquiry = 
administrativeDistrictEnquiry.get_administrative_district(\r\n keywords=self.district,\r\n sub_district=subdistrict)\r\n resultAdministrativeList = administrativeDistrictEnquiry.parse_administrative_district(\r\n resultAdministrativeDistrictEnquiry, self.subdistrict)\r\n\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=1,\r\n context='Function name:{0} - resultAdministrativeDistrictEnquiry:{1}'.format(\r\n function_name,\r\n resultAdministrativeDistrictEnquiry)\r\n )\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=1,\r\n context='Function name:{0} - resultAdministrativeList:{1}'.format(function_name,\r\n resultAdministrativeList)\r\n )\r\n\r\n return resultAdministrativeList\r\n" }, { "alpha_fraction": 0.5206891894340515, "alphanum_fraction": 0.5243440866470337, "avg_line_length": 39.41081237792969, "blob_id": "c0e4ae3af3fcdefcef92581285b06a320e6fdff5", "content_id": "26a767e8a479dd0ff4d5c6f8da5329caef0e5455", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8345, "license_type": "permissive", "max_line_length": 130, "num_lines": 185, "path": "/FundamentalFunctions/BusRoutePlanningOperation.py", "repo_name": "Gaoyifei1011/AmapProgram", "src_encoding": "UTF-8", "text": "import inspect\r\n\r\nfrom AmapFunctions.GeographicCoding import GeographicCoding\r\nfrom AmapFunctions.RoutePlanning import RoutePlanning\r\nfrom logrecord.WriteLog import WriteLog\r\n\r\n\r\nclass BusRoutePlanningOperation:\r\n \"\"\"\r\n Class:公交路径查询规划操作\r\n \"\"\"\r\n def __init__(self):\r\n self.busDepartureAddress = None\r\n self.busDestinationAddress = None\r\n self.busDepartureCity = None\r\n self.busDestinationCity = None\r\n\r\n # 写入日志\r\n writeLog = WriteLog()\r\n class_name = self.__class__.__name__\r\n log_filename = writeLog.create_filename(class_name=class_name)\r\n writeLog.write_to_log(file_name=log_filename, log_level=1, context='Class 
name:{0} start'.format(class_name))\r\n\r\n def check_bus_departure_information(self, busDepartureAddress: str\r\n ) -> int:\r\n \"\"\"\r\n 函数:检测用户提供的公交路径出发点是否符合规范要求\r\n Args:\r\n busDepartureAddress: 用户输入的出发点\r\n Returns:\r\n 检测类型识别码\r\n \"\"\"\r\n\r\n self.busDepartureAddress = busDepartureAddress\r\n\r\n # 写入日志\r\n writeLog = WriteLog()\r\n class_name = self.__class__.__name__\r\n function_name = inspect.stack()[0][3]\r\n log_filename = writeLog.create_filename(class_name=class_name)\r\n\r\n # 检查结果\r\n checkedResult = self.busDepartureAddress is None or self.busDepartureAddress == ''\r\n\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=1,\r\n context='Function name:{0} - bus departure address check result:{1}'.format(function_name,\r\n checkedResult)\r\n )\r\n\r\n if checkedResult:\r\n return 2\r\n # TODO:\r\n # 使用python正则表达式验证提供的公交路径出发点是否符合规范要求\r\n # 此处检测格式错误返回false\r\n else:\r\n return True\r\n\r\n def check_bus_destination_information(self, busDestinationAddress: str\r\n ) -> int:\r\n \"\"\"\r\n 函数:检测用户提供的公交路径终点是否符合规范要求\r\n Args:\r\n busDestinationAddress: 用户输入的终点\r\n Returns:\r\n 检测类型识别码\r\n \"\"\"\r\n\r\n self.busDestinationAddress = busDestinationAddress\r\n\r\n # 写入日志\r\n writeLog = WriteLog()\r\n class_name = self.__class__.__name__\r\n function_name = inspect.stack()[0][3]\r\n log_filename = writeLog.create_filename(class_name=class_name)\r\n\r\n # 检查结果\r\n checkedResult = self.busDestinationAddress is None or self.busDestinationAddress == ''\r\n\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=1,\r\n context='Function name:{0} - bus destination address check result:{1}'.format(\r\n function_name,\r\n checkedResult)\r\n )\r\n\r\n if checkedResult:\r\n return 2\r\n # TODO:\r\n # 检测用户提供的公交路径出发点是否符合规范要求\r\n # 此处检测格式错误返回false\r\n else:\r\n return True\r\n\r\n def get_bus_route_planning_information(self, busDepartureAddress: str,\r\n busDestinationAddress: str,\r\n 
busDepartureCity: str = '',\r\n busDestinationCity: str = ''\r\n ) -> list:\r\n \"\"\"\r\n 函数:获取公交路径规划的具体信息\r\n Args:\r\n busDepartureAddress: 用户输入的出发点\r\n busDestinationAddress: 用户输入的终点\r\n busDepartureCity: 用户输入的出发点对应的城市\r\n busDestinationCity: 用户输入的终点对应的城市\r\n Returns:\r\n 返回获取的步行路径规划对应的具体信息\r\n \"\"\"\r\n\r\n # TODO:未来版本将返回数据从list升级为dict\r\n\r\n self.busDepartureAddress = busDepartureAddress\r\n self.busDestinationAddress = busDestinationAddress\r\n # 在以后的版本中添加\r\n self.busDepartureCity = busDepartureCity\r\n self.busDestinationCity = busDestinationCity\r\n\r\n # 写入日志\r\n writeLog = WriteLog()\r\n class_name = self.__class__.__name__\r\n function_name = inspect.stack()[0][3]\r\n log_filename = writeLog.create_filename(class_name=class_name)\r\n\r\n # 公交路径规划\r\n geographicCoding = GeographicCoding()\r\n # 获取起点终点对应的初始编码信息\r\n # TODO:优化city参数\r\n busDepartureJsonDecode = geographicCoding.get_geographic_coding(address=self.busDepartureAddress,\r\n city='')\r\n busDestinationJsonDecode = geographicCoding.get_geographic_coding(address=self.busDestinationAddress,\r\n city='')\r\n\r\n parseBusDepartureInformation = geographicCoding.parse_geographic_coding(busDepartureJsonDecode)\r\n parseBusDestinationInformation = geographicCoding.parse_geographic_coding(busDestinationJsonDecode)\r\n\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=1,\r\n context='Function name:{0} - bus departure information:{1}'.format(function_name,\r\n parseBusDepartureInformation)\r\n )\r\n\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=1,\r\n context='Function name:{0} - bus destination information:{1}'.format(function_name,\r\n parseBusDestinationInformation)\r\n )\r\n\r\n # 起点位置编码\r\n if 'error_context' not in parseBusDepartureInformation:\r\n resultDepartureGeographicCoding = parseBusDepartureInformation['geographic_position']\r\n else:\r\n return [parseBusDepartureInformation['error_context']]\r\n\r\n # 
终点位置编码\r\n if 'error_context' not in parseBusDestinationInformation:\r\n resultDestinationGeographicCoding = parseBusDestinationInformation['geographic_position']\r\n else:\r\n return [parseBusDestinationInformation['error_context']]\r\n\r\n # 起点对应的城市\r\n resultDepartureCity = parseBusDepartureInformation['geographic_city']\r\n\r\n routePlanning = RoutePlanning()\r\n busRoutePlanning = routePlanning.get_bus_route_planning(origin=resultDepartureGeographicCoding,\r\n destination=resultDestinationGeographicCoding,\r\n city=resultDepartureCity\r\n )\r\n\r\n # 输出路径规划信息\r\n resultBusRoutePlanning = routePlanning.parse_bus_route_planning(busRoutePlanning)\r\n promptInformation = \"从{0}到{1}的步行导航信息如下所示\".format(self.busDepartureAddress, self.busDestinationAddress)\r\n resultBusRoutePlanning.insert(0, promptInformation)\r\n # only for debugging\r\n writeLog.write_to_log(file_name=log_filename,\r\n log_level=1,\r\n context='Function name:{0} - result bus route planning:{1}'.format(function_name,\r\n resultBusRoutePlanning)\r\n )\r\n\r\n return resultBusRoutePlanning\r\n" }, { "alpha_fraction": 0.6970430612564087, "alphanum_fraction": 0.7142189741134644, "avg_line_length": 66.38957977294922, "blob_id": "f4321f9ff27bcffe9ef8861b1441c900581f5d68", "content_id": "52884d8351d3bdc79b4710d668737c2332a8ee55", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 202292, "license_type": "permissive", "max_line_length": 199, "num_lines": 2880, "path": "/Window/MainUI.py", "repo_name": "Gaoyifei1011/AmapProgram", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\r\n\r\n# Form implementation generated from reading ui file 'F:\\AmapProgram\\Window\\ui\\MainWindow.ui'\r\n#\r\n# Created by: PyQt5 UI code generator 5.15.4\r\n#\r\n# WARNING: Any manual changes made to this file will be lost when pyuic5 is\r\n# run again. 
Do not edit this file unless you know what you are doing.\r\n\r\nfrom PyQt5 import QtGui, QtWidgets, QtChart\r\nfrom PyQt5.QtChart import QChartView\r\nfrom PyQt5.QtGui import QIcon, QPixmap\r\n\r\nfrom Resources.Icon.Icon import *\r\n\r\n\r\nclass Ui_AmapMainUI(object):\r\n def setupUi(self, AmapProgramForm):\r\n AmapProgramForm.setObjectName(\"AmapProgramForm\")\r\n AmapProgramForm.resize(1280, 720)\r\n self.amapProgramFrame = QtWidgets.QFrame(AmapProgramForm)\r\n self.amapProgramFrame.setGeometry(QtCore.QRect(-1, -1, 1280, 720))\r\n font = QtGui.QFont()\r\n font.setFamily(\"宋体\")\r\n font.setPointSize(14)\r\n self.amapProgramFrame.setFont(font)\r\n self.amapProgramFrame.setFrameShape(QtWidgets.QFrame.StyledPanel)\r\n self.amapProgramFrame.setFrameShadow(QtWidgets.QFrame.Raised)\r\n self.amapProgramFrame.setObjectName(\"amapProgramFrame\")\r\n self.verticalLayoutWidget = QtWidgets.QWidget(self.amapProgramFrame)\r\n self.verticalLayoutWidget.setGeometry(QtCore.QRect(0, 0, 180, 711))\r\n self.verticalLayoutWidget.setObjectName(\"verticalLayoutWidget\")\r\n self.amapProgramVerticalLayout = QtWidgets.QVBoxLayout(self.verticalLayoutWidget)\r\n self.amapProgramVerticalLayout.setContentsMargins(10, 30, 0, 10)\r\n self.amapProgramVerticalLayout.setObjectName(\"amapProgramVerticalLayout\")\r\n self.amapProgramHorizontalLayout = QtWidgets.QHBoxLayout()\r\n self.amapProgramHorizontalLayout.setObjectName(\"amapProgramHorizontalLayout\")\r\n self.usersPhotoLabel = QtWidgets.QLabel(self.verticalLayoutWidget)\r\n self.usersPhotoLabel.setMinimumSize(QtCore.QSize(50, 50))\r\n self.usersPhotoLabel.setMaximumSize(QtCore.QSize(50, 50))\r\n self.usersPhotoLabel.setText(\"\")\r\n self.usersPhotoLabel.setObjectName(\"usersPhotoLabel\")\r\n self.usersPixmap = QPixmap(\":/DefaultUsers.png\")\r\n self.usersPhotoLabel.setPixmap(self.usersPixmap)\r\n self.usersPhotoLabel.setScaledContents(True)\r\n self.amapProgramHorizontalLayout.addWidget(self.usersPhotoLabel)\r\n self.usersNameLabel = 
QtWidgets.QLabel(self.verticalLayoutWidget)\r\n self.usersNameLabel.setMinimumSize(QtCore.QSize(111, 50))\r\n self.usersNameLabel.setMaximumSize(QtCore.QSize(16777215, 50))\r\n font = QtGui.QFont()\r\n font.setFamily(\"微软雅黑 Light\")\r\n font.setPointSize(12)\r\n self.usersNameLabel.setFont(font)\r\n self.usersNameLabel.setAlignment(QtCore.Qt.AlignLeading | QtCore.Qt.AlignLeft | QtCore.Qt.AlignVCenter)\r\n self.usersNameLabel.setWordWrap(True)\r\n self.usersNameLabel.setObjectName(\"usersNameLabel\")\r\n self.amapProgramHorizontalLayout.addWidget(self.usersNameLabel)\r\n self.amapProgramHorizontalLayout.setStretch(0, 1)\r\n self.amapProgramHorizontalLayout.setStretch(1, 2)\r\n self.amapProgramVerticalLayout.addLayout(self.amapProgramHorizontalLayout)\r\n self.divisionLine4 = QtWidgets.QFrame(self.verticalLayoutWidget)\r\n self.divisionLine4.setFrameShape(QtWidgets.QFrame.HLine)\r\n self.divisionLine4.setFrameShadow(QtWidgets.QFrame.Sunken)\r\n self.divisionLine4.setObjectName(\"divisionLine4\")\r\n self.amapProgramVerticalLayout.addWidget(self.divisionLine4)\r\n self.basicFunctionLabel = QtWidgets.QLabel(self.verticalLayoutWidget)\r\n font = QtGui.QFont()\r\n font.setFamily(\"微软雅黑 Light\")\r\n font.setPointSize(14)\r\n self.basicFunctionLabel.setFont(font)\r\n self.basicFunctionLabel.setObjectName(\"basicFunctionLabel\")\r\n self.amapProgramVerticalLayout.addWidget(self.basicFunctionLabel)\r\n self.basicFunctionListWidget = QtWidgets.QListWidget(self.verticalLayoutWidget)\r\n self.basicFunctionListWidget.setMaximumSize(QtCore.QSize(169, 80))\r\n self.basicFunctionListWidget.setStyleSheet(\"QListWidget, QListView, QTreeWidget, QTreeView {\\n\"\r\n \" outline: 0px;\\n\"\r\n \"}\\n\"\r\n \"\\n\"\r\n \"QListWidget {\\n\"\r\n \" color: Black;\\n\"\r\n \" background: #F0F0F0;\\n\"\r\n \" height:80px;\\n\"\r\n \" NoFrame\\n\"\r\n \"}\\n\"\r\n \"QListWidget::Item {\\n\"\r\n \" color: rgb(0,0,0);\\n\"\r\n \" border-left: 4px solid #F5F5F5;\\n\"\r\n \" height:35px;\\n\"\r\n 
\"}\\n\"\r\n \"QListWidget::Item:selected {\\n\"\r\n \" background: rgb(204,232,255);\\n\"\r\n \" color: rgb(0,0,0);\\n\"\r\n \" border-left: 4px solid #2DB5EC;\\n\"\r\n \"}\")\r\n self.basicFunctionListWidget.setFrameShape(QtWidgets.QFrame.NoFrame)\r\n self.basicFunctionListWidget.setObjectName(\"basicFunctionListWidget\")\r\n item = QtWidgets.QListWidgetItem()\r\n font = QtGui.QFont()\r\n font.setFamily(\"微软雅黑 Light\")\r\n font.setPointSize(14)\r\n item.setFont(font)\r\n icon = QtGui.QIcon()\r\n icon.addPixmap(QtGui.QPixmap(\":/RoutePlanningLogo.png\"), QtGui.QIcon.Normal, QtGui.QIcon.Off)\r\n item.setIcon(icon)\r\n self.basicFunctionListWidget.addItem(item)\r\n item = QtWidgets.QListWidgetItem()\r\n font = QtGui.QFont()\r\n font.setFamily(\"微软雅黑 Light\")\r\n font.setPointSize(14)\r\n item.setFont(font)\r\n icon1 = QtGui.QIcon()\r\n icon1.addPixmap(QtGui.QPixmap(\":/Maps.png\"), QtGui.QIcon.Normal, QtGui.QIcon.Off)\r\n item.setIcon(icon1)\r\n self.basicFunctionListWidget.addItem(item)\r\n self.amapProgramVerticalLayout.addWidget(self.basicFunctionListWidget)\r\n self.divisionLine1 = QtWidgets.QFrame(self.verticalLayoutWidget)\r\n self.divisionLine1.setFrameShape(QtWidgets.QFrame.HLine)\r\n self.divisionLine1.setFrameShadow(QtWidgets.QFrame.Sunken)\r\n self.divisionLine1.setObjectName(\"divisionLine1\")\r\n self.amapProgramVerticalLayout.addWidget(self.divisionLine1)\r\n self.searchServiceLabel = QtWidgets.QLabel(self.verticalLayoutWidget)\r\n font = QtGui.QFont()\r\n font.setFamily(\"微软雅黑 Light\")\r\n font.setPointSize(14)\r\n self.searchServiceLabel.setFont(font)\r\n self.searchServiceLabel.setObjectName(\"searchServiceLabel\")\r\n self.amapProgramVerticalLayout.addWidget(self.searchServiceLabel)\r\n self.searchServiceListWidget = QtWidgets.QListWidget(self.verticalLayoutWidget)\r\n self.searchServiceListWidget.setMaximumSize(QtCore.QSize(169, 115))\r\n self.searchServiceListWidget.setStyleSheet(\"QListWidget, QListView, QTreeWidget, QTreeView {\\n\"\r\n \" 
outline: 0px;\\n\"\r\n \"}\\n\"\r\n \"\\n\"\r\n \"QListWidget {\\n\"\r\n \" color: Black;\\n\"\r\n \" background: #F0F0F0;\\n\"\r\n \" height:80px;\\n\"\r\n \" NoFrame\\n\"\r\n \"}\\n\"\r\n \"QListWidget::Item {\\n\"\r\n \" color: rgb(0,0,0);\\n\"\r\n \" border-left: 4px solid #F5F5F5;\\n\"\r\n \" height:35px;\\n\"\r\n \"}\\n\"\r\n \"QListWidget::Item:selected {\\n\"\r\n \" background: rgb(204,232,255);\\n\"\r\n \" color: rgb(0,0,0);\\n\"\r\n \" border-left: 4px solid #2DB5EC;\\n\"\r\n \"}\")\r\n self.searchServiceListWidget.setFrameShape(QtWidgets.QFrame.NoFrame)\r\n self.searchServiceListWidget.setObjectName(\"searchServiceListWidget\")\r\n item = QtWidgets.QListWidgetItem()\r\n font = QtGui.QFont()\r\n font.setFamily(\"微软雅黑 Light\")\r\n font.setPointSize(14)\r\n item.setFont(font)\r\n icon2 = QtGui.QIcon()\r\n icon2.addPixmap(QtGui.QPixmap(\":/IPLocation.png\"), QtGui.QIcon.Normal, QtGui.QIcon.Off)\r\n item.setIcon(icon2)\r\n self.searchServiceListWidget.addItem(item)\r\n item = QtWidgets.QListWidgetItem()\r\n font = QtGui.QFont()\r\n font.setFamily(\"微软雅黑 Light\")\r\n font.setPointSize(14)\r\n item.setFont(font)\r\n icon3 = QtGui.QIcon()\r\n icon3.addPixmap(QtGui.QPixmap(\":/AdministrativeDistrict.png\"), QtGui.QIcon.Normal, QtGui.QIcon.Off)\r\n item.setIcon(icon3)\r\n self.searchServiceListWidget.addItem(item)\r\n item = QtWidgets.QListWidgetItem()\r\n font = QtGui.QFont()\r\n font.setFamily(\"微软雅黑 Light\")\r\n font.setPointSize(14)\r\n item.setFont(font)\r\n icon4 = QtGui.QIcon()\r\n icon4.addPixmap(QtGui.QPixmap(\":/WeatherLogo.png\"), QtGui.QIcon.Normal, QtGui.QIcon.Off)\r\n item.setIcon(icon4)\r\n self.searchServiceListWidget.addItem(item)\r\n self.amapProgramVerticalLayout.addWidget(self.searchServiceListWidget)\r\n self.divisionLine2 = QtWidgets.QFrame(self.verticalLayoutWidget)\r\n self.divisionLine2.setFrameShape(QtWidgets.QFrame.HLine)\r\n self.divisionLine2.setFrameShadow(QtWidgets.QFrame.Sunken)\r\n 
self.divisionLine2.setObjectName(\"divisionLine2\")\r\n self.amapProgramVerticalLayout.addWidget(self.divisionLine2)\r\n self.advancedFunction = QtWidgets.QLabel(self.verticalLayoutWidget)\r\n font = QtGui.QFont()\r\n font.setFamily(\"微软雅黑 Light\")\r\n font.setPointSize(14)\r\n self.advancedFunction.setFont(font)\r\n self.advancedFunction.setObjectName(\"advancedFunction\")\r\n self.amapProgramVerticalLayout.addWidget(self.advancedFunction)\r\n self.advancedFunctionListWidget = QtWidgets.QListWidget(self.verticalLayoutWidget)\r\n self.advancedFunctionListWidget.setMaximumSize(QtCore.QSize(169, 80))\r\n self.advancedFunctionListWidget.setStyleSheet(\"QListWidget, QListView, QTreeWidget, QTreeView {\\n\"\r\n \" outline: 0px;\\n\"\r\n \"}\\n\"\r\n \"\\n\"\r\n \"QListWidget {\\n\"\r\n \" color: Black;\\n\"\r\n \" background: #F0F0F0;\\n\"\r\n \" height:80px;\\n\"\r\n \" NoFrame\\n\"\r\n \"}\\n\"\r\n \"QListWidget::Item {\\n\"\r\n \" color: rgb(0,0,0);\\n\"\r\n \" border-left: 4px solid #F5F5F5;\\n\"\r\n \" height:35px;\\n\"\r\n \"}\\n\"\r\n \"QListWidget::Item:selected {\\n\"\r\n \" background: rgb(204,232,255);\\n\"\r\n \" color: rgb(0,0,0);\\n\"\r\n \" border-left: 4px solid #2DB5EC;\\n\"\r\n \"}\")\r\n self.advancedFunctionListWidget.setFrameShape(QtWidgets.QFrame.NoFrame)\r\n self.advancedFunctionListWidget.setObjectName(\"advancedFunctionListWidget\")\r\n item = QtWidgets.QListWidgetItem()\r\n font = QtGui.QFont()\r\n font.setFamily(\"微软雅黑 Light\")\r\n font.setPointSize(14)\r\n item.setFont(font)\r\n icon5 = QtGui.QIcon()\r\n icon5.addPixmap(QtGui.QPixmap(\":/TrafficSituation.png\"), QtGui.QIcon.Normal, QtGui.QIcon.Off)\r\n item.setIcon(icon5)\r\n self.advancedFunctionListWidget.addItem(item)\r\n item = QtWidgets.QListWidgetItem()\r\n font = QtGui.QFont()\r\n font.setFamily(\"微软雅黑 Light\")\r\n font.setPointSize(12)\r\n item.setFont(font)\r\n icon6 = QtGui.QIcon()\r\n icon6.addPixmap(QtGui.QPixmap(\":/TrafficAnalysisSystem.png\"), QtGui.QIcon.Normal, 
QtGui.QIcon.Off)\r\n item.setIcon(icon6)\r\n self.advancedFunctionListWidget.addItem(item)\r\n self.amapProgramVerticalLayout.addWidget(self.advancedFunctionListWidget)\r\n self.divisionLine3 = QtWidgets.QFrame(self.verticalLayoutWidget)\r\n self.divisionLine3.setFrameShape(QtWidgets.QFrame.HLine)\r\n self.divisionLine3.setFrameShadow(QtWidgets.QFrame.Sunken)\r\n self.divisionLine3.setObjectName(\"divisionLine3\")\r\n self.amapProgramVerticalLayout.addWidget(self.divisionLine3)\r\n self.otherOptionsLabel = QtWidgets.QLabel(self.verticalLayoutWidget)\r\n font = QtGui.QFont()\r\n font.setFamily(\"微软雅黑 Light\")\r\n font.setPointSize(14)\r\n self.otherOptionsLabel.setFont(font)\r\n self.otherOptionsLabel.setObjectName(\"otherOptionsLabel\")\r\n self.amapProgramVerticalLayout.addWidget(self.otherOptionsLabel)\r\n self.otherOptionsListWidget = QtWidgets.QListWidget(self.verticalLayoutWidget)\r\n self.otherOptionsListWidget.setMaximumSize(QtCore.QSize(169, 80))\r\n self.otherOptionsListWidget.setStyleSheet(\"QListWidget, QListView, QTreeWidget, QTreeView {\\n\"\r\n \" outline: 0px;\\n\"\r\n \"}\\n\"\r\n \"\\n\"\r\n \"QListWidget {\\n\"\r\n \" color: Black;\\n\"\r\n \" background: #F0F0F0;\\n\"\r\n \" height:80px;\\n\"\r\n \" NoFrame\\n\"\r\n \"}\\n\"\r\n \"QListWidget::Item {\\n\"\r\n \" color: rgb(0,0,0);\\n\"\r\n \" border-left: 4px solid #F5F5F5;\\n\"\r\n \" height:35px;\\n\"\r\n \"}\\n\"\r\n \"QListWidget::Item:selected {\\n\"\r\n \" background: rgb(204,232,255);\\n\"\r\n \" color: rgb(0,0,0);\\n\"\r\n \" border-left: 4px solid #2DB5EC;\\n\"\r\n \"}\")\r\n self.otherOptionsListWidget.setFrameShape(QtWidgets.QFrame.NoFrame)\r\n self.otherOptionsListWidget.setObjectName(\"otherOptionsListWidget\")\r\n item = QtWidgets.QListWidgetItem()\r\n font = QtGui.QFont()\r\n font.setFamily(\"微软雅黑 Light\")\r\n font.setPointSize(14)\r\n item.setFont(font)\r\n icon7 = QtGui.QIcon()\r\n icon7.addPixmap(QtGui.QPixmap(\":/SettingsLogo.png\"), QtGui.QIcon.Normal, QtGui.QIcon.Off)\r\n 
item.setIcon(icon7)\r\n self.otherOptionsListWidget.addItem(item)\r\n item = QtWidgets.QListWidgetItem()\r\n font = QtGui.QFont()\r\n font.setFamily(\"微软雅黑 Light\")\r\n font.setPointSize(14)\r\n item.setFont(font)\r\n icon8 = QtGui.QIcon()\r\n icon8.addPixmap(QtGui.QPixmap(\":/About.png\"), QtGui.QIcon.Normal, QtGui.QIcon.Off)\r\n item.setIcon(icon8)\r\n self.otherOptionsListWidget.addItem(item)\r\n self.amapProgramVerticalLayout.addWidget(self.otherOptionsListWidget)\r\n spacerItem = QtWidgets.QSpacerItem(20, 104, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)\r\n self.amapProgramVerticalLayout.addItem(spacerItem)\r\n self.amapProgramVerticalLayout.setStretch(0, 1)\r\n self.amapProgramStackedWidget = QtWidgets.QStackedWidget(self.amapProgramFrame)\r\n self.amapProgramStackedWidget.setGeometry(QtCore.QRect(180, 0, 1100, 720))\r\n self.amapProgramStackedWidget.setObjectName(\"amapProgramStackedWidget\")\r\n self.routePlanningPage = QtWidgets.QWidget()\r\n self.routePlanningPage.setObjectName(\"routePlanningPage\")\r\n self.gridLayoutWidget_2 = QtWidgets.QWidget(self.routePlanningPage)\r\n self.gridLayoutWidget_2.setGeometry(QtCore.QRect(10, 0, 1090, 720))\r\n self.gridLayoutWidget_2.setObjectName(\"gridLayoutWidget_2\")\r\n self.gridLayoutPage1 = QtWidgets.QGridLayout(self.gridLayoutWidget_2)\r\n self.gridLayoutPage1.setContentsMargins(10, 0, 0, 0)\r\n self.gridLayoutPage1.setObjectName(\"gridLayoutPage1\")\r\n self.RoutePlanningTabPage = QtWidgets.QTabWidget(self.gridLayoutWidget_2)\r\n font = QtGui.QFont()\r\n font.setFamily(\"微软雅黑 Light\")\r\n font.setPointSize(12)\r\n self.RoutePlanningTabPage.setFont(font)\r\n self.RoutePlanningTabPage.setObjectName(\"RoutePlanningTabPage\")\r\n self.walkingRoutePlanningTab = QtWidgets.QWidget()\r\n self.walkingRoutePlanningTab.setObjectName(\"walkingRoutePlanningTab\")\r\n self.horizontalLayoutWidget_2 = QtWidgets.QWidget(self.walkingRoutePlanningTab)\r\n 
self.horizontalLayoutWidget_2.setGeometry(QtCore.QRect(0, 0, 1071, 691))\r\n self.horizontalLayoutWidget_2.setObjectName(\"horizontalLayoutWidget_2\")\r\n self.walkingHorizontal1 = QtWidgets.QHBoxLayout(self.horizontalLayoutWidget_2)\r\n self.walkingHorizontal1.setContentsMargins(10, 10, 10, 10)\r\n self.walkingHorizontal1.setObjectName(\"walkingHorizontal1\")\r\n self.walkingVertical1 = QtWidgets.QVBoxLayout()\r\n self.walkingVertical1.setObjectName(\"walkingVertical1\")\r\n self.walkingRoutePlanningLabel = QtWidgets.QLabel(self.horizontalLayoutWidget_2)\r\n font = QtGui.QFont()\r\n font.setFamily(\"微软雅黑\")\r\n font.setPointSize(20)\r\n self.walkingRoutePlanningLabel.setFont(font)\r\n self.walkingRoutePlanningLabel.setAlignment(QtCore.Qt.AlignCenter)\r\n self.walkingRoutePlanningLabel.setObjectName(\"walkingRoutePlanningLabel\")\r\n self.walkingRoutePlanningLabel.setMargin(10)\r\n self.walkingVertical1.addWidget(self.walkingRoutePlanningLabel)\r\n self.walkingSearchResultContextLabel = QtWidgets.QLabel(self.horizontalLayoutWidget_2)\r\n font = QtGui.QFont()\r\n font.setFamily(\"微软雅黑 Light\")\r\n font.setPointSize(16)\r\n self.walkingSearchResultContextLabel.setFont(font)\r\n self.walkingSearchResultContextLabel.setObjectName(\"walkingSearchResultContextLabel\")\r\n self.walkingSearchResultContextLabel.setMargin(10)\r\n self.walkingVertical1.addWidget(self.walkingSearchResultContextLabel)\r\n self.walkingSearchInformationLabel = QtWidgets.QLabel(self.horizontalLayoutWidget_2)\r\n font = QtGui.QFont()\r\n font.setFamily(\"微软雅黑 Light\")\r\n font.setPointSize(12)\r\n self.walkingSearchInformationLabel.setFont(font)\r\n self.walkingSearchInformationLabel.setWordWrap(True)\r\n self.walkingSearchInformationLabel.setObjectName(\"walkingSearchInformationLabel\")\r\n self.walkingSearchInformationLabel.setMargin(10)\r\n self.walkingVertical1.addWidget(self.walkingSearchInformationLabel)\r\n self.walingRoutePlanningLine1 = QtWidgets.QFrame(self.horizontalLayoutWidget_2)\r\n 
self.walingRoutePlanningLine1.setFrameShape(QtWidgets.QFrame.HLine)\r\n self.walingRoutePlanningLine1.setFrameShadow(QtWidgets.QFrame.Sunken)\r\n self.walingRoutePlanningLine1.setObjectName(\"walingRoutePlanningLine1\")\r\n self.walkingVertical1.addWidget(self.walingRoutePlanningLine1)\r\n self.walkingHorizontal2 = QtWidgets.QHBoxLayout()\r\n self.walkingHorizontal2.setContentsMargins(-1, -1, 10, -1)\r\n self.walkingHorizontal2.setObjectName(\"walkingHorizontal2\")\r\n self.walkingVertical2 = QtWidgets.QVBoxLayout()\r\n self.walkingVertical2.setObjectName(\"walkingVertical2\")\r\n self.walkingHorizontal3 = QtWidgets.QHBoxLayout()\r\n self.walkingHorizontal3.setContentsMargins(10, 10, 10, 10)\r\n self.walkingHorizontal3.setObjectName(\"walkingHorizontal3\")\r\n self.walkingDepartureLabel = QtWidgets.QLabel(self.horizontalLayoutWidget_2)\r\n font = QtGui.QFont()\r\n font.setFamily(\"微软雅黑 Light\")\r\n font.setPointSize(12)\r\n self.walkingDepartureLabel.setFont(font)\r\n self.walkingDepartureLabel.setObjectName(\"walkingDepartureLabel\")\r\n self.walkingHorizontal3.addWidget(self.walkingDepartureLabel)\r\n self.walkingDepartureLineEdit = QtWidgets.QLineEdit(self.horizontalLayoutWidget_2)\r\n font = QtGui.QFont()\r\n font.setFamily(\"微软雅黑 Light\")\r\n font.setPointSize(12)\r\n self.walkingDepartureLineEdit.setFont(font)\r\n self.walkingDepartureLineEdit.setObjectName(\"walkingDepartureLineEdit\")\r\n self.walkingHorizontal3.addWidget(self.walkingDepartureLineEdit)\r\n self.walkingVertical2.addLayout(self.walkingHorizontal3)\r\n self.walkingHorizontal4 = QtWidgets.QHBoxLayout()\r\n self.walkingHorizontal4.setContentsMargins(10, 10, 10, 10)\r\n self.walkingHorizontal4.setObjectName(\"walkingHorizontal4\")\r\n self.walkingDestinationLabel = QtWidgets.QLabel(self.horizontalLayoutWidget_2)\r\n font = QtGui.QFont()\r\n font.setFamily(\"微软雅黑 Light\")\r\n font.setPointSize(12)\r\n self.walkingDestinationLabel.setFont(font)\r\n 
self.walkingDestinationLabel.setObjectName(\"walkingDestinationLabel\")\r\n self.walkingHorizontal4.addWidget(self.walkingDestinationLabel)\r\n self.walkingDestinationLineEdit = QtWidgets.QLineEdit(self.horizontalLayoutWidget_2)\r\n font = QtGui.QFont()\r\n font.setFamily(\"微软雅黑 Light\")\r\n font.setPointSize(12)\r\n self.walkingDestinationLineEdit.setFont(font)\r\n self.walkingDestinationLineEdit.setObjectName(\"walkingDestinationLineEdit\")\r\n self.walkingHorizontal4.addWidget(self.walkingDestinationLineEdit)\r\n self.walkingVertical2.addLayout(self.walkingHorizontal4)\r\n self.walkingHorizontal2.addLayout(self.walkingVertical2)\r\n self.walkingSearchButton = QtWidgets.QPushButton(self.horizontalLayoutWidget_2)\r\n font = QtGui.QFont()\r\n font.setFamily(\"微软雅黑 Light\")\r\n font.setPointSize(12)\r\n self.walkingSearchButton.setFont(font)\r\n self.walkingSearchButton.setObjectName(\"walkingSearchButton\")\r\n self.walkingHorizontal2.addWidget(self.walkingSearchButton)\r\n self.walkingHorizontal2.setStretch(0, 3)\r\n self.walkingHorizontal2.setStretch(1, 1)\r\n self.walkingVertical1.addLayout(self.walkingHorizontal2)\r\n self.walingRoutePlanningLine2 = QtWidgets.QFrame(self.horizontalLayoutWidget_2)\r\n self.walingRoutePlanningLine2.setFrameShape(QtWidgets.QFrame.HLine)\r\n self.walingRoutePlanningLine2.setFrameShadow(QtWidgets.QFrame.Sunken)\r\n self.walingRoutePlanningLine2.setObjectName(\"walingRoutePlanningLine2\")\r\n self.walkingVertical1.addWidget(self.walingRoutePlanningLine2)\r\n self.walkingResultTextEdit = QtWidgets.QTextEdit(self.horizontalLayoutWidget_2)\r\n font = QtGui.QFont()\r\n font.setFamily(\"微软雅黑 Light\")\r\n font.setPointSize(12)\r\n self.walkingResultTextEdit.setFont(font)\r\n self.walkingResultTextEdit.setStyleSheet(\"QTextEdit{\\n\"\r\n \" color: Black;\\n\"\r\n \" background: #FFFFFF;\\n\"\r\n \" NoFrame;\\n\"\r\n \"}\")\r\n self.walkingResultTextEdit.setFrameShape(QtWidgets.QFrame.NoFrame)\r\n 
self.walkingResultTextEdit.setObjectName(\"walkingResultTextEdit\")\r\n self.walkingResultTextEdit.setReadOnly(True)\r\n self.walkingResultTextEdit.setCursor(QtCore.Qt.ArrowCursor)\r\n self.walkingResultTextEdit.setContextMenuPolicy(QtCore.Qt.NoContextMenu)\r\n self.walkingResultTextEdit.setTextInteractionFlags(QtCore.Qt.NoTextInteraction)\r\n self.walkingVertical1.addWidget(self.walkingResultTextEdit)\r\n self.walkingHorizontal1.addLayout(self.walkingVertical1)\r\n self.walingRoutePlanningLine5 = QtWidgets.QFrame(self.horizontalLayoutWidget_2)\r\n self.walingRoutePlanningLine5.setFrameShape(QtWidgets.QFrame.VLine)\r\n self.walingRoutePlanningLine5.setFrameShadow(QtWidgets.QFrame.Sunken)\r\n self.walingRoutePlanningLine5.setObjectName(\"walingRoutePlanningLine5\")\r\n self.walkingHorizontal1.addWidget(self.walingRoutePlanningLine5)\r\n self.walkingVertical3 = QtWidgets.QVBoxLayout()\r\n self.walkingVertical3.setObjectName(\"walkingVertical3\")\r\n self.walkingRoutePlanningInformationLabel = QtWidgets.QLabel(self.horizontalLayoutWidget_2)\r\n font = QtGui.QFont()\r\n font.setFamily(\"微软雅黑 Light\")\r\n font.setPointSize(16)\r\n self.walkingRoutePlanningInformationLabel.setFont(font)\r\n self.walkingRoutePlanningInformationLabel.setObjectName(\"walkingRoutePlanningInformationLabel\")\r\n self.walkingRoutePlanningInformationLabel.setMargin(10)\r\n self.walkingVertical3.addWidget(self.walkingRoutePlanningInformationLabel)\r\n self.walkingRoutePlanningDetailInformationLabel = QtWidgets.QLabel(self.horizontalLayoutWidget_2)\r\n font = QtGui.QFont()\r\n font.setFamily(\"微软雅黑 Light\")\r\n font.setPointSize(12)\r\n self.walkingRoutePlanningDetailInformationLabel.setFont(font)\r\n self.walkingRoutePlanningDetailInformationLabel.setWordWrap(True)\r\n self.walkingRoutePlanningDetailInformationLabel.setObjectName(\"walkingRoutePlanningDetailInformationLabel\")\r\n self.walkingRoutePlanningDetailInformationLabel.setMargin(10)\r\n 
self.walkingVertical3.addWidget(self.walkingRoutePlanningDetailInformationLabel)\r\n self.walingRoutePlanningLine3 = QtWidgets.QFrame(self.horizontalLayoutWidget_2)\r\n self.walingRoutePlanningLine3.setFrameShape(QtWidgets.QFrame.HLine)\r\n self.walingRoutePlanningLine3.setFrameShadow(QtWidgets.QFrame.Sunken)\r\n self.walingRoutePlanningLine3.setObjectName(\"walingRoutePlanningLine3\")\r\n self.walkingVertical3.addWidget(self.walingRoutePlanningLine3)\r\n self.walkingInputContextLabel = QtWidgets.QLabel(self.horizontalLayoutWidget_2)\r\n font = QtGui.QFont()\r\n font.setFamily(\"微软雅黑 Light\")\r\n font.setPointSize(16)\r\n self.walkingInputContextLabel.setFont(font)\r\n self.walkingInputContextLabel.setObjectName(\"walkingInputContextLabel\")\r\n self.walkingInputContextLabel.setMargin(10)\r\n self.walkingVertical3.addWidget(self.walkingInputContextLabel)\r\n self.walkingInputInformationLabel = QtWidgets.QLabel(self.horizontalLayoutWidget_2)\r\n font = QtGui.QFont()\r\n font.setFamily(\"微软雅黑 Light\")\r\n font.setPointSize(12)\r\n self.walkingInputInformationLabel.setFont(font)\r\n self.walkingInputInformationLabel.setWordWrap(True)\r\n self.walkingInputInformationLabel.setObjectName(\"walkingInputInformationLabel\")\r\n self.walkingInputInformationLabel.setMargin(10)\r\n self.walkingVertical3.addWidget(self.walkingInputInformationLabel)\r\n self.walingRoutePlanningLine4 = QtWidgets.QFrame(self.horizontalLayoutWidget_2)\r\n self.walingRoutePlanningLine4.setFrameShape(QtWidgets.QFrame.HLine)\r\n self.walingRoutePlanningLine4.setFrameShadow(QtWidgets.QFrame.Sunken)\r\n self.walingRoutePlanningLine4.setObjectName(\"walingRoutePlanningLine4\")\r\n self.walkingVertical3.addWidget(self.walingRoutePlanningLine4)\r\n self.walkingHorizontal5 = QtWidgets.QHBoxLayout()\r\n self.walkingHorizontal5.setContentsMargins(10, -1, -1, -1)\r\n self.walkingHorizontal5.setObjectName(\"walkingHorizontal5\")\r\n self.walkingFeedBackPhotoLabel = 
QtWidgets.QLabel(self.horizontalLayoutWidget_2)\r\n self.walkingFeedBackPhotoLabel.setMaximumSize(QtCore.QSize(20, 20))\r\n self.walkingFeedBackPhotoLabel.setText(\"\")\r\n self.walkingFeedBackPhotoLabel.setObjectName(\"walkingFeedBackPhotoLabel\")\r\n self.walkingPixmap = QPixmap(\":/FeedBack.png\")\r\n self.walkingFeedBackPhotoLabel.setPixmap(self.walkingPixmap)\r\n self.walkingFeedBackPhotoLabel.setScaledContents(True)\r\n self.walkingHorizontal5.addWidget(self.walkingFeedBackPhotoLabel)\r\n self.walkingFeedBackLabel = QtWidgets.QLabel(self.horizontalLayoutWidget_2)\r\n self.walkingFeedBackLabel.setMaximumSize(QtCore.QSize(16777215, 20))\r\n font = QtGui.QFont()\r\n font.setFamily(\"微软雅黑 Light\")\r\n font.setPointSize(12)\r\n self.walkingFeedBackLabel.setFont(font)\r\n self.walkingFeedBackLabel.setOpenExternalLinks(True)\r\n self.walkingFeedBackLabel.setObjectName(\"walkingFeedBackLabel\")\r\n self.walkingHorizontal5.addWidget(self.walkingFeedBackLabel)\r\n self.walkingVertical3.addLayout(self.walkingHorizontal5)\r\n spacerItem1 = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)\r\n self.walkingVertical3.addItem(spacerItem1)\r\n self.walkingHorizontal1.addLayout(self.walkingVertical3)\r\n self.walkingHorizontal1.setStretch(0, 2)\r\n self.walkingHorizontal1.setStretch(2, 1)\r\n self.RoutePlanningTabPage.addTab(self.walkingRoutePlanningTab, \"\")\r\n self.busRoutePlanningTab = QtWidgets.QWidget()\r\n self.busRoutePlanningTab.setObjectName(\"busRoutePlanningTab\")\r\n self.horizontalLayoutWidget_7 = QtWidgets.QWidget(self.busRoutePlanningTab)\r\n self.horizontalLayoutWidget_7.setGeometry(QtCore.QRect(0, 0, 1071, 691))\r\n self.horizontalLayoutWidget_7.setObjectName(\"horizontalLayoutWidget_7\")\r\n self.busHorizontal1 = QtWidgets.QHBoxLayout(self.horizontalLayoutWidget_7)\r\n self.busHorizontal1.setContentsMargins(10, 10, 10, 10)\r\n self.busHorizontal1.setObjectName(\"busHorizontal1\")\r\n self.busVertical1 = 
QtWidgets.QVBoxLayout()\r\n self.busVertical1.setObjectName(\"busVertical1\")\r\n self.busRoutePlanningLabel = QtWidgets.QLabel(self.horizontalLayoutWidget_7)\r\n font = QtGui.QFont()\r\n font.setFamily(\"微软雅黑\")\r\n font.setPointSize(20)\r\n self.busRoutePlanningLabel.setFont(font)\r\n self.busRoutePlanningLabel.setAlignment(QtCore.Qt.AlignCenter)\r\n self.busRoutePlanningLabel.setObjectName(\"busRoutePlanningLabel\")\r\n self.busRoutePlanningLabel.setMargin(10)\r\n self.busVertical1.addWidget(self.busRoutePlanningLabel)\r\n self.busSearchResultContextLabel = QtWidgets.QLabel(self.horizontalLayoutWidget_7)\r\n font = QtGui.QFont()\r\n font.setFamily(\"微软雅黑 Light\")\r\n font.setPointSize(16)\r\n self.busSearchResultContextLabel.setFont(font)\r\n self.busSearchResultContextLabel.setObjectName(\"busSearchResultContextLabel\")\r\n self.busSearchResultContextLabel.setMargin(10)\r\n self.busVertical1.addWidget(self.busSearchResultContextLabel)\r\n self.busSearchInformationLabel = QtWidgets.QLabel(self.horizontalLayoutWidget_7)\r\n font = QtGui.QFont()\r\n font.setFamily(\"微软雅黑 Light\")\r\n font.setPointSize(12)\r\n self.busSearchInformationLabel.setFont(font)\r\n self.busSearchInformationLabel.setWordWrap(True)\r\n self.busSearchInformationLabel.setObjectName(\"busSearchInformationLabel\")\r\n self.busSearchInformationLabel.setMargin(10)\r\n self.busVertical1.addWidget(self.busSearchInformationLabel)\r\n self.busRoutePlanningLine1 = QtWidgets.QFrame(self.horizontalLayoutWidget_7)\r\n self.busRoutePlanningLine1.setFrameShape(QtWidgets.QFrame.HLine)\r\n self.busRoutePlanningLine1.setFrameShadow(QtWidgets.QFrame.Sunken)\r\n self.busRoutePlanningLine1.setObjectName(\"busRoutePlanningLine1\")\r\n self.busVertical1.addWidget(self.busRoutePlanningLine1)\r\n self.busHorizontal2 = QtWidgets.QHBoxLayout()\r\n self.busHorizontal2.setContentsMargins(-1, -1, 10, -1)\r\n self.busHorizontal2.setObjectName(\"busHorizontal2\")\r\n self.busVertical2 = QtWidgets.QVBoxLayout()\r\n 
self.busVertical2.setObjectName(\"busVertical2\")\r\n self.busHorizontal3 = QtWidgets.QHBoxLayout()\r\n self.busHorizontal3.setContentsMargins(10, 10, 10, 10)\r\n self.busHorizontal3.setObjectName(\"busHorizontal3\")\r\n self.busDepartureLabel = QtWidgets.QLabel(self.horizontalLayoutWidget_7)\r\n font = QtGui.QFont()\r\n font.setFamily(\"微软雅黑 Light\")\r\n font.setPointSize(12)\r\n self.busDepartureLabel.setFont(font)\r\n self.busDepartureLabel.setObjectName(\"busDepartureLabel\")\r\n self.busHorizontal3.addWidget(self.busDepartureLabel)\r\n self.busDepartureLineEdit = QtWidgets.QLineEdit(self.horizontalLayoutWidget_7)\r\n font = QtGui.QFont()\r\n font.setFamily(\"微软雅黑 Light\")\r\n font.setPointSize(12)\r\n self.busDepartureLineEdit.setFont(font)\r\n self.busDepartureLineEdit.setObjectName(\"busDepartureLineEdit\")\r\n self.busHorizontal3.addWidget(self.busDepartureLineEdit)\r\n self.busVertical2.addLayout(self.busHorizontal3)\r\n self.busHorizontal4 = QtWidgets.QHBoxLayout()\r\n self.busHorizontal4.setContentsMargins(10, 10, 10, 10)\r\n self.busHorizontal4.setObjectName(\"busHorizontal4\")\r\n self.busDestinationLabel = QtWidgets.QLabel(self.horizontalLayoutWidget_7)\r\n font = QtGui.QFont()\r\n font.setFamily(\"微软雅黑 Light\")\r\n font.setPointSize(12)\r\n self.busDestinationLabel.setFont(font)\r\n self.busDestinationLabel.setObjectName(\"busDestinationLabel\")\r\n self.busHorizontal4.addWidget(self.busDestinationLabel)\r\n self.busDestinationLineEdit = QtWidgets.QLineEdit(self.horizontalLayoutWidget_7)\r\n font = QtGui.QFont()\r\n font.setFamily(\"微软雅黑 Light\")\r\n font.setPointSize(12)\r\n self.busDestinationLineEdit.setFont(font)\r\n self.busDestinationLineEdit.setObjectName(\"busDestinationLineEdit\")\r\n self.busHorizontal4.addWidget(self.busDestinationLineEdit)\r\n self.busVertical2.addLayout(self.busHorizontal4)\r\n self.busHorizontal2.addLayout(self.busVertical2)\r\n self.busSearchButton = QtWidgets.QPushButton(self.horizontalLayoutWidget_7)\r\n font = 
QtGui.QFont()\r\n font.setFamily(\"微软雅黑 Light\")\r\n font.setPointSize(12)\r\n self.busSearchButton.setFont(font)\r\n self.busSearchButton.setObjectName(\"busSearchButton\")\r\n self.busHorizontal2.addWidget(self.busSearchButton)\r\n self.busHorizontal2.setStretch(0, 3)\r\n self.busHorizontal2.setStretch(1, 1)\r\n self.busVertical1.addLayout(self.busHorizontal2)\r\n self.busRoutePlanningLine2 = QtWidgets.QFrame(self.horizontalLayoutWidget_7)\r\n self.busRoutePlanningLine2.setFrameShape(QtWidgets.QFrame.HLine)\r\n self.busRoutePlanningLine2.setFrameShadow(QtWidgets.QFrame.Sunken)\r\n self.busRoutePlanningLine2.setObjectName(\"busRoutePlanningLine2\")\r\n self.busVertical1.addWidget(self.busRoutePlanningLine2)\r\n self.busResultTextEdit = QtWidgets.QTextEdit(self.horizontalLayoutWidget_7)\r\n font = QtGui.QFont()\r\n font.setFamily(\"微软雅黑 Light\")\r\n font.setPointSize(12)\r\n self.busResultTextEdit.setFont(font)\r\n self.busResultTextEdit.setStyleSheet(\"QTextEdit{\\n\"\r\n \" color: Black;\\n\"\r\n \" background: #FFFFFF;\\n\"\r\n \" NoFrame;\\n\"\r\n \"}\")\r\n self.busResultTextEdit.setFrameShape(QtWidgets.QFrame.NoFrame)\r\n self.busResultTextEdit.setObjectName(\"busResultTextEdit\")\r\n self.busResultTextEdit.setReadOnly(True)\r\n self.busResultTextEdit.setCursor(QtCore.Qt.ArrowCursor)\r\n self.busResultTextEdit.setContextMenuPolicy(QtCore.Qt.NoContextMenu)\r\n self.busResultTextEdit.setTextInteractionFlags(QtCore.Qt.NoTextInteraction)\r\n self.busVertical1.addWidget(self.busResultTextEdit)\r\n self.busHorizontal1.addLayout(self.busVertical1)\r\n self.busRoutePlanningLine5 = QtWidgets.QFrame(self.horizontalLayoutWidget_7)\r\n self.busRoutePlanningLine5.setFrameShape(QtWidgets.QFrame.VLine)\r\n self.busRoutePlanningLine5.setFrameShadow(QtWidgets.QFrame.Sunken)\r\n self.busRoutePlanningLine5.setObjectName(\"busRoutePlanningLine5\")\r\n self.busHorizontal1.addWidget(self.busRoutePlanningLine5)\r\n self.busVertical3 = QtWidgets.QVBoxLayout()\r\n 
self.busVertical3.setObjectName(\"busVertical3\")\r\n self.busRoutePlanningInformationLabel = QtWidgets.QLabel(self.horizontalLayoutWidget_7)\r\n font = QtGui.QFont()\r\n font.setFamily(\"微软雅黑 Light\")\r\n font.setPointSize(16)\r\n self.busRoutePlanningInformationLabel.setFont(font)\r\n self.busRoutePlanningInformationLabel.setObjectName(\"busRoutePlanningInformationLabel\")\r\n self.busRoutePlanningInformationLabel.setMargin(10)\r\n self.busVertical3.addWidget(self.busRoutePlanningInformationLabel)\r\n self.busRoutePlanningDetailInformationLabel = QtWidgets.QLabel(self.horizontalLayoutWidget_7)\r\n font = QtGui.QFont()\r\n font.setFamily(\"微软雅黑 Light\")\r\n font.setPointSize(12)\r\n self.busRoutePlanningDetailInformationLabel.setFont(font)\r\n self.busRoutePlanningDetailInformationLabel.setWordWrap(True)\r\n self.busRoutePlanningDetailInformationLabel.setObjectName(\"busRoutePlanningDetailInformationLabel\")\r\n self.busRoutePlanningDetailInformationLabel.setMargin(10)\r\n self.busVertical3.addWidget(self.busRoutePlanningDetailInformationLabel)\r\n self.busRoutePlanningLine3 = QtWidgets.QFrame(self.horizontalLayoutWidget_7)\r\n self.busRoutePlanningLine3.setFrameShape(QtWidgets.QFrame.HLine)\r\n self.busRoutePlanningLine3.setFrameShadow(QtWidgets.QFrame.Sunken)\r\n self.busRoutePlanningLine3.setObjectName(\"busRoutePlanningLine3\")\r\n self.busVertical3.addWidget(self.busRoutePlanningLine3)\r\n self.busInputContextLabel = QtWidgets.QLabel(self.horizontalLayoutWidget_7)\r\n font = QtGui.QFont()\r\n font.setFamily(\"微软雅黑 Light\")\r\n font.setPointSize(16)\r\n self.busInputContextLabel.setFont(font)\r\n self.busInputContextLabel.setObjectName(\"busInputContextLabel\")\r\n self.busInputContextLabel.setMargin(10)\r\n self.busVertical3.addWidget(self.busInputContextLabel)\r\n self.busInputInformationLabel = QtWidgets.QLabel(self.horizontalLayoutWidget_7)\r\n font = QtGui.QFont()\r\n font.setFamily(\"微软雅黑 Light\")\r\n font.setPointSize(12)\r\n 
self.busInputInformationLabel.setFont(font)\r\n self.busInputInformationLabel.setObjectName(\"busInputInformationLabel\")\r\n self.busInputInformationLabel.setMargin(10)\r\n self.busVertical3.addWidget(self.busInputInformationLabel)\r\n self.busRoutePlanningLine4 = QtWidgets.QFrame(self.horizontalLayoutWidget_7)\r\n self.busRoutePlanningLine4.setFrameShape(QtWidgets.QFrame.HLine)\r\n self.busRoutePlanningLine4.setFrameShadow(QtWidgets.QFrame.Sunken)\r\n self.busRoutePlanningLine4.setObjectName(\"busRoutePlanningLine4\")\r\n self.busVertical3.addWidget(self.busRoutePlanningLine4)\r\n self.busHorizontal5 = QtWidgets.QHBoxLayout()\r\n self.busHorizontal5.setContentsMargins(10, -1, -1, -1)\r\n self.busHorizontal5.setObjectName(\"busHorizontal5\")\r\n self.busFeedBackPhotoLabel = QtWidgets.QLabel(self.horizontalLayoutWidget_7)\r\n self.busFeedBackPhotoLabel.setMaximumSize(QtCore.QSize(20, 20))\r\n self.busFeedBackPhotoLabel.setText(\"\")\r\n self.busFeedBackPhotoLabel.setObjectName(\"busFeedBackPhotoLabel\")\r\n self.busPixmap = QPixmap(\":/FeedBack.png\")\r\n self.busFeedBackPhotoLabel.setPixmap(self.busPixmap)\r\n self.busFeedBackPhotoLabel.setScaledContents(True)\r\n self.busHorizontal5.addWidget(self.busFeedBackPhotoLabel)\r\n self.busFeedBackLabel = QtWidgets.QLabel(self.horizontalLayoutWidget_7)\r\n self.busFeedBackLabel.setMaximumSize(QtCore.QSize(16777215, 20))\r\n font = QtGui.QFont()\r\n font.setFamily(\"微软雅黑 Light\")\r\n font.setPointSize(12)\r\n self.busFeedBackLabel.setFont(font)\r\n self.busFeedBackLabel.setOpenExternalLinks(True)\r\n self.busFeedBackLabel.setObjectName(\"busFeedBackLabel\")\r\n self.busHorizontal5.addWidget(self.busFeedBackLabel)\r\n self.busVertical3.addLayout(self.busHorizontal5)\r\n spacerItem2 = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)\r\n self.busVertical3.addItem(spacerItem2)\r\n self.busHorizontal1.addLayout(self.busVertical3)\r\n self.busHorizontal1.setStretch(0, 2)\r\n 
self.busHorizontal1.setStretch(2, 1)\r\n self.RoutePlanningTabPage.addTab(self.busRoutePlanningTab, \"\")\r\n self.rideRoutePlanningTab = QtWidgets.QWidget()\r\n self.rideRoutePlanningTab.setObjectName(\"rideRoutePlanningTab\")\r\n self.horizontalLayoutWidget_9 = QtWidgets.QWidget(self.rideRoutePlanningTab)\r\n self.horizontalLayoutWidget_9.setGeometry(QtCore.QRect(0, 0, 1071, 691))\r\n self.horizontalLayoutWidget_9.setObjectName(\"horizontalLayoutWidget_9\")\r\n self.rideHorizontal1 = QtWidgets.QHBoxLayout(self.horizontalLayoutWidget_9)\r\n self.rideHorizontal1.setContentsMargins(10, 10, 10, 10)\r\n self.rideHorizontal1.setObjectName(\"rideHorizontal1\")\r\n self.rideVertical1 = QtWidgets.QVBoxLayout()\r\n self.rideVertical1.setObjectName(\"rideVertical1\")\r\n self.rideRoutePlanningLabel = QtWidgets.QLabel(self.horizontalLayoutWidget_9)\r\n font = QtGui.QFont()\r\n font.setFamily(\"微软雅黑\")\r\n font.setPointSize(20)\r\n self.rideRoutePlanningLabel.setFont(font)\r\n self.rideRoutePlanningLabel.setAlignment(QtCore.Qt.AlignCenter)\r\n self.rideRoutePlanningLabel.setObjectName(\"rideRoutePlanningLabel\")\r\n self.rideRoutePlanningLabel.setMargin(10)\r\n self.rideVertical1.addWidget(self.rideRoutePlanningLabel)\r\n self.rideSearchResultContextLabel = QtWidgets.QLabel(self.horizontalLayoutWidget_9)\r\n font = QtGui.QFont()\r\n font.setFamily(\"微软雅黑 Light\")\r\n font.setPointSize(16)\r\n self.rideSearchResultContextLabel.setFont(font)\r\n self.rideSearchResultContextLabel.setObjectName(\"rideSearchResultContextLabel\")\r\n self.rideSearchResultContextLabel.setMargin(10)\r\n self.rideVertical1.addWidget(self.rideSearchResultContextLabel)\r\n self.rideSearchInformationLabel = QtWidgets.QLabel(self.horizontalLayoutWidget_9)\r\n font = QtGui.QFont()\r\n font.setFamily(\"微软雅黑 Light\")\r\n font.setPointSize(12)\r\n self.rideSearchInformationLabel.setFont(font)\r\n self.rideSearchInformationLabel.setWordWrap(True)\r\n 
self.rideSearchInformationLabel.setObjectName(\"rideSearchInformationLabel\")\r\n self.rideSearchInformationLabel.setMargin(10)\r\n self.rideVertical1.addWidget(self.rideSearchInformationLabel)\r\n self.rideRoutePlanningLine1 = QtWidgets.QFrame(self.horizontalLayoutWidget_9)\r\n self.rideRoutePlanningLine1.setFrameShape(QtWidgets.QFrame.HLine)\r\n self.rideRoutePlanningLine1.setFrameShadow(QtWidgets.QFrame.Sunken)\r\n self.rideRoutePlanningLine1.setObjectName(\"rideRoutePlanningLine1\")\r\n self.rideVertical1.addWidget(self.rideRoutePlanningLine1)\r\n self.rideHorizontal2 = QtWidgets.QHBoxLayout()\r\n self.rideHorizontal2.setContentsMargins(-1, -1, 10, -1)\r\n self.rideHorizontal2.setObjectName(\"rideHorizontal2\")\r\n self.rideVertical2 = QtWidgets.QVBoxLayout()\r\n self.rideVertical2.setObjectName(\"rideVertical2\")\r\n self.rideHorizontal3 = QtWidgets.QHBoxLayout()\r\n self.rideHorizontal3.setContentsMargins(10, 10, 10, 10)\r\n self.rideHorizontal3.setObjectName(\"rideHorizontal3\")\r\n self.rideDepartureLabel = QtWidgets.QLabel(self.horizontalLayoutWidget_9)\r\n font = QtGui.QFont()\r\n font.setFamily(\"微软雅黑 Light\")\r\n font.setPointSize(12)\r\n self.rideDepartureLabel.setFont(font)\r\n self.rideDepartureLabel.setObjectName(\"rideDepartureLabel\")\r\n self.rideHorizontal3.addWidget(self.rideDepartureLabel)\r\n self.rideDepartureLineEdit = QtWidgets.QLineEdit(self.horizontalLayoutWidget_9)\r\n font = QtGui.QFont()\r\n font.setFamily(\"微软雅黑 Light\")\r\n font.setPointSize(12)\r\n self.rideDepartureLineEdit.setFont(font)\r\n self.rideDepartureLineEdit.setObjectName(\"rideDepartureLineEdit\")\r\n self.rideHorizontal3.addWidget(self.rideDepartureLineEdit)\r\n self.rideVertical2.addLayout(self.rideHorizontal3)\r\n self.rideHorizontal4 = QtWidgets.QHBoxLayout()\r\n self.rideHorizontal4.setContentsMargins(10, 10, 10, 10)\r\n self.rideHorizontal4.setObjectName(\"rideHorizontal4\")\r\n self.rideDestinationLabel = QtWidgets.QLabel(self.horizontalLayoutWidget_9)\r\n font = 
QtGui.QFont()\r\n font.setFamily(\"微软雅黑 Light\")\r\n font.setPointSize(12)\r\n self.rideDestinationLabel.setFont(font)\r\n self.rideDestinationLabel.setObjectName(\"rideDestinationLabel\")\r\n self.rideHorizontal4.addWidget(self.rideDestinationLabel)\r\n self.rideDestinationLineEdit = QtWidgets.QLineEdit(self.horizontalLayoutWidget_9)\r\n font = QtGui.QFont()\r\n font.setFamily(\"微软雅黑 Light\")\r\n font.setPointSize(12)\r\n self.rideDestinationLineEdit.setFont(font)\r\n self.rideDestinationLineEdit.setObjectName(\"rideDestinationLineEdit\")\r\n self.rideHorizontal4.addWidget(self.rideDestinationLineEdit)\r\n self.rideVertical2.addLayout(self.rideHorizontal4)\r\n self.rideHorizontal2.addLayout(self.rideVertical2)\r\n self.rideSearchButton = QtWidgets.QPushButton(self.horizontalLayoutWidget_9)\r\n font = QtGui.QFont()\r\n font.setFamily(\"微软雅黑 Light\")\r\n font.setPointSize(12)\r\n self.rideSearchButton.setFont(font)\r\n self.rideSearchButton.setObjectName(\"rideSearchButton\")\r\n self.rideHorizontal2.addWidget(self.rideSearchButton)\r\n self.rideHorizontal2.setStretch(0, 3)\r\n self.rideHorizontal2.setStretch(1, 1)\r\n self.rideVertical1.addLayout(self.rideHorizontal2)\r\n self.rideRoutePlanningLine2 = QtWidgets.QFrame(self.horizontalLayoutWidget_9)\r\n self.rideRoutePlanningLine2.setFrameShape(QtWidgets.QFrame.HLine)\r\n self.rideRoutePlanningLine2.setFrameShadow(QtWidgets.QFrame.Sunken)\r\n self.rideRoutePlanningLine2.setObjectName(\"rideRoutePlanningLine2\")\r\n self.rideVertical1.addWidget(self.rideRoutePlanningLine2)\r\n self.rideResultTextEdit = QtWidgets.QTextEdit(self.horizontalLayoutWidget_9)\r\n font = QtGui.QFont()\r\n font.setFamily(\"微软雅黑 Light\")\r\n font.setPointSize(12)\r\n self.rideResultTextEdit.setFont(font)\r\n self.rideResultTextEdit.setStyleSheet(\"QTextEdit{\\n\"\r\n \" color: Black;\\n\"\r\n \" background: #FFFFFF;\\n\"\r\n \" NoFrame;\\n\"\r\n \"}\")\r\n self.rideResultTextEdit.setFrameShape(QtWidgets.QFrame.NoFrame)\r\n 
self.rideResultTextEdit.setObjectName(\"rideResultTextEdit\")\r\n self.rideResultTextEdit.setReadOnly(True)\r\n self.rideResultTextEdit.setCursor(QtCore.Qt.ArrowCursor)\r\n self.rideResultTextEdit.setContextMenuPolicy(QtCore.Qt.NoContextMenu)\r\n self.rideResultTextEdit.setTextInteractionFlags(QtCore.Qt.NoTextInteraction)\r\n self.rideVertical1.addWidget(self.rideResultTextEdit)\r\n self.rideHorizontal1.addLayout(self.rideVertical1)\r\n self.rideRoutePlanningLine5 = QtWidgets.QFrame(self.horizontalLayoutWidget_9)\r\n self.rideRoutePlanningLine5.setFrameShape(QtWidgets.QFrame.VLine)\r\n self.rideRoutePlanningLine5.setFrameShadow(QtWidgets.QFrame.Sunken)\r\n self.rideRoutePlanningLine5.setObjectName(\"rideRoutePlanningLine5\")\r\n self.rideHorizontal1.addWidget(self.rideRoutePlanningLine5)\r\n self.rideVertical3 = QtWidgets.QVBoxLayout()\r\n self.rideVertical3.setObjectName(\"rideVertical3\")\r\n self.rideRoutePlanningInformationLabel = QtWidgets.QLabel(self.horizontalLayoutWidget_9)\r\n font = QtGui.QFont()\r\n font.setFamily(\"微软雅黑 Light\")\r\n font.setPointSize(16)\r\n self.rideRoutePlanningInformationLabel.setFont(font)\r\n self.rideRoutePlanningInformationLabel.setObjectName(\"rideRoutePlanningInformationLabel\")\r\n self.rideRoutePlanningInformationLabel.setMargin(10)\r\n self.rideVertical3.addWidget(self.rideRoutePlanningInformationLabel)\r\n self.rideRoutePlanningDetailInformationLabel = QtWidgets.QLabel(self.horizontalLayoutWidget_9)\r\n font = QtGui.QFont()\r\n font.setFamily(\"微软雅黑 Light\")\r\n font.setPointSize(12)\r\n self.rideRoutePlanningDetailInformationLabel.setFont(font)\r\n self.rideRoutePlanningDetailInformationLabel.setWordWrap(True)\r\n self.rideRoutePlanningDetailInformationLabel.setObjectName(\"rideRoutePlanningDetailInformationLabel\")\r\n self.rideRoutePlanningDetailInformationLabel.setMargin(10)\r\n self.rideVertical3.addWidget(self.rideRoutePlanningDetailInformationLabel)\r\n self.rideRoutePlanningLine3 = 
QtWidgets.QFrame(self.horizontalLayoutWidget_9)\r\n self.rideRoutePlanningLine3.setFrameShape(QtWidgets.QFrame.HLine)\r\n self.rideRoutePlanningLine3.setFrameShadow(QtWidgets.QFrame.Sunken)\r\n self.rideRoutePlanningLine3.setObjectName(\"rideRoutePlanningLine3\")\r\n self.rideVertical3.addWidget(self.rideRoutePlanningLine3)\r\n self.rideInputContextLabel = QtWidgets.QLabel(self.horizontalLayoutWidget_9)\r\n font = QtGui.QFont()\r\n font.setFamily(\"微软雅黑 Light\")\r\n font.setPointSize(16)\r\n self.rideInputContextLabel.setFont(font)\r\n self.rideInputContextLabel.setObjectName(\"rideInputContextLabel\")\r\n self.rideInputContextLabel.setMargin(10)\r\n self.rideVertical3.addWidget(self.rideInputContextLabel)\r\n self.rideInputInformationLabel = QtWidgets.QLabel(self.horizontalLayoutWidget_9)\r\n font = QtGui.QFont()\r\n font.setFamily(\"微软雅黑 Light\")\r\n font.setPointSize(12)\r\n self.rideInputInformationLabel.setFont(font)\r\n self.rideInputInformationLabel.setObjectName(\"rideInputInformationLabel\")\r\n self.rideInputInformationLabel.setMargin(10)\r\n self.rideVertical3.addWidget(self.rideInputInformationLabel)\r\n self.rideRoutePlanningLine4 = QtWidgets.QFrame(self.horizontalLayoutWidget_9)\r\n self.rideRoutePlanningLine4.setFrameShape(QtWidgets.QFrame.HLine)\r\n self.rideRoutePlanningLine4.setFrameShadow(QtWidgets.QFrame.Sunken)\r\n self.rideRoutePlanningLine4.setObjectName(\"rideRoutePlanningLine4\")\r\n self.rideVertical3.addWidget(self.rideRoutePlanningLine4)\r\n self.rideHorizontal5 = QtWidgets.QHBoxLayout()\r\n self.rideHorizontal5.setContentsMargins(10, -1, -1, -1)\r\n self.rideHorizontal5.setObjectName(\"rideHorizontal5\")\r\n self.rideFeedBackPhotoLabel = QtWidgets.QLabel(self.horizontalLayoutWidget_9)\r\n self.rideFeedBackPhotoLabel.setMaximumSize(QtCore.QSize(20, 20))\r\n self.rideFeedBackPhotoLabel.setText(\"\")\r\n self.rideFeedBackPhotoLabel.setObjectName(\"rideFeedBackPhotoLabel\")\r\n self.ridePixmap = QPixmap(\":/FeedBack.png\")\r\n 
self.rideFeedBackPhotoLabel.setPixmap(self.ridePixmap)\r\n self.rideFeedBackPhotoLabel.setScaledContents(True)\r\n self.rideHorizontal5.addWidget(self.rideFeedBackPhotoLabel)\r\n self.rideFeedBackLabel = QtWidgets.QLabel(self.horizontalLayoutWidget_9)\r\n self.rideFeedBackLabel.setMaximumSize(QtCore.QSize(16777215, 20))\r\n font = QtGui.QFont()\r\n font.setFamily(\"微软雅黑 Light\")\r\n font.setPointSize(12)\r\n self.rideFeedBackLabel.setFont(font)\r\n self.rideFeedBackLabel.setOpenExternalLinks(True)\r\n self.rideFeedBackLabel.setObjectName(\"rideFeedBackLabel\")\r\n self.rideHorizontal5.addWidget(self.rideFeedBackLabel)\r\n self.rideVertical3.addLayout(self.rideHorizontal5)\r\n spacerItem3 = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)\r\n self.rideVertical3.addItem(spacerItem3)\r\n self.rideHorizontal1.addLayout(self.rideVertical3)\r\n self.rideHorizontal1.setStretch(0, 2)\r\n self.rideHorizontal1.setStretch(2, 1)\r\n self.RoutePlanningTabPage.addTab(self.rideRoutePlanningTab, \"\")\r\n self.driveRoutePlanningTab = QtWidgets.QWidget()\r\n self.driveRoutePlanningTab.setObjectName(\"driveRoutePlanningTab\")\r\n self.horizontalLayoutWidget_8 = QtWidgets.QWidget(self.driveRoutePlanningTab)\r\n self.horizontalLayoutWidget_8.setGeometry(QtCore.QRect(0, 0, 1071, 691))\r\n self.horizontalLayoutWidget_8.setObjectName(\"horizontalLayoutWidget_8\")\r\n self.driveHorizontal1 = QtWidgets.QHBoxLayout(self.horizontalLayoutWidget_8)\r\n self.driveHorizontal1.setContentsMargins(10, 10, 10, 10)\r\n self.driveHorizontal1.setObjectName(\"driveHorizontal1\")\r\n self.driveVertical1 = QtWidgets.QVBoxLayout()\r\n self.driveVertical1.setObjectName(\"driveVertical1\")\r\n self.driveRoutePlanningLabel = QtWidgets.QLabel(self.horizontalLayoutWidget_8)\r\n font = QtGui.QFont()\r\n font.setFamily(\"微软雅黑\")\r\n font.setPointSize(20)\r\n self.driveRoutePlanningLabel.setFont(font)\r\n 
self.driveRoutePlanningLabel.setAlignment(QtCore.Qt.AlignCenter)\r\n self.driveRoutePlanningLabel.setObjectName(\"driveRoutePlanningLabel\")\r\n self.driveRoutePlanningLabel.setMargin(10)\r\n self.driveVertical1.addWidget(self.driveRoutePlanningLabel)\r\n self.driveSearchResultContextLabel = QtWidgets.QLabel(self.horizontalLayoutWidget_8)\r\n font = QtGui.QFont()\r\n font.setFamily(\"微软雅黑 Light\")\r\n font.setPointSize(16)\r\n self.driveSearchResultContextLabel.setFont(font)\r\n self.driveSearchResultContextLabel.setObjectName(\"driveSearchResultContextLabel\")\r\n self.driveSearchResultContextLabel.setMargin(10)\r\n self.driveVertical1.addWidget(self.driveSearchResultContextLabel)\r\n self.driveSearchInformationLabel = QtWidgets.QLabel(self.horizontalLayoutWidget_8)\r\n font = QtGui.QFont()\r\n font.setFamily(\"微软雅黑 Light\")\r\n font.setPointSize(12)\r\n self.driveSearchInformationLabel.setFont(font)\r\n self.driveSearchInformationLabel.setWordWrap(True)\r\n self.driveSearchInformationLabel.setObjectName(\"driveSearchInformationLabel\")\r\n self.driveSearchInformationLabel.setMargin(10)\r\n self.driveVertical1.addWidget(self.driveSearchInformationLabel)\r\n self.driveRoutePlanningLine1 = QtWidgets.QFrame(self.horizontalLayoutWidget_8)\r\n self.driveRoutePlanningLine1.setFrameShape(QtWidgets.QFrame.HLine)\r\n self.driveRoutePlanningLine1.setFrameShadow(QtWidgets.QFrame.Sunken)\r\n self.driveRoutePlanningLine1.setObjectName(\"driveRoutePlanningLine1\")\r\n self.driveVertical1.addWidget(self.driveRoutePlanningLine1)\r\n self.driveHorizontal2 = QtWidgets.QHBoxLayout()\r\n self.driveHorizontal2.setContentsMargins(-1, -1, 10, -1)\r\n self.driveHorizontal2.setObjectName(\"driveHorizontal2\")\r\n self.driveVertical2 = QtWidgets.QVBoxLayout()\r\n self.driveVertical2.setObjectName(\"driveVertical2\")\r\n self.driveHorizontal3 = QtWidgets.QHBoxLayout()\r\n self.driveHorizontal3.setContentsMargins(10, 10, 10, 10)\r\n 
self.driveHorizontal3.setObjectName(\"driveHorizontal3\")\r\n self.driveDepartureLabel = QtWidgets.QLabel(self.horizontalLayoutWidget_8)\r\n font = QtGui.QFont()\r\n font.setFamily(\"微软雅黑 Light\")\r\n font.setPointSize(12)\r\n self.driveDepartureLabel.setFont(font)\r\n self.driveDepartureLabel.setObjectName(\"driveDepartureLabel\")\r\n self.driveHorizontal3.addWidget(self.driveDepartureLabel)\r\n self.driveDepartureLineEdit = QtWidgets.QLineEdit(self.horizontalLayoutWidget_8)\r\n font = QtGui.QFont()\r\n font.setFamily(\"微软雅黑 Light\")\r\n font.setPointSize(12)\r\n self.driveDepartureLineEdit.setFont(font)\r\n self.driveDepartureLineEdit.setObjectName(\"driveDepartureLineEdit\")\r\n self.driveHorizontal3.addWidget(self.driveDepartureLineEdit)\r\n self.driveVertical2.addLayout(self.driveHorizontal3)\r\n self.driveHorizontal4 = QtWidgets.QHBoxLayout()\r\n self.driveHorizontal4.setContentsMargins(10, 10, 10, 10)\r\n self.driveHorizontal4.setObjectName(\"driveHorizontal4\")\r\n self.driveDestinationLabel = QtWidgets.QLabel(self.horizontalLayoutWidget_8)\r\n font = QtGui.QFont()\r\n font.setFamily(\"微软雅黑 Light\")\r\n font.setPointSize(12)\r\n self.driveDestinationLabel.setFont(font)\r\n self.driveDestinationLabel.setObjectName(\"driveDestinationLabel\")\r\n self.driveHorizontal4.addWidget(self.driveDestinationLabel)\r\n self.driveDestinationLineEdit = QtWidgets.QLineEdit(self.horizontalLayoutWidget_8)\r\n font = QtGui.QFont()\r\n font.setFamily(\"微软雅黑 Light\")\r\n font.setPointSize(12)\r\n self.driveDestinationLineEdit.setFont(font)\r\n self.driveDestinationLineEdit.setObjectName(\"driveDestinationLineEdit\")\r\n self.driveHorizontal4.addWidget(self.driveDestinationLineEdit)\r\n self.driveVertical2.addLayout(self.driveHorizontal4)\r\n self.driveHorizontal2.addLayout(self.driveVertical2)\r\n self.driveSearchButton = QtWidgets.QPushButton(self.horizontalLayoutWidget_8)\r\n font = QtGui.QFont()\r\n font.setFamily(\"微软雅黑 Light\")\r\n font.setPointSize(12)\r\n 
self.driveSearchButton.setFont(font)\r\n self.driveSearchButton.setObjectName(\"driveSearchButton\")\r\n self.driveHorizontal2.addWidget(self.driveSearchButton)\r\n self.driveHorizontal2.setStretch(0, 3)\r\n self.driveHorizontal2.setStretch(1, 1)\r\n self.driveVertical1.addLayout(self.driveHorizontal2)\r\n self.driveRoutePlanningLine2 = QtWidgets.QFrame(self.horizontalLayoutWidget_8)\r\n self.driveRoutePlanningLine2.setFrameShape(QtWidgets.QFrame.HLine)\r\n self.driveRoutePlanningLine2.setFrameShadow(QtWidgets.QFrame.Sunken)\r\n self.driveRoutePlanningLine2.setObjectName(\"driveRoutePlanningLine2\")\r\n self.driveVertical1.addWidget(self.driveRoutePlanningLine2)\r\n self.driveResultTextEdit = QtWidgets.QTextEdit(self.horizontalLayoutWidget_8)\r\n font = QtGui.QFont()\r\n font.setFamily(\"微软雅黑 Light\")\r\n font.setPointSize(12)\r\n self.driveResultTextEdit.setFont(font)\r\n self.driveResultTextEdit.setStyleSheet(\"QTextEdit{\\n\"\r\n \" color: Black;\\n\"\r\n \" background: #FFFFFF;\\n\"\r\n \" NoFrame;\\n\"\r\n \"}\")\r\n self.driveResultTextEdit.setFrameShape(QtWidgets.QFrame.NoFrame)\r\n self.driveResultTextEdit.setObjectName(\"driveResultTextEdit\")\r\n self.driveResultTextEdit.setReadOnly(True)\r\n self.driveResultTextEdit.setCursor(QtCore.Qt.ArrowCursor)\r\n self.driveResultTextEdit.setContextMenuPolicy(QtCore.Qt.NoContextMenu)\r\n self.driveResultTextEdit.setTextInteractionFlags(QtCore.Qt.NoTextInteraction)\r\n self.driveVertical1.addWidget(self.driveResultTextEdit)\r\n self.driveHorizontal1.addLayout(self.driveVertical1)\r\n self.driveRoutePlanningLine5 = QtWidgets.QFrame(self.horizontalLayoutWidget_8)\r\n self.driveRoutePlanningLine5.setFrameShape(QtWidgets.QFrame.VLine)\r\n self.driveRoutePlanningLine5.setFrameShadow(QtWidgets.QFrame.Sunken)\r\n self.driveRoutePlanningLine5.setObjectName(\"driveRoutePlanningLine5\")\r\n self.driveHorizontal1.addWidget(self.driveRoutePlanningLine5)\r\n self.driveVertical3 = QtWidgets.QVBoxLayout()\r\n 
self.driveVertical3.setObjectName(\"driveVertical3\")\r\n self.driveRoutePlanningInformationLabel = QtWidgets.QLabel(self.horizontalLayoutWidget_8)\r\n font = QtGui.QFont()\r\n font.setFamily(\"微软雅黑 Light\")\r\n font.setPointSize(16)\r\n self.driveRoutePlanningInformationLabel.setFont(font)\r\n self.driveRoutePlanningInformationLabel.setObjectName(\"driveRoutePlanningInformationLabel\")\r\n self.driveRoutePlanningInformationLabel.setMargin(10)\r\n self.driveVertical3.addWidget(self.driveRoutePlanningInformationLabel)\r\n self.driveRoutePlanningDetailInformationLabel = QtWidgets.QLabel(self.horizontalLayoutWidget_8)\r\n font = QtGui.QFont()\r\n font.setFamily(\"微软雅黑 Light\")\r\n font.setPointSize(12)\r\n self.driveRoutePlanningDetailInformationLabel.setFont(font)\r\n self.driveRoutePlanningDetailInformationLabel.setWordWrap(True)\r\n self.driveRoutePlanningDetailInformationLabel.setObjectName(\"driveRoutePlanningDetailInformationLabel\")\r\n self.driveRoutePlanningDetailInformationLabel.setMargin(10)\r\n self.driveVertical3.addWidget(self.driveRoutePlanningDetailInformationLabel)\r\n self.driveRoutePlanningLine3 = QtWidgets.QFrame(self.horizontalLayoutWidget_8)\r\n self.driveRoutePlanningLine3.setFrameShape(QtWidgets.QFrame.HLine)\r\n self.driveRoutePlanningLine3.setFrameShadow(QtWidgets.QFrame.Sunken)\r\n self.driveRoutePlanningLine3.setObjectName(\"driveRoutePlanningLine3\")\r\n self.driveVertical3.addWidget(self.driveRoutePlanningLine3)\r\n self.driveInputContextLabel = QtWidgets.QLabel(self.horizontalLayoutWidget_8)\r\n font = QtGui.QFont()\r\n font.setFamily(\"微软雅黑 Light\")\r\n font.setPointSize(16)\r\n self.driveInputContextLabel.setFont(font)\r\n self.driveInputContextLabel.setObjectName(\"driveInputContextLabel\")\r\n self.driveInputContextLabel.setMargin(10)\r\n self.driveVertical3.addWidget(self.driveInputContextLabel)\r\n self.driveInputInformationLabel = QtWidgets.QLabel(self.horizontalLayoutWidget_8)\r\n font = QtGui.QFont()\r\n font.setFamily(\"微软雅黑 
Light\")\r\n font.setPointSize(12)\r\n self.driveInputInformationLabel.setFont(font)\r\n self.driveInputInformationLabel.setObjectName(\"driveInputInformationLabel\")\r\n self.driveInputInformationLabel.setMargin(10)\r\n self.driveVertical3.addWidget(self.driveInputInformationLabel)\r\n self.driveRoutePlanningLine4 = QtWidgets.QFrame(self.horizontalLayoutWidget_8)\r\n self.driveRoutePlanningLine4.setFrameShape(QtWidgets.QFrame.HLine)\r\n self.driveRoutePlanningLine4.setFrameShadow(QtWidgets.QFrame.Sunken)\r\n self.driveRoutePlanningLine4.setObjectName(\"driveRoutePlanningLine4\")\r\n self.driveVertical3.addWidget(self.driveRoutePlanningLine4)\r\n self.driveHorizontal5 = QtWidgets.QHBoxLayout()\r\n self.driveHorizontal5.setContentsMargins(10, -1, -1, -1)\r\n self.driveHorizontal5.setObjectName(\"driveHorizontal5\")\r\n self.driveFeedBackPhotoLabel = QtWidgets.QLabel(self.horizontalLayoutWidget_8)\r\n self.driveFeedBackPhotoLabel.setMaximumSize(QtCore.QSize(20, 20))\r\n self.driveFeedBackPhotoLabel.setText(\"\")\r\n self.driveFeedBackPhotoLabel.setObjectName(\"driveFeedBackPhotoLabel\")\r\n self.drivePixmap = QPixmap(\":/FeedBack.png\")\r\n self.driveFeedBackPhotoLabel.setPixmap(self.drivePixmap)\r\n self.driveFeedBackPhotoLabel.setScaledContents(True)\r\n self.driveHorizontal5.addWidget(self.driveFeedBackPhotoLabel)\r\n self.driveFeedBackLabel = QtWidgets.QLabel(self.horizontalLayoutWidget_8)\r\n self.driveFeedBackLabel.setMaximumSize(QtCore.QSize(16777215, 20))\r\n font = QtGui.QFont()\r\n font.setFamily(\"微软雅黑 Light\")\r\n font.setPointSize(12)\r\n self.driveFeedBackLabel.setFont(font)\r\n self.driveFeedBackLabel.setOpenExternalLinks(True)\r\n self.driveFeedBackLabel.setObjectName(\"driveFeedBackLabel\")\r\n self.driveHorizontal5.addWidget(self.driveFeedBackLabel)\r\n self.driveVertical3.addLayout(self.driveHorizontal5)\r\n spacerItem4 = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)\r\n 
self.driveVertical3.addItem(spacerItem4)\r\n self.driveHorizontal1.addLayout(self.driveVertical3)\r\n self.driveHorizontal1.setStretch(0, 2)\r\n self.driveHorizontal1.setStretch(2, 1)\r\n self.RoutePlanningTabPage.addTab(self.driveRoutePlanningTab, \"\")\r\n self.gridLayoutPage1.addWidget(self.RoutePlanningTabPage, 0, 0, 1, 1)\r\n self.amapProgramStackedWidget.addWidget(self.routePlanningPage)\r\n self.IPLocationPage = QtWidgets.QWidget()\r\n self.IPLocationPage.setObjectName(\"IPLocationPage\")\r\n self.horizontalLayoutWidget_10 = QtWidgets.QWidget(self.IPLocationPage)\r\n self.horizontalLayoutWidget_10.setGeometry(QtCore.QRect(0, 0, 1101, 721))\r\n self.horizontalLayoutWidget_10.setObjectName(\"horizontalLayoutWidget_10\")\r\n self.IPLocationHorizontal1 = QtWidgets.QHBoxLayout(self.horizontalLayoutWidget_10)\r\n self.IPLocationHorizontal1.setContentsMargins(10, 10, 10, 10)\r\n self.IPLocationHorizontal1.setObjectName(\"IPLocationHorizontal1\")\r\n self.IPLocationLine6 = QtWidgets.QFrame(self.horizontalLayoutWidget_10)\r\n self.IPLocationLine6.setFrameShape(QtWidgets.QFrame.VLine)\r\n self.IPLocationLine6.setFrameShadow(QtWidgets.QFrame.Sunken)\r\n self.IPLocationLine6.setObjectName(\"IPLocationLine6\")\r\n self.IPLocationHorizontal1.addWidget(self.IPLocationLine6)\r\n self.IPLocationVertical1 = QtWidgets.QVBoxLayout()\r\n self.IPLocationVertical1.setObjectName(\"IPLocationVertical1\")\r\n self.IPLocationLabel = QtWidgets.QLabel(self.horizontalLayoutWidget_10)\r\n font = QtGui.QFont()\r\n font.setFamily(\"微软雅黑\")\r\n font.setPointSize(20)\r\n self.IPLocationLabel.setFont(font)\r\n self.IPLocationLabel.setAlignment(QtCore.Qt.AlignCenter)\r\n self.IPLocationLabel.setObjectName(\"IPLocationLabel\")\r\n self.IPLocationLabel.setMargin(10)\r\n self.IPLocationVertical1.addWidget(self.IPLocationLabel)\r\n self.IPLocationSearchResultContextLabel = QtWidgets.QLabel(self.horizontalLayoutWidget_10)\r\n font = QtGui.QFont()\r\n font.setFamily(\"微软雅黑 Light\")\r\n 
font.setPointSize(16)\r\n self.IPLocationSearchResultContextLabel.setFont(font)\r\n self.IPLocationSearchResultContextLabel.setObjectName(\"IPLocationSearchResultContextLabel\")\r\n self.IPLocationSearchResultContextLabel.setMargin(10)\r\n self.IPLocationVertical1.addWidget(self.IPLocationSearchResultContextLabel)\r\n self.IPLocationSearchInformationLabel = QtWidgets.QLabel(self.horizontalLayoutWidget_10)\r\n font = QtGui.QFont()\r\n font.setFamily(\"微软雅黑 Light\")\r\n font.setPointSize(12)\r\n self.IPLocationSearchInformationLabel.setFont(font)\r\n self.IPLocationSearchInformationLabel.setWordWrap(True)\r\n self.IPLocationSearchInformationLabel.setObjectName(\"IPLocationSearchInformationLabel\")\r\n self.IPLocationSearchInformationLabel.setMargin(10)\r\n self.IPLocationVertical1.addWidget(self.IPLocationSearchInformationLabel)\r\n self.IPLocationLine1 = QtWidgets.QFrame(self.horizontalLayoutWidget_10)\r\n self.IPLocationLine1.setFrameShape(QtWidgets.QFrame.HLine)\r\n self.IPLocationLine1.setFrameShadow(QtWidgets.QFrame.Sunken)\r\n self.IPLocationLine1.setObjectName(\"IPLocationLine1\")\r\n self.IPLocationVertical1.addWidget(self.IPLocationLine1)\r\n self.IPLocationHorizontal2 = QtWidgets.QHBoxLayout()\r\n self.IPLocationHorizontal2.setContentsMargins(10, 10, 10, 10)\r\n self.IPLocationHorizontal2.setObjectName(\"IPLocationHorizontal2\")\r\n self.IPLabel = QtWidgets.QLabel(self.horizontalLayoutWidget_10)\r\n font = QtGui.QFont()\r\n font.setFamily(\"微软雅黑 Light\")\r\n font.setPointSize(12)\r\n self.IPLabel.setFont(font)\r\n self.IPLabel.setObjectName(\"IPLabel\")\r\n self.IPLocationHorizontal2.addWidget(self.IPLabel)\r\n self.IPLocationLineEdit = QtWidgets.QLineEdit(self.horizontalLayoutWidget_10)\r\n font = QtGui.QFont()\r\n font.setFamily(\"微软雅黑 Light\")\r\n font.setPointSize(12)\r\n self.IPLocationLineEdit.setFont(font)\r\n self.IPLocationLineEdit.setObjectName(\"IPLocationLineEdit\")\r\n self.IPLocationHorizontal2.addWidget(self.IPLocationLineEdit)\r\n 
self.IPLocationGetLocalNetWorkButton = QtWidgets.QPushButton(self.horizontalLayoutWidget_10)\r\n font = QtGui.QFont()\r\n font.setFamily(\"微软雅黑 Light\")\r\n font.setPointSize(12)\r\n self.IPLocationGetLocalNetWorkButton.setFont(font)\r\n self.IPLocationGetLocalNetWorkButton.setObjectName(\"IPLocationGetLocalNetWorkButton\")\r\n self.IPLocationHorizontal2.addWidget(self.IPLocationGetLocalNetWorkButton)\r\n self.IPLocationSearchButton = QtWidgets.QPushButton(self.horizontalLayoutWidget_10)\r\n font = QtGui.QFont()\r\n font.setFamily(\"微软雅黑 Light\")\r\n font.setPointSize(12)\r\n self.IPLocationSearchButton.setFont(font)\r\n self.IPLocationSearchButton.setObjectName(\"IPLocationSearchButton\")\r\n self.IPLocationHorizontal2.addWidget(self.IPLocationSearchButton)\r\n self.IPLocationHorizontal2.setStretch(1, 3)\r\n self.IPLocationHorizontal2.setStretch(3, 1)\r\n self.IPLocationVertical1.addLayout(self.IPLocationHorizontal2)\r\n self.IPLocationVertical2 = QtWidgets.QVBoxLayout()\r\n self.IPLocationVertical2.setContentsMargins(10, -1, 10, -1)\r\n self.IPLocationVertical2.setObjectName(\"IPLocationVertical2\")\r\n self.IPLocationResultTextEdit = QtWidgets.QTextEdit(self.horizontalLayoutWidget_10)\r\n font = QtGui.QFont()\r\n font.setFamily(\"微软雅黑 Light\")\r\n font.setPointSize(12)\r\n self.IPLocationResultTextEdit.setFont(font)\r\n self.IPLocationResultTextEdit.setStyleSheet(\"QTextEdit{\\n\"\r\n \" color: Black;\\n\"\r\n \" background: #F0F0F0;\\n\"\r\n \" NoFrame;\\n\"\r\n \"}\")\r\n self.IPLocationResultTextEdit.setFrameShape(QtWidgets.QFrame.NoFrame)\r\n self.IPLocationResultTextEdit.setObjectName(\"IPLocationResultTextEdit\")\r\n self.IPLocationResultTextEdit.setReadOnly(True)\r\n self.IPLocationResultTextEdit.setCursor(QtCore.Qt.ArrowCursor)\r\n self.IPLocationResultTextEdit.setContextMenuPolicy(QtCore.Qt.NoContextMenu)\r\n self.IPLocationResultTextEdit.setTextInteractionFlags(QtCore.Qt.NoTextInteraction)\r\n 
self.IPLocationVertical2.addWidget(self.IPLocationResultTextEdit)\r\n self.IPLocationVertical1.addLayout(self.IPLocationVertical2)\r\n self.IPLocationLine2 = QtWidgets.QFrame(self.horizontalLayoutWidget_10)\r\n self.IPLocationLine2.setFrameShape(QtWidgets.QFrame.HLine)\r\n self.IPLocationLine2.setFrameShadow(QtWidgets.QFrame.Sunken)\r\n self.IPLocationLine2.setObjectName(\"IPLocationLine2\")\r\n self.IPLocationVertical1.addWidget(self.IPLocationLine2)\r\n self.IPLocationHorizontal1.addLayout(self.IPLocationVertical1)\r\n self.IPLocationLine5 = QtWidgets.QFrame(self.horizontalLayoutWidget_10)\r\n self.IPLocationLine5.setFrameShape(QtWidgets.QFrame.VLine)\r\n self.IPLocationLine5.setFrameShadow(QtWidgets.QFrame.Sunken)\r\n self.IPLocationLine5.setObjectName(\"IPLocationLine5\")\r\n self.IPLocationHorizontal1.addWidget(self.IPLocationLine5)\r\n self.IPLocationVertical3 = QtWidgets.QVBoxLayout()\r\n self.IPLocationVertical3.setObjectName(\"IPLocationVertical3\")\r\n self.IPLocationInformationLabel = QtWidgets.QLabel(self.horizontalLayoutWidget_10)\r\n font = QtGui.QFont()\r\n font.setFamily(\"微软雅黑 Light\")\r\n font.setPointSize(16)\r\n self.IPLocationInformationLabel.setFont(font)\r\n self.IPLocationInformationLabel.setObjectName(\"IPLocationInformationLabel\")\r\n self.IPLocationInformationLabel.setMargin(10)\r\n self.IPLocationVertical3.addWidget(self.IPLocationInformationLabel)\r\n self.IPLocationDetailInformationLabel = QtWidgets.QLabel(self.horizontalLayoutWidget_10)\r\n font = QtGui.QFont()\r\n font.setFamily(\"微软雅黑 Light\")\r\n font.setPointSize(12)\r\n self.IPLocationDetailInformationLabel.setFont(font)\r\n self.IPLocationDetailInformationLabel.setWordWrap(True)\r\n self.IPLocationDetailInformationLabel.setObjectName(\"IPLocationDetailInformationLabel\")\r\n self.IPLocationDetailInformationLabel.setMargin(10)\r\n self.IPLocationVertical3.addWidget(self.IPLocationDetailInformationLabel)\r\n self.IPLocationLine3 = 
QtWidgets.QFrame(self.horizontalLayoutWidget_10)\r\n self.IPLocationLine3.setFrameShape(QtWidgets.QFrame.HLine)\r\n self.IPLocationLine3.setFrameShadow(QtWidgets.QFrame.Sunken)\r\n self.IPLocationLine3.setObjectName(\"IPLocationLine3\")\r\n self.IPLocationVertical3.addWidget(self.IPLocationLine3)\r\n self.IPLocationInputContextLabel = QtWidgets.QLabel(self.horizontalLayoutWidget_10)\r\n font = QtGui.QFont()\r\n font.setFamily(\"微软雅黑 Light\")\r\n font.setPointSize(16)\r\n self.IPLocationInputContextLabel.setFont(font)\r\n self.IPLocationInputContextLabel.setObjectName(\"IPLocationInputContextLabel\")\r\n self.IPLocationInputContextLabel.setMargin(10)\r\n self.IPLocationVertical3.addWidget(self.IPLocationInputContextLabel)\r\n self.IPLocationInputInformationLabel = QtWidgets.QLabel(self.horizontalLayoutWidget_10)\r\n font = QtGui.QFont()\r\n font.setFamily(\"微软雅黑 Light\")\r\n font.setPointSize(12)\r\n self.IPLocationInputInformationLabel.setFont(font)\r\n self.IPLocationInputInformationLabel.setWordWrap(True)\r\n self.IPLocationInputInformationLabel.setObjectName(\"IPLocationInputInformationLabel\")\r\n self.IPLocationInputInformationLabel.setMargin(10)\r\n self.IPLocationVertical3.addWidget(self.IPLocationInputInformationLabel)\r\n self.IPLocationLine4 = QtWidgets.QFrame(self.horizontalLayoutWidget_10)\r\n self.IPLocationLine4.setFrameShape(QtWidgets.QFrame.HLine)\r\n self.IPLocationLine4.setFrameShadow(QtWidgets.QFrame.Sunken)\r\n self.IPLocationLine4.setObjectName(\"IPLocationLine4\")\r\n self.IPLocationVertical3.addWidget(self.IPLocationLine4)\r\n self.IPLocationHorizontal3 = QtWidgets.QHBoxLayout()\r\n self.IPLocationHorizontal3.setContentsMargins(10, -1, -1, -1)\r\n self.IPLocationHorizontal3.setObjectName(\"IPLocationHorizontal3\")\r\n self.IPLocationFeedBackPhotoLabel = QtWidgets.QLabel(self.horizontalLayoutWidget_10)\r\n self.IPLocationFeedBackPhotoLabel.setMaximumSize(QtCore.QSize(20, 20))\r\n self.IPLocationFeedBackPhotoLabel.setText(\"\")\r\n 
self.IPLocationFeedBackPhotoLabel.setObjectName(\"IPLocationFeedBackPhotoLabel\")\r\n self.IPLocationPixmap = QPixmap(\":/FeedBack.png\")\r\n self.IPLocationFeedBackPhotoLabel.setPixmap(self.IPLocationPixmap)\r\n self.IPLocationFeedBackPhotoLabel.setScaledContents(True)\r\n self.IPLocationHorizontal3.addWidget(self.IPLocationFeedBackPhotoLabel)\r\n self.IPLocationFeedBackLabel = QtWidgets.QLabel(self.horizontalLayoutWidget_10)\r\n self.IPLocationFeedBackLabel.setMaximumSize(QtCore.QSize(16777215, 20))\r\n font = QtGui.QFont()\r\n font.setFamily(\"微软雅黑 Light\")\r\n font.setPointSize(12)\r\n self.IPLocationFeedBackLabel.setFont(font)\r\n self.IPLocationFeedBackLabel.setOpenExternalLinks(True)\r\n self.IPLocationFeedBackLabel.setObjectName(\"IPLocationFeedBackLabel\")\r\n self.IPLocationHorizontal3.addWidget(self.IPLocationFeedBackLabel)\r\n self.IPLocationVertical3.addLayout(self.IPLocationHorizontal3)\r\n spacerItem5 = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)\r\n self.IPLocationVertical3.addItem(spacerItem5)\r\n self.IPLocationHorizontal1.addLayout(self.IPLocationVertical3)\r\n self.IPLocationHorizontal1.setStretch(1, 2)\r\n self.IPLocationHorizontal1.setStretch(3, 1)\r\n self.amapProgramStackedWidget.addWidget(self.IPLocationPage)\r\n self.staticMapsPage = QtWidgets.QWidget()\r\n self.staticMapsPage.setObjectName(\"staticMapsPage\")\r\n self.horizontalLayoutWidget_14 = QtWidgets.QWidget(self.staticMapsPage)\r\n self.horizontalLayoutWidget_14.setGeometry(QtCore.QRect(0, 0, 1101, 721))\r\n self.horizontalLayoutWidget_14.setObjectName(\"horizontalLayoutWidget_14\")\r\n self.staticMapsHorizontal1 = QtWidgets.QHBoxLayout(self.horizontalLayoutWidget_14)\r\n self.staticMapsHorizontal1.setContentsMargins(10, 10, 10, 10)\r\n self.staticMapsHorizontal1.setObjectName(\"staticMapsHorizontal1\")\r\n self.staticMapsLine5 = QtWidgets.QFrame(self.horizontalLayoutWidget_14)\r\n 
self.staticMapsLine5.setFrameShape(QtWidgets.QFrame.VLine)\r\n self.staticMapsLine5.setFrameShadow(QtWidgets.QFrame.Sunken)\r\n self.staticMapsLine5.setObjectName(\"staticMapsLine5\")\r\n self.staticMapsHorizontal1.addWidget(self.staticMapsLine5)\r\n self.staticMapsVertical1 = QtWidgets.QVBoxLayout()\r\n self.staticMapsVertical1.setObjectName(\"staticMapsVertical1\")\r\n self.staticMapsLabel = QtWidgets.QLabel(self.horizontalLayoutWidget_14)\r\n font = QtGui.QFont()\r\n font.setFamily(\"微软雅黑\")\r\n font.setPointSize(20)\r\n self.staticMapsLabel.setFont(font)\r\n self.staticMapsLabel.setAlignment(QtCore.Qt.AlignCenter)\r\n self.staticMapsLabel.setObjectName(\"staticMapsLabel\")\r\n self.staticMapsLabel.setMargin(10)\r\n self.staticMapsVertical1.addWidget(self.staticMapsLabel)\r\n self.staticMapsSearchResultContextLabel = QtWidgets.QLabel(self.horizontalLayoutWidget_14)\r\n font = QtGui.QFont()\r\n font.setFamily(\"微软雅黑 Light\")\r\n font.setPointSize(16)\r\n self.staticMapsSearchResultContextLabel.setFont(font)\r\n self.staticMapsSearchResultContextLabel.setObjectName(\"staticMapsSearchResultContextLabel\")\r\n self.staticMapsSearchResultContextLabel.setMargin(10)\r\n self.staticMapsVertical1.addWidget(self.staticMapsSearchResultContextLabel)\r\n self.staticMapsSearchInformationLabel = QtWidgets.QLabel(self.horizontalLayoutWidget_14)\r\n font = QtGui.QFont()\r\n font.setFamily(\"微软雅黑 Light\")\r\n font.setPointSize(12)\r\n self.staticMapsSearchInformationLabel.setFont(font)\r\n self.staticMapsSearchInformationLabel.setWordWrap(True)\r\n self.staticMapsSearchInformationLabel.setObjectName(\"staticMapsSearchInformationLabel\")\r\n self.staticMapsSearchInformationLabel.setMargin(10)\r\n self.staticMapsVertical1.addWidget(self.staticMapsSearchInformationLabel)\r\n self.staticMapsLine1 = QtWidgets.QFrame(self.horizontalLayoutWidget_14)\r\n self.staticMapsLine1.setFrameShape(QtWidgets.QFrame.HLine)\r\n self.staticMapsLine1.setFrameShadow(QtWidgets.QFrame.Sunken)\r\n 
self.staticMapsLine1.setObjectName(\"staticMapsLine1\")\r\n self.staticMapsVertical1.addWidget(self.staticMapsLine1)\r\n self.staticMapsHorizontal2 = QtWidgets.QHBoxLayout()\r\n self.staticMapsHorizontal2.setContentsMargins(10, 10, 10, 10)\r\n self.staticMapsHorizontal2.setObjectName(\"staticMapsHorizontal2\")\r\n self.staticMapsSearchLineEdit = QtWidgets.QLineEdit(self.horizontalLayoutWidget_14)\r\n font = QtGui.QFont()\r\n font.setFamily(\"微软雅黑 Light\")\r\n font.setPointSize(12)\r\n self.staticMapsSearchLineEdit.setFont(font)\r\n self.staticMapsSearchLineEdit.setObjectName(\"staticMapsSearchLineEdit\")\r\n self.staticMapsHorizontal2.addWidget(self.staticMapsSearchLineEdit)\r\n self.staticMapsSearchButton = QtWidgets.QPushButton(self.horizontalLayoutWidget_14)\r\n font = QtGui.QFont()\r\n font.setFamily(\"微软雅黑 Light\")\r\n font.setPointSize(12)\r\n self.staticMapsSearchButton.setFont(font)\r\n self.staticMapsSearchButton.setObjectName(\"staticMapsSearchButton\")\r\n self.staticMapsHorizontal2.addWidget(self.staticMapsSearchButton)\r\n self.staticMapsHorizontal2.setStretch(0, 3)\r\n self.staticMapsHorizontal2.setStretch(1, 1)\r\n self.staticMapsVertical1.addLayout(self.staticMapsHorizontal2)\r\n self.staticMapsPhotoView = QtWidgets.QGraphicsView(self.horizontalLayoutWidget_14)\r\n self.staticMapsPhotoView.setStyleSheet(\"QGraphicsView{\\n\"\r\n \" color: Black;\\n\"\r\n \" background: #F0F0F0;\\n\"\r\n \" NoFrame;\\n\"\r\n \"}\")\r\n self.staticMapsPhotoView.setFrameShape(QtWidgets.QFrame.NoFrame)\r\n self.staticMapsPhotoView.setObjectName(\"staticMapsPhotoView\")\r\n self.staticMapsVertical1.addWidget(self.staticMapsPhotoView)\r\n self.staticMapsHorizontal1.addLayout(self.staticMapsVertical1)\r\n self.staticMapsLine4 = QtWidgets.QFrame(self.horizontalLayoutWidget_14)\r\n self.staticMapsLine4.setFrameShape(QtWidgets.QFrame.VLine)\r\n self.staticMapsLine4.setFrameShadow(QtWidgets.QFrame.Sunken)\r\n self.staticMapsLine4.setObjectName(\"staticMapsLine4\")\r\n 
self.staticMapsHorizontal1.addWidget(self.staticMapsLine4)\r\n self.staticMapsVertical2 = QtWidgets.QVBoxLayout()\r\n self.staticMapsVertical2.setObjectName(\"staticMapsVertical2\")\r\n self.staticMapsServiceLabel = QtWidgets.QLabel(self.horizontalLayoutWidget_14)\r\n font = QtGui.QFont()\r\n font.setFamily(\"微软雅黑 Light\")\r\n font.setPointSize(16)\r\n self.staticMapsServiceLabel.setFont(font)\r\n self.staticMapsServiceLabel.setObjectName(\"staticMapsServiceLabel\")\r\n self.staticMapsServiceLabel.setMargin(10)\r\n self.staticMapsVertical2.addWidget(self.staticMapsServiceLabel)\r\n self.staticMapsDetailInformationLabel = QtWidgets.QLabel(self.horizontalLayoutWidget_14)\r\n font = QtGui.QFont()\r\n font.setFamily(\"微软雅黑 Light\")\r\n font.setPointSize(12)\r\n self.staticMapsDetailInformationLabel.setFont(font)\r\n self.staticMapsDetailInformationLabel.setWordWrap(True)\r\n self.staticMapsDetailInformationLabel.setObjectName(\"staticMapsDetailInformationLabel\")\r\n self.staticMapsDetailInformationLabel.setMargin(10)\r\n self.staticMapsVertical2.addWidget(self.staticMapsDetailInformationLabel)\r\n self.staticMapsLine2 = QtWidgets.QFrame(self.horizontalLayoutWidget_14)\r\n self.staticMapsLine2.setFrameShape(QtWidgets.QFrame.HLine)\r\n self.staticMapsLine2.setFrameShadow(QtWidgets.QFrame.Sunken)\r\n self.staticMapsLine2.setObjectName(\"staticMapsLine2\")\r\n self.staticMapsVertical2.addWidget(self.staticMapsLine2)\r\n self.staticMapsSearchContextLabel = QtWidgets.QLabel(self.horizontalLayoutWidget_14)\r\n font = QtGui.QFont()\r\n font.setFamily(\"微软雅黑 Light\")\r\n font.setPointSize(16)\r\n self.staticMapsSearchContextLabel.setFont(font)\r\n self.staticMapsSearchContextLabel.setObjectName(\"staticMapsSearchContextLabel\")\r\n self.staticMapsSearchContextLabel.setMargin(10)\r\n self.staticMapsVertical2.addWidget(self.staticMapsSearchContextLabel)\r\n self.staticMapsInputInformationLabel = QtWidgets.QLabel(self.horizontalLayoutWidget_14)\r\n font = QtGui.QFont()\r\n 
font.setFamily(\"微软雅黑 Light\")\r\n font.setPointSize(12)\r\n self.staticMapsInputInformationLabel.setFont(font)\r\n self.staticMapsInputInformationLabel.setWordWrap(True)\r\n self.staticMapsInputInformationLabel.setObjectName(\"staticMapsInputInformationLabel\")\r\n self.staticMapsInputInformationLabel.setMargin(10)\r\n self.staticMapsVertical2.addWidget(self.staticMapsInputInformationLabel)\r\n self.staticMapsLine3 = QtWidgets.QFrame(self.horizontalLayoutWidget_14)\r\n self.staticMapsLine3.setFrameShape(QtWidgets.QFrame.HLine)\r\n self.staticMapsLine3.setFrameShadow(QtWidgets.QFrame.Sunken)\r\n self.staticMapsLine3.setObjectName(\"staticMapsLine3\")\r\n self.staticMapsVertical2.addWidget(self.staticMapsLine3)\r\n self.staticMapsHorizontal3 = QtWidgets.QHBoxLayout()\r\n self.staticMapsHorizontal3.setContentsMargins(10, -1, -1, -1)\r\n self.staticMapsHorizontal3.setObjectName(\"staticMapsHorizontal3\")\r\n self.staticMapsFeedBackPhotoLabel = QtWidgets.QLabel(self.horizontalLayoutWidget_14)\r\n self.staticMapsFeedBackPhotoLabel.setMaximumSize(QtCore.QSize(20, 20))\r\n self.staticMapsFeedBackPhotoLabel.setText(\"\")\r\n self.staticMapsFeedBackPhotoLabel.setObjectName(\"staticMapsFeedBackPhotoLabel\")\r\n self.staticMapsPixmap = QPixmap(\":/FeedBack.png\")\r\n self.staticMapsFeedBackPhotoLabel.setPixmap(self.staticMapsPixmap)\r\n self.staticMapsFeedBackPhotoLabel.setScaledContents(True)\r\n self.staticMapsHorizontal3.addWidget(self.staticMapsFeedBackPhotoLabel)\r\n self.staticMapsFeedBackLabel = QtWidgets.QLabel(self.horizontalLayoutWidget_14)\r\n self.staticMapsFeedBackLabel.setMaximumSize(QtCore.QSize(16777215, 20))\r\n font = QtGui.QFont()\r\n font.setFamily(\"微软雅黑 Light\")\r\n font.setPointSize(12)\r\n self.staticMapsFeedBackLabel.setFont(font)\r\n self.staticMapsFeedBackLabel.setOpenExternalLinks(True)\r\n self.staticMapsFeedBackLabel.setObjectName(\"staticMapsFeedBackLabel\")\r\n self.staticMapsHorizontal3.addWidget(self.staticMapsFeedBackLabel)\r\n 
self.staticMapsVertical2.addLayout(self.staticMapsHorizontal3)\r\n spacerItem6 = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)\r\n self.staticMapsVertical2.addItem(spacerItem6)\r\n self.staticMapsHorizontal1.addLayout(self.staticMapsVertical2)\r\n self.staticMapsHorizontal1.setStretch(1, 2)\r\n self.staticMapsHorizontal1.setStretch(3, 1)\r\n self.amapProgramStackedWidget.addWidget(self.staticMapsPage)\r\n self.administrativeDistrictEnquiryPage = QtWidgets.QWidget()\r\n self.administrativeDistrictEnquiryPage.setObjectName(\"administrativeDistrictEnquiryPage\")\r\n self.horizontalLayoutWidget_13 = QtWidgets.QWidget(self.administrativeDistrictEnquiryPage)\r\n self.horizontalLayoutWidget_13.setGeometry(QtCore.QRect(0, 0, 1101, 721))\r\n self.horizontalLayoutWidget_13.setObjectName(\"horizontalLayoutWidget_13\")\r\n self.administrativeHorizontal1 = QtWidgets.QHBoxLayout(self.horizontalLayoutWidget_13)\r\n self.administrativeHorizontal1.setContentsMargins(10, 10, 10, 10)\r\n self.administrativeHorizontal1.setObjectName(\"administrativeHorizontal1\")\r\n self.administrativeLine6 = QtWidgets.QFrame(self.horizontalLayoutWidget_13)\r\n self.administrativeLine6.setFrameShape(QtWidgets.QFrame.VLine)\r\n self.administrativeLine6.setFrameShadow(QtWidgets.QFrame.Sunken)\r\n self.administrativeLine6.setObjectName(\"administrativeLine6\")\r\n self.administrativeHorizontal1.addWidget(self.administrativeLine6)\r\n self.administrativeVertical1 = QtWidgets.QVBoxLayout()\r\n self.administrativeVertical1.setObjectName(\"administrativeVertical1\")\r\n self.administrativeDistrictEnquiryLabel = QtWidgets.QLabel(self.horizontalLayoutWidget_13)\r\n font = QtGui.QFont()\r\n font.setFamily(\"微软雅黑\")\r\n font.setPointSize(20)\r\n self.administrativeDistrictEnquiryLabel.setFont(font)\r\n self.administrativeDistrictEnquiryLabel.setAlignment(QtCore.Qt.AlignCenter)\r\n 
self.administrativeDistrictEnquiryLabel.setObjectName(\"administrativeDistrictEnquiryLabel\")\r\n self.administrativeDistrictEnquiryLabel.setMargin(10)\r\n self.administrativeVertical1.addWidget(self.administrativeDistrictEnquiryLabel)\r\n self.administrativeSearchResultContextLabel = QtWidgets.QLabel(self.horizontalLayoutWidget_13)\r\n font = QtGui.QFont()\r\n font.setFamily(\"微软雅黑 Light\")\r\n font.setPointSize(16)\r\n self.administrativeSearchResultContextLabel.setFont(font)\r\n self.administrativeSearchResultContextLabel.setObjectName(\"administrativeSearchResultContextLabel\")\r\n self.administrativeSearchResultContextLabel.setMargin(10)\r\n self.administrativeVertical1.addWidget(self.administrativeSearchResultContextLabel)\r\n self.administrativeSearchInformationLabel = QtWidgets.QLabel(self.horizontalLayoutWidget_13)\r\n font = QtGui.QFont()\r\n font.setFamily(\"微软雅黑 Light\")\r\n font.setPointSize(12)\r\n self.administrativeSearchInformationLabel.setFont(font)\r\n self.administrativeSearchInformationLabel.setWordWrap(True)\r\n self.administrativeSearchInformationLabel.setObjectName(\"administrativeSearchInformationLabel\")\r\n self.administrativeSearchInformationLabel.setMargin(10)\r\n self.administrativeVertical1.addWidget(self.administrativeSearchInformationLabel)\r\n self.administrativeLine1 = QtWidgets.QFrame(self.horizontalLayoutWidget_13)\r\n self.administrativeLine1.setFrameShape(QtWidgets.QFrame.HLine)\r\n self.administrativeLine1.setFrameShadow(QtWidgets.QFrame.Sunken)\r\n self.administrativeLine1.setObjectName(\"administrativeLine1\")\r\n self.administrativeVertical1.addWidget(self.administrativeLine1)\r\n self.administrativeHorizontal2 = QtWidgets.QHBoxLayout()\r\n self.administrativeHorizontal2.setContentsMargins(10, 10, 10, 0)\r\n self.administrativeHorizontal2.setObjectName(\"administrativeHorizontal2\")\r\n self.administrativeLabel = QtWidgets.QLabel(self.horizontalLayoutWidget_13)\r\n font = QtGui.QFont()\r\n font.setFamily(\"微软雅黑 
Light\")\r\n font.setPointSize(12)\r\n self.administrativeLabel.setFont(font)\r\n self.administrativeLabel.setObjectName(\"administrativeLabel\")\r\n self.administrativeHorizontal2.addWidget(self.administrativeLabel)\r\n self.provinceComboBox = QtWidgets.QComboBox(self.horizontalLayoutWidget_13)\r\n font = QtGui.QFont()\r\n font.setFamily(\"微软雅黑 Light\")\r\n font.setPointSize(12)\r\n self.provinceComboBox.setFont(font)\r\n self.provinceComboBox.setObjectName(\"provinceComboBox\")\r\n self.administrativeHorizontal2.addWidget(self.provinceComboBox)\r\n self.cityComboBox = QtWidgets.QComboBox(self.horizontalLayoutWidget_13)\r\n font = QtGui.QFont()\r\n font.setFamily(\"微软雅黑 Light\")\r\n font.setPointSize(12)\r\n self.cityComboBox.setFont(font)\r\n self.cityComboBox.setObjectName(\"cityComboBox\")\r\n self.administrativeHorizontal2.addWidget(self.cityComboBox)\r\n self.countyComboBox = QtWidgets.QComboBox(self.horizontalLayoutWidget_13)\r\n font = QtGui.QFont()\r\n font.setFamily(\"微软雅黑 Light\")\r\n font.setPointSize(12)\r\n self.countyComboBox.setFont(font)\r\n self.countyComboBox.setPlaceholderText(\"\")\r\n self.countyComboBox.setObjectName(\"countyComboBox\")\r\n self.administrativeHorizontal2.addWidget(self.countyComboBox)\r\n self.administrativeSearchButton = QtWidgets.QPushButton(self.horizontalLayoutWidget_13)\r\n font = QtGui.QFont()\r\n font.setFamily(\"微软雅黑 Light\")\r\n font.setPointSize(12)\r\n self.administrativeSearchButton.setFont(font)\r\n self.administrativeSearchButton.setObjectName(\"administrativeSearchButton\")\r\n self.administrativeHorizontal2.addWidget(self.administrativeSearchButton)\r\n self.administrativeHorizontal2.setStretch(1, 1)\r\n self.administrativeHorizontal2.setStretch(2, 1)\r\n self.administrativeHorizontal2.setStretch(3, 1)\r\n self.administrativeVertical1.addLayout(self.administrativeHorizontal2)\r\n self.administrativeHorizontal3 = QtWidgets.QHBoxLayout()\r\n self.administrativeHorizontal3.setContentsMargins(10, 10, 10, 10)\r\n 
self.administrativeHorizontal3.setObjectName(\"administrativeHorizontal3\")\r\n self.subDistrictNumLabel = QtWidgets.QLabel(self.horizontalLayoutWidget_13)\r\n font = QtGui.QFont()\r\n font.setFamily(\"微软雅黑 Light\")\r\n font.setPointSize(12)\r\n self.subDistrictNumLabel.setFont(font)\r\n self.subDistrictNumLabel.setObjectName(\"subDistrictNumLabel\")\r\n self.administrativeHorizontal3.addWidget(self.subDistrictNumLabel)\r\n self.subDistrictNumComboBox = QtWidgets.QComboBox(self.horizontalLayoutWidget_13)\r\n font = QtGui.QFont()\r\n font.setFamily(\"微软雅黑 Light\")\r\n font.setPointSize(12)\r\n self.subDistrictNumComboBox.setFont(font)\r\n self.subDistrictNumComboBox.setFrame(False)\r\n self.subDistrictNumComboBox.setModelColumn(0)\r\n self.subDistrictNumComboBox.setObjectName(\"subDistrictNumComboBox\")\r\n self.subDistrictNumComboBox.addItem(\"\")\r\n self.subDistrictNumComboBox.addItem(\"\")\r\n self.subDistrictNumComboBox.addItem(\"\")\r\n self.subDistrictNumComboBox.addItem(\"\")\r\n self.administrativeHorizontal3.addWidget(self.subDistrictNumComboBox)\r\n self.administrativeHorizontal3.setStretch(0, 3)\r\n self.administrativeHorizontal3.setStretch(1, 1)\r\n self.administrativeVertical1.addLayout(self.administrativeHorizontal3)\r\n self.administrativeLine2 = QtWidgets.QFrame(self.horizontalLayoutWidget_13)\r\n self.administrativeLine2.setFrameShape(QtWidgets.QFrame.HLine)\r\n self.administrativeLine2.setFrameShadow(QtWidgets.QFrame.Sunken)\r\n self.administrativeLine2.setObjectName(\"administrativeLine2\")\r\n self.administrativeVertical1.addWidget(self.administrativeLine2)\r\n self.administrativeResultTextEdit = QtWidgets.QTextEdit(self.horizontalLayoutWidget_13)\r\n font = QtGui.QFont()\r\n font.setFamily(\"微软雅黑 Light\")\r\n font.setPointSize(12)\r\n self.administrativeResultTextEdit.setFont(font)\r\n self.administrativeResultTextEdit.setStyleSheet(\"QTextEdit{\\n\"\r\n \" color: Black;\\n\"\r\n \" background: #F0F0F0;\\n\"\r\n \" NoFrame;\\n\"\r\n \"}\")\r\n 
self.administrativeResultTextEdit.setFrameShape(QtWidgets.QFrame.NoFrame)\r\n self.administrativeResultTextEdit.setObjectName(\"administrativeResultTextEdit\")\r\n self.administrativeResultTextEdit.setReadOnly(True)\r\n self.administrativeResultTextEdit.setCursor(QtCore.Qt.ArrowCursor)\r\n self.administrativeResultTextEdit.setContextMenuPolicy(QtCore.Qt.NoContextMenu)\r\n self.administrativeResultTextEdit.setTextInteractionFlags(QtCore.Qt.NoTextInteraction)\r\n self.administrativeVertical1.addWidget(self.administrativeResultTextEdit)\r\n self.administrativeHorizontal1.addLayout(self.administrativeVertical1)\r\n self.administrativeLine5 = QtWidgets.QFrame(self.horizontalLayoutWidget_13)\r\n self.administrativeLine5.setFrameShape(QtWidgets.QFrame.VLine)\r\n self.administrativeLine5.setFrameShadow(QtWidgets.QFrame.Sunken)\r\n self.administrativeLine5.setObjectName(\"administrativeLine5\")\r\n self.administrativeHorizontal1.addWidget(self.administrativeLine5)\r\n self.administrativeVertical2 = QtWidgets.QVBoxLayout()\r\n self.administrativeVertical2.setObjectName(\"administrativeVertical2\")\r\n self.administrativeServiceLabel = QtWidgets.QLabel(self.horizontalLayoutWidget_13)\r\n font = QtGui.QFont()\r\n font.setFamily(\"微软雅黑 Light\")\r\n font.setPointSize(16)\r\n self.administrativeServiceLabel.setFont(font)\r\n self.administrativeServiceLabel.setObjectName(\"administrativeServiceLabel\")\r\n self.administrativeServiceLabel.setMargin(10)\r\n self.administrativeVertical2.addWidget(self.administrativeServiceLabel)\r\n self.administrativeDetailInformationLabel = QtWidgets.QLabel(self.horizontalLayoutWidget_13)\r\n font = QtGui.QFont()\r\n font.setFamily(\"微软雅黑 Light\")\r\n font.setPointSize(12)\r\n self.administrativeDetailInformationLabel.setFont(font)\r\n self.administrativeDetailInformationLabel.setWordWrap(True)\r\n self.administrativeDetailInformationLabel.setObjectName(\"administrativeDetailInformationLabel\")\r\n 
self.administrativeDetailInformationLabel.setMargin(10)\r\n self.administrativeVertical2.addWidget(self.administrativeDetailInformationLabel)\r\n self.administrativeLine3 = QtWidgets.QFrame(self.horizontalLayoutWidget_13)\r\n self.administrativeLine3.setFrameShape(QtWidgets.QFrame.HLine)\r\n self.administrativeLine3.setFrameShadow(QtWidgets.QFrame.Sunken)\r\n self.administrativeLine3.setObjectName(\"administrativeLine3\")\r\n self.administrativeVertical2.addWidget(self.administrativeLine3)\r\n self.administrativeInputContextLabel = QtWidgets.QLabel(self.horizontalLayoutWidget_13)\r\n font = QtGui.QFont()\r\n font.setFamily(\"微软雅黑 Light\")\r\n font.setPointSize(16)\r\n self.administrativeInputContextLabel.setFont(font)\r\n self.administrativeInputContextLabel.setObjectName(\"administrativeInputContextLabel\")\r\n self.administrativeInputContextLabel.setMargin(10)\r\n self.administrativeVertical2.addWidget(self.administrativeInputContextLabel)\r\n self.administrativeInputInformationLabel = QtWidgets.QLabel(self.horizontalLayoutWidget_13)\r\n font = QtGui.QFont()\r\n font.setFamily(\"微软雅黑 Light\")\r\n font.setPointSize(12)\r\n self.administrativeInputInformationLabel.setFont(font)\r\n self.administrativeInputInformationLabel.setWordWrap(True)\r\n self.administrativeInputInformationLabel.setObjectName(\"administrativeInputInformationLabel\")\r\n self.administrativeInputInformationLabel.setMargin(10)\r\n self.administrativeVertical2.addWidget(self.administrativeInputInformationLabel)\r\n self.administrativeLine4 = QtWidgets.QFrame(self.horizontalLayoutWidget_13)\r\n self.administrativeLine4.setFrameShape(QtWidgets.QFrame.HLine)\r\n self.administrativeLine4.setFrameShadow(QtWidgets.QFrame.Sunken)\r\n self.administrativeLine4.setObjectName(\"administrativeLine4\")\r\n self.administrativeVertical2.addWidget(self.administrativeLine4)\r\n self.administrativeHorizontal4 = QtWidgets.QHBoxLayout()\r\n self.administrativeHorizontal4.setContentsMargins(10, -1, -1, -1)\r\n 
self.administrativeHorizontal4.setObjectName(\"administrativeHorizontal4\")\r\n self.administrativeFeedBackPhotoLabel = QtWidgets.QLabel(self.horizontalLayoutWidget_13)\r\n self.administrativeFeedBackPhotoLabel.setMaximumSize(QtCore.QSize(20, 20))\r\n self.administrativeFeedBackPhotoLabel.setText(\"\")\r\n self.administrativeFeedBackPhotoLabel.setObjectName(\"administrativeFeedBackPhotoLabel\")\r\n self.administrativeMapsPixmap = QPixmap(\":/FeedBack.png\")\r\n self.administrativeFeedBackPhotoLabel.setPixmap(self.administrativeMapsPixmap)\r\n self.administrativeFeedBackPhotoLabel.setScaledContents(True)\r\n self.administrativeHorizontal4.addWidget(self.administrativeFeedBackPhotoLabel)\r\n self.administrativeFeedBackLabel = QtWidgets.QLabel(self.horizontalLayoutWidget_13)\r\n self.administrativeFeedBackLabel.setMaximumSize(QtCore.QSize(16777215, 20))\r\n font = QtGui.QFont()\r\n font.setFamily(\"微软雅黑 Light\")\r\n font.setPointSize(12)\r\n self.administrativeFeedBackLabel.setFont(font)\r\n self.administrativeFeedBackLabel.setOpenExternalLinks(True)\r\n self.administrativeFeedBackLabel.setObjectName(\"administrativeFeedBackLabel\")\r\n self.administrativeHorizontal4.addWidget(self.administrativeFeedBackLabel)\r\n self.administrativeVertical2.addLayout(self.administrativeHorizontal4)\r\n spacerItem7 = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)\r\n self.administrativeVertical2.addItem(spacerItem7)\r\n self.administrativeHorizontal1.addLayout(self.administrativeVertical2)\r\n self.administrativeHorizontal1.setStretch(1, 2)\r\n self.administrativeHorizontal1.setStretch(3, 1)\r\n self.amapProgramStackedWidget.addWidget(self.administrativeDistrictEnquiryPage)\r\n self.weatherInformationPage = QtWidgets.QWidget()\r\n self.weatherInformationPage.setObjectName(\"weatherInformationPage\")\r\n self.horizontalLayoutWidget_11 = QtWidgets.QWidget(self.weatherInformationPage)\r\n 
self.horizontalLayoutWidget_11.setGeometry(QtCore.QRect(0, 0, 1101, 721))\r\n self.horizontalLayoutWidget_11.setObjectName(\"horizontalLayoutWidget_11\")\r\n self.weatherHorizontal1 = QtWidgets.QHBoxLayout(self.horizontalLayoutWidget_11)\r\n self.weatherHorizontal1.setContentsMargins(10, 10, 10, 10)\r\n self.weatherHorizontal1.setObjectName(\"weatherHorizontal1\")\r\n self.weatherLine6 = QtWidgets.QFrame(self.horizontalLayoutWidget_11)\r\n self.weatherLine6.setFrameShape(QtWidgets.QFrame.VLine)\r\n self.weatherLine6.setFrameShadow(QtWidgets.QFrame.Sunken)\r\n self.weatherLine6.setObjectName(\"weatherLine6\")\r\n self.weatherHorizontal1.addWidget(self.weatherLine6)\r\n self.weatherVertical1 = QtWidgets.QVBoxLayout()\r\n self.weatherVertical1.setObjectName(\"weatherVertical1\")\r\n self.weatherLabel = QtWidgets.QLabel(self.horizontalLayoutWidget_11)\r\n font = QtGui.QFont()\r\n font.setFamily(\"微软雅黑\")\r\n font.setPointSize(20)\r\n self.weatherLabel.setFont(font)\r\n self.weatherLabel.setAlignment(QtCore.Qt.AlignCenter)\r\n self.weatherLabel.setObjectName(\"weatherLabel\")\r\n self.weatherLabel.setMargin(10)\r\n self.weatherVertical1.addWidget(self.weatherLabel)\r\n self.weatherSearchResultContextLabel = QtWidgets.QLabel(self.horizontalLayoutWidget_11)\r\n font = QtGui.QFont()\r\n font.setFamily(\"微软雅黑 Light\")\r\n font.setPointSize(16)\r\n self.weatherSearchResultContextLabel.setFont(font)\r\n self.weatherSearchResultContextLabel.setObjectName(\"weatherSearchResultContextLabel\")\r\n self.weatherSearchResultContextLabel.setMargin(10)\r\n self.weatherVertical1.addWidget(self.weatherSearchResultContextLabel)\r\n self.weatherSearchInformationLabel = QtWidgets.QLabel(self.horizontalLayoutWidget_11)\r\n font = QtGui.QFont()\r\n font.setFamily(\"微软雅黑 Light\")\r\n font.setPointSize(12)\r\n self.weatherSearchInformationLabel.setFont(font)\r\n self.weatherSearchInformationLabel.setWordWrap(True)\r\n 
self.weatherSearchInformationLabel.setObjectName(\"weatherSearchInformationLabel\")\r\n self.weatherSearchInformationLabel.setMargin(10)\r\n self.weatherVertical1.addWidget(self.weatherSearchInformationLabel)\r\n self.weatherLine1 = QtWidgets.QFrame(self.horizontalLayoutWidget_11)\r\n self.weatherLine1.setFrameShape(QtWidgets.QFrame.HLine)\r\n self.weatherLine1.setFrameShadow(QtWidgets.QFrame.Sunken)\r\n self.weatherLine1.setObjectName(\"weatherLine1\")\r\n self.weatherVertical1.addWidget(self.weatherLine1)\r\n self.weatherHorizontal3 = QtWidgets.QHBoxLayout()\r\n self.weatherHorizontal3.setContentsMargins(10, 10, 10, 10)\r\n self.weatherHorizontal3.setObjectName(\"weatherHorizontal3\")\r\n self.weatherSearchLineEdit = QtWidgets.QLineEdit(self.horizontalLayoutWidget_11)\r\n font = QtGui.QFont()\r\n font.setFamily(\"微软雅黑 Light\")\r\n font.setPointSize(12)\r\n self.weatherSearchLineEdit.setFont(font)\r\n self.weatherSearchLineEdit.setObjectName(\"weatherSearchLineEdit\")\r\n self.weatherHorizontal3.addWidget(self.weatherSearchLineEdit)\r\n self.weatherSearchButton = QtWidgets.QPushButton(self.horizontalLayoutWidget_11)\r\n font = QtGui.QFont()\r\n font.setFamily(\"微软雅黑 Light\")\r\n font.setPointSize(12)\r\n self.weatherSearchButton.setFont(font)\r\n self.weatherSearchButton.setObjectName(\"weatherSearchButton\")\r\n self.weatherHorizontal3.addWidget(self.weatherSearchButton)\r\n self.weatherHorizontal3.setStretch(0, 3)\r\n self.weatherHorizontal3.setStretch(1, 1)\r\n self.weatherVertical1.addLayout(self.weatherHorizontal3)\r\n self.weatherLine2 = QtWidgets.QFrame(self.horizontalLayoutWidget_11)\r\n self.weatherLine2.setFrameShape(QtWidgets.QFrame.HLine)\r\n self.weatherLine2.setFrameShadow(QtWidgets.QFrame.Sunken)\r\n self.weatherLine2.setObjectName(\"weatherLine2\")\r\n self.weatherVertical1.addWidget(self.weatherLine2)\r\n self.weatherResultTextEdit = QtWidgets.QTextEdit(self.horizontalLayoutWidget_11)\r\n font = QtGui.QFont()\r\n font.setFamily(\"微软雅黑 Light\")\r\n 
font.setPointSize(12)\r\n self.weatherResultTextEdit.setFont(font)\r\n self.weatherResultTextEdit.setStyleSheet(\"QTextEdit{\\n\"\r\n \" color: Black;\\n\"\r\n \" background: #F0F0F0;\\n\"\r\n \" NoFrame;\\n\"\r\n \"}\")\r\n self.weatherResultTextEdit.setFrameShape(QtWidgets.QFrame.NoFrame)\r\n self.weatherResultTextEdit.setObjectName(\"weatherResultTextEdit\")\r\n self.weatherResultTextEdit.setReadOnly(True)\r\n self.weatherResultTextEdit.setCursor(QtCore.Qt.ArrowCursor)\r\n self.weatherResultTextEdit.setContextMenuPolicy(QtCore.Qt.NoContextMenu)\r\n self.weatherResultTextEdit.setTextInteractionFlags(QtCore.Qt.NoTextInteraction)\r\n self.weatherVertical1.addWidget(self.weatherResultTextEdit)\r\n self.weatherHorizontal1.addLayout(self.weatherVertical1)\r\n self.weatherLine5 = QtWidgets.QFrame(self.horizontalLayoutWidget_11)\r\n self.weatherLine5.setFrameShape(QtWidgets.QFrame.VLine)\r\n self.weatherLine5.setFrameShadow(QtWidgets.QFrame.Sunken)\r\n self.weatherLine5.setObjectName(\"weatherLine5\")\r\n self.weatherHorizontal1.addWidget(self.weatherLine5)\r\n self.weatherVertical2 = QtWidgets.QVBoxLayout()\r\n self.weatherVertical2.setObjectName(\"weatherVertical2\")\r\n self.weatherInformationLabel = QtWidgets.QLabel(self.horizontalLayoutWidget_11)\r\n font = QtGui.QFont()\r\n font.setFamily(\"微软雅黑 Light\")\r\n font.setPointSize(16)\r\n self.weatherInformationLabel.setFont(font)\r\n self.weatherInformationLabel.setObjectName(\"weatherInformationLabel\")\r\n self.weatherInformationLabel.setMargin(10)\r\n self.weatherVertical2.addWidget(self.weatherInformationLabel)\r\n self.weatherDetailInformationLabel = QtWidgets.QLabel(self.horizontalLayoutWidget_11)\r\n font = QtGui.QFont()\r\n font.setFamily(\"微软雅黑 Light\")\r\n font.setPointSize(12)\r\n self.weatherDetailInformationLabel.setFont(font)\r\n self.weatherDetailInformationLabel.setWordWrap(True)\r\n self.weatherDetailInformationLabel.setObjectName(\"weatherDetailInformationLabel\")\r\n 
self.weatherDetailInformationLabel.setMargin(10)\r\n self.weatherVertical2.addWidget(self.weatherDetailInformationLabel)\r\n self.weatherLine3 = QtWidgets.QFrame(self.horizontalLayoutWidget_11)\r\n self.weatherLine3.setFrameShape(QtWidgets.QFrame.HLine)\r\n self.weatherLine3.setFrameShadow(QtWidgets.QFrame.Sunken)\r\n self.weatherLine3.setObjectName(\"weatherLine3\")\r\n self.weatherVertical2.addWidget(self.weatherLine3)\r\n self.weatherInputContextLabel = QtWidgets.QLabel(self.horizontalLayoutWidget_11)\r\n font = QtGui.QFont()\r\n font.setFamily(\"微软雅黑 Light\")\r\n font.setPointSize(16)\r\n self.weatherInputContextLabel.setFont(font)\r\n self.weatherInputContextLabel.setObjectName(\"weatherInputContextLabel\")\r\n self.weatherInputContextLabel.setMargin(10)\r\n self.weatherVertical2.addWidget(self.weatherInputContextLabel)\r\n self.weatherInputInformationLabel = QtWidgets.QLabel(self.horizontalLayoutWidget_11)\r\n font = QtGui.QFont()\r\n font.setFamily(\"微软雅黑 Light\")\r\n font.setPointSize(12)\r\n self.weatherInputInformationLabel.setFont(font)\r\n self.weatherInputInformationLabel.setWordWrap(True)\r\n self.weatherInputInformationLabel.setObjectName(\"weatherInputInformationLabel\")\r\n self.weatherInputInformationLabel.setMargin(10)\r\n self.weatherVertical2.addWidget(self.weatherInputInformationLabel)\r\n self.weatherLine4 = QtWidgets.QFrame(self.horizontalLayoutWidget_11)\r\n self.weatherLine4.setFrameShape(QtWidgets.QFrame.HLine)\r\n self.weatherLine4.setFrameShadow(QtWidgets.QFrame.Sunken)\r\n self.weatherLine4.setObjectName(\"weatherLine4\")\r\n self.weatherVertical2.addWidget(self.weatherLine4)\r\n self.weatherHorizontal4 = QtWidgets.QHBoxLayout()\r\n self.weatherHorizontal4.setContentsMargins(10, -1, -1, -1)\r\n self.weatherHorizontal4.setObjectName(\"weatherHorizontal4\")\r\n self.weatherFeedBackPhotoLabel = QtWidgets.QLabel(self.horizontalLayoutWidget_11)\r\n self.weatherFeedBackPhotoLabel.setMaximumSize(QtCore.QSize(20, 20))\r\n 
self.weatherFeedBackPhotoLabel.setText(\"\")\r\n self.weatherFeedBackPhotoLabel.setObjectName(\"weatherFeedBackPhotoLabel\")\r\n self.weatherPixmap = QPixmap(\":/FeedBack.png\")\r\n self.weatherFeedBackPhotoLabel.setPixmap(self.weatherPixmap)\r\n self.weatherFeedBackPhotoLabel.setScaledContents(True)\r\n self.weatherHorizontal4.addWidget(self.weatherFeedBackPhotoLabel)\r\n self.weatherFeedBackLabel = QtWidgets.QLabel(self.horizontalLayoutWidget_11)\r\n self.weatherFeedBackLabel.setMaximumSize(QtCore.QSize(16777215, 20))\r\n font = QtGui.QFont()\r\n font.setFamily(\"微软雅黑 Light\")\r\n font.setPointSize(12)\r\n self.weatherFeedBackLabel.setFont(font)\r\n self.weatherFeedBackLabel.setOpenExternalLinks(True)\r\n self.weatherFeedBackLabel.setObjectName(\"weatherFeedBackLabel\")\r\n self.weatherHorizontal4.addWidget(self.weatherFeedBackLabel)\r\n self.weatherVertical2.addLayout(self.weatherHorizontal4)\r\n spacerItem8 = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)\r\n self.weatherVertical2.addItem(spacerItem8)\r\n self.weatherHorizontal1.addLayout(self.weatherVertical2)\r\n self.weatherHorizontal1.setStretch(1, 2)\r\n self.weatherHorizontal1.setStretch(3, 1)\r\n self.amapProgramStackedWidget.addWidget(self.weatherInformationPage)\r\n self.trafficSituationPage = QtWidgets.QWidget()\r\n self.trafficSituationPage.setObjectName(\"trafficSituationPage\")\r\n self.horizontalLayoutWidget_12 = QtWidgets.QWidget(self.trafficSituationPage)\r\n self.horizontalLayoutWidget_12.setGeometry(QtCore.QRect(0, 0, 1101, 721))\r\n self.horizontalLayoutWidget_12.setObjectName(\"horizontalLayoutWidget_12\")\r\n self.trafficSituationHorizontal1 = QtWidgets.QHBoxLayout(self.horizontalLayoutWidget_12)\r\n self.trafficSituationHorizontal1.setContentsMargins(10, 10, 10, 10)\r\n self.trafficSituationHorizontal1.setObjectName(\"trafficSituationHorizontal1\")\r\n self.trafficSituationLine5 = QtWidgets.QFrame(self.horizontalLayoutWidget_12)\r\n 
self.trafficSituationLine5.setFrameShape(QtWidgets.QFrame.VLine)\r\n self.trafficSituationLine5.setFrameShadow(QtWidgets.QFrame.Sunken)\r\n self.trafficSituationLine5.setObjectName(\"trafficSituationLine5\")\r\n self.trafficSituationHorizontal1.addWidget(self.trafficSituationLine5)\r\n self.trafficSituationVertical1 = QtWidgets.QVBoxLayout()\r\n self.trafficSituationVertical1.setObjectName(\"trafficSituationVertical1\")\r\n self.trafficSituationLabel = QtWidgets.QLabel(self.horizontalLayoutWidget_12)\r\n font = QtGui.QFont()\r\n font.setFamily(\"微软雅黑\")\r\n font.setPointSize(20)\r\n self.trafficSituationLabel.setFont(font)\r\n self.trafficSituationLabel.setAlignment(QtCore.Qt.AlignCenter)\r\n self.trafficSituationLabel.setObjectName(\"trafficSituationLabel\")\r\n self.trafficSituationLabel.setMargin(10)\r\n self.trafficSituationVertical1.addWidget(self.trafficSituationLabel)\r\n self.trafficSituationSearchResultContextLabel = QtWidgets.QLabel(self.horizontalLayoutWidget_12)\r\n font = QtGui.QFont()\r\n font.setFamily(\"微软雅黑 Light\")\r\n font.setPointSize(16)\r\n self.trafficSituationSearchResultContextLabel.setFont(font)\r\n self.trafficSituationSearchResultContextLabel.setObjectName(\"trafficSituationSearchResultContextLabel\")\r\n self.trafficSituationSearchResultContextLabel.setMargin(10)\r\n self.trafficSituationVertical1.addWidget(self.trafficSituationSearchResultContextLabel)\r\n self.trafficSituationSearchInformationLabel = QtWidgets.QLabel(self.horizontalLayoutWidget_12)\r\n font = QtGui.QFont()\r\n font.setFamily(\"微软雅黑 Light\")\r\n font.setPointSize(12)\r\n self.trafficSituationSearchInformationLabel.setFont(font)\r\n self.trafficSituationSearchInformationLabel.setWordWrap(True)\r\n self.trafficSituationSearchInformationLabel.setObjectName(\"trafficSituationSearchInformationLabel\")\r\n self.trafficSituationSearchInformationLabel.setMargin(10)\r\n self.trafficSituationVertical1.addWidget(self.trafficSituationSearchInformationLabel)\r\n 
self.trafficSituationHorizontal3 = QtWidgets.QHBoxLayout()\r\n self.trafficSituationHorizontal3.setContentsMargins(10, -1, 10, 0)\r\n self.trafficSituationHorizontal3.setObjectName(\"trafficSituationHorizontal3\")\r\n self.trafficSituationSearchModeLabel = QtWidgets.QLabel(self.horizontalLayoutWidget_12)\r\n font = QtGui.QFont()\r\n font.setFamily(\"微软雅黑 Light\")\r\n font.setPointSize(12)\r\n self.trafficSituationSearchModeLabel.setFont(font)\r\n self.trafficSituationSearchModeLabel.setObjectName(\"trafficSituationSearchModeLabel\")\r\n self.trafficSituationHorizontal3.addWidget(self.trafficSituationSearchModeLabel)\r\n self.trafficRoadRealSituationComboBox = QtWidgets.QComboBox(self.horizontalLayoutWidget_12)\r\n font = QtGui.QFont()\r\n font.setFamily(\"微软雅黑 Light\")\r\n font.setPointSize(12)\r\n self.trafficRoadRealSituationComboBox.setFont(font)\r\n self.trafficRoadRealSituationComboBox.setObjectName(\"trafficRoadRealSituationComboBox\")\r\n self.trafficRoadRealSituationComboBox.setEnabled(False)\r\n self.trafficRoadRealSituationComboBox.addItem(\"\")\r\n self.trafficRoadRealSituationComboBox.addItem(\"\")\r\n self.trafficRoadRealSituationComboBox.addItem(\"\")\r\n self.trafficRoadRealSituationComboBox.addItem(\"\")\r\n self.trafficSituationHorizontal3.addWidget(self.trafficRoadRealSituationComboBox)\r\n self.trafficSituationHorizontal3.setStretch(0, 1)\r\n self.trafficSituationHorizontal3.setStretch(1, 1)\r\n self.trafficSituationVertical1.addLayout(self.trafficSituationHorizontal3)\r\n self.trafficSituationLine1 = QtWidgets.QFrame(self.horizontalLayoutWidget_12)\r\n self.trafficSituationLine1.setFrameShape(QtWidgets.QFrame.HLine)\r\n self.trafficSituationLine1.setFrameShadow(QtWidgets.QFrame.Sunken)\r\n self.trafficSituationLine1.setObjectName(\"trafficSituationLine1\")\r\n self.trafficSituationVertical1.addWidget(self.trafficSituationLine1)\r\n self.trafficSituationStackedWidget = QtWidgets.QStackedWidget(self.horizontalLayoutWidget_12)\r\n 
self.trafficSituationStackedWidget.setObjectName(\"trafficSituationStackedWidget\")\r\n self.realRoadTrafficSituationPage = QtWidgets.QWidget()\r\n self.realRoadTrafficSituationPage.setObjectName(\"realRoadTrafficSituationPage\")\r\n self.verticalLayoutWidget_2 = QtWidgets.QWidget(self.realRoadTrafficSituationPage)\r\n self.verticalLayoutWidget_2.setGeometry(QtCore.QRect(0, 10, 701, 481))\r\n self.verticalLayoutWidget_2.setObjectName(\"verticalLayoutWidget_2\")\r\n self.trafficSituationRealRoadVertical1 = QtWidgets.QVBoxLayout(self.verticalLayoutWidget_2)\r\n self.trafficSituationRealRoadVertical1.setContentsMargins(0, 0, 0, 0)\r\n self.trafficSituationRealRoadVertical1.setObjectName(\"trafficSituationRealRoadVertical1\")\r\n self.trafficSituationRealRoadHorizontal1 = QtWidgets.QHBoxLayout()\r\n self.trafficSituationRealRoadHorizontal1.setContentsMargins(10, -1, 10, 10)\r\n self.trafficSituationRealRoadHorizontal1.setObjectName(\"trafficSituationRealRoadHorizontal1\")\r\n self.trafficRealRoadSelectCityLabel = QtWidgets.QLabel(self.verticalLayoutWidget_2)\r\n font = QtGui.QFont()\r\n font.setFamily(\"微软雅黑 Light\")\r\n font.setPointSize(12)\r\n self.trafficRealRoadSelectCityLabel.setFont(font)\r\n self.trafficRealRoadSelectCityLabel.setObjectName(\"trafficRealRoadSelectCityLabel\")\r\n self.trafficSituationRealRoadHorizontal1.addWidget(self.trafficRealRoadSelectCityLabel)\r\n self.trafficRealRoadProvinceComboBox = QtWidgets.QComboBox(self.verticalLayoutWidget_2)\r\n font = QtGui.QFont()\r\n font.setFamily(\"微软雅黑 Light\")\r\n font.setPointSize(12)\r\n self.trafficRealRoadProvinceComboBox.setFont(font)\r\n self.trafficRealRoadProvinceComboBox.setObjectName(\"trafficRealRoadProvinceComboBox\")\r\n self.trafficSituationRealRoadHorizontal1.addWidget(self.trafficRealRoadProvinceComboBox)\r\n self.trafficRealRoadCityComboBox = QtWidgets.QComboBox(self.verticalLayoutWidget_2)\r\n font = QtGui.QFont()\r\n font.setFamily(\"微软雅黑 Light\")\r\n font.setPointSize(12)\r\n 
self.trafficRealRoadCityComboBox.setFont(font)\r\n self.trafficRealRoadCityComboBox.setObjectName(\"trafficRealRoadCityComboBox\")\r\n self.trafficSituationRealRoadHorizontal1.addWidget(self.trafficRealRoadCityComboBox)\r\n self.trafficSituationRealRoadHorizontal1.setStretch(0, 2)\r\n self.trafficSituationRealRoadHorizontal1.setStretch(1, 1)\r\n self.trafficSituationRealRoadHorizontal1.setStretch(2, 1)\r\n self.trafficSituationRealRoadVertical1.addLayout(self.trafficSituationRealRoadHorizontal1)\r\n self.trafficSituationRealRoadHorizontal2 = QtWidgets.QHBoxLayout()\r\n self.trafficSituationRealRoadHorizontal2.setContentsMargins(10, -1, 10, 10)\r\n self.trafficSituationRealRoadHorizontal2.setObjectName(\"trafficSituationRealRoadHorizontal2\")\r\n self.trafficRealRoadNameLabel = QtWidgets.QLabel(self.verticalLayoutWidget_2)\r\n font = QtGui.QFont()\r\n font.setFamily(\"微软雅黑 Light\")\r\n font.setPointSize(12)\r\n self.trafficRealRoadNameLabel.setFont(font)\r\n self.trafficRealRoadNameLabel.setObjectName(\"trafficRealRoadNameLabel\")\r\n self.trafficSituationRealRoadHorizontal2.addWidget(self.trafficRealRoadNameLabel)\r\n self.trafficSituationRealRoadSearchRoadName = QtWidgets.QLineEdit(self.verticalLayoutWidget_2)\r\n font = QtGui.QFont()\r\n font.setFamily(\"微软雅黑 Light\")\r\n font.setPointSize(12)\r\n self.trafficSituationRealRoadSearchRoadName.setFont(font)\r\n self.trafficSituationRealRoadSearchRoadName.setObjectName(\"trafficSituationRealRoadSearchRoadName\")\r\n self.trafficSituationRealRoadHorizontal2.addWidget(self.trafficSituationRealRoadSearchRoadName)\r\n self.trafficRealRoadSearchButton = QtWidgets.QPushButton(self.verticalLayoutWidget_2)\r\n font = QtGui.QFont()\r\n font.setFamily(\"微软雅黑 Light\")\r\n font.setPointSize(12)\r\n self.trafficRealRoadSearchButton.setFont(font)\r\n self.trafficRealRoadSearchButton.setObjectName(\"trafficRealRoadSearchButton\")\r\n self.trafficSituationRealRoadHorizontal2.addWidget(self.trafficRealRoadSearchButton)\r\n 
self.trafficSituationRealRoadHorizontal2.setStretch(0, 3)\r\n self.trafficSituationRealRoadHorizontal2.setStretch(1, 3)\r\n self.trafficSituationRealRoadHorizontal2.setStretch(2, 2)\r\n self.trafficSituationRealRoadVertical1.addLayout(self.trafficSituationRealRoadHorizontal2)\r\n self.trafficSituationRealRoadHorizontal3 = QtWidgets.QHBoxLayout()\r\n self.trafficSituationRealRoadHorizontal3.setContentsMargins(10, -1, 10, -1)\r\n self.trafficSituationRealRoadHorizontal3.setObjectName(\"trafficSituationRealRoadHorizontal3\")\r\n self.trafficRealReadRoadPhotoView = QtWidgets.QGraphicsView(self.verticalLayoutWidget_2)\r\n font = QtGui.QFont()\r\n font.setFamily(\"微软雅黑 Light\")\r\n font.setPointSize(12)\r\n self.trafficRealReadRoadPhotoView.setFont(font)\r\n self.trafficRealReadRoadPhotoView.setStyleSheet(\"QGraphicsView{\\n\"\r\n \" color: Black;\\n\"\r\n \" background: #F0F0F0;\\n\"\r\n \" NoFrame;\\n\"\r\n \"}\")\r\n self.trafficRealReadRoadPhotoView.setFrameShape(QtWidgets.QFrame.NoFrame)\r\n self.trafficRealReadRoadPhotoView.setObjectName(\"trafficRealReadRoadPhotoView\")\r\n self.trafficSituationRealRoadHorizontal3.addWidget(self.trafficRealReadRoadPhotoView)\r\n self.trafficSituationRealRoadTextEdit = QtWidgets.QTextEdit(self.verticalLayoutWidget_2)\r\n font = QtGui.QFont()\r\n font.setFamily(\"微软雅黑 Light\")\r\n font.setPointSize(12)\r\n self.trafficSituationRealRoadTextEdit.setFont(font)\r\n self.trafficSituationRealRoadTextEdit.setStyleSheet(\"QTextEdit{\\n\"\r\n \" color: Black;\\n\"\r\n \" background: #F0F0F0;\\n\"\r\n \" NoFrame;\\n\"\r\n \"}\")\r\n self.trafficSituationRealRoadTextEdit.setFrameShape(QtWidgets.QFrame.NoFrame)\r\n self.trafficSituationRealRoadTextEdit.setObjectName(\"trafficSituationRealRoadTextEdit\")\r\n self.trafficSituationRealRoadTextEdit.setReadOnly(True)\r\n self.trafficSituationRealRoadTextEdit.setCursor(QtCore.Qt.ArrowCursor)\r\n self.trafficSituationRealRoadTextEdit.setContextMenuPolicy(QtCore.Qt.NoContextMenu)\r\n 
self.trafficSituationRealRoadTextEdit.setTextInteractionFlags(QtCore.Qt.NoTextInteraction)\r\n self.trafficSituationRealRoadHorizontal3.addWidget(self.trafficSituationRealRoadTextEdit)\r\n self.trafficSituationRealRoadHorizontal3.setStretch(0, 2)\r\n self.trafficSituationRealRoadHorizontal3.setStretch(1, 1)\r\n self.trafficSituationRealRoadVertical1.addLayout(self.trafficSituationRealRoadHorizontal3)\r\n self.trafficSituationStackedWidget.addWidget(self.realRoadTrafficSituationPage)\r\n self.rectangleRoadTrafficSituationPage = QtWidgets.QWidget()\r\n self.rectangleRoadTrafficSituationPage.setObjectName(\"rectangleRoadTrafficSituationPage\")\r\n self.verticalLayoutWidget_3 = QtWidgets.QWidget(self.rectangleRoadTrafficSituationPage)\r\n self.verticalLayoutWidget_3.setGeometry(QtCore.QRect(0, 10, 701, 481))\r\n self.verticalLayoutWidget_3.setObjectName(\"verticalLayoutWidget_3\")\r\n self.trafficSituationRectangleRoadVertical1 = QtWidgets.QVBoxLayout(self.verticalLayoutWidget_3)\r\n self.trafficSituationRectangleRoadVertical1.setContentsMargins(0, 0, 0, 0)\r\n self.trafficSituationRectangleRoadVertical1.setObjectName(\"trafficSituationRectangleRoadVertical1\")\r\n self.trafficSituationRectangleRoadHorizontal1 = QtWidgets.QHBoxLayout()\r\n self.trafficSituationRectangleRoadHorizontal1.setContentsMargins(10, -1, 10, 10)\r\n self.trafficSituationRectangleRoadHorizontal1.setObjectName(\"trafficSituationRectangleRoadHorizontal1\")\r\n self.trafficRectangleRoadSearchPositionNameLabel = QtWidgets.QLabel(self.verticalLayoutWidget_3)\r\n font = QtGui.QFont()\r\n font.setFamily(\"微软雅黑 Light\")\r\n font.setPointSize(12)\r\n self.trafficRectangleRoadSearchPositionNameLabel.setFont(font)\r\n self.trafficRectangleRoadSearchPositionNameLabel.setObjectName(\"trafficRectangleRoadSearchPositionNameLabel\")\r\n self.trafficSituationRectangleRoadHorizontal1.addWidget(self.trafficRectangleRoadSearchPositionNameLabel)\r\n self.trafficRectangleRoadSearchPositionLineEdit1 = 
QtWidgets.QLineEdit(self.verticalLayoutWidget_3)\r\n font = QtGui.QFont()\r\n font.setFamily(\"微软雅黑 Light\")\r\n font.setPointSize(12)\r\n self.trafficRectangleRoadSearchPositionLineEdit1.setFont(font)\r\n self.trafficRectangleRoadSearchPositionLineEdit1.setObjectName(\"trafficRectangleRoadSearchPositionLineEdit1\")\r\n self.trafficSituationRectangleRoadHorizontal1.addWidget(self.trafficRectangleRoadSearchPositionLineEdit1)\r\n self.trafficRectangleRoadSearchPositionLineEdit2 = QtWidgets.QLineEdit(self.verticalLayoutWidget_3)\r\n font = QtGui.QFont()\r\n font.setFamily(\"微软雅黑 Light\")\r\n font.setPointSize(12)\r\n self.trafficRectangleRoadSearchPositionLineEdit2.setFont(font)\r\n self.trafficRectangleRoadSearchPositionLineEdit2.setObjectName(\"trafficRectangleRoadSearchPositionLineEdit2\")\r\n self.trafficSituationRectangleRoadHorizontal1.addWidget(self.trafficRectangleRoadSearchPositionLineEdit2)\r\n self.trafficSituationRectangleRoadHorizontal1.setStretch(0, 6)\r\n self.trafficSituationRectangleRoadHorizontal1.setStretch(1, 2)\r\n self.trafficSituationRectangleRoadHorizontal1.setStretch(2, 2)\r\n self.trafficSituationRectangleRoadVertical1.addLayout(self.trafficSituationRectangleRoadHorizontal1)\r\n self.trafficSituationRectangleRoadHorizontal2 = QtWidgets.QHBoxLayout()\r\n self.trafficSituationRectangleRoadHorizontal2.setContentsMargins(10, -1, 10, 10)\r\n self.trafficSituationRectangleRoadHorizontal2.setObjectName(\"trafficSituationRectangleRoadHorizontal2\")\r\n self.trafficRectangleRoadLevelLabel = QtWidgets.QLabel(self.verticalLayoutWidget_3)\r\n font = QtGui.QFont()\r\n font.setFamily(\"微软雅黑 Light\")\r\n font.setPointSize(12)\r\n self.trafficRectangleRoadLevelLabel.setFont(font)\r\n self.trafficRectangleRoadLevelLabel.setObjectName(\"trafficRectangleRoadLevelLabel\")\r\n self.trafficSituationRectangleRoadHorizontal2.addWidget(self.trafficRectangleRoadLevelLabel)\r\n self.trafficRectangleRoadLevelComboBox = 
QtWidgets.QComboBox(self.verticalLayoutWidget_3)\r\n font = QtGui.QFont()\r\n font.setFamily(\"微软雅黑 Light\")\r\n font.setPointSize(12)\r\n self.trafficRectangleRoadLevelComboBox.setFont(font)\r\n self.trafficRectangleRoadLevelComboBox.setObjectName(\"trafficRectangleRoadLevelComboBox\")\r\n self.trafficRectangleRoadLevelComboBox.addItem(\"\")\r\n self.trafficRectangleRoadLevelComboBox.addItem(\"\")\r\n self.trafficRectangleRoadLevelComboBox.addItem(\"\")\r\n self.trafficRectangleRoadLevelComboBox.addItem(\"\")\r\n self.trafficRectangleRoadLevelComboBox.addItem(\"\")\r\n self.trafficRectangleRoadLevelComboBox.addItem(\"\")\r\n self.trafficRectangleRoadLevelComboBox.addItem(\"\")\r\n self.trafficSituationRectangleRoadHorizontal2.addWidget(self.trafficRectangleRoadLevelComboBox)\r\n self.trafficRectangleRoadLevelSearchButton = QtWidgets.QPushButton(self.verticalLayoutWidget_3)\r\n font = QtGui.QFont()\r\n font.setFamily(\"微软雅黑 Light\")\r\n font.setPointSize(12)\r\n self.trafficRectangleRoadLevelSearchButton.setFont(font)\r\n self.trafficRectangleRoadLevelSearchButton.setObjectName(\"trafficRectangleRoadLevelSearchButton\")\r\n self.trafficSituationRectangleRoadHorizontal2.addWidget(self.trafficRectangleRoadLevelSearchButton)\r\n self.trafficSituationRectangleRoadHorizontal2.setStretch(0, 6)\r\n self.trafficSituationRectangleRoadHorizontal2.setStretch(1, 2)\r\n self.trafficSituationRectangleRoadHorizontal2.setStretch(2, 2)\r\n self.trafficSituationRectangleRoadVertical1.addLayout(self.trafficSituationRectangleRoadHorizontal2)\r\n self.trafficSituationRectangleRoadHorizontal3 = QtWidgets.QHBoxLayout()\r\n self.trafficSituationRectangleRoadHorizontal3.setContentsMargins(10, -1, 10, -1)\r\n self.trafficSituationRectangleRoadHorizontal3.setObjectName(\"trafficSituationRectangleRoadHorizontal3\")\r\n self.trafficRectangleReadRoadPhotoView = QtWidgets.QGraphicsView(self.verticalLayoutWidget_3)\r\n font = QtGui.QFont()\r\n font.setFamily(\"微软雅黑 Light\")\r\n 
font.setPointSize(12)\r\n self.trafficRectangleReadRoadPhotoView.setFont(font)\r\n self.trafficRectangleReadRoadPhotoView.setStyleSheet(\"QGraphicsView{\\n\"\r\n \" color: Black;\\n\"\r\n \" background: #F0F0F0;\\n\"\r\n \" NoFrame;\\n\"\r\n \"}\")\r\n self.trafficRectangleReadRoadPhotoView.setFrameShape(QtWidgets.QFrame.NoFrame)\r\n self.trafficRectangleReadRoadPhotoView.setFrameShadow(QtWidgets.QFrame.Sunken)\r\n self.trafficRectangleReadRoadPhotoView.setObjectName(\"trafficRectangleReadRoadPhotoView\")\r\n self.trafficSituationRectangleRoadHorizontal3.addWidget(self.trafficRectangleReadRoadPhotoView)\r\n self.trafficRectangleRoadTextEdit = QtWidgets.QTextEdit(self.verticalLayoutWidget_3)\r\n font = QtGui.QFont()\r\n font.setFamily(\"微软雅黑 Light\")\r\n font.setPointSize(12)\r\n self.trafficRectangleRoadTextEdit.setFont(font)\r\n self.trafficRectangleRoadTextEdit.setStyleSheet(\"QTextEdit{\\n\"\r\n \" color: Black;\\n\"\r\n \" background: #F0F0F0;\\n\"\r\n \" NoFrame;\\n\"\r\n \"}\")\r\n self.trafficRectangleRoadTextEdit.setFrameShape(QtWidgets.QFrame.NoFrame)\r\n self.trafficRectangleRoadTextEdit.setObjectName(\"trafficRectangleRoadTextEdit\")\r\n self.trafficRectangleRoadTextEdit.setReadOnly(True)\r\n self.trafficRectangleRoadTextEdit.setCursor(QtCore.Qt.ArrowCursor)\r\n self.trafficRectangleRoadTextEdit.setContextMenuPolicy(QtCore.Qt.NoContextMenu)\r\n self.trafficRectangleRoadTextEdit.setTextInteractionFlags(QtCore.Qt.NoTextInteraction)\r\n self.trafficSituationRectangleRoadHorizontal3.addWidget(self.trafficRectangleRoadTextEdit)\r\n self.trafficSituationRectangleRoadHorizontal3.setStretch(0, 2)\r\n self.trafficSituationRectangleRoadHorizontal3.setStretch(1, 1)\r\n self.trafficSituationRectangleRoadVertical1.addLayout(self.trafficSituationRectangleRoadHorizontal3)\r\n self.trafficSituationStackedWidget.addWidget(self.rectangleRoadTrafficSituationPage)\r\n self.polygonRoadTrafficSituationPage = QtWidgets.QWidget()\r\n 
self.polygonRoadTrafficSituationPage.setObjectName(\"polygonRoadTrafficSituationPage\")\r\n self.verticalLayoutWidget_4 = QtWidgets.QWidget(self.polygonRoadTrafficSituationPage)\r\n self.verticalLayoutWidget_4.setGeometry(QtCore.QRect(0, 10, 701, 481))\r\n self.verticalLayoutWidget_4.setObjectName(\"verticalLayoutWidget_4\")\r\n self.trafficSituationPolygonRoadVertical1 = QtWidgets.QVBoxLayout(self.verticalLayoutWidget_4)\r\n self.trafficSituationPolygonRoadVertical1.setContentsMargins(0, 0, 0, 0)\r\n self.trafficSituationPolygonRoadVertical1.setObjectName(\"trafficSituationPolygonRoadVertical1\")\r\n self.trafficSituationPolygonRoadHorizontal1 = QtWidgets.QHBoxLayout()\r\n self.trafficSituationPolygonRoadHorizontal1.setContentsMargins(10, -1, 10, 10)\r\n self.trafficSituationPolygonRoadHorizontal1.setObjectName(\"trafficSituationPolygonRoadHorizontal1\")\r\n self.trafficPolygonRoadSearchPositionNameLabel = QtWidgets.QLabel(self.verticalLayoutWidget_4)\r\n font = QtGui.QFont()\r\n font.setFamily(\"微软雅黑 Light\")\r\n font.setPointSize(12)\r\n self.trafficPolygonRoadSearchPositionNameLabel.setFont(font)\r\n self.trafficPolygonRoadSearchPositionNameLabel.setObjectName(\"trafficPolygonRoadSearchPositionNameLabel\")\r\n self.trafficSituationPolygonRoadHorizontal1.addWidget(self.trafficPolygonRoadSearchPositionNameLabel)\r\n self.trafficPolygonRoadSearchPositionLineEdit = QtWidgets.QLineEdit(self.verticalLayoutWidget_4)\r\n font = QtGui.QFont()\r\n font.setFamily(\"微软雅黑 Light\")\r\n font.setPointSize(12)\r\n self.trafficPolygonRoadSearchPositionLineEdit.setFont(font)\r\n self.trafficPolygonRoadSearchPositionLineEdit.setObjectName(\"trafficPolygonRoadSearchPositionLineEdit\")\r\n self.trafficSituationPolygonRoadHorizontal1.addWidget(self.trafficPolygonRoadSearchPositionLineEdit)\r\n self.trafficSituationPolygonRoadHorizontal1.setStretch(0, 1)\r\n self.trafficSituationPolygonRoadHorizontal1.setStretch(1, 1)\r\n 
self.trafficSituationPolygonRoadVertical1.addLayout(self.trafficSituationPolygonRoadHorizontal1)\r\n self.trafficSituationPolygonRoadHorizontal2 = QtWidgets.QHBoxLayout()\r\n self.trafficSituationPolygonRoadHorizontal2.setContentsMargins(10, -1, 10, 10)\r\n self.trafficSituationPolygonRoadHorizontal2.setObjectName(\"trafficSituationPolygonRoadHorizontal2\")\r\n self.trafficPolygonRoadLevelLabel = QtWidgets.QLabel(self.verticalLayoutWidget_4)\r\n font = QtGui.QFont()\r\n font.setFamily(\"微软雅黑 Light\")\r\n font.setPointSize(12)\r\n self.trafficPolygonRoadLevelLabel.setFont(font)\r\n self.trafficPolygonRoadLevelLabel.setObjectName(\"trafficPolygonRoadLevelLabel\")\r\n self.trafficSituationPolygonRoadHorizontal2.addWidget(self.trafficPolygonRoadLevelLabel)\r\n self.trafficPolygonRoadLevelComboBox = QtWidgets.QComboBox(self.verticalLayoutWidget_4)\r\n font = QtGui.QFont()\r\n font.setFamily(\"微软雅黑 Light\")\r\n font.setPointSize(12)\r\n self.trafficPolygonRoadLevelComboBox.setFont(font)\r\n self.trafficPolygonRoadLevelComboBox.setObjectName(\"trafficPolygonRoadLevelComboBox\")\r\n self.trafficPolygonRoadLevelComboBox.addItem(\"\")\r\n self.trafficPolygonRoadLevelComboBox.addItem(\"\")\r\n self.trafficPolygonRoadLevelComboBox.addItem(\"\")\r\n self.trafficPolygonRoadLevelComboBox.addItem(\"\")\r\n self.trafficPolygonRoadLevelComboBox.addItem(\"\")\r\n self.trafficPolygonRoadLevelComboBox.addItem(\"\")\r\n self.trafficPolygonRoadLevelComboBox.addItem(\"\")\r\n self.trafficSituationPolygonRoadHorizontal2.addWidget(self.trafficPolygonRoadLevelComboBox)\r\n self.trafficPolygonRoadLevelSearchButton = QtWidgets.QPushButton(self.verticalLayoutWidget_4)\r\n font = QtGui.QFont()\r\n font.setFamily(\"微软雅黑 Light\")\r\n font.setPointSize(12)\r\n self.trafficPolygonRoadLevelSearchButton.setFont(font)\r\n self.trafficPolygonRoadLevelSearchButton.setObjectName(\"trafficPolygonRoadLevelSearchButton\")\r\n 
self.trafficSituationPolygonRoadHorizontal2.addWidget(self.trafficPolygonRoadLevelSearchButton)\r\n self.trafficSituationPolygonRoadHorizontal2.setStretch(0, 2)\r\n self.trafficSituationPolygonRoadHorizontal2.setStretch(1, 1)\r\n self.trafficSituationPolygonRoadHorizontal2.setStretch(2, 1)\r\n self.trafficSituationPolygonRoadVertical1.addLayout(self.trafficSituationPolygonRoadHorizontal2)\r\n self.trafficSituationPolygonRoadHorizontal3 = QtWidgets.QHBoxLayout()\r\n self.trafficSituationPolygonRoadHorizontal3.setContentsMargins(10, -1, 10, -1)\r\n self.trafficSituationPolygonRoadHorizontal3.setObjectName(\"trafficSituationPolygonRoadHorizontal3\")\r\n self.trafficPolygonRoadPhotoView = QtWidgets.QGraphicsView(self.verticalLayoutWidget_4)\r\n font = QtGui.QFont()\r\n font.setFamily(\"微软雅黑 Light\")\r\n font.setPointSize(12)\r\n self.trafficPolygonRoadPhotoView.setFont(font)\r\n self.trafficPolygonRoadPhotoView.setStyleSheet(\"QGraphicsView{\\n\"\r\n \" color: Black;\\n\"\r\n \" background: #F0F0F0;\\n\"\r\n \" NoFrame;\\n\"\r\n \"}\")\r\n self.trafficPolygonRoadPhotoView.setFrameShape(QtWidgets.QFrame.NoFrame)\r\n self.trafficPolygonRoadPhotoView.setObjectName(\"trafficPolygonRoadPhotoView\")\r\n self.trafficSituationPolygonRoadHorizontal3.addWidget(self.trafficPolygonRoadPhotoView)\r\n self.trafficPolygonRoadTextEdit = QtWidgets.QTextEdit(self.verticalLayoutWidget_4)\r\n font = QtGui.QFont()\r\n font.setFamily(\"微软雅黑 Light\")\r\n font.setPointSize(12)\r\n self.trafficPolygonRoadTextEdit.setFont(font)\r\n self.trafficPolygonRoadTextEdit.setStyleSheet(\"QTextEdit{\\n\"\r\n \" color: Black;\\n\"\r\n \" background: #F0F0F0;\\n\"\r\n \" NoFrame;\\n\"\r\n \"}\")\r\n self.trafficPolygonRoadTextEdit.setFrameShape(QtWidgets.QFrame.NoFrame)\r\n self.trafficPolygonRoadTextEdit.setObjectName(\"trafficPolygonRoadTextEdit\")\r\n self.trafficPolygonRoadTextEdit.setReadOnly(True)\r\n self.trafficPolygonRoadTextEdit.setCursor(QtCore.Qt.ArrowCursor)\r\n 
self.trafficPolygonRoadTextEdit.setContextMenuPolicy(QtCore.Qt.NoContextMenu)\r\n self.trafficPolygonRoadTextEdit.setTextInteractionFlags(QtCore.Qt.NoTextInteraction)\r\n self.trafficSituationPolygonRoadHorizontal3.addWidget(self.trafficPolygonRoadTextEdit)\r\n self.trafficSituationPolygonRoadHorizontal3.setStretch(0, 2)\r\n self.trafficSituationPolygonRoadHorizontal3.setStretch(1, 1)\r\n self.trafficSituationPolygonRoadVertical1.addLayout(self.trafficSituationPolygonRoadHorizontal3)\r\n self.trafficSituationStackedWidget.addWidget(self.polygonRoadTrafficSituationPage)\r\n self.aroundRoadTrafficSituationPage = QtWidgets.QWidget()\r\n self.aroundRoadTrafficSituationPage.setObjectName(\"aroundRoadTrafficSituationPage\")\r\n self.verticalLayoutWidget_5 = QtWidgets.QWidget(self.aroundRoadTrafficSituationPage)\r\n self.verticalLayoutWidget_5.setGeometry(QtCore.QRect(0, 10, 701, 481))\r\n self.verticalLayoutWidget_5.setObjectName(\"verticalLayoutWidget_5\")\r\n self.trafficSituationAroundRoadVertical1 = QtWidgets.QVBoxLayout(self.verticalLayoutWidget_5)\r\n self.trafficSituationAroundRoadVertical1.setContentsMargins(0, 0, 0, 0)\r\n self.trafficSituationAroundRoadVertical1.setObjectName(\"trafficSituationAroundRoadVertical1\")\r\n self.trafficSituationAroundRoadHorizontal1 = QtWidgets.QHBoxLayout()\r\n self.trafficSituationAroundRoadHorizontal1.setContentsMargins(10, -1, 10, 10)\r\n self.trafficSituationAroundRoadHorizontal1.setObjectName(\"trafficSituationAroundRoadHorizontal1\")\r\n self.trafficAroundRoadSearchPositionNameLabel = QtWidgets.QLabel(self.verticalLayoutWidget_5)\r\n font = QtGui.QFont()\r\n font.setFamily(\"微软雅黑 Light\")\r\n font.setPointSize(12)\r\n self.trafficAroundRoadSearchPositionNameLabel.setFont(font)\r\n self.trafficAroundRoadSearchPositionNameLabel.setObjectName(\"trafficAroundRoadSearchPositionNameLabel\")\r\n self.trafficSituationAroundRoadHorizontal1.addWidget(self.trafficAroundRoadSearchPositionNameLabel)\r\n 
self.trafficAroundRoadSearchPositionNameLineEdit = QtWidgets.QLineEdit(self.verticalLayoutWidget_5)\r\n font = QtGui.QFont()\r\n font.setFamily(\"微软雅黑 Light\")\r\n font.setPointSize(12)\r\n self.trafficAroundRoadSearchPositionNameLineEdit.setFont(font)\r\n self.trafficAroundRoadSearchPositionNameLineEdit.setText(\"\")\r\n self.trafficAroundRoadSearchPositionNameLineEdit.setObjectName(\"trafficAroundRoadSearchPositionNameLineEdit\")\r\n self.trafficSituationAroundRoadHorizontal1.addWidget(self.trafficAroundRoadSearchPositionNameLineEdit)\r\n self.trafficSituationAroundRoadHorizontal1.setStretch(0, 1)\r\n self.trafficSituationAroundRoadHorizontal1.setStretch(1, 1)\r\n self.trafficSituationAroundRoadVertical1.addLayout(self.trafficSituationAroundRoadHorizontal1)\r\n self.trafficSituationAroundRoadHorizontal2 = QtWidgets.QHBoxLayout()\r\n self.trafficSituationAroundRoadHorizontal2.setContentsMargins(10, -1, 10, 10)\r\n self.trafficSituationAroundRoadHorizontal2.setObjectName(\"trafficSituationAroundRoadHorizontal2\")\r\n self.trafficAroundRoadLevelLabel = QtWidgets.QLabel(self.verticalLayoutWidget_5)\r\n font = QtGui.QFont()\r\n font.setFamily(\"微软雅黑 Light\")\r\n font.setPointSize(12)\r\n self.trafficAroundRoadLevelLabel.setFont(font)\r\n self.trafficAroundRoadLevelLabel.setObjectName(\"trafficAroundRoadLevelLabel\")\r\n self.trafficSituationAroundRoadHorizontal2.addWidget(self.trafficAroundRoadLevelLabel)\r\n self.trafficAroundRoadLevelComboBox = QtWidgets.QComboBox(self.verticalLayoutWidget_5)\r\n font = QtGui.QFont()\r\n font.setFamily(\"微软雅黑 Light\")\r\n font.setPointSize(12)\r\n self.trafficAroundRoadLevelComboBox.setFont(font)\r\n self.trafficAroundRoadLevelComboBox.setObjectName(\"trafficAroundRoadLevelComboBox\")\r\n self.trafficAroundRoadLevelComboBox.addItem(\"\")\r\n self.trafficAroundRoadLevelComboBox.addItem(\"\")\r\n self.trafficAroundRoadLevelComboBox.addItem(\"\")\r\n self.trafficAroundRoadLevelComboBox.addItem(\"\")\r\n 
self.trafficAroundRoadLevelComboBox.addItem(\"\")\r\n self.trafficAroundRoadLevelComboBox.addItem(\"\")\r\n self.trafficAroundRoadLevelComboBox.addItem(\"\")\r\n self.trafficSituationAroundRoadHorizontal2.addWidget(self.trafficAroundRoadLevelComboBox)\r\n self.trafficAroundRoadLevelSearchButton = QtWidgets.QPushButton(self.verticalLayoutWidget_5)\r\n font = QtGui.QFont()\r\n font.setFamily(\"微软雅黑 Light\")\r\n font.setPointSize(12)\r\n self.trafficAroundRoadLevelSearchButton.setFont(font)\r\n self.trafficAroundRoadLevelSearchButton.setObjectName(\"trafficAroundRoadLevelSearchButton\")\r\n self.trafficSituationAroundRoadHorizontal2.addWidget(self.trafficAroundRoadLevelSearchButton)\r\n self.trafficSituationAroundRoadHorizontal2.setStretch(0, 2)\r\n self.trafficSituationAroundRoadHorizontal2.setStretch(1, 1)\r\n self.trafficSituationAroundRoadHorizontal2.setStretch(2, 1)\r\n self.trafficSituationAroundRoadVertical1.addLayout(self.trafficSituationAroundRoadHorizontal2)\r\n self.trafficSituationAroundRoadHorizontal3 = QtWidgets.QHBoxLayout()\r\n self.trafficSituationAroundRoadHorizontal3.setContentsMargins(10, -1, 10, -1)\r\n self.trafficSituationAroundRoadHorizontal3.setObjectName(\"trafficSituationAroundRoadHorizontal3\")\r\n self.trafficAroundRoadPhotoView = QtWidgets.QGraphicsView(self.verticalLayoutWidget_5)\r\n font = QtGui.QFont()\r\n font.setFamily(\"微软雅黑 Light\")\r\n font.setPointSize(12)\r\n self.trafficAroundRoadPhotoView.setFont(font)\r\n self.trafficAroundRoadPhotoView.setStyleSheet(\"QGraphicsView{\\n\"\r\n \" color: Black;\\n\"\r\n \" background: #F0F0F0;\\n\"\r\n \" NoFrame;\\n\"\r\n \"}\")\r\n self.trafficAroundRoadPhotoView.setFrameShape(QtWidgets.QFrame.NoFrame)\r\n self.trafficAroundRoadPhotoView.setObjectName(\"trafficAroundRoadPhotoView\")\r\n self.trafficSituationAroundRoadHorizontal3.addWidget(self.trafficAroundRoadPhotoView)\r\n self.trafficAroundRoadTextEdit = QtWidgets.QTextEdit(self.verticalLayoutWidget_5)\r\n font = QtGui.QFont()\r\n 
font.setFamily(\"微软雅黑 Light\")\r\n font.setPointSize(12)\r\n self.trafficAroundRoadTextEdit.setFont(font)\r\n self.trafficAroundRoadTextEdit.setStyleSheet(\"QTextEdit{\\n\"\r\n \" color: Black;\\n\"\r\n \" background: #F0F0F0;\\n\"\r\n \" NoFrame;\\n\"\r\n \"}\")\r\n self.trafficAroundRoadTextEdit.setFrameShape(QtWidgets.QFrame.NoFrame)\r\n self.trafficAroundRoadTextEdit.setObjectName(\"trafficAroundRoadTextEdit\")\r\n self.trafficAroundRoadTextEdit.setReadOnly(True)\r\n self.trafficAroundRoadTextEdit.setCursor(QtCore.Qt.ArrowCursor)\r\n self.trafficAroundRoadTextEdit.setContextMenuPolicy(QtCore.Qt.NoContextMenu)\r\n self.trafficAroundRoadTextEdit.setTextInteractionFlags(QtCore.Qt.NoTextInteraction)\r\n self.trafficSituationAroundRoadHorizontal3.addWidget(self.trafficAroundRoadTextEdit)\r\n self.trafficSituationAroundRoadHorizontal3.setStretch(0, 2)\r\n self.trafficSituationAroundRoadHorizontal3.setStretch(1, 1)\r\n self.trafficSituationAroundRoadVertical1.addLayout(self.trafficSituationAroundRoadHorizontal3)\r\n self.trafficSituationStackedWidget.addWidget(self.aroundRoadTrafficSituationPage)\r\n self.trafficSituationVertical1.addWidget(self.trafficSituationStackedWidget)\r\n self.trafficSituationHorizontal1.addLayout(self.trafficSituationVertical1)\r\n self.trafficSituationLine4 = QtWidgets.QFrame(self.horizontalLayoutWidget_12)\r\n self.trafficSituationLine4.setFrameShape(QtWidgets.QFrame.VLine)\r\n self.trafficSituationLine4.setFrameShadow(QtWidgets.QFrame.Sunken)\r\n self.trafficSituationLine4.setObjectName(\"trafficSituationLine4\")\r\n self.trafficSituationHorizontal1.addWidget(self.trafficSituationLine4)\r\n self.trafficSituationVertical2 = QtWidgets.QVBoxLayout()\r\n self.trafficSituationVertical2.setObjectName(\"trafficSituationVertical2\")\r\n self.trafficSituationServiceLabel = QtWidgets.QLabel(self.horizontalLayoutWidget_12)\r\n font = QtGui.QFont()\r\n font.setFamily(\"微软雅黑 Light\")\r\n font.setPointSize(16)\r\n 
self.trafficSituationServiceLabel.setFont(font)\r\n self.trafficSituationServiceLabel.setObjectName(\"trafficSituationServiceLabel\")\r\n self.trafficSituationServiceLabel.setMargin(10)\r\n self.trafficSituationVertical2.addWidget(self.trafficSituationServiceLabel)\r\n self.trafficSituationDetailInformationLabel = QtWidgets.QLabel(self.horizontalLayoutWidget_12)\r\n font = QtGui.QFont()\r\n font.setFamily(\"微软雅黑 Light\")\r\n font.setPointSize(12)\r\n self.trafficSituationDetailInformationLabel.setFont(font)\r\n self.trafficSituationDetailInformationLabel.setWordWrap(True)\r\n self.trafficSituationDetailInformationLabel.setOpenExternalLinks(False)\r\n self.trafficSituationDetailInformationLabel.setObjectName(\"trafficSituationDetailInformationLabel\")\r\n self.trafficSituationDetailInformationLabel.setMargin(10)\r\n self.trafficSituationVertical2.addWidget(self.trafficSituationDetailInformationLabel)\r\n self.trafficSituationLine2 = QtWidgets.QFrame(self.horizontalLayoutWidget_12)\r\n self.trafficSituationLine2.setFrameShape(QtWidgets.QFrame.HLine)\r\n self.trafficSituationLine2.setFrameShadow(QtWidgets.QFrame.Sunken)\r\n self.trafficSituationLine2.setObjectName(\"trafficSituationLine2\")\r\n self.trafficSituationVertical2.addWidget(self.trafficSituationLine2)\r\n self.trafficSituationInputContextLabel = QtWidgets.QLabel(self.horizontalLayoutWidget_12)\r\n font = QtGui.QFont()\r\n font.setFamily(\"微软雅黑 Light\")\r\n font.setPointSize(16)\r\n self.trafficSituationInputContextLabel.setFont(font)\r\n self.trafficSituationInputContextLabel.setObjectName(\"trafficSituationInputContextLabel\")\r\n self.trafficSituationInputContextLabel.setMargin(10)\r\n self.trafficSituationVertical2.addWidget(self.trafficSituationInputContextLabel)\r\n self.trafficSituationInputInformationLabel = QtWidgets.QLabel(self.horizontalLayoutWidget_12)\r\n font = QtGui.QFont()\r\n font.setFamily(\"微软雅黑 Light\")\r\n font.setPointSize(12)\r\n 
self.trafficSituationInputInformationLabel.setFont(font)\r\n self.trafficSituationInputInformationLabel.setWordWrap(True)\r\n self.trafficSituationInputInformationLabel.setObjectName(\"trafficSituationInputInformationLabel\")\r\n self.trafficSituationInputInformationLabel.setMargin(10)\r\n self.trafficSituationVertical2.addWidget(self.trafficSituationInputInformationLabel)\r\n self.trafficSituationLine3 = QtWidgets.QFrame(self.horizontalLayoutWidget_12)\r\n self.trafficSituationLine3.setFrameShape(QtWidgets.QFrame.HLine)\r\n self.trafficSituationLine3.setFrameShadow(QtWidgets.QFrame.Sunken)\r\n self.trafficSituationLine3.setObjectName(\"trafficSituationLine3\")\r\n self.trafficSituationVertical2.addWidget(self.trafficSituationLine3)\r\n self.trafficSituationHorizontal4 = QtWidgets.QHBoxLayout()\r\n self.trafficSituationHorizontal4.setContentsMargins(10, -1, -1, -1)\r\n self.trafficSituationHorizontal4.setObjectName(\"trafficSituationHorizontal4\")\r\n self.trafficSituationFeedBackPhotoLabel = QtWidgets.QLabel(self.horizontalLayoutWidget_12)\r\n self.trafficSituationFeedBackPhotoLabel.setMaximumSize(QtCore.QSize(20, 20))\r\n self.trafficSituationFeedBackPhotoLabel.setText(\"\")\r\n self.trafficSituationFeedBackPhotoLabel.setObjectName(\"trafficSituationFeedBackPhotoLabel\")\r\n self.trafficPixmap = QPixmap(\":/FeedBack.png\")\r\n self.trafficSituationFeedBackPhotoLabel.setPixmap(self.trafficPixmap)\r\n self.trafficSituationFeedBackPhotoLabel.setScaledContents(True)\r\n self.trafficSituationHorizontal4.addWidget(self.trafficSituationFeedBackPhotoLabel)\r\n self.trafficSituationFeedBackLabel = QtWidgets.QLabel(self.horizontalLayoutWidget_12)\r\n self.trafficSituationFeedBackLabel.setMaximumSize(QtCore.QSize(16777215, 20))\r\n font = QtGui.QFont()\r\n font.setFamily(\"微软雅黑 Light\")\r\n font.setPointSize(12)\r\n self.trafficSituationFeedBackLabel.setFont(font)\r\n self.trafficSituationFeedBackLabel.setOpenExternalLinks(True)\r\n 
self.trafficSituationFeedBackLabel.setObjectName(\"trafficSituationFeedBackLabel\")\r\n self.trafficSituationHorizontal4.addWidget(self.trafficSituationFeedBackLabel)\r\n self.trafficSituationVertical2.addLayout(self.trafficSituationHorizontal4)\r\n spacerItem9 = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)\r\n self.trafficSituationVertical2.addItem(spacerItem9)\r\n self.trafficSituationHorizontal1.addLayout(self.trafficSituationVertical2)\r\n self.trafficSituationHorizontal1.setStretch(1, 2)\r\n self.trafficSituationHorizontal1.setStretch(3, 1)\r\n self.amapProgramStackedWidget.addWidget(self.trafficSituationPage)\r\n self.trafficSituationAnalysisSystemPage = QtWidgets.QWidget()\r\n self.trafficSituationAnalysisSystemPage.setObjectName(\"trafficSituationAnalysisSystemPage\")\r\n self.horizontalLayoutWidget_17 = QtWidgets.QWidget(self.trafficSituationAnalysisSystemPage)\r\n self.horizontalLayoutWidget_17.setGeometry(QtCore.QRect(0, 0, 1101, 721))\r\n self.horizontalLayoutWidget_17.setObjectName(\"horizontalLayoutWidget_17\")\r\n self.trafficSituationAnalysisSystemHorizontal1 = QtWidgets.QHBoxLayout(self.horizontalLayoutWidget_17)\r\n self.trafficSituationAnalysisSystemHorizontal1.setContentsMargins(10, 10, 10, 10)\r\n self.trafficSituationAnalysisSystemHorizontal1.setObjectName(\"trafficSituationAnalysisSystemHorizontal1\")\r\n self.trafficSituationAnalysisSystemLine6 = QtWidgets.QFrame(self.horizontalLayoutWidget_17)\r\n self.trafficSituationAnalysisSystemLine6.setFrameShape(QtWidgets.QFrame.VLine)\r\n self.trafficSituationAnalysisSystemLine6.setFrameShadow(QtWidgets.QFrame.Sunken)\r\n self.trafficSituationAnalysisSystemLine6.setObjectName(\"trafficSituationAnalysisSystemLine6\")\r\n self.trafficSituationAnalysisSystemHorizontal1.addWidget(self.trafficSituationAnalysisSystemLine6)\r\n self.trafficSituationAnalysisSystemVertical1 = QtWidgets.QVBoxLayout()\r\n 
self.trafficSituationAnalysisSystemVertical1.setObjectName(\"trafficSituationAnalysisSystemVertical1\")\r\n self.trafficSituationAnalysisSystemLabel = QtWidgets.QLabel(self.horizontalLayoutWidget_17)\r\n font = QtGui.QFont()\r\n font.setFamily(\"微软雅黑\")\r\n font.setPointSize(20)\r\n self.trafficSituationAnalysisSystemLabel.setFont(font)\r\n self.trafficSituationAnalysisSystemLabel.setAlignment(QtCore.Qt.AlignCenter)\r\n self.trafficSituationAnalysisSystemLabel.setObjectName(\"trafficSituationAnalysisSystemLabel\")\r\n self.trafficSituationAnalysisSystemLabel.setMargin(10)\r\n self.trafficSituationAnalysisSystemVertical1.addWidget(self.trafficSituationAnalysisSystemLabel)\r\n self.trafficSituationAnalysisSystemResultContextLabel = QtWidgets.QLabel(self.horizontalLayoutWidget_17)\r\n font = QtGui.QFont()\r\n font.setFamily(\"微软雅黑 Light\")\r\n font.setPointSize(16)\r\n self.trafficSituationAnalysisSystemResultContextLabel.setFont(font)\r\n self.trafficSituationAnalysisSystemResultContextLabel.setObjectName(\r\n \"trafficSituationAnalysisSystemResultContextLabel\")\r\n self.trafficSituationAnalysisSystemResultContextLabel.setMargin(10)\r\n self.trafficSituationAnalysisSystemVertical1.addWidget(self.trafficSituationAnalysisSystemResultContextLabel)\r\n self.trafficSituationAnalysisSystemLine1 = QtWidgets.QFrame(self.horizontalLayoutWidget_17)\r\n self.trafficSituationAnalysisSystemLine1.setFrameShape(QtWidgets.QFrame.HLine)\r\n self.trafficSituationAnalysisSystemLine1.setFrameShadow(QtWidgets.QFrame.Sunken)\r\n self.trafficSituationAnalysisSystemLine1.setObjectName(\"trafficSituationAnalysisSystemLine1\")\r\n self.trafficSituationAnalysisSystemVertical1.addWidget(self.trafficSituationAnalysisSystemLine1)\r\n self.trafficSituationAnalysisSystemHorizontal2 = QtWidgets.QHBoxLayout()\r\n self.trafficSituationAnalysisSystemHorizontal2.setContentsMargins(10, -1, 10, -1)\r\n 
self.trafficSituationAnalysisSystemHorizontal2.setObjectName(\"trafficSituationAnalysisSystemHorizontal2\")\r\n self.trafficSituationAnalysisSystemControlLabel = QtWidgets.QLabel(self.horizontalLayoutWidget_17)\r\n font = QtGui.QFont()\r\n font.setFamily(\"微软雅黑 Light\")\r\n font.setPointSize(12)\r\n self.trafficSituationAnalysisSystemControlLabel.setFont(font)\r\n self.trafficSituationAnalysisSystemControlLabel.setObjectName(\"trafficSituationAnalysisSystemControlLabel\")\r\n self.trafficSituationAnalysisSystemHorizontal2.addWidget(self.trafficSituationAnalysisSystemControlLabel)\r\n self.trafficSituationAnalysisSystemStartButton = QtWidgets.QPushButton(self.horizontalLayoutWidget_17)\r\n font = QtGui.QFont()\r\n font.setFamily(\"微软雅黑 Light\")\r\n font.setPointSize(12)\r\n self.trafficSituationAnalysisSystemStartButton.setFont(font)\r\n self.trafficSituationAnalysisSystemStartButton.setObjectName(\"trafficSituationAnalysisSystemStartButton\")\r\n self.trafficSituationAnalysisSystemStartButton.setEnabled(True)\r\n self.trafficSituationAnalysisSystemHorizontal2.addWidget(self.trafficSituationAnalysisSystemStartButton)\r\n self.trafficSituationAnalysisSystemStopButton = QtWidgets.QPushButton(self.horizontalLayoutWidget_17)\r\n font = QtGui.QFont()\r\n font.setFamily(\"微软雅黑 Light\")\r\n font.setPointSize(12)\r\n self.trafficSituationAnalysisSystemStopButton.setFont(font)\r\n self.trafficSituationAnalysisSystemStopButton.setObjectName(\"trafficSituationAnalysisSystemStopButton\")\r\n self.trafficSituationAnalysisSystemStopButton.setEnabled(True)\r\n self.trafficSituationAnalysisSystemHorizontal2.addWidget(self.trafficSituationAnalysisSystemStopButton)\r\n self.trafficSituationAnalysisSystemHorizontal2.setStretch(0, 3)\r\n self.trafficSituationAnalysisSystemHorizontal2.setStretch(1, 1)\r\n self.trafficSituationAnalysisSystemHorizontal2.setStretch(2, 1)\r\n self.trafficSituationAnalysisSystemHorizontal2.setStretch(3, 1)\r\n 
self.trafficSituationAnalysisSystemVertical1.addLayout(self.trafficSituationAnalysisSystemHorizontal2)\r\n self.trafficSituationAnalysisSystemLine2 = QtWidgets.QFrame(self.horizontalLayoutWidget_17)\r\n self.trafficSituationAnalysisSystemLine2.setFrameShape(QtWidgets.QFrame.HLine)\r\n self.trafficSituationAnalysisSystemLine2.setFrameShadow(QtWidgets.QFrame.Sunken)\r\n self.trafficSituationAnalysisSystemLine2.setObjectName(\"trafficSituationAnalysisSystemLine2\")\r\n self.trafficSituationAnalysisSystemVertical1.addWidget(self.trafficSituationAnalysisSystemLine2)\r\n self.trafficSituationAnalysisSystemHorizontal3 = QtWidgets.QHBoxLayout()\r\n self.trafficSituationAnalysisSystemHorizontal3.setContentsMargins(10, -1, 10, -1)\r\n self.trafficSituationAnalysisSystemHorizontal3.setObjectName(\"trafficSituationAnalysisSystemHorizontal3\")\r\n self.trafficSituationAnalysisSystemAnalysisStateLabel = QtWidgets.QLabel(self.horizontalLayoutWidget_17)\r\n font = QtGui.QFont()\r\n font.setFamily(\"微软雅黑 Light\")\r\n font.setPointSize(12)\r\n self.trafficSituationAnalysisSystemAnalysisStateLabel.setFont(font)\r\n self.trafficSituationAnalysisSystemAnalysisStateLabel.setObjectName(\r\n \"trafficSituationAnalysisSystemAnalysisStateLabel\")\r\n self.trafficSituationAnalysisSystemHorizontal3.addWidget(self.trafficSituationAnalysisSystemAnalysisStateLabel)\r\n self.trafficSituationAnalysisSystemCityComboBox = QtWidgets.QComboBox(self.horizontalLayoutWidget_17)\r\n font = QtGui.QFont()\r\n font.setFamily(\"微软雅黑 Light\")\r\n font.setPointSize(12)\r\n self.trafficSituationAnalysisSystemCityComboBox.setFont(font)\r\n self.trafficSituationAnalysisSystemCityComboBox.setObjectName(\"trafficSituationAnalysisSystemCityComboBox\")\r\n self.trafficSituationAnalysisSystemCityComboBox.addItem(\"\")\r\n self.trafficSituationAnalysisSystemCityComboBox.addItem(\"\")\r\n self.trafficSituationAnalysisSystemCityComboBox.addItem(\"\")\r\n self.trafficSituationAnalysisSystemCityComboBox.addItem(\"\")\r\n 
self.trafficSituationAnalysisSystemCityComboBox.addItem(\"\")\r\n self.trafficSituationAnalysisSystemCityComboBox.addItem(\"\")\r\n self.trafficSituationAnalysisSystemCityComboBox.addItem(\"\")\r\n self.trafficSituationAnalysisSystemCityComboBox.addItem(\"\")\r\n self.trafficSituationAnalysisSystemCityComboBox.addItem(\"\")\r\n self.trafficSituationAnalysisSystemCityComboBox.addItem(\"\")\r\n self.trafficSituationAnalysisSystemCityComboBox.addItem(\"\")\r\n self.trafficSituationAnalysisSystemCityComboBox.addItem(\"\")\r\n self.trafficSituationAnalysisSystemHorizontal3.addWidget(self.trafficSituationAnalysisSystemCityComboBox)\r\n self.trafficSituationAnalysisSystemViewButton = QtWidgets.QPushButton(self.horizontalLayoutWidget_17)\r\n font = QtGui.QFont()\r\n font.setFamily(\"微软雅黑 Light\")\r\n font.setPointSize(12)\r\n self.trafficSituationAnalysisSystemViewButton.setFont(font)\r\n self.trafficSituationAnalysisSystemViewButton.setObjectName(\"trafficSituationAnalysisSystemViewButton\")\r\n self.trafficSituationAnalysisSystemViewButton.setEnabled(True)\r\n self.trafficSituationAnalysisSystemHorizontal3.addWidget(self.trafficSituationAnalysisSystemViewButton)\r\n self.trafficSituationAnalysisSystemHorizontal3.setStretch(0, 3)\r\n self.trafficSituationAnalysisSystemHorizontal3.setStretch(1, 1)\r\n self.trafficSituationAnalysisSystemHorizontal3.setStretch(2, 1)\r\n self.trafficSituationAnalysisSystemVertical1.addLayout(self.trafficSituationAnalysisSystemHorizontal3)\r\n self.trafficSituationAnalysisSystemHorizontal4 = QtWidgets.QHBoxLayout()\r\n self.trafficSituationAnalysisSystemHorizontal4.setContentsMargins(10, -1, 10, -1)\r\n self.trafficSituationAnalysisSystemHorizontal4.setObjectName(\"trafficSituationAnalysisSystemHorizontal4\")\r\n chart = QtChart.QChart()\r\n chart.setBackgroundVisible(False)\r\n self.trafficSituationAnalysisSystemPhoto = QChartView(self.horizontalLayoutWidget_17)\r\n self.trafficSituationAnalysisSystemPhoto.setChart(chart)\r\n 
self.trafficSituationAnalysisSystemPhoto.setEnabled(True)\r\n self.trafficSituationAnalysisSystemPhoto.setStyleSheet(\"trafficSituationAnalysisSystemPhoto{\\n\"\r\n \" color: Black;\\n\"\r\n \" background: #F0F0F0;\\n\"\r\n \" NoFrame;\\n\"\r\n \"}\")\r\n self.trafficSituationAnalysisSystemPhoto.setFrameShape(QtWidgets.QFrame.NoFrame)\r\n self.trafficSituationAnalysisSystemPhoto.setObjectName(\"trafficSituationAnalysisSystemPhoto\")\r\n self.trafficSituationAnalysisSystemHorizontal4.addWidget(self.trafficSituationAnalysisSystemPhoto)\r\n self.trafficSituationAnalysisSystemTextEdit = QtWidgets.QTextEdit(self.horizontalLayoutWidget_17)\r\n font = QtGui.QFont()\r\n font.setFamily(\"微软雅黑 Light\")\r\n font.setPointSize(12)\r\n self.trafficSituationAnalysisSystemTextEdit.setFont(font)\r\n self.trafficSituationAnalysisSystemTextEdit.setEnabled(True)\r\n self.trafficSituationAnalysisSystemTextEdit.setStyleSheet(\"QTextEdit{\\n\"\r\n \" color: Black;\\n\"\r\n \" background: #F0F0F0;\\n\"\r\n \" NoFrame;\\n\"\r\n \"}\")\r\n self.trafficSituationAnalysisSystemTextEdit.setFrameShape(QtWidgets.QFrame.NoFrame)\r\n self.trafficSituationAnalysisSystemTextEdit.setObjectName(\"trafficSituationAnalysisSystemTextEdit\")\r\n self.trafficSituationAnalysisSystemTextEdit.setReadOnly(True)\r\n self.trafficSituationAnalysisSystemTextEdit.setCursor(QtCore.Qt.ArrowCursor)\r\n self.trafficSituationAnalysisSystemTextEdit.setContextMenuPolicy(QtCore.Qt.NoContextMenu)\r\n self.trafficSituationAnalysisSystemTextEdit.setTextInteractionFlags(QtCore.Qt.NoTextInteraction)\r\n self.trafficSituationAnalysisSystemHorizontal4.addWidget(self.trafficSituationAnalysisSystemTextEdit)\r\n self.trafficSituationAnalysisSystemHorizontal4.setStretch(0, 2)\r\n self.trafficSituationAnalysisSystemHorizontal4.setStretch(1, 1)\r\n self.trafficSituationAnalysisSystemVertical1.addLayout(self.trafficSituationAnalysisSystemHorizontal4)\r\n 
self.trafficSituationAnalysisSystemHorizontal1.addLayout(self.trafficSituationAnalysisSystemVertical1)\r\n self.trafficSituationAnalysisSystemLine5 = QtWidgets.QFrame(self.horizontalLayoutWidget_17)\r\n self.trafficSituationAnalysisSystemLine5.setFrameShape(QtWidgets.QFrame.VLine)\r\n self.trafficSituationAnalysisSystemLine5.setFrameShadow(QtWidgets.QFrame.Sunken)\r\n self.trafficSituationAnalysisSystemLine5.setObjectName(\"trafficSituationAnalysisSystemLine5\")\r\n self.trafficSituationAnalysisSystemHorizontal1.addWidget(self.trafficSituationAnalysisSystemLine5)\r\n self.trafficSituationAnalysisSystemVertical2 = QtWidgets.QVBoxLayout()\r\n self.trafficSituationAnalysisSystemVertical2.setObjectName(\"trafficSituationAnalysisSystemVertical2\")\r\n self.trafficSituationAnalysisSystemInformationLabel = QtWidgets.QLabel(self.horizontalLayoutWidget_17)\r\n font = QtGui.QFont()\r\n font.setFamily(\"微软雅黑 Light\")\r\n font.setPointSize(16)\r\n self.trafficSituationAnalysisSystemInformationLabel.setFont(font)\r\n self.trafficSituationAnalysisSystemInformationLabel.setObjectName(\r\n \"trafficSituationAnalysisSystemInformationLabel\")\r\n self.trafficSituationAnalysisSystemInformationLabel.setMargin(10)\r\n self.trafficSituationAnalysisSystemVertical2.addWidget(self.trafficSituationAnalysisSystemInformationLabel)\r\n self.trafficSituationAnalysisSystemDetailInformationLabel = QtWidgets.QLabel(self.horizontalLayoutWidget_17)\r\n font = QtGui.QFont()\r\n font.setFamily(\"微软雅黑 Light\")\r\n font.setPointSize(12)\r\n self.trafficSituationAnalysisSystemDetailInformationLabel.setFont(font)\r\n self.trafficSituationAnalysisSystemDetailInformationLabel.setWordWrap(True)\r\n self.trafficSituationAnalysisSystemDetailInformationLabel.setObjectName(\r\n \"trafficSituationAnalysisSystemDetailInformationLabel\")\r\n self.trafficSituationAnalysisSystemDetailInformationLabel.setMargin(10)\r\n self.trafficSituationAnalysisSystemVertical2.addWidget(\r\n 
self.trafficSituationAnalysisSystemDetailInformationLabel)\r\n self.trafficSituationAnalysisSystemLine3 = QtWidgets.QFrame(self.horizontalLayoutWidget_17)\r\n self.trafficSituationAnalysisSystemLine3.setFrameShape(QtWidgets.QFrame.HLine)\r\n self.trafficSituationAnalysisSystemLine3.setFrameShadow(QtWidgets.QFrame.Sunken)\r\n self.trafficSituationAnalysisSystemLine3.setObjectName(\"trafficSituationAnalysisSystemLine3\")\r\n self.trafficSituationAnalysisSystemVertical2.addWidget(self.trafficSituationAnalysisSystemLine3)\r\n self.trafficSituationAnalysisSystemSystemFunctionLabel = QtWidgets.QLabel(self.horizontalLayoutWidget_17)\r\n font = QtGui.QFont()\r\n font.setFamily(\"微软雅黑 Light\")\r\n font.setPointSize(16)\r\n self.trafficSituationAnalysisSystemSystemFunctionLabel.setFont(font)\r\n self.trafficSituationAnalysisSystemSystemFunctionLabel.setObjectName(\r\n \"trafficSituationAnalysisSystemSystemFunctionLabel\")\r\n self.trafficSituationAnalysisSystemSystemFunctionLabel.setMargin(10)\r\n self.trafficSituationAnalysisSystemVertical2.addWidget(self.trafficSituationAnalysisSystemSystemFunctionLabel)\r\n self.trafficSituationAnalysisSystemSystemFunctionInformationLabel = QtWidgets.QLabel(\r\n self.horizontalLayoutWidget_17)\r\n font = QtGui.QFont()\r\n font.setFamily(\"微软雅黑 Light\")\r\n font.setPointSize(12)\r\n self.trafficSituationAnalysisSystemSystemFunctionInformationLabel.setFont(font)\r\n self.trafficSituationAnalysisSystemSystemFunctionInformationLabel.setWordWrap(True)\r\n self.trafficSituationAnalysisSystemSystemFunctionInformationLabel.setObjectName(\r\n \"trafficSituationAnalysisSystemSystemFunctionInformationLabel\")\r\n self.trafficSituationAnalysisSystemSystemFunctionInformationLabel.setMargin(10)\r\n self.trafficSituationAnalysisSystemVertical2.addWidget(\r\n self.trafficSituationAnalysisSystemSystemFunctionInformationLabel)\r\n self.trafficSituationAnalysisSystemLine4 = QtWidgets.QFrame(self.horizontalLayoutWidget_17)\r\n 
self.trafficSituationAnalysisSystemLine4.setFrameShape(QtWidgets.QFrame.HLine)\r\n self.trafficSituationAnalysisSystemLine4.setFrameShadow(QtWidgets.QFrame.Sunken)\r\n self.trafficSituationAnalysisSystemLine4.setObjectName(\"trafficSituationAnalysisSystemLine4\")\r\n self.trafficSituationAnalysisSystemVertical2.addWidget(self.trafficSituationAnalysisSystemLine4)\r\n self.trafficSituationAnalysisSystemHorizontal5 = QtWidgets.QHBoxLayout()\r\n self.trafficSituationAnalysisSystemHorizontal5.setContentsMargins(10, -1, -1, -1)\r\n self.trafficSituationAnalysisSystemHorizontal5.setObjectName(\"trafficSituationAnalysisSystemHorizontal5\")\r\n self.trafficSituationAnalysisSystemFeedBackPhotoLabel = QtWidgets.QLabel(self.horizontalLayoutWidget_17)\r\n self.trafficSituationAnalysisSystemFeedBackPhotoLabel.setMaximumSize(QtCore.QSize(20, 20))\r\n self.trafficSituationAnalysisSystemFeedBackPhotoLabel.setText(\"\")\r\n self.trafficSituationAnalysisSystemFeedBackPhotoLabel.setObjectName(\r\n \"trafficSituationAnalysisSystemFeedBackPhotoLabel\")\r\n self.trafficSituationPixmap = QPixmap(\":/FeedBack.png\")\r\n self.trafficSituationAnalysisSystemFeedBackPhotoLabel.setPixmap(self.trafficSituationPixmap)\r\n self.trafficSituationAnalysisSystemFeedBackPhotoLabel.setScaledContents(True)\r\n self.trafficSituationAnalysisSystemHorizontal5.addWidget(self.trafficSituationAnalysisSystemFeedBackPhotoLabel)\r\n self.trafficSituationAnalysisSystemFeedBackLabel = QtWidgets.QLabel(self.horizontalLayoutWidget_17)\r\n self.trafficSituationAnalysisSystemFeedBackLabel.setMaximumSize(QtCore.QSize(16777215, 20))\r\n font = QtGui.QFont()\r\n font.setFamily(\"微软雅黑 Light\")\r\n font.setPointSize(12)\r\n self.trafficSituationAnalysisSystemFeedBackLabel.setFont(font)\r\n self.trafficSituationAnalysisSystemFeedBackLabel.setOpenExternalLinks(True)\r\n self.trafficSituationAnalysisSystemFeedBackLabel.setObjectName(\"trafficSituationAnalysisSystemFeedBackLabel\")\r\n 
self.trafficSituationAnalysisSystemHorizontal5.addWidget(self.trafficSituationAnalysisSystemFeedBackLabel)\r\n self.trafficSituationAnalysisSystemVertical2.addLayout(self.trafficSituationAnalysisSystemHorizontal5)\r\n spacerItem10 = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)\r\n self.trafficSituationAnalysisSystemVertical2.addItem(spacerItem10)\r\n self.trafficSituationAnalysisSystemHorizontal1.addLayout(self.trafficSituationAnalysisSystemVertical2)\r\n self.trafficSituationAnalysisSystemHorizontal1.setStretch(1, 2)\r\n self.trafficSituationAnalysisSystemHorizontal1.setStretch(3, 1)\r\n self.amapProgramStackedWidget.addWidget(self.trafficSituationAnalysisSystemPage)\r\n\r\n self.retranslateUi(AmapProgramForm)\r\n self.amapProgramStackedWidget.setCurrentIndex(0)\r\n self.RoutePlanningTabPage.setCurrentIndex(0)\r\n self.basicFunctionListWidget.setCurrentRow(0)\r\n self.subDistrictNumComboBox.setCurrentIndex(0)\r\n self.trafficSituationStackedWidget.setCurrentIndex(0)\r\n QtCore.QMetaObject.connectSlotsByName(AmapProgramForm)\r\n\r\n def retranslateUi(self, AmapProgramForm):\r\n _translate = QtCore.QCoreApplication.translate\r\n AmapProgramForm.setWindowTitle(\"高德地图小程序\")\r\n AmapProgramForm.setWindowFlags(\r\n QtCore.Qt.WindowMinimizeButtonHint | QtCore.Qt.WindowCloseButtonHint | QtCore.Qt.MSWindowsFixedSizeDialogHint)\r\n AmapProgramForm.setWindowIcon(QIcon(\":/logo.png\"))\r\n self.usersNameLabel.setText(_translate(\"AmapProgramForm\", \"默认用户\"))\r\n self.basicFunctionLabel.setText(_translate(\"AmapProgramForm\", \"基础功能\"))\r\n __sortingEnabled = self.basicFunctionListWidget.isSortingEnabled()\r\n self.basicFunctionListWidget.setSortingEnabled(False)\r\n item = self.basicFunctionListWidget.item(0)\r\n item.setText(_translate(\"AmapProgramForm\", \"路径规划\"))\r\n item = self.basicFunctionListWidget.item(1)\r\n item.setText(_translate(\"AmapProgramForm\", \"静态地图\"))\r\n 
self.basicFunctionListWidget.setSortingEnabled(__sortingEnabled)\r\n self.searchServiceLabel.setText(_translate(\"AmapProgramForm\", \"查询服务\"))\r\n __sortingEnabled = self.searchServiceListWidget.isSortingEnabled()\r\n self.searchServiceListWidget.setSortingEnabled(False)\r\n item = self.searchServiceListWidget.item(0)\r\n item.setText(_translate(\"AmapProgramForm\", \"IP地址查询\"))\r\n item = self.searchServiceListWidget.item(1)\r\n item.setText(_translate(\"AmapProgramForm\", \"行政区域查询\"))\r\n item = self.searchServiceListWidget.item(2)\r\n item.setText(_translate(\"AmapProgramForm\", \"天气查询\"))\r\n self.searchServiceListWidget.setSortingEnabled(__sortingEnabled)\r\n self.advancedFunction.setText(_translate(\"AmapProgramForm\", \"高级功能\"))\r\n __sortingEnabled = self.advancedFunctionListWidget.isSortingEnabled()\r\n self.advancedFunctionListWidget.setSortingEnabled(False)\r\n item = self.advancedFunctionListWidget.item(0)\r\n item.setText(_translate(\"AmapProgramForm\", \"交通态势\"))\r\n item = self.advancedFunctionListWidget.item(1)\r\n item.setText(_translate(\"AmapProgramForm\", \"交通态势分析系统\"))\r\n self.advancedFunctionListWidget.setSortingEnabled(__sortingEnabled)\r\n self.otherOptionsLabel.setText(_translate(\"AmapProgramForm\", \"其他选项\"))\r\n __sortingEnabled = self.otherOptionsListWidget.isSortingEnabled()\r\n self.otherOptionsListWidget.setSortingEnabled(False)\r\n item = self.otherOptionsListWidget.item(0)\r\n item.setText(_translate(\"AmapProgramForm\", \"设置\"))\r\n item = self.otherOptionsListWidget.item(1)\r\n item.setText(_translate(\"AmapProgramForm\", \"关于\"))\r\n self.otherOptionsListWidget.setSortingEnabled(__sortingEnabled)\r\n self.walkingRoutePlanningLabel.setText(_translate(\"AmapProgramForm\", \"步行路径规划\"))\r\n self.walkingSearchResultContextLabel.setText(_translate(\"AmapProgramForm\", \"查询结果说明\"))\r\n self.walkingSearchInformationLabel.setText(\r\n _translate(\"AmapProgramForm\", \"步行路径规划可以为您规划100KM以内的步行通勤方案,并提供相应方案的步行路径规划和实时路线图。\"))\r\n 
self.walkingDepartureLabel.setText(_translate(\"AmapProgramForm\", \"出发地:\"))\r\n self.walkingDepartureLineEdit.setPlaceholderText(_translate(\"AmapProgramForm\", \"输入起点\"))\r\n self.walkingDestinationLabel.setText(_translate(\"AmapProgramForm\", \"目的地:\"))\r\n self.walkingDestinationLineEdit.setPlaceholderText(_translate(\"AmapProgramForm\", \"输入终点\"))\r\n self.walkingSearchButton.setText(_translate(\"AmapProgramForm\", \"查询\"))\r\n self.walkingRoutePlanningInformationLabel.setText(_translate(\"AmapProgramForm\", \"步行路径规划说明\"))\r\n self.walkingRoutePlanningDetailInformationLabel.setText(\r\n _translate(\"AmapProgramForm\", \"根据用户输入的出发地和目的地,能够快速的帮助用户查询到最优的步行路径的行走路线。\"))\r\n self.walkingInputContextLabel.setText(_translate(\"AmapProgramForm\", \"输入内容说明\"))\r\n self.walkingInputInformationLabel.setText(_translate(\"AmapProgramForm\", \"搜索的地名(仅支持中国大陆地区)\"))\r\n self.walkingFeedBackLabel.setText(_translate(\"AmapProgramForm\",\r\n \"<html><head/><body><p><a href=\\\"https://github.com/\\\"><span style=\\\" color:#000000;\\\">提供反馈</span></a></p></body></html>\"))\r\n self.RoutePlanningTabPage.setTabText(self.RoutePlanningTabPage.indexOf(self.walkingRoutePlanningTab),\r\n _translate(\"AmapProgramForm\", \"步行路径规划\"))\r\n self.busRoutePlanningLabel.setText(_translate(\"AmapProgramForm\", \"公交路径规划\"))\r\n self.busSearchResultContextLabel.setText(_translate(\"AmapProgramForm\", \"查询结果说明\"))\r\n self.busSearchInformationLabel.setText(\r\n _translate(\"AmapProgramForm\", \"公交路径规划可以为您规划综合各类公共(火车、公交、地铁)交通方式的通勤方案,并提供相应方案的公交路径规划和实时路线图。\"))\r\n self.busDepartureLabel.setText(_translate(\"AmapProgramForm\", \"出发地:\"))\r\n self.busDepartureLineEdit.setPlaceholderText(_translate(\"AmapProgramForm\", \"输入起点\"))\r\n self.busDestinationLabel.setText(_translate(\"AmapProgramForm\", \"目的地:\"))\r\n self.busDestinationLineEdit.setPlaceholderText(_translate(\"AmapProgramForm\", \"输入终点\"))\r\n self.busSearchButton.setText(_translate(\"AmapProgramForm\", \"查询\"))\r\n 
self.busRoutePlanningInformationLabel.setText(_translate(\"AmapProgramForm\", \"公交路径规划说明\"))\r\n self.busRoutePlanningDetailInformationLabel.setText(\r\n _translate(\"AmapProgramForm\", \"根据用户输入的出发地和目的地,能够快速的帮助用户查询到最优的公交路径的出行路线。\"))\r\n self.busInputContextLabel.setText(_translate(\"AmapProgramForm\", \"输入内容说明\"))\r\n self.busInputInformationLabel.setText(_translate(\"AmapProgramForm\", \"搜索的地名(仅支持中国大陆地区)\"))\r\n self.busFeedBackLabel.setText(_translate(\"AmapProgramForm\",\r\n \"<html><head/><body><p><a href=\\\"https://github.com/\\\"><span style=\\\" color:#000000;\\\">提供反馈</span></a></p></body></html>\"))\r\n self.RoutePlanningTabPage.setTabText(self.RoutePlanningTabPage.indexOf(self.busRoutePlanningTab),\r\n _translate(\"AmapProgramForm\", \"公交路径规划\"))\r\n self.rideRoutePlanningLabel.setText(_translate(\"AmapProgramForm\", \"骑行路径规划\"))\r\n self.rideSearchResultContextLabel.setText(_translate(\"AmapProgramForm\", \"查询结果说明\"))\r\n self.rideSearchInformationLabel.setText(\r\n _translate(\"AmapProgramForm\", \"骑行路径规划可以为您规划骑行通勤方案,并提供相应方案的骑行路径规划和实时路线图。\"))\r\n self.rideDepartureLabel.setText(_translate(\"AmapProgramForm\", \"出发地:\"))\r\n self.rideDepartureLineEdit.setPlaceholderText(_translate(\"AmapProgramForm\", \"输入起点\"))\r\n self.rideDestinationLabel.setText(_translate(\"AmapProgramForm\", \"目的地:\"))\r\n self.rideDestinationLineEdit.setPlaceholderText(_translate(\"AmapProgramForm\", \"输入终点\"))\r\n self.rideSearchButton.setText(_translate(\"AmapProgramForm\", \"查询\"))\r\n self.rideRoutePlanningInformationLabel.setText(_translate(\"AmapProgramForm\", \"骑行路径规划说明\"))\r\n self.rideRoutePlanningDetailInformationLabel.setText(\r\n _translate(\"AmapProgramForm\", \"根据用户输入的出发地和目的地,能够快速的帮助用户查询到最优的骑行路径的出行路线。\"))\r\n self.rideInputContextLabel.setText(_translate(\"AmapProgramForm\", \"输入内容说明\"))\r\n self.rideInputInformationLabel.setText(_translate(\"AmapProgramForm\", \"搜索的地名(仅支持中国大陆地区)\"))\r\n self.rideFeedBackLabel.setText(_translate(\"AmapProgramForm\",\r\n 
\"<html><head/><body><p><a href=\\\"https://github.com/\\\"><span style=\\\" color:#000000;\\\">提供反馈</span></a></p></body></html>\"))\r\n self.RoutePlanningTabPage.setTabText(self.RoutePlanningTabPage.indexOf(self.rideRoutePlanningTab),\r\n _translate(\"AmapProgramForm\", \"骑行路径规划\"))\r\n self.driveRoutePlanningLabel.setText(_translate(\"AmapProgramForm\", \"驾车路径规划\"))\r\n self.driveSearchResultContextLabel.setText(_translate(\"AmapProgramForm\", \"查询结果说明\"))\r\n self.driveSearchInformationLabel.setText(\r\n _translate(\"AmapProgramForm\", \"驾车路径规划可以为您规划小客车、轿车通勤出行的方案,并提供相应方案的驾车路径规划和实时路线图。\"))\r\n self.driveDepartureLabel.setText(_translate(\"AmapProgramForm\", \"出发地:\"))\r\n self.driveDepartureLineEdit.setPlaceholderText(_translate(\"AmapProgramForm\", \"输入起点\"))\r\n self.driveDestinationLabel.setText(_translate(\"AmapProgramForm\", \"目的地:\"))\r\n self.driveDestinationLineEdit.setPlaceholderText(_translate(\"AmapProgramForm\", \"输入终点\"))\r\n self.driveSearchButton.setText(_translate(\"AmapProgramForm\", \"查询\"))\r\n self.driveRoutePlanningInformationLabel.setText(_translate(\"AmapProgramForm\", \"驾车路径规划说明\"))\r\n self.driveRoutePlanningDetailInformationLabel.setText(\r\n _translate(\"AmapProgramForm\", \"根据用户输入的出发地和目的地,能够快速的帮助用户查询到最优的驾车路径的出行路线。\"))\r\n self.driveInputContextLabel.setText(_translate(\"AmapProgramForm\", \"输入内容说明\"))\r\n self.driveInputInformationLabel.setText(_translate(\"AmapProgramForm\", \"搜索的地名(仅支持中国大陆地区)\"))\r\n self.driveFeedBackLabel.setText(_translate(\"AmapProgramForm\",\r\n \"<html><head/><body><p><a href=\\\"https://github.com/\\\"><span style=\\\" color:#000000;\\\">提供反馈</span></a></p></body></html>\"))\r\n self.RoutePlanningTabPage.setTabText(self.RoutePlanningTabPage.indexOf(self.driveRoutePlanningTab),\r\n _translate(\"AmapProgramForm\", \"驾车路径规划\"))\r\n self.IPLocationLabel.setText(_translate(\"AmapProgramForm\", \"IP地址查询\"))\r\n self.IPLocationSearchResultContextLabel.setText(_translate(\"AmapProgramForm\", \"查询结果说明\"))\r\n 
self.IPLocationSearchInformationLabel.setText(_translate(\"AmapProgramForm\", \"1.若为直辖市则直接显示直辖市名称\\n\"\r\n \"2.如果在局域网IP网段内,则显示您当前的网络位于局域网\\n\"\r\n \"3.非法IP以及国外的IP地址则显示查询失败\"))\r\n self.IPLabel.setText(_translate(\"AmapProgramForm\", \"IP地址\"))\r\n self.IPLocationLineEdit.setPlaceholderText(_translate(\"AmapProgramForm\", \"请输入IP地址或者域名\"))\r\n self.IPLocationGetLocalNetWorkButton.setText(_translate(\"AmapProgramForm\", \"获取当前网络IP地址\"))\r\n self.IPLocationSearchButton.setText(_translate(\"AmapProgramForm\", \"查询\"))\r\n self.IPLocationInformationLabel.setText(_translate(\"AmapProgramForm\", \"IP地址查询说明\"))\r\n self.IPLocationDetailInformationLabel.setText(_translate(\"AmapProgramForm\", \"根据用户输入的IP地址,能够快速的帮助用户定位IP地址所在的位置\"))\r\n self.IPLocationInputContextLabel.setText(_translate(\"AmapProgramForm\", \"输入内容说明\"))\r\n self.IPLocationInputInformationLabel.setText(_translate(\"AmapProgramForm\", \"1.可以通过输入框搜索相应的IP地址(仅支持中国大陆地区)\\n\"\r\n \"2.点击获取当前网络IP地址查询当前网络IP信息\\n\"\r\n \"3.若用户不填写IP地址,则取客户HTTP之中的请求来进行定位\"))\r\n self.IPLocationFeedBackLabel.setText(_translate(\"AmapProgramForm\",\r\n \"<html><head/><body><p><a href=\\\"https://github.com/\\\"><span style=\\\" color:#000000;\\\">提供反馈</span></a></p></body></html>\"))\r\n self.staticMapsLabel.setText(_translate(\"AmapProgramForm\", \"静态地图\"))\r\n self.staticMapsSearchResultContextLabel.setText(_translate(\"AmapProgramForm\", \"查询结果说明\"))\r\n self.staticMapsSearchInformationLabel.setText(\r\n _translate(\"AmapProgramForm\", \"根据用户输入的城市名称,显示相应的实时地图图片(数据来源:高德地图)\"))\r\n self.staticMapsSearchLineEdit.setPlaceholderText(_translate(\"AmapProgramForm\", \"请输入要查询的地名\"))\r\n self.staticMapsSearchButton.setText(_translate(\"AmapProgramForm\", \"查询\"))\r\n self.staticMapsServiceLabel.setText(_translate(\"AmapProgramForm\", \"静态地图服务说明\"))\r\n self.staticMapsDetailInformationLabel.setText(\r\n _translate(\"AmapProgramForm\", \"根据用户定位或查询的城市,能够快速的帮助用户展现更加直观的区域信息\"))\r\n 
self.staticMapsSearchContextLabel.setText(_translate(\"AmapProgramForm\", \"输入内容说明\"))\r\n self.staticMapsInputInformationLabel.setText(_translate(\"AmapProgramForm\", \"1.搜索的地名(仅支持中国大陆地区)\"))\r\n self.staticMapsFeedBackLabel.setText(_translate(\"AmapProgramForm\",\r\n \"<html><head/><body><p><a href=\\\"https://github.com/\\\"><span style=\\\" color:#000000;\\\">提供反馈</span></a></p></body></html>\"))\r\n self.administrativeDistrictEnquiryLabel.setText(_translate(\"AmapProgramForm\", \"行政区域查询\"))\r\n self.administrativeSearchResultContextLabel.setText(_translate(\"AmapProgramForm\", \"查询结果说明\"))\r\n self.administrativeSearchInformationLabel.setText(\r\n _translate(\"AmapProgramForm\", \"根据用户选择的内容,搜索相应行政区域的子行政区域的具体信息\"))\r\n self.administrativeLabel.setText(_translate(\"AmapProgramForm\", \"请选择行政区域\"))\r\n self.administrativeSearchButton.setText(_translate(\"AmapProgramForm\", \"查询\"))\r\n self.subDistrictNumLabel.setText(_translate(\"AmapProgramForm\", \"下级行政区数\"))\r\n self.subDistrictNumComboBox.setItemText(0, _translate(\"AmapProgramForm\", \"不返回下级行政区\"))\r\n self.subDistrictNumComboBox.setItemText(1, _translate(\"AmapProgramForm\", \"返回下一级行政区\"))\r\n self.subDistrictNumComboBox.setItemText(2, _translate(\"AmapProgramForm\", \"返回下两级行政区\"))\r\n self.subDistrictNumComboBox.setItemText(3, _translate(\"AmapProgramForm\", \"返回下三级行政区\"))\r\n self.administrativeServiceLabel.setText(_translate(\"AmapProgramForm\", \"行政区域查询服务说明\"))\r\n self.administrativeDetailInformationLabel.setText(\r\n _translate(\"AmapProgramForm\", \"根据用户输入的搜索条件可以帮助用户快速的查找特定的行政区域信息\"))\r\n self.administrativeInputContextLabel.setText(_translate(\"AmapProgramForm\", \"选择内容说明\"))\r\n self.administrativeInputInformationLabel.setText(_translate(\"AmapProgramForm\", \"1.可以通过选择框选择要搜索的行政区域,行政区域级别\\n\"\r\n \"2.行政区域等级(由高到低排列):省、市、区/县、乡镇/街道\"))\r\n self.administrativeFeedBackLabel.setText(_translate(\"AmapProgramForm\",\r\n \"<html><head/><body><p><a href=\\\"https://github.com/\\\"><span style=\\\" 
color:#000000;\\\">提供反馈</span></a></p></body></html>\"))\r\n self.weatherLabel.setText(_translate(\"AmapProgramForm\", \"天气查询\"))\r\n self.weatherSearchResultContextLabel.setText(_translate(\"AmapProgramForm\", \"查询结果说明\"))\r\n self.weatherSearchInformationLabel.setText(_translate(\"AmapProgramForm\", \"1.实况天气查询:实况天气每小时更新多次\\n\"\r\n \"2.预报天气查询:预报天气每天更新3次,分别在8点、11点、18点左右更新。预报天气包括当天、第二天、第三天的预报数据\"))\r\n self.weatherSearchLineEdit.setPlaceholderText(_translate(\"AmapProgramForm\", \"请输入要查询的城市\"))\r\n self.weatherSearchButton.setText(_translate(\"AmapProgramForm\", \"查询\"))\r\n self.weatherInformationLabel.setText(_translate(\"AmapProgramForm\", \"天气查询服务说明\"))\r\n self.weatherDetailInformationLabel.setText(_translate(\"AmapProgramForm\", \"根据用户输入的城市,能够快速的帮助用户查询所在城市的天气状况\"))\r\n self.weatherInputContextLabel.setText(_translate(\"AmapProgramForm\", \"输入内容说明\"))\r\n self.weatherInputInformationLabel.setText(_translate(\"AmapProgramForm\", \"1.可以通过选择框选择要搜索的天气类型:实况天气、预报天气\\n\"\r\n \"2.搜索的城市(仅支持国内)\"))\r\n self.weatherFeedBackLabel.setText(_translate(\"AmapProgramForm\",\r\n \"<html><head/><body><p><a href=\\\"https://github.com/\\\"><span style=\\\" color:#000000;\\\">提供反馈</span></a></p></body></html>\"))\r\n self.trafficSituationLabel.setText(_translate(\"AmapProgramForm\", \"交通态势查询\"))\r\n self.trafficSituationSearchResultContextLabel.setText(_translate(\"AmapProgramForm\", \"查询结果说明\"))\r\n self.trafficSituationSearchInformationLabel.setText(\r\n _translate(\"AmapProgramForm\", \"实时查询指定道路或区域的实时拥堵情况和拥堵趋势(默认显示北京市长安大街)\"))\r\n self.trafficSituationSearchModeLabel.setText(_translate(\"AmapProgramForm\", \"查询方式\"))\r\n self.trafficRoadRealSituationComboBox.setItemText(0, _translate(\"AmapProgramForm\", \"道路实时路况\"))\r\n self.trafficRoadRealSituationComboBox.setItemText(1, _translate(\"AmapProgramForm\", \"矩形区域实时路况\"))\r\n self.trafficRoadRealSituationComboBox.setItemText(2, _translate(\"AmapProgramForm\", \"多边形区域实时路况\"))\r\n self.trafficRoadRealSituationComboBox.setItemText(3, 
_translate(\"AmapProgramForm\", \"周边实时路况\"))\r\n self.trafficRealRoadSelectCityLabel.setText(_translate(\"AmapProgramForm\", \"城市列表\"))\r\n self.trafficRealRoadNameLabel.setText(_translate(\"AmapProgramForm\", \"道路名称\"))\r\n self.trafficSituationRealRoadSearchRoadName.setPlaceholderText(_translate(\"AmapProgramForm\", \"请输入要查询的道路名称\"))\r\n self.trafficRealRoadSearchButton.setText(_translate(\"AmapProgramForm\", \"查询\"))\r\n self.trafficRectangleRoadSearchPositionNameLabel.setText(\r\n _translate(\"AmapProgramForm\", \"请输入要查询的两个地点名称(相隔距离不超过2KM)\"))\r\n self.trafficRectangleRoadLevelLabel.setText(_translate(\"AmapProgramForm\", \"道路等级\"))\r\n self.trafficRectangleRoadLevelComboBox.setItemText(0, _translate(\"AmapProgramForm\", \"请选择道路等级\"))\r\n self.trafficRectangleRoadLevelComboBox.setItemText(1, _translate(\"AmapProgramForm\", \"全部驾车道路\"))\r\n self.trafficRectangleRoadLevelComboBox.setItemText(2, _translate(\"AmapProgramForm\", \"高速路\"))\r\n self.trafficRectangleRoadLevelComboBox.setItemText(3, _translate(\"AmapProgramForm\", \"环路及快速路\"))\r\n self.trafficRectangleRoadLevelComboBox.setItemText(4, _translate(\"AmapProgramForm\", \"主干路\"))\r\n self.trafficRectangleRoadLevelComboBox.setItemText(5, _translate(\"AmapProgramForm\", \"次干路\"))\r\n self.trafficRectangleRoadLevelComboBox.setItemText(6, _translate(\"AmapProgramForm\", \"支干路\"))\r\n self.trafficRectangleRoadLevelSearchButton.setText(_translate(\"AmapProgramForm\", \"查询\"))\r\n self.trafficPolygonRoadSearchPositionNameLabel.setText(_translate(\"AmapProgramForm\", \"多个距离相近的地名\"))\r\n self.trafficPolygonRoadSearchPositionLineEdit.setPlaceholderText(\r\n _translate(\"AmapProgramForm\", \"请输入多个距离相近的地名进行查询\"))\r\n self.trafficPolygonRoadLevelLabel.setText(_translate(\"AmapProgramForm\", \"道路等级\"))\r\n self.trafficPolygonRoadLevelComboBox.setItemText(0, _translate(\"AmapProgramForm\", \"请选择道路等级\"))\r\n self.trafficPolygonRoadLevelComboBox.setItemText(1, _translate(\"AmapProgramForm\", \"全部驾车道路\"))\r\n 
self.trafficPolygonRoadLevelComboBox.setItemText(2, _translate(\"AmapProgramForm\", \"高速路\"))\r\n self.trafficPolygonRoadLevelComboBox.setItemText(3, _translate(\"AmapProgramForm\", \"环路及快速路\"))\r\n self.trafficPolygonRoadLevelComboBox.setItemText(4, _translate(\"AmapProgramForm\", \"主干路\"))\r\n self.trafficPolygonRoadLevelComboBox.setItemText(5, _translate(\"AmapProgramForm\", \"次干路\"))\r\n self.trafficPolygonRoadLevelComboBox.setItemText(6, _translate(\"AmapProgramForm\", \"支干路\"))\r\n self.trafficPolygonRoadLevelSearchButton.setText(_translate(\"AmapProgramForm\", \"查询\"))\r\n self.trafficAroundRoadSearchPositionNameLabel.setText(_translate(\"AmapProgramForm\", \"中心地点名称\"))\r\n self.trafficAroundRoadSearchPositionNameLineEdit.setPlaceholderText(\r\n _translate(\"AmapProgramForm\", \"请输入要查询的中心地点名称\"))\r\n self.trafficAroundRoadLevelLabel.setText(_translate(\"AmapProgramForm\", \"道路等级\"))\r\n self.trafficAroundRoadLevelComboBox.setItemText(0, _translate(\"AmapProgramForm\", \"请选择道路等级\"))\r\n self.trafficAroundRoadLevelComboBox.setItemText(1, _translate(\"AmapProgramForm\", \"全部驾车道路\"))\r\n self.trafficAroundRoadLevelComboBox.setItemText(2, _translate(\"AmapProgramForm\", \"高速路\"))\r\n self.trafficAroundRoadLevelComboBox.setItemText(3, _translate(\"AmapProgramForm\", \"环路及快速路\"))\r\n self.trafficAroundRoadLevelComboBox.setItemText(4, _translate(\"AmapProgramForm\", \"主干路\"))\r\n self.trafficAroundRoadLevelComboBox.setItemText(5, _translate(\"AmapProgramForm\", \"次干路\"))\r\n self.trafficAroundRoadLevelComboBox.setItemText(6, _translate(\"AmapProgramForm\", \"支干路\"))\r\n self.trafficAroundRoadLevelSearchButton.setText(_translate(\"AmapProgramForm\", \"查询\"))\r\n self.trafficSituationServiceLabel.setText(_translate(\"AmapProgramForm\", \"交通态势查询服务说明\"))\r\n self.trafficSituationDetailInformationLabel.setText(\r\n _translate(\"AmapProgramForm\", \"根据用户输入的城市,能够快速的帮助用户查询所在城市的天气状况\"))\r\n self.trafficSituationInputContextLabel.setText(_translate(\"AmapProgramForm\", 
\"输入内容说明\"))\r\n self.trafficSituationInputInformationLabel.setText(_translate(\"AmapProgramForm\", \"1.可以通过选择框选择相应道路名称对应的城市\\n\"\r\n \"2.搜索的道路(仅支持国内主干道路)\\n\"\r\n \"3.输入多个地名时使用分号“;”间隔\"))\r\n self.trafficSituationFeedBackLabel.setText(_translate(\"AmapProgramForm\",\r\n \"<html><head/><body><p><a href=\\\"https://github.com/\\\"><span style=\\\" color:#000000;\\\">提供反馈</span></a></p></body></html>\"))\r\n self.trafficSituationAnalysisSystemLabel.setText(_translate(\"AmapProgramForm\", \"山西省道路交通信息分析系统\"))\r\n self.trafficSituationAnalysisSystemResultContextLabel.setText(\r\n _translate(\"AmapProgramForm\", \"山西省各地级市路况信息展示(默认展示太原市数据)\"))\r\n self.trafficSituationAnalysisSystemControlLabel.setText(_translate(\"AmapProgramForm\", \"获取信息控制按钮\"))\r\n self.trafficSituationAnalysisSystemCityComboBox.setItemText(0, _translate(\"AmapProgramForm\", \"请选择城市\"))\r\n self.trafficSituationAnalysisSystemCityComboBox.setItemText(1, _translate(\"AmapProgramForm\", \"太原市\"))\r\n self.trafficSituationAnalysisSystemCityComboBox.setItemText(2, _translate(\"AmapProgramForm\", \"大同市\"))\r\n self.trafficSituationAnalysisSystemCityComboBox.setItemText(3, _translate(\"AmapProgramForm\", \"阳泉市\"))\r\n self.trafficSituationAnalysisSystemCityComboBox.setItemText(4, _translate(\"AmapProgramForm\", \"长治市\"))\r\n self.trafficSituationAnalysisSystemCityComboBox.setItemText(5, _translate(\"AmapProgramForm\", \"晋城市\"))\r\n self.trafficSituationAnalysisSystemCityComboBox.setItemText(6, _translate(\"AmapProgramForm\", \"朔州市\"))\r\n self.trafficSituationAnalysisSystemCityComboBox.setItemText(7, _translate(\"AmapProgramForm\", \"忻州市\"))\r\n self.trafficSituationAnalysisSystemCityComboBox.setItemText(8, _translate(\"AmapProgramForm\", \"吕梁市\"))\r\n self.trafficSituationAnalysisSystemCityComboBox.setItemText(9, _translate(\"AmapProgramForm\", \"晋中市\"))\r\n self.trafficSituationAnalysisSystemCityComboBox.setItemText(10, _translate(\"AmapProgramForm\", \"临汾市\"))\r\n 
self.trafficSituationAnalysisSystemCityComboBox.setItemText(11, _translate(\"AmapProgramForm\", \"运城市\"))\r\n self.trafficSituationAnalysisSystemViewButton.setText(_translate(\"AmapProgramForm\", \"查看\"))\r\n self.trafficSituationAnalysisSystemStartButton.setText(_translate(\"AmapProgramForm\", \"开始\"))\r\n self.trafficSituationAnalysisSystemStopButton.setText(_translate(\"AmapProgramForm\", \"停止\"))\r\n self.trafficSituationAnalysisSystemAnalysisStateLabel.setText(_translate(\"AmapProgramForm\", \"分析状况\"))\r\n self.trafficSituationAnalysisSystemInformationLabel.setText(_translate(\"AmapProgramForm\", \"山西省道路交通信息分析系统说明\"))\r\n self.trafficSituationAnalysisSystemDetailInformationLabel.setText(\r\n _translate(\"AmapProgramForm\", \"实时采集路网和一些重要路段的车流量数据,然后对这些数据进行人工智能分析,根据分析得到的数据给出一个有参考性的意见。\"))\r\n self.trafficSituationAnalysisSystemSystemFunctionLabel.setText(_translate(\"AmapProgramForm\", \"系统功能说明\"))\r\n self.trafficSituationAnalysisSystemSystemFunctionInformationLabel.setText(\r\n _translate(\"AmapProgramForm\", \"通过选择山西省相应的城市列表,为用户简单展示山西省各地级市主要道路的路况信息\"))\r\n self.trafficSituationAnalysisSystemFeedBackLabel.setText(_translate(\"AmapProgramForm\",\r\n \"<html><head/><body><p><a href=\\\"https://github.com/\\\"><span style=\\\" color:#000000;\\\">提供反馈</span></a></p></body></html>\"))\r\n" } ]
41
rudineirk/faculdade_boiler
https://github.com/rudineirk/faculdade_boiler
056b79e33d1faffc1500dbf33f6427a2a613b361
9e796d8c2603f37799fc02dc7bd01827b28164df
19f32df96acd7473717fc37dd4cc499b130b600c
refs/heads/master
2021-01-10T04:30:46.570437
2015-12-02T21:32:04
2015-12-02T21:32:04
43,920,081
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5513643622398376, "alphanum_fraction": 0.5630016326904297, "avg_line_length": 21.862384796142578, "blob_id": "25a932593971bd6a12815586607f9c33e6a5861f", "content_id": "5fe5bd7ae7704ee88d2b262be2a6f35d18344370", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2492, "license_type": "permissive", "max_line_length": 68, "num_lines": 109, "path": "/boiler/conn.py", "repo_name": "rudineirk/faculdade_boiler", "src_encoding": "UTF-8", "text": "#!/usr/bin/python3\n\nfrom __future__ import print_function\n\nimport socket\nfrom threading import Semaphore\n\n__all__ = [\n 'BoilerConn',\n]\n\nMSG_AIR_TEMP = b\"sta0\"\nMSG_WATER_INSIDE_TEMP = b\"st-0\"\nMSG_WATER_IN_TEMP = b\"sti0\"\nMSG_WATER_OUT_TEMP = b\"sno0\"\nMSG_WATER_COLUMN = b\"sh-0\"\nCMD_HEAT_FLUX = b\"aq-\"\nCMD_WATER_FLUX = b\"ani\"\n\n\nclass BoilerConn(object):\n\n def __init__(self, host=\"127.0.0.1\", port=4545):\n self.host = host\n self.port = port\n self.sock = None\n self._heat_flux = 0.0\n self._water_flux = 0.0\n self._semaphore = Semaphore()\n self.open()\n\n def open(self):\n self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n\n def close(self):\n self.sock.close()\n\n def _lock(self):\n self._semaphore.acquire()\n\n def _unlock(self):\n self._semaphore.release()\n\n def _send(self, msg):\n return self.sock.sendto(msg, (self.host, self.port))\n\n def _read(self, size=10000):\n return self.sock.recv(size)\n\n def _set_cmd(self, cmd, value):\n self._lock()\n value = str(value).encode()\n self._send(cmd + value + b\"\\r\\n\")\n self._read()\n self._unlock()\n\n def _get_msg(self, msg):\n self._lock()\n self._send(msg)\n data = self._read()\n self._unlock()\n\n data = data.decode(\"utf-8\")\n data = data[3:].replace(\",\", \".\").strip()\n try:\n data = float(data)\n except ValueError:\n data = data.split('.')\n new_data = ''.join(data[:-1]) + '.' 
+ data[-1]\n data = float(new_data)\n return data\n\n @property\n def heat_flux(self):\n return self._heat_flux\n\n @heat_flux.setter\n def heat_flux(self, value):\n self._heat_flux = value\n self._set_cmd(CMD_HEAT_FLUX, value)\n\n @property\n def water_flux(self):\n return self._water_flux\n\n @water_flux.setter\n def water_flux(self, value):\n self._water_flux = value\n self._set_cmd(CMD_WATER_FLUX, value)\n\n @property\n def air_temp(self):\n return self._get_msg(MSG_AIR_TEMP)\n\n @property\n def water_inside_temp(self):\n return self._get_msg(MSG_WATER_INSIDE_TEMP)\n\n @property\n def water_in_temp(self):\n return self._get_msg(MSG_WATER_IN_TEMP)\n\n @property\n def water_out_temp(self):\n return self._get_msg(MSG_WATER_OUT_TEMP)\n\n @property\n def water_column(self):\n return self._get_msg(MSG_WATER_COLUMN)\n" }, { "alpha_fraction": 0.5854992866516113, "alphanum_fraction": 0.5909712910652161, "avg_line_length": 25.10714340209961, "blob_id": "e2d57518f7457967d0465b9e54e7af3bb7206a74", "content_id": "fc67c730f0d59cd2983628385a2045bb0d3d9f05", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1462, "license_type": "permissive", "max_line_length": 71, "num_lines": 56, "path": "/boiler/core.py", "repo_name": "rudineirk/faculdade_boiler", "src_encoding": "UTF-8", "text": "from queue import Queue\nfrom threading import Thread\n\nfrom .conn import BoilerConn\nfrom .controller import WaterColumnController, WaterTempController\nfrom .reader import WaterColumnReader, WaterTempReader\n\n\nclass Main(object):\n\n def __init__(self):\n self._threads = []\n self.column_queue = Queue()\n self.temp_queue = Queue()\n\n self.conn = BoilerConn()\n\n self.column_controller = WaterColumnController(\n self.conn,\n self.column_queue,\n )\n self.temp_controller = WaterTempController(\n self.conn,\n self.temp_queue,\n )\n\n self.column_reader = WaterColumnReader(\n self.conn,\n self.column_queue,\n loop_time=2000,\n )\n 
self.temp_reader = WaterTempReader(\n self.conn,\n self.temp_queue,\n loop_time=1500,\n )\n\n def make_threads(self):\n self._threads.append(Thread(target=self.column_controller.run))\n self._threads.append(Thread(target=self.temp_controller.run))\n self._threads.append(Thread(target=self.column_reader.run))\n self._threads.append(Thread(target=self.temp_reader.run))\n\n def run(self):\n self.make_threads()\n for thread in self._threads:\n thread.start()\n\n for thread in self._threads:\n thread.join()\n self.conn.close()\n\n\nif __name__ == '__main__':\n main = Main()\n main.run()\n" }, { "alpha_fraction": 0.6857143044471741, "alphanum_fraction": 0.6857143044471741, "avg_line_length": 20.38888931274414, "blob_id": "2e458841e70956de93946f1cc314267fb3979425", "content_id": "5ba302171ec86e6e932c808425569cf399e3d09e", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 385, "license_type": "permissive", "max_line_length": 66, "num_lines": 18, "path": "/boiler/__init__.py", "repo_name": "rudineirk/faculdade_boiler", "src_encoding": "UTF-8", "text": "from .conn import BoilerConn\nfrom .controller import WaterColumnController, WaterTempController\nfrom .core import Main\nfrom .reader import WaterColumnReader, WaterTempReader\n\n__all__ = [\n 'BoilerConn',\n 'WaterColumnController',\n 'WaterTempController',\n 'WaterColumnReader',\n 'WaterTempReader',\n 'Main',\n]\n\n\nif __name__ == \"__main__\":\n main = Main()\n main.run()\n" }, { "alpha_fraction": 0.5776923298835754, "alphanum_fraction": 0.5869230628013611, "avg_line_length": 23.074073791503906, "blob_id": "042e444f7f6a2c100a0c050363f3540327b6fc6a", "content_id": "491ab88d3be6329a02630b4a86f824ad3af0feef", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1300, "license_type": "permissive", "max_line_length": 64, "num_lines": 54, "path": "/boiler/reader.py", "repo_name": 
"rudineirk/faculdade_boiler", "src_encoding": "UTF-8", "text": "from __future__ import print_function\n\nfrom datetime import datetime\nfrom time import sleep\n\n__all__ = [\n 'WaterColumnReader',\n 'WaterTempReader',\n]\n\n\nclass BaseReader(object):\n\n def __init__(self, conn, queue, loop_time=1000):\n self._conn = conn\n self._queue = queue\n self._loop_time = float(loop_time) / 1000.0\n self._last_loop = None\n\n def run(self):\n while True:\n value = self.read_value()\n self._queue.put(value)\n self.sleep()\n\n def read_value(self):\n raise NotImplementedError\n\n def sleep(self):\n time_now = datetime.now()\n if self._last_loop is None:\n self._last_loop = time_now\n time_diff = time_now - self._last_loop\n sleep_time = self._loop_time - time_diff.total_seconds()\n if sleep_time <= 0:\n sleep_time = self._loop_time\n self._last_loop = time_now\n sleep(sleep_time)\n\n\nclass WaterColumnReader(BaseReader):\n\n def read_value(self):\n value = self._conn.water_column\n print('Reader Water: {0}'.format(value))\n return value\n\n\nclass WaterTempReader(BaseReader):\n\n def read_value(self):\n value = self._conn.water_inside_temp\n print('Reader Temp: {0}'.format(value))\n return value\n" }, { "alpha_fraction": 0.7657657861709595, "alphanum_fraction": 0.7657657861709595, "avg_line_length": 21.200000762939453, "blob_id": "2f139d1d321977f6e47e93286271b7478f913214", "content_id": "6dcc60cb27bbb6eb4e2d39bec5e4cf47cbdadc1c", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 111, "license_type": "permissive", "max_line_length": 57, "num_lines": 5, "path": "/README.md", "repo_name": "rudineirk/faculdade_boiler", "src_encoding": "UTF-8", "text": "# Boiler\n\nA water tank boiler controller, using PID\n\nThis work is just a demo, its not made for production use\n" }, { "alpha_fraction": 0.7424242496490479, "alphanum_fraction": 0.7575757503509521, "avg_line_length": 21, "blob_id": 
"6ab6e34b04358fb09e412909ee2d759f53d2a29b", "content_id": "896c5107a25c52b26a199a8a95efd15a4a9c60a4", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 66, "license_type": "permissive", "max_line_length": 52, "num_lines": 3, "path": "/ext/boiler.sh", "repo_name": "rudineirk/faculdade_boiler", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\njava -Djava.net.preferIPv4Stack=true -jar boiler.jar\n" }, { "alpha_fraction": 0.5517970323562622, "alphanum_fraction": 0.5687103867530823, "avg_line_length": 21.171875, "blob_id": "8f41284db2b329d4bdbe69df9bb1e1d3ccd37d24", "content_id": "1f754bd4cbd150bcb7029a2d281cff2f448adf3b", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1419, "license_type": "permissive", "max_line_length": 52, "num_lines": 64, "path": "/boiler/controller.py", "repo_name": "rudineirk/faculdade_boiler", "src_encoding": "UTF-8", "text": "from __future__ import print_function\n\n__all__ = [\n 'WaterColumnController',\n 'WaterTempController',\n]\n\nKC_LEVEL = 1200.0\nHREF = 1.5\n\nWATER_CAPACITANCE = 50000000.0\nKC_TEMP = 0.15 * WATER_CAPACITANCE\nTREF = 50.0\n\n\nclass BaseController(object):\n\n def __init__(self, conn, queue, kc, ref):\n self.conn = conn\n self._queue = queue\n self.kc = kc\n self.ref = ref\n\n def get_sensor_value(self):\n return self._queue.get()\n\n def run(self):\n while True:\n value = self.get_sensor_value()\n value = self.kc * (self.ref - value)\n self.set_actuator_value(value)\n\n def set_actuator_value(self, value):\n raise NotImplementedError\n\n\nclass WaterColumnController(BaseController):\n\n def __init__(self, conn, queue):\n super(WaterColumnController, self).__init__(\n conn,\n queue,\n KC_LEVEL,\n HREF,\n )\n\n def set_actuator_value(self, value):\n self.conn.water_flux = value\n print(\"Controller Water: {0}\".format(value))\n\n\nclass WaterTempController(BaseController):\n\n def __init__(self, 
conn, queue):\n super(WaterTempController, self).__init__(\n conn,\n queue,\n KC_TEMP,\n TREF,\n )\n\n def set_actuator_value(self, value):\n self.conn.heat_flux = value\n print(\"Controller Temp: {0}\".format(value))\n" }, { "alpha_fraction": 0.6734693646430969, "alphanum_fraction": 0.6734693646430969, "avg_line_length": 11.25, "blob_id": "33bd681ebfc2cb173766bedc5e1966fb0595259d", "content_id": "6b4bda2a30d441f74bdd582c9082be809f3c071a", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 49, "license_type": "permissive", "max_line_length": 22, "num_lines": 4, "path": "/boiler/__main__.py", "repo_name": "rudineirk/faculdade_boiler", "src_encoding": "UTF-8", "text": "from .core import Main\n\nmain = Main()\nmain.run()\n" } ]
8
MehazzE/Digit-regcognization---TEST
https://github.com/MehazzE/Digit-regcognization---TEST
4577e10cdfb6e5f6258783bdcd50640bd379b545
631477d509c79af0117cc719738c1b362a868b5e
07bac9303958ee997f6bb7c63a92202a3ebb95cc
refs/heads/main
2023-04-07T11:09:42.102370
2021-04-13T05:52:00
2021-04-13T05:52:00
357,438,864
3
0
null
null
null
null
null
[ { "alpha_fraction": 0.6321965456008911, "alphanum_fraction": 0.6428309679031372, "avg_line_length": 27.989360809326172, "blob_id": "d2579efc2b21bb8b23eb578a4959471ac6648166", "content_id": "3108d6c2436953d981b34b0badc8d325c3c943c6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2727, "license_type": "no_license", "max_line_length": 104, "num_lines": 94, "path": "/network2.py", "repo_name": "MehazzE/Digit-regcognization---TEST", "src_encoding": "UTF-8", "text": "import numpy as np\nimport cv2\n\ndef sigmoid_prime(z):\n\treturn sigmoid(z)*(1-sigmoid(z))\n\t\ndef sigmoid(z):\n\treturn 1.0/(1.0 + np.exp(-z))\n\nclass CrossEntropyCost(object):\n\t@staticmethod\n\tdef fn(a,y):\n\t\treturn np.sum(np.nan_to_num(-y*np.log(a)-(1-y)*np.log(1-a)))\n\t\t\n\t@staticmethod\n\tdef delta(z,a,y):\n\t\treturn (a-y)\n\nclass QuadraticCost(object):\n\t@staticmethod\n\tdef fn(a,y):\n\t\treturn 0.5*np.linalg.norm(a-y)**2\n\t\t\n\t@staticmethod\n\tdef delta(z,a,y):\n\t\treturn (a-y) * sigmoid_prime(z)\n \n\nclass Network(object):\n\tdef __init__(self, sizes, cost=CrossEntropyCost):\n\t\tself.num_layers = len(sizes)\n\t\tself.sizes = sizes\n\t\tself.large_weight_initializer()\n\t\tself.cost = cost\n\t\t\n\tdef default_weight_initializer(self):\n\t\tself.biases = [np.random.randn(y, 1) for y in self.sizes[1:]]\n\t\tself.weights = [np.random.randn(y,x)/np.sqrt(x) for x,y in zip(self.sizes[:-1], self.sizes[1:])]\n\t\t\n\tdef large_weight_initializer(self):\n\t\tself.biases = [np.random.randn(y, 1) for y in self.sizes[1:]]\n\t\tself.weights = [np.random.randn(y,x) for x,y in zip(self.sizes[:-1], self.sizes[1:])]\n\t\n\tdef load_weights(self, biases, weights):\n\t\tself.biases = biases\n\t\tself.weights = weights\n\n\tdef feedfoward(self, a):\n\t\tfor b,w in zip(self.biases, self.weights):\n\t\t\ta = sigmoid(np.dot(w,a) + b)\n\t\treturn a\n\t\t\n\tdef backprop(self, x, y):\n\t\tnabla_b = [np.zeros(b.shape) for b in 
self.biases]\n\t\tnabla_w = [np.zeros(w.shape) for w in self.weights]\n\t\t\n\t\tactivation = x\n\t\tactivations = [x]\n\t\tzs = []\n\t\t\n\t\tfor b, w in zip(self.biases, self.weights):\n\t\t\tz = np.dot(w,activation) + b\n\t\t\tzs.append(z)\n\t\t\tactivation = sigmoid(z)\n\t\t\tactivations.append(activation)\n\t\t\n\t\tdelta = (self.cost).delta(z[-1], activations[-1], y)\n\t\tnabla_b[-1] = delta\n\t\tnabla_w[-1] = np.dot(delta, activations[-2].transpose())\n\t\t\n\t\tfor l in range(2, self.num_layers):\n\t\t\tz = zs[-l]\n\t\t\tsp = sigmoid_prime(z)\n\t\t\tdelta = np.dot(self.weights[-l+1].transpose(), delta) * sp\n\t\t\tnabla_b[-l] = delta\n\t\t\tnabla_w[-l] = np.dot(delta, activations[-l-1].transpose())\n\t\treturn (nabla_b, nabla_w)\n\n\tdef calculate_loss(self, training_data):\n\t\tloss = 0\n\t\tfor x,y in training_data:\n\t\t\tloss += self.cost.fn(self.feedfoward(x),y)\n\t\treturn loss\n\n\tdef update_mini_batch(self, mini_batch, eta, lmbda, n):\n\t\tnabla_b = [np.zeros(b.shape) for b in self.biases]\n\t\tnabla_w = [np.zeros(w.shape) for w in self.weights]\n\t\t\n\t\tfor x, y in mini_batch:\n\t\t\tdelta_nabla_b, delta_nabla_w = self.backprop(x,y)\n\t\t\tnabla_b = [nb + dnb for nb, dnb in zip(nabla_b, delta_nabla_b)]\n\t\t\tnabla_w = [nw + dnw for nw, dnw in zip(nabla_w, delta_nabla_w)]\n\t\tself.biases = [b - (eta/len(mini_batch))*nb for b, nb in zip(self.biases, nabla_b)]\n\t\tself.weights = [(1-eta*(lmbda/n))*w - (eta/len(mini_batch))*nw for w,nw in zip(self.weights, nabla_w)]\n\t\t" }, { "alpha_fraction": 0.6085106134414673, "alphanum_fraction": 0.653900682926178, "avg_line_length": 29.69565200805664, "blob_id": "5eb7294a0c75d58f725837c671f946f626365435", "content_id": "d717e136541dcccbc6e35cc7a110ff66def888f8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 705, "license_type": "no_license", "max_line_length": 105, "num_lines": 23, "path": "/train.py", "repo_name": 
"MehazzE/Digit-regcognization---TEST", "src_encoding": "UTF-8", "text": "import numpy as np\nimport cv2\nimport sys\nimport random\nimport network2 as nw2\nimport data_relative\n\nn1 = nw2.Network([784,200,100,10])\n\ndef train(nw, loop):\n td = data_relative.generate_training_data()\n biases, weights = data_relative.load_biases_weights()\n nw.load_weights(biases, weights)\n print(\"Training data:\" + str(len(td)))\n for i in range(loop):\n batch_train = random.sample(td, 10)\n batch_test = td\n nw.update_mini_batch(batch_train, 0.01, 0.01, 20)\n print(str(i+1)+\"/\"+str(loop) + \"; loss: \"+str(round(nw.calculate_loss(batch_test), 2)), end=\"\\r\")\n print(\"LOSS: \"+str(nw.calculate_loss(td)))\n data_relative.save_biases_weights(nw)\n\ntrain(n1, 500)" }, { "alpha_fraction": 0.5317028760910034, "alphanum_fraction": 0.5525362491607666, "avg_line_length": 28.078947067260742, "blob_id": "c91354d4dbbe0012baed550525c72b175b87a256", "content_id": "f9d672da947430116356871c7a5fc5760b6e41b8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1104, "license_type": "no_license", "max_line_length": 77, "num_lines": 38, "path": "/data_relative.py", "repo_name": "MehazzE/Digit-regcognization---TEST", "src_encoding": "UTF-8", "text": "import cv2\nimport numpy as np\n\ndef toArray(img, digit):\n y = np.zeros((10,1), dtype=\"int\")\n y[digit][0] += 1\n x = []\n for r in range(0, len(img)):\n for c in img[r]:\n if c == 255:\n x.append([0])\n else:\n x.append([1])\n return (np.array(x), np.array(y))\n\ndef generate_training_data():\n training_data = []\n for d in range(0,10):\n i = 0\n while True:\n img = cv2.imread(\"./data/\"+str(d)+\"/\"+str(i)+\".png\", 0)\n if img is None:\n break\n training_data.append(toArray(img, d))\n i += 1\n return training_data\n\ndef get_inspect_data():\n return toArray(cv2.imread(\"./data/test/inspect.png\", 0),0)[0]\n\ndef save_biases_weights(nw):\n np.save(\"./data/biases_weights/biases.npy\", 
nw.biases)\n np.save(\"./data/biases_weights/weights.npy\", nw.weights)\n \ndef load_biases_weights():\n biases = np.load(\"./data/biases_weights/biases.npy\", allow_pickle=True)\n weights = np.load(\"./data/biases_weights/weights.npy\", allow_pickle=True)\n return biases, weights" }, { "alpha_fraction": 0.7028753757476807, "alphanum_fraction": 0.7603833675384521, "avg_line_length": 17.47058868408203, "blob_id": "306859110427d1df0767a609b74070918b575f16", "content_id": "95ee92363308076add640823b470b7f401c72440", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 313, "license_type": "no_license", "max_line_length": 53, "num_lines": 17, "path": "/read.py", "repo_name": "MehazzE/Digit-regcognization---TEST", "src_encoding": "UTF-8", "text": "import numpy as np\nimport cv2\nimport sys\nimport random\nimport network2 as nw2\nimport paint\nimport data_relative\n\nn1 = nw2.Network([784,200,100,10])\n\nbiases, weights = data_relative.load_biases_weights()\nn1.load_weights(biases, weights)\n\nApp = paint.App\nwindow = paint.Window(n1)\nwindow.show()\nsys.exit(App.exec())" }, { "alpha_fraction": 0.6011409759521484, "alphanum_fraction": 0.6120750904083252, "avg_line_length": 34.06666564941406, "blob_id": "d74e88cfae095c70641b69248de631c4f7d71a86", "content_id": "8d5e3814c487a125bad0ed73bb1f168352242f3f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4207, "license_type": "no_license", "max_line_length": 150, "num_lines": 120, "path": "/paint.py", "repo_name": "MehazzE/Digit-regcognization---TEST", "src_encoding": "UTF-8", "text": "from PyQt5.QtWidgets import *\nfrom PyQt5.QtGui import *\nfrom PyQt5.QtCore import *\nimport numpy as np\nimport cv2\nimport sys\nimport os\nimport data_relative\n\n# window class\nclass Window(QMainWindow):\n def __init__(self, nw):\n super().__init__()\n self.nw = nw\n self.setWindowTitle(\"Paint\")\n\n self.setGeometry(100, 100, 
280, 280) \n\n self.image = QImage(self.size(), QImage.Format_RGB32) \n self.image.fill(Qt.white)\n\n self.drawing = False\n self.brushSize = 10\n self.brushColor = Qt.black\n\n self.lastPoint = QPoint()\n mainMenu = self.menuBar()\n \n # creating menu\n fileMenu = mainMenu.addMenu(\"File\")\n saveMenu = mainMenu.addMenu(\"Save\")\n\n saveZero = QAction(\"0\", self)\n saveMenu.addAction(saveZero)\n saveZero.triggered.connect(self.fastSave)\n saveOne = QAction(\"1\", self)\n saveMenu.addAction(saveOne)\n saveOne.triggered.connect(self.fastSave)\n saveTwo = QAction(\"2\", self)\n saveMenu.addAction(saveTwo)\n saveTwo.triggered.connect(self.fastSave)\n saveThree = QAction(\"3\", self)\n saveMenu.addAction(saveThree)\n saveThree.triggered.connect(self.fastSave)\n saveFour = QAction(\"4\", self)\n saveMenu.addAction(saveFour)\n saveFour.triggered.connect(self.fastSave)\n saveFive = QAction(\"5\", self)\n saveMenu.addAction(saveFive)\n saveFive.triggered.connect(self.fastSave)\n saveSix = QAction(\"6\", self)\n saveMenu.addAction(saveSix)\n saveSix.triggered.connect(self.fastSave)\n saveSeven = QAction(\"7\", self)\n saveMenu.addAction(saveSeven)\n saveSeven.triggered.connect(self.fastSave)\n saveEight = QAction(\"8\", self)\n saveMenu.addAction(saveEight)\n saveEight.triggered.connect(self.fastSave)\n saveNine = QAction(\"9\", self)\n saveMenu.addAction(saveNine)\n saveNine.triggered.connect(self.fastSave)\n\n #Clear\n clearAction = QAction(\"Clear\", self)\n clearAction.setShortcut(\"Ctrl + C\")\n fileMenu.addAction(clearAction)\n clearAction.triggered.connect(self.clear)\n\n #Inspect\n inspectAction = QAction(\"Inspect\", self)\n fileMenu.addAction(inspectAction)\n inspectAction.triggered.connect(self.inspect)\n\n def mousePressEvent(self, event): \n if event.button() == Qt.LeftButton: \n self.drawing = True\n self.lastPoint = event.pos() \n\n def mouseMoveEvent(self, event): \n if (event.buttons() & Qt.LeftButton) & self.drawing: \n painter = QPainter(self.image) \n 
painter.setPen(QPen(self.brushColor, self.brushSize, \n Qt.SolidLine, Qt.RoundCap, Qt.RoundJoin)) \n painter.drawLine(self.lastPoint, event.pos()) \n self.lastPoint = event.pos() \n self.update() \n\n def mouseReleaseEvent(self, event): \n if event.button() == Qt.LeftButton:\n self.drawing = False\n\n def paintEvent(self, event): \n canvasPainter = QPainter(self) \n canvasPainter.drawImage(self.rect(), self.image, self.image.rect()) \n\n def save(self): \n filePath, _ = QFileDialog.getSaveFileName(self, \"Save Image\", \"\", \n \"PNG(*.png);;JPEG(*.jpg *.jpeg);;All Files(*.*) \") \n print(filePath)\n if filePath == \"\": \n return\n self.image.save(filePath)\n \n def fastSave(self):\n dirname = \"./data/\"+self.sender().text()\n number_files = len(os.listdir(dirname))\n self.image.save(dirname+\"/\"+str(number_files)+\".png\")\n cv2.imwrite(dirname+\"/\"+str(number_files)+\".png\" ,cv2.resize(cv2.imread(dirname+\"/\"+str(number_files)+\".png\", cv2.IMREAD_UNCHANGED), (28,28)))\n\n def inspect(self):\n self.image.save(\"./data/test/inspect.png\")\n cv2.imwrite(\"./data/test/inspect.png\" ,cv2.resize(cv2.imread(\"./data/test/inspect.png\", cv2.IMREAD_UNCHANGED), (28,28)))\n print(\"Result: \", np.argmax(self.nw.feedfoward(data_relative.get_inspect_data())) )\n\n def clear(self):\n self.image.fill(Qt.white)\n self.update()\n \nApp = QApplication(sys.argv)" } ]
5
paulmelnikow/wren
https://github.com/paulmelnikow/wren
4844cb93861db5313aacc2bcfee3a5e3aa1fc1c6
e91595c8e999871466f38516c92b5735ecdc0a07
e23f25d48db61f9659dbb264a569e024b8b8753b
refs/heads/master
2019-07-14T02:47:34.432955
2014-10-05T21:17:27
2014-10-05T21:17:41
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5070228576660156, "alphanum_fraction": 0.5119801759719849, "avg_line_length": 24.570423126220703, "blob_id": "87429d7064ebe7ca69e33733ffd436cc87a42a5b", "content_id": "6aae3a3adfd0d8fd95273510ce5c507e5bc03267", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3631, "license_type": "permissive", "max_line_length": 59, "num_lines": 142, "path": "/wren/collection.py", "repo_name": "paulmelnikow/wren", "src_encoding": "UTF-8", "text": "import json, requests\nfrom urllib import splitquery\nfrom urlparse import urlparse as parse\n\n\nclass Collection(object):\n model = None\n\n def __init__(self, client):\n self.client = client\n\n def handle_error(self, response):\n import logging\n logger = logging.getLogger('dj_capysule')\n logger.error(response.text)\n response.raise_for_status()\n\n def decode(self, data, many=False):\n if many:\n result = []\n for d in data:\n obj = self.model(**self.model.decode(d))\n obj._persisted = True\n result.append(obj)\n return result\n else:\n return self.model(**self.model.decode(data))\n\n def encode(self, obj):\n return obj.encode()\n\n def all(self):\n response = self.client.fetch(self.url)\n\n if response.status_code >= 400:\n self.handle_error(response)\n\n data = response.json()\n\n return self.decode(data, many=True)\n\n def query(self, **kwargs):\n request = requests.Request('GET', self.url,\n params=kwargs,\n headers={'Content-Type': 'application/json'}\n )\n response = self.client.fetch(request)\n\n if response.status_code >= 400:\n self.handle_error(response)\n\n data = response.json()\n\n return self.decode(data, many=True)\n\n def get(self, id_):\n response = self.client.fetch(self._url(id_))\n\n if response.status_code >= 400:\n self.handle_error(response)\n\n data = response.json()\n\n return self.decode(data)\n\n def _url(self, id_):\n url = self.url\n\n if callable(url):\n return url(id_)\n\n url, query = splitquery(url)\n\n 
url = '{0}/{1}'.format(url, id_)\n\n if query is not None:\n url = '{0}?{1}'.format(url, query)\n\n return url\n\n def _parse_url(self, url):\n parse_url = getattr(self.model, '_parse_url', None)\n\n if callable(parse_url):\n return parse_url(url)\n\n parts = url.split('/')\n if len(parts):\n return parts[-1]\n else:\n return None\n\n def add(self, obj):\n if getattr(obj, '_persisted', False) is True:\n url = self._url(self._id(obj))\n method = 'PUT'\n else:\n url = getattr(obj, '_url', self.url)\n if callable(url):\n url = url()\n method = 'POST'\n\n data = self.encode(obj)\n\n request = requests.Request(method, url,\n data=json.dumps(data),\n headers={'Content-Type': 'application/json'}\n )\n\n response = self.client.fetch(request)\n\n if response.status_code >= 400:\n self.handle_error(response)\n\n if len(response.content) > 0:\n data = response.json()\n\n try:\n obj.decode(data)\n obj._persisted = True\n except Exception as error:\n raise\n\n return obj\n else:\n try:\n url = response.headers.get('Location')\n except KeyError:\n return None\n id_ = self._parse_url(url)\n if id_ is not None:\n for name, field in obj._fields.items():\n if field.options.get('primary', False):\n setattr(obj, name, id_)\n return obj\n return None\n\n\n def _id(self, obj):\n for name, field in obj._fields.items():\n if field.options.get('primary', False):\n return getattr(obj, name)\n" }, { "alpha_fraction": 0.5654993653297424, "alphanum_fraction": 0.5654993653297424, "avg_line_length": 33.014705657958984, "blob_id": "c116180822e697a6bb04b426391943dc85a453dc", "content_id": "238e436a88da2a23da587ad0e05bad741f3e3801", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2313, "license_type": "permissive", "max_line_length": 91, "num_lines": 68, "path": "/wren/client.py", "repo_name": "paulmelnikow/wren", "src_encoding": "UTF-8", "text": "import requests\nimport urlparse\n\nclass Client(object):\n\n def __init__(self, 
base_uri=None):\n self.base_uri = base_uri\n self._session = None\n self._auth = None\n self._headers = {}\n\n def set_basic_auth(self, user, password):\n self.session.auth = (user, password)\n self._auth = (user, password)\n\n def set_headers(self, headers):\n self.session.headers.update(headers)\n self._headers.update(headers)\n\n @property\n def session(self):\n if self._session is None:\n self._session = requests.Session()\n return self._session\n\n def log_request(self, request):\n import logging\n logger = logging.getLogger('dj_capysule')\n logger.debug('-------------------')\n if isinstance(request, basestring):\n logger.debug('GET %s' % request)\n else:\n logger.debug('%s %s' % (request.method, request.url))\n logger.debug('Headers')\n logger.debug(request.headers)\n logger.debug('Params')\n logger.debug(request.params)\n logger.debug('Data')\n logger.debug(request.data)\n logger.debug('Auth')\n logger.debug(request.auth)\n logger.debug('-------------------')\n\n def log_response(self, response):\n import logging\n logger = logging.getLogger('dj_capysule')\n logger.debug('===================')\n logger.debug(response.text)\n logger.debug('===================')\n\n def fetch(self, request):\n if isinstance(request, basestring):\n joined = urlparse.urljoin(self.base_uri, request)\n self.log_request(joined)\n response = self.session.get(joined)\n elif isinstance(request, requests.Request):\n request.url = urlparse.urljoin(self.base_uri, request.url)\n request.auth = self.session.auth\n headers = self._headers\n headers.update(request.headers)\n request.headers = headers\n self.log_request(request)\n prepared = request.prepare()\n response = self.session.send(prepared)\n else:\n raise TypeError('Request should be an instance of request.Request or a string')\n self.log_response(response)\n return response\n" }, { "alpha_fraction": 0.732467532157898, "alphanum_fraction": 0.7402597665786743, "avg_line_length": 26.5, "blob_id": 
"51304f8af85a5438e681b2a5b1966d4c9167c111", "content_id": "10ce31848d4bc764a3713fa133d8976c1dd99748", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 385, "license_type": "permissive", "max_line_length": 62, "num_lines": 14, "path": "/README.md", "repo_name": "paulmelnikow/wren", "src_encoding": "UTF-8", "text": "wren\n====\n\nSynchronous RESTful API consumer based on [Requests][].\n\nWritten in Python. Designed for use with the data-modeling\nand validation framework [Booby][].\n\nBased on [Finch][], which does the same thing *asynchonously*.\n\n\n[Requests]: http://docs.python-requests.org/en/latest/\n[Finch]: https://github.com/jaimegildesagredo/finch\n[Booby]: https://booby.readthedocs.org/en/0.5.0/\n" } ]
3
elias-dzobo/TweetBot
https://github.com/elias-dzobo/TweetBot
8f5f77ac8c5149eece4b9bb233b0abedf3d3f473
96fb0d0627a3c50b38136954ca4e39526e45b5a7
1742b9ba2dcfc7d6e1412013ba81227e293862fb
refs/heads/main
2023-09-01T19:38:23.612947
2021-09-13T09:28:33
2021-09-13T09:28:33
389,248,567
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5616966485977173, "alphanum_fraction": 0.5659811496734619, "avg_line_length": 29.1200008392334, "blob_id": "e2c72a2e618c6d7a26a384cd1dd1c9d4c5d26f94", "content_id": "d3ad714ec24c3269a27c8653f565d5acb39f0139", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2336, "license_type": "no_license", "max_line_length": 103, "num_lines": 75, "path": "/preprocess.py", "repo_name": "elias-dzobo/TweetBot", "src_encoding": "UTF-8", "text": "import json\r\nfrom nltk.tokenize import word_tokenize\r\nimport re\r\nimport operator\r\nfrom collections import Counter\r\nfrom nltk.corpus import stopwords\r\nimport string\r\nfrom nltk import bigrams \r\nimport vincent\r\n\r\n# tokenize the tweet\r\nemoticons_str = r\"\"\"\r\n (?:\r\n [:=;] # Eyes\r\n [oO\\-]? # Nose (optional)\r\n [D\\)\\]\\(\\]/\\\\OpP] # Mouth\r\n )\"\"\"\r\n \r\nregex_str = [\r\n emoticons_str,\r\n r'<[^>]+>', # HTML tags\r\n r'(?:@[\\w_]+)', # @-mentions\r\n r\"(?:\\#+[\\w_]+[\\w\\'_\\-]*[\\w_]+)\", # hash-tags\r\n r'http[s]?://(?:[a-z]|[0-9]|[$-_@.&amp;+]|[!*\\(\\),]|(?:%[0-9a-f][0-9a-f]))+', # URLs\r\n \r\n r'(?:(?:\\d+,?)+(?:\\.?\\d+)?)', # numbers\r\n r\"(?:[a-z][a-z'\\-_]+[a-z])\", # words with - and '\r\n r'(?:[\\w_]+)', # other words\r\n r'(?:\\S)' # anything else\r\n]\r\n \r\ntokens_re = re.compile(r'('+'|'.join(regex_str)+')', re.VERBOSE | re.IGNORECASE)\r\nemoticon_re = re.compile(r'^'+emoticons_str+'$', re.VERBOSE | re.IGNORECASE)\r\n \r\ndef tokenize(s):\r\n return tokens_re.findall(s)\r\n \r\ndef preprocess(s, lowercase=False):\r\n tokens = tokenize(s)\r\n if lowercase:\r\n tokens = [token if emoticon_re.search(token) else token.lower() for token in tokens]\r\n return tokens\r\n\r\npreprocessed_tweets = []\r\n\r\nwith open('ada.json', 'r') as f:\r\n for line in f:\r\n tweet = json.loads(line)\r\n tokens = preprocess(tweet['text'])\r\n preprocessed_tweets.append(tokens)\r\n\r\n\r\n#removing stopwords\r\npunctuation = 
list(string.punctuation)\r\nstop = stopwords.words('english') + punctuation + ['rt', 'via', 'RT', '…']\r\n\r\n# counting term frequencies \r\nfname = 'ada.json'\r\nwith open(fname, 'r') as f:\r\n count_all = Counter()\r\n for line in f:\r\n tweet = json.loads(line)\r\n # Create a list with all the terms\r\n terms_stop = [term for term in preprocess(tweet['text']) if term not in stop and len(term) > 2]\r\n terms_only = [term for term in preprocess(tweet['text']) \r\n if term not in stop and\r\n not term.startswith(('#', '@'))]\r\n # Update the counter\r\n term_bigrams = bigrams(terms_stop)\r\n count_all.update(term_bigrams)\r\n # Print the first 5 most frequent words\r\n print(count_all.most_common(5))\r\n\r\npreprocessed_tweets = [term for term in preprocessed_tweets if term not in stop and len(term) > 2]\r\nprint(preprocessed_tweets)\r\n" }, { "alpha_fraction": 0.5815602540969849, "alphanum_fraction": 0.588652491569519, "avg_line_length": 25.586956024169922, "blob_id": "610aeb743197a5612992bba81f6d78a2be55e7d1", "content_id": "6de23b256e1bb6913e49f226b0e047a6da0c56bd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1269, "license_type": "no_license", "max_line_length": 72, "num_lines": 46, "path": "/main.py", "repo_name": "elias-dzobo/TweetBot", "src_encoding": "UTF-8", "text": "import tweepy\r\nfrom tweepy import OAuthHandler\r\nfrom tweepy import Stream\r\nfrom tweepy.streaming import StreamListener\r\nimport time\r\n\r\nconsumer_key = 'IjnmXY2vpjAJJek8dWM51YiHF'\r\nconsumer_secret = 'NP5985N98dy7AWqI0elUAnpHgRzKj9sGVki8qjnFnIqrlCUCx8'\r\naccess_token = '948717579224473600-bRNh6cINiDp4sKIRX8Dip4LCJJF42di'\r\naccess_secret = 'P7JY1PIvEjYVB9fXSSYyP0HCkiWU37B7k91Ciz0f8nXZq'\r\n\r\nstart_time = time.time()\r\nkeyword_list = ['$btc', '$ada', '$matic', '$eth']\r\n\r\n\r\n#create stream class\r\nclass MyListener(StreamListener):\r\n\r\n def __init__(self, start_time, time_limit = 60):\r\n self.time = 
start_time\r\n self.limit = time_limit\r\n\r\n def on_data(self, data):\r\n try:\r\n with open('ada.json', 'a') as f:\r\n while (time.time() - self.time) < self.limit:\r\n f.write(data)\r\n return True\r\n except BaseException as e:\r\n print(\"Error on_data: %s\" % str(e))\r\n time.sleep(5)\r\n return True\r\n\r\n def on_error(self, status):\r\n print(status)\r\n return True\r\n\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n auth = OAuthHandler(consumer_key, consumer_secret)\r\n auth.set_access_token(access_token, access_secret)\r\n api = tweepy.API(auth)\r\n twitter_stream = Stream(auth, MyListener(start_time, time_limit=60))\r\n twitter_stream.filter(track = keyword_list, languages=['en'])\r\n" } ]
2
SmileAK-47/webAutonmation
https://github.com/SmileAK-47/webAutonmation
843891dcfb7e5c718c5aab1253660686806d210e
a0709ce9cdbd059f3e82d0fc887c4854d8c5fa61
7445b66ac80b9e3ce8f2df714dfb2edb222ad738
refs/heads/master
2020-06-05T18:53:46.186812
2019-06-19T02:41:38
2019-06-19T02:41:38
192,517,434
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7643312215805054, "alphanum_fraction": 0.7707006335258484, "avg_line_length": 30.600000381469727, "blob_id": "9f14708f266dd3928f699e2bdec170b7bd12efd0", "content_id": "5344b1d4b55458ce547a748ed85230ba113567e3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 157, "license_type": "no_license", "max_line_length": 77, "num_lines": 5, "path": "/webAutomation/RunTest.py", "repo_name": "SmileAK-47/webAutonmation", "src_encoding": "UTF-8", "text": "#encoding = utf - 8\nfrom testScripts.TestSendMailWithAttachment import TestSendMailWithAttachment\n\nif __name__ ==\"__main__\":\n TestSendMailWithAttachment()" } ]
1
t-ax/Khome_PredictiveML_Housing
https://github.com/t-ax/Khome_PredictiveML_Housing
abd00351a072c18279074143c6fa56ddda4d1892
6f02e171ca2b958a657a1162a752f36b26f32a22
50bb94f121f0238b7190cb94d3894130f5c2067d
refs/heads/master
2023-03-18T19:50:22.777910
2021-03-18T21:32:27
2021-03-18T21:32:27
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7152145504951477, "alphanum_fraction": 0.7275682687759399, "avg_line_length": 35.046875, "blob_id": "f8fa271691ee5ac3127009051ae0558a029845e1", "content_id": "ee946b2d4e8baf3fc97194b8c91448ef2fba1d65", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4628, "license_type": "no_license", "max_line_length": 174, "num_lines": 128, "path": "/main.py", "repo_name": "t-ax/Khome_PredictiveML_Housing", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3.8\n#https://medium.com/codex/house-price-prediction-with-machine-learning-in-python-cf9df744f7ff\n\nfrom function import *\n\n# IMPORTING PACKAGES\nimport pandas as pd # data processing\nimport numpy as np # working with arrays\nimport matplotlib.pyplot as plt # visualization\nimport seaborn as sb # visualization\nfrom termcolor import colored as cl # text customization\nfrom sklearn.model_selection import train_test_split # data split\nfrom sklearn.linear_model import LinearRegression # OLS algorithm\nfrom sklearn.linear_model import Ridge # Ridge algorithm\nfrom sklearn.linear_model import Lasso # Lasso algorithm\nfrom sklearn.linear_model import BayesianRidge # Bayesian algorithm\nfrom sklearn.linear_model import ElasticNet # ElasticNet algorithm\nfrom sklearn.metrics import explained_variance_score as evs # evaluation metric\nfrom sklearn.metrics import r2_score as r2 # evaluation metric\nimport datetime as dt\nimport glob\n\n#Test of the decision tree regressor\nfrom sklearn.tree import DecisionTreeRegressor\nfrom sklearn.tree import DecisionTreeClassifier \nfrom sklearn import tree\n\n\n\nsb.set_style('whitegrid') # plot style\nplt.rcParams['figure.figsize'] = (20, 10) # plot size\n\n#Getting Data from all files\nall_files = glob.glob('../Datasets/*/*.csv')\nli = []\nfor filename in all_files:\n df = pd.read_csv(filename, index_col=None, header=0)\n li.append(df)\nframe = pd.concat(li, axis=0, 
ignore_index=True)\n\nframe=frame[(frame.type_local == 'Appartement')]\nframe=frame[['id_mutation', 'date_mutation', 'valeur_fonciere', 'code_postal' , 'surface_reelle_bati','nombre_pieces_principales', 'latitude', 'longitude']]\nframe=frame.dropna(axis=0)\n#frame=frame.head(100)\nframe.to_csv('frame.csv') # On enregistre la data\n\n#On met en place la data de test\ntestdata = frame.iloc[:10, :] # On récupère les 10 premières lignes pour tester le modèle prédictif\nprint(testdata.head(10)) # On affiche les réponses\ntestdata.drop('valeur_fonciere', inplace=True, axis=1) # on enlève les valeurs foncières pour pouvoir tester après\ntestdata[\"date_mutation\"] = pd.to_datetime(testdata[\"date_mutation\"])#On converti en date\ntestdata[\"date_mutation\"] = (testdata[\"date_mutation\"]-testdata[\"date_mutation\"].min())/ np.timedelta64(1,'D') #On calcule un nombre de jour depuis le min pour avoir un float\n#print(testdata[\"adresse_code_voie\"]) 'nom_commune',\ntestdata.set_index('id_mutation', inplace = True)\n\n#On met en place la data pour l'entrainement\nframe = frame.iloc[11:, :] # on laisse le reste pour l'entrainement\n\nY = frame.valeur_fonciere\nX = frame\nX.drop('valeur_fonciere', inplace=True, axis=1)\nX[\"date_mutation\"] = pd.to_datetime(X[\"date_mutation\"])#On converti en date\nX[\"date_mutation\"] = (X[\"date_mutation\"]-X[\"date_mutation\"].min())/ np.timedelta64(1,'D') #On calcule un nombre de jour depuis le min pour avoir un float\n#print(X[\"adresse_code_voie\"]) 'nom_commune',\nX.set_index('id_mutation', inplace = True)\n#X['latitude'].replace('', np.nan, inplace=True)\n\nX.to_csv('X.csv')\n\n#print(f\"{X.head(10)}\") #display a few rows\n\n#X.dropna(inplace = True)\n#.dropna(axis=0)\nX.fillna(X.mean())\nprint(\"test\")\n#print(cl(X.isnull().sum(), attrs = ['bold']))\n\n#print(f\"number of lines and columns : {X.shape}\") #Number of lines and columns\nprint(f\"number of lines : {len(X.index)}\") #Number of lines (efficient 
way)\n\n#pd.set_option('float_format', '{:f}'.format) #Option to display X.describe in float format\n#print(f\"{X.describe()}\") #see count,mean, std, min etc...\n\n#Heatmap to see the correlation between variables\n #X = X[X.nombre_pieces_principales.isin([2])]\n #sb.heatmap(X.corr(), annot = True, cmap = 'magma')\n #plt.savefig('heatmap.png')\n #plt.show()\n\n#scatter_df(X, 'valeur_fonciere')#using a funtion\n\n# 3. Distribution plot\n\n#filter\n#X = X[(X.code_postal == 94130) & (X.nombre_pieces_principales == 3)]\n#X = X[X.nombre_pieces_principales.isin([\"2\", \"3\"])]\n#salepricedistrib(X)\n\n\n#train dataset and check result \n#modeling(X)\n\n\n# TEEEEEEEST\n# Define model. Specify a number for random_state to ensure same results each run\npredictive_model = DecisionTreeRegressor(random_state=1)\n\n# Fit model\npredictive_model.fit(X, Y)\n\nprint(\"Rappel des éléments à prédire :\")\nprint(testdata.head(10))\nprint(\"Prédictions\")\nprint(predictive_model.predict(testdata.head(10)))\n\n\n\ntext_representation = tree.export_text(predictive_model)\nprint(text_representation)\n\n\nfig = plt.figure(figsize=(25,20))\n_ = tree.plot_tree(predictive_model, \n feature_names=X.feature_names, \n class_names=Y.target_names,\n filled=True)\n\nfig.savefig(\"decistion_tree.png\")\n" }, { "alpha_fraction": 0.5198028683662415, "alphanum_fraction": 0.5325789451599121, "avg_line_length": 42.832000732421875, "blob_id": "c1d2c08277f22deb0f2e4b535bf4f464a66d14ac", "content_id": "d860f65847370d3e026fdcf6cb38c0254a7fd44d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5479, "license_type": "no_license", "max_line_length": 151, "num_lines": 125, "path": "/function.py", "repo_name": "t-ax/Khome_PredictiveML_Housing", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3.8\n\n\n# IMPORTING PACKAGES\nimport pandas as pd # data processing\nimport numpy as np # working with arrays\nimport matplotlib.pyplot as plt # 
visualization\nimport seaborn as sb # visualization\nfrom termcolor import colored as cl # text customization\nfrom sklearn.model_selection import train_test_split # data split\nfrom sklearn.linear_model import LinearRegression # OLS algorithm\nfrom sklearn.linear_model import Ridge # Ridge algorithm\nfrom sklearn.linear_model import Lasso # Lasso algorithm\nfrom sklearn.linear_model import BayesianRidge # Bayesian algorithm\nfrom sklearn.linear_model import ElasticNet # ElasticNet algorithm\nfrom sklearn.metrics import explained_variance_score as evs # evaluation metric\nfrom sklearn.metrics import r2_score as r2 # evaluation metric\n\n#Test of the decision tree regressor\nfrom sklearn.tree import DecisionTreeRegressor\n\n# 2. Scatter plot\n\ndef scatter_df(df, y_var):\n scatter_df = df.drop(y_var, axis = 1)\n dfcolumns = df.columns\n \n for counter in range(1,4,1):\n plot2 = sb.scatterplot(dfcolumns[counter], y_var, data = df, color = 'yellow', edgecolor = 'b', s = 150)\n plt.title('{} / y_var'.format(dfcolumns[counter]), fontsize = 16)\n plt.xlabel('{}'.format(dfcolumns[counter]), fontsize = 14)\n plt.ylabel('y_var', fontsize = 14)\n plt.xticks(fontsize = 12)\n plt.yticks(fontsize = 12)\n plt.savefig('scatter2.png')\n plt.show()\n \ndef salepricedistrib(df):\n plt.title('Sale Price Distribution', fontsize = 16)\n plt.xlabel('date_mutation', fontsize = 14)\n plt.ylabel('valeur_fonciere', fontsize = 14)\n plt.xticks(fontsize = 12)\n plt.yticks(fontsize = 12)\n plt.scatter(df['date_mutation'], df['valeur_fonciere'], c = 'red')\n plt.savefig('distplot.png')\n plt.show()\n \ndef modeling(df):\n # FEATURE SELECTION & DATA SPLIT\n df[\"date_mutation\"] = df.date_mutation.values.astype(np.int64) // 10 ** 9\n X_var = df[['date_mutation', 'code_postal','surface_reelle_bati','nombre_pieces_principales', 'latitude', 'longitude', 'adresse_code_voie']].values\n y_var = df['valeur_fonciere'].values\n\n X_train, X_test, y_train, y_test = train_test_split(X_var, y_var, test_size 
= 0.2, random_state = 0)\n\n print(cl('X_train samples : ', attrs = ['bold']), X_train[0:5])\n print(cl('X_test samples : ', attrs = ['bold']), X_test[0:5])\n print(cl('y_train samples : ', attrs = ['bold']), y_train[0:5])\n print(cl('y_test samples : ', attrs = ['bold']), y_test[0:5])\n\n\n # MODELING\n\n # 1. OLS\n\n ols = LinearRegression()\n ols.fit(X_train, y_train)\n ols_yhat = ols.predict(X_test)\n\n # 2. Ridge\n\n ridge = Ridge(alpha = 0.5)\n ridge.fit(X_train, y_train)\n ridge_yhat = ridge.predict(X_test)\n\n # 3. Lasso\n\n lasso = Lasso(alpha = 0.01)\n lasso.fit(X_train, y_train)\n lasso_yhat = lasso.predict(X_test)\n\n # 4. Bayesian\n\n bayesian = BayesianRidge()\n bayesian.fit(X_train, y_train)\n bayesian_yhat = bayesian.predict(X_test)\n\n # 5. ElasticNet\n\n en = ElasticNet(alpha = 0.01)\n en.fit(X_train, y_train)\n en_yhat = en.predict(X_test)\n\n\n ##Results\n # 1. Explained Variance Score\n\n print(cl('EXPLAINED VARIANCE SCORE:', attrs = ['bold']))\n print('-------------------------------------------------------------------------------')\n print(cl('Explained Variance Score of OLS model is {}'.format(evs(y_test, ols_yhat)), attrs = ['bold']))\n print('-------------------------------------------------------------------------------')\n print(cl('Explained Variance Score of Ridge model is {}'.format(evs(y_test, ridge_yhat)), attrs = ['bold']))\n print('-------------------------------------------------------------------------------')\n print(cl('Explained Variance Score of Lasso model is {}'.format(evs(y_test, lasso_yhat)), attrs = ['bold']))\n print('-------------------------------------------------------------------------------')\n print(cl('Explained Variance Score of Bayesian model is {}'.format(evs(y_test, bayesian_yhat)), attrs = ['bold']))\n print('-------------------------------------------------------------------------------')\n print(cl('Explained Variance Score of ElasticNet is {}'.format(evs(y_test, en_yhat)), attrs = ['bold']))\n 
print('-------------------------------------------------------------------------------')\n\n\n # 2. R-squared\n\n print(cl('R-SQUARED:', attrs = ['bold']))\n print('-------------------------------------------------------------------------------')\n print(cl('R-Squared of OLS model is {}'.format(r2(y_test, ols_yhat)), attrs = ['bold']))\n print('-------------------------------------------------------------------------------')\n print(cl('R-Squared of Ridge model is {}'.format(r2(y_test, ridge_yhat)), attrs = ['bold']))\n print('-------------------------------------------------------------------------------')\n print(cl('R-Squared of Lasso model is {}'.format(r2(y_test, lasso_yhat)), attrs = ['bold']))\n print('-------------------------------------------------------------------------------')\n print(cl('R-Squared of Bayesian model is {}'.format(r2(y_test, bayesian_yhat)), attrs = ['bold']))\n print('-------------------------------------------------------------------------------')\n print(cl('R-Squared of ElasticNet is {}'.format(r2(y_test, en_yhat)), attrs = ['bold']))\n print('-------------------------------------------------------------------------------')\n" } ]
2
vincesesto/ansibleanswers
https://github.com/vincesesto/ansibleanswers
9a1f21d08f959e69696932810f3d60c82546178e
d4b148ae21b5b82e915bb9ed50d33a265406ae3a
55a4c584f9bdea415fa64771ce8ac2926745dcf9
refs/heads/master
2021-06-05T02:37:48.171476
2020-06-29T04:27:20
2020-06-29T04:27:20
123,524,100
1
5
null
null
null
null
null
[ { "alpha_fraction": 0.6163982152938843, "alphanum_fraction": 0.6354319453239441, "avg_line_length": 15.658536911010742, "blob_id": "c7c486efa4ada39e11cee0ecd88fa8ea2d1a4fd4", "content_id": "b81e56831d5a680069c6fee81ac08d690c0633ac", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 683, "license_type": "permissive", "max_line_length": 266, "num_lines": 41, "path": "/chapter4/roles/SimpliField.users/README.md", "repo_name": "vincesesto/ansibleanswers", "src_encoding": "UTF-8", "text": "Users [![Build Status](https://travis-ci.org/SimpliField/ansible-users.svg?branch=master)](https://travis-ci.org/SimpliField/ansible-users) [![Ansible Role](https://img.shields.io/ansible/role/11094.svg?maxAge=2592000)](https://galaxy.ansible.com/SimpliField/users/)\n=========\n\nAnsible role to create users\n\nRequirements\n------------\n\nNeed ansible 2+\n\nRole Variables\n--------------\n\n```yaml\nusers:\n- www:\n name: \"www\"\n comment: \"www user\"\n createhome: \"yes\"\n home: \"/home/www\"\n shell: \"/bin/false\"\n```\n\nDependencies\n------------\n\nThere is no dependency.\n\nExample Playbook\n----------------\n\n```yaml\n- hosts: servers\n roles:\n - role: SimpliField.users\n```\n\nLicense\n-------\n\nBSD\n" }, { "alpha_fraction": 0.6501128673553467, "alphanum_fraction": 0.7607223391532898, "avg_line_length": 72.83333587646484, "blob_id": "d0cc4fb086fc4f6e4881024eec8e46545fc13947", "content_id": "bc37ccde50652fd0eb4008a11cd0eadf4b6792be", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 443, "license_type": "permissive", "max_line_length": 244, "num_lines": 6, "path": "/chapter6/roles/splunk_server/files/user_data.sh", "repo_name": "vincesesto/ansibleanswers", "src_encoding": "UTF-8", "text": "#!/bin/bash\nset -e -x\nwget -O splunk-7.1.1-8f0ead9ec3db-linux-2.6-x86_64.rpm 
'https://www.splunk.com/bin/splunk/DownloadActivityServlet?architecture=x86_64&platform=linux&version=7.1.1&product=splunk&filename=splunk-7.1.1-8f0ead9ec3db-linux-2.6-x86_64.rpm&wget=true'\nrpm -i splunk-7.1.1-8f0ead9ec3db-linux-2.6-x86_64.rpm\nsleep 100\nsudo -u splunk /opt/splunk/bin/splunk start --answer-yes --no-prompt --accept-license --seed-passwd newpassword\n" }, { "alpha_fraction": 0.7628361582756042, "alphanum_fraction": 0.7726161479949951, "avg_line_length": 39.900001525878906, "blob_id": "01505093dd37ee6dfaabe719e5b441bb0d44b057", "content_id": "a51845c4a8efd4713f9e89f0f94ab6d48f0e627c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "SQL", "length_bytes": 409, "license_type": "no_license", "max_line_length": 64, "num_lines": 10, "path": "/chapter4/roles/db_server/files/dump.sql", "repo_name": "vincesesto/ansibleanswers", "src_encoding": "UTF-8", "text": "CREATE TABLE IF NOT EXISTS test (\n\t message varchar(255) NOT NULL\n\t) ENGINE=MyISAM DEFAULT CHARSET=utf8;\nINSERT INTO test(message) VALUES('Ansible To Do List');\nINSERT INTO test(message) VALUES('Get ready');\nINSERT INTO test(message) VALUES('Ansible is fun');\n\n#CREATE USER myprojectuser@localhost IDENTIFIED BY 'password';\n#GRANT ALL PRIVILEGES ON myproject.* TO myprojectuser@localhost;\n#FLUSH PRIVILEGES;\n" }, { "alpha_fraction": 0.6480447053909302, "alphanum_fraction": 0.6517691016197205, "avg_line_length": 30.58823585510254, "blob_id": "203f8ee6017fafb840e9fabf6d5243b8b14859f9", "content_id": "ad2f80ebace74d0deecd8ecf16f59c558d15a239", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 537, "license_type": "no_license", "max_line_length": 113, "num_lines": 17, "path": "/chapter7/test_rules/AWSCredentials.py", "repo_name": "vincesesto/ansibleanswers", "src_encoding": "UTF-8", "text": "from ansiblelint import AnsibleLintRule\n\n\nclass AWSCredentials(AnsibleLintRule):\n id = 'ANSWERS02'\n shortdesc = 
'Playbook May Contain AWS Credentials'\n description = 'AWS credentials should not be included in variables, especially if they are stored publically'\n tags = ['variables']\n\n def match(self, file, line):\n if \"aws_access_key_id\" in line:\n self.shortdesc\n return True\n if \"aws_secret_access_key\" in line:\n self.shortdesc\n return True\n return False\n" }, { "alpha_fraction": 0.6886160969734192, "alphanum_fraction": 0.7433035969734192, "avg_line_length": 43.75, "blob_id": "f304f4464fb2fc57c28a9d45be3e5ae7ce27d700", "content_id": "9d934da7b038754a1b7acd3c908e1e21e98300e9", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 896, "license_type": "permissive", "max_line_length": 244, "num_lines": 20, "path": "/chapter7/roles/splunk_server/templates/user_data.j2", "repo_name": "vincesesto/ansibleanswers", "src_encoding": "UTF-8", "text": "#!/bin/bash\nset -e -x\nwget -O splunk-7.1.1-8f0ead9ec3db-linux-2.6-x86_64.rpm 'https://www.splunk.com/bin/splunk/DownloadActivityServlet?architecture=x86_64&platform=linux&version=7.1.1&product=splunk&filename=splunk-7.1.1-8f0ead9ec3db-linux-2.6-x86_64.rpm&wget=true'\nrpm -i splunk-7.1.1-8f0ead9ec3db-linux-2.6-x86_64.rpm\nsleep 100\nsudo -u splunk /opt/splunk/bin/splunk start --answer-yes --no-prompt --accept-license --seed-passwd {{ admin_password }}\n\n{% for item in userlist %}\n sudo -u splunk /opt/splunk/bin/splunk add user {{ item }} -role admin -auth admin:{{ admin_password }}\n{% endfor %}\n\n# Now use Ansible to install our Splunk App\nsudo yum update -y\nsudo yum install git -y\nsudo pip install ansible\nsudo /usr/local/bin/ansible-pull -U https://github.com/vincesesto/testsplunkapp -i hosts\n\nsudo /opt/splunk/bin/splunk enable boot-start\n\nsudo -u splunk /opt/splunk/bin/splunk restart\n\n" }, { "alpha_fraction": 0.6318289637565613, "alphanum_fraction": 0.646080732345581, "avg_line_length": 29.071428298950195, "blob_id": 
"957bedeffc45032ad0a63952213af5728b3c7ba7", "content_id": "5def579730da63c244087f9d34d03447ba1351cb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 421, "license_type": "no_license", "max_line_length": 91, "num_lines": 14, "path": "/chapter7/test_rules/LineLength.py", "repo_name": "vincesesto/ansibleanswers", "src_encoding": "UTF-8", "text": "from ansiblelint import AnsibleLintRule\n\n\nclass LineLength(AnsibleLintRule):\n id = 'ANSWERS01'\n shortdesc = 'Line too long'\n description = 'Python Code Style Guidelines Recommend Line Lenghth Under 80 Characters'\n tags = ['formatting']\n\n def match(self, file, line):\n if len(line) > 80:\n self.shortdesc += \" ({} characters)\".format(len(line))\n return True\n return False\n" } ]
6
mishless/LearningSystems
https://github.com/mishless/LearningSystems
63cef6a18cdd9d4db7267674badb1a19692d0af3
635d9af9d00ae0360d7ca8571bf47f782fdcdfe9
08aea595511daeed742283112ff8fb3edfed81a7
refs/heads/master
2016-08-09T18:55:52.155267
2016-03-10T20:33:43
2016-03-10T20:33:43
51,447,119
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.6928228139877319, "alphanum_fraction": 0.7317740321159363, "avg_line_length": 72.63194274902344, "blob_id": "6f768e06554f929f77572045d2b2cf6a91418efd", "content_id": "ba0898527dd1e3d115d8389544d6561210bb8a26", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 10649, "license_type": "permissive", "max_line_length": 1165, "num_lines": 144, "path": "/README.md", "repo_name": "mishless/LearningSystems", "src_encoding": "UTF-8", "text": "# Laboratory work for Learning Systems\nLaboratory work for Learning Systems course at MDH during spring semester 2015/2016\n### Assignment 1\n-----\nReactive-Ion-Etching (RIE) machines are used in some company for removal of thin layers in the production of magnetic heads. The task of a RIE machine is to etch a specified surface of some mm depth into sliders. The etch depths of sliders are regarded as indicators for process stability. It would be easy to control the process if the etch depth could be monitored online. Unfortunately such a depth can only be measured at the end of the etching process. \n\nIn order to enhance process stability and reliability, attempts have been made to analyse data from process observations and to detect correlation between sensor data and process result, i.e. etch depth. At present, however, there is no explicit mathematical model to calculate etch depth from process parameters. As alternative artificial neural network may be constructed to predict etch depth based on sensor information.\n\nThe 21 features extracted from sensor signals are illustrated in the table below. But not all of them are relevant for the underlying problem. A pre-step of feature selection has been performed to select the most critical features as inputs. 
The results of feature selection recommend that features 2, 19, and 20 be selected for etch process modelling (selected features are in bold text in the table).\n\n \n| Number | Features extracted | Original signals|\n| ------------- |:-------------:| -----:|\n| 1 | Run time | |\n| **2** | **HF energy (integral of HF power)** | **HF power delivered** |\n| 3 | Start time | Start time |\n| 4 | Peak to peak value | HF power delivered |\n| 5 | Integral | DC voltage |\n| 6 | Start time | DC voltage |\n| 7 | Integral | Gas flow 1 |\n| 8 | Mean value | Gas flow 1 |\n| 9 | Standard deviation | Gas flow 1 |\n| 10 | Integral | Gas flow 2 |\n| 11 | Mean value | Gas flow 2 |\n| 12 | Standard deviation | Gas flow 2 |\n| 13 | Integral | Gas flow 3 |\n| 14 | Mean value | Gas flow 3 |\n| 15 | Standard deviation | Gas flow 3 |\n| 16 | Integral | Chamber pressure |\n| 17 | Mean value | Chamber pressure |\n| 18 | Peak to peak value | Chamber pressure |\n| **19** | **Integral** | **Throttle position** |\n| **20** | **Mean value** | **Throttle position** |\n| 21 | Peak to peak value | Throttle position |\n\n\nTwo data sets are available to be downloaded in a1 forlder - Data_Training and Data_Test. Data_Training contains the examples that are available for learning, while Data_Test includes test examples that represent unseen examples and are not involved in the learning procedure. Every case in both data sets consists of 21 features listed in the above table and the associated etch depth as output. The first 21 columns in the files represent the 21 features and the last column represents the output.\n\nThe task is to develop a competent neural network to predict etch depth based on Data_Training. Then you should examine the performance of the learned neural network on Data_Test. Only the three features selected need to be used as inputs to the neural network (you just use the results of feature selection here). 
As learning algorithm you can use GA or BP as your free choice.\n\n#### Results\n1. Structure of the ANN\n\n The structure of the neural network is configurable by the user, but for the results presented below we used three-layer ANN with 3 ⋅inputs, 5 hidden units and 1 output.\n2. Learning algorithm used\n\n The algorithm used for learning is backpropagation – basically it is iterative algorithm and on each iteration the inputs are forwarded though the network and the outputs are calculated. Then the error of each output is calculated and weights are modified. The first iteration uses random small weights. \n3. Performance\n ![alt text](https://github.com/mishless/LearningSystems/blob/master/a1/ann.png \"ANN Perfomance\")\n\n On the vertical axis is the error and on the horizontal axis there is the number of iterations that have passed. \n4. Performance on training data\n\n Error on whole training data set: 0.00591729466547751\n5. Performance on test data\n\n Test data error is 0.004609162462734982\n\n### Assignment 2\n---\nClassification of Irsi data is a well known benchmark problem in machine learning research. This data set is downloadable from a2 folder. The assignment is to implement this fuzzy classifier in a computer program that has to be applied to classify all the iris data and examine the classification accuracy of your fuzzy system.\n\n#### Results\n\n1. What is the AND operator in your implementation?\n\n In our solution we allow the user to specify which operator should be used in calculations when intersection between fuzzy sets occurs. The possible values are two – min or prod. The “min” option means that the built-in function for minimum will be used and the “prod” option means that “product” implementation will be used – which is in fact multiplication.\n2. What is the OR operator in your implementation? \n\n In our solution we allow the user to specify which operator should be used in calculations when union between fuzzy sets occurs. 
The possible values are two – max or probor. The “max” option means that the built-in function for maximum will be used and the “probor” option means that “probability or” implementation will be used.\n3. What is the data flow from inputs to decision given the normalized attribute values as (0.3, 0.8, 0.2, 0.7)? \n\n |0.3|0.5|0.8|\n |---|---|---|\n |μ_short(0.3)=0.5|μ_short(0.8)=0|μ_short(0.2)=0.67|μ_short(0.7)=0|\n |μ_middle(0.3)=0.5|μ_middle(0.8)=0.5|μ_middle(0.2)=0.33|μ_middle(0.7)=0.75|\n |μ_long(0.3)=0|μ_long(0.8)=0.5|μ_long(0.2)=0|μ_long(0.7)=0.25|\n\n Rule #1: min(max(0.5, 0.5), max(0.5, 0.5), max(0.33, 0), 0.65) = min(0.5, 0.5, 0.33, 0.75) = 0.33\n\n Rule #2: min(max(0.67, 0.33), 0) = 0\n\n Rule #3: min(max(0, 0.5), 0, 0.25) = 0\n\n Rule #4: min(0.5, max(0, 0.5), 0.67, 0.25) = 0.25\n\n The example is classified as iris setosa.\n4. What is the accuracy of your implemented fuzzy classifier on the Iris data?\n\n ~ 78% using min/max\n ~ 69% using prod/probor\n \n### Assignment 3\n------\nThe\tTravelling\tSalesman\tProblem\t(TSP)\tis\tone\tof\tthe\tmost well\tknow\toptimization\tproblems. This problem originally\tis\tdescribed\tas\tfollows: Given\ta\tlist\tof\tcities\tand\tthe\tdistance between\teach\tother,\twhich\tis\tthe\tshortest route\tto\ttravel\tacross\t all\tthe\tcities?,\tsuch\tthat,\tyou\tvisit\tall\tthe\tcities\tonce\tand\tyou\tstart\tand\tfinish\tin\tthe\tsame\tcity. We\tnow\tconsider\tthe\tproblem\twith\ta\tset\tof locations inside a\tcity available at a3 folder.\tThese locations are\trepresented\tby two\tcoordinates\t(x and\ty) as\tillustrated\tin\tthe\ttable\tbelow.\n\n| Location ID | X | Y |\n|---|---|---|\n| 1 | 565 | 575 |\n| 2 | 25 | 185 |\n| 3 | 345 | 750 |\n| ... | ... | ... 
|\n\nThe\tdistance\tbetween\ttwo\tlocations is\tthe\tEuclidean\tDistance.\nThe\tassignment\tis\tto\tapply an\toptimization\talgorithm,\te.g.\tGenetic\tAlgorithm\t(GA)\tto\tsearch\tfor\tthe\tshortest\troute.\tYou\tneed\tto\tvisit\tall\tthe\tlocations\tonce\tand\tthe\tstarting\tand\tend\tpoints must\tbe the\tlocation\tnumber\t1.\n\n#### Results\n\n1. Explain the important operations of the employed algorithm (e.g. GA) to solve this problem:\n\n One of the most important decisions that need to be made when using GA is the representations of individual because the success of the algorithm depends on this. Fortunately, for the problem we are solving this is quite intuitive and is described in details below. Another aspect that is of great importance is the definition of the fitness function which is also intuitive in our case. GA manages to solve the problem because of its evolutionary approach - an initial population of individuals is generated randomly and is evaluated. After that few of the best individual (fittest) are transferred to the next population. Using a selection mechanism few of the rest individual are chosen to be parent and to crossover between each other to create the rest of the next population. After that mutation operator is executed that mutates the children and brings diversity to the population. This prevents the algorithm from getting into a local minimum or over-fit. Eventually after the error has become small or after a maximum number of iterations have passed the algorithm terminates and gives the best solution found so far.\n \n2. Explain the representation of the individual solutions in your algorithm.\n\n The individual is represented as an array containing the IDs of the cities that will be visited. In index 0 is the ID of the city that will be visited first and is always equal to 1. In index 1 is the seconds city, in index 2 is the third city, etc. 
and the last index in the array is the city where we will finish, which is always equal to 1.\n \n3. Give the parameters used in your algorithm. Examples: population size, crossover rate...\n\n The population size that we used is 500. The elitism rate is 0.1 which means that 10% of the best individuals will be inherited in the next generation without any change. The tournament rate is 0.1 which means that whenever a parent is to be chosen 10% of the population will be chosen randomly and the parent will be the fittest one, according to the formula above. The parent rate is 0.2 which \n \n4. Performance\n ![alt text](https://github.com/mishless/LearningSystems/blob/master/a3/ga.png \"GA Perfomance\")\n\n5. Best results obtained\n\n Best result is 8228.576874458458 with path 1 - 18 - 3 - 17 - 21 - 42 - 7 - 2 - 30 - 29 - 16 - 46 - 35 - 49 - 32 - 45 - 19 - 41 - 8 - 9 - 10 - 43 - 33 - 51 - 28 - 27 - 26 - 47 - 13 - 14 - 52 - 11 - 12 - 25 - 4 - 15 - 5 - 6 - 48 - 24 - 38 - 37 - 40 - 39 - 36 - 34 - 44 - 50 - 20 - 23 - 31 - 22 - 1\n \n### Assignment 4\n-----\nGiven is a map of 26 cities (named as A, B, C, D,..., X, Y, Z respectively). The document for this map is available in a4 folder. Every road connection is represented by a row in the table below, where the first and the second columns correspond to the two cities that are directly connected and the third column denotes the distance of the road that connect the two cities.\n\n| From | To | Distance |\n|----|----|----|\n| A |\tB |\t2 |\n| A |\tE\t| 2 |\n| A |\tW\t| 1 |\n| ... | ... | ... 
|\n\nThe assignment is to write a computer program based on the principle of dynamic programming to realize the following two functions:\n1)\tCalculate the optimal values of all cities given that the destination is city F using the Bellman equation\n2)\tFind the shortest path from each city to F using the optimal values\n\n#### Results\nTODO\n" }, { "alpha_fraction": 0.5621200799942017, "alphanum_fraction": 0.5779236555099487, "avg_line_length": 30.63846206665039, "blob_id": "6ae8383eb27b5ff0566f07b272f449733c7e8f98", "content_id": "5049af1e05d73fa6840cb3f92f972bafd2d50cac", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 12339, "license_type": "permissive", "max_line_length": 146, "num_lines": 390, "path": "/a1/ANN.py", "repo_name": "mishless/LearningSystems", "src_encoding": "UTF-8", "text": "#Artificial Neural Network\n\n\n#includes\nimport configparser\nimport math\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport random\nfrom decimal import *\n\n#global variables\nweights = [];\ntopology = [];\ndata_training = [];\ndata_test = [];\nlearning_rate = 0;\nweight_min = 0;\nweight_max = 0;\nerror_terms = [];\noutputs = [];\nresult_offset = 4.5;\npartition_num = 0;\npartition_size = 0;\ndata_sets = [];\nscalings = [{'min' : 1.5*(10**9), 'max' : 2.5*(10**9)},\n {'min' : 1.5*(10**8), 'max' : 4.5*(10**8)},\n {'min' : 0, 'max' : 150}];\n\ndef read_config():\n global partition_num;\n global learning_rate;\n global weight_min;\n global weight_max;\n global iteration_num;\n \n config = configparser.ConfigParser();\n config.read(\"config.txt\");\n temp = config[\"general\"][\"topology\"];\n temp = temp.split(\",\");\n for s in temp:\n topology.append(int(s));\n learning_rate = float(config['general']['learning_rate']);\n weight_min = float(config['general']['weight_min']);\n weight_max = float(config['general']['weight_max']);\n partition_num = int(config['general']['partition_num']);\n \ndef 
read_input():\n read_config();\n\ndef print_weights():\n print(\"***** WEIGHTS *****\");\n for i in range(0, len(weights)):\n print(\"Layer 0 (\" + str(topology[i]) + \" -> \" + str(topology[i+1]) + \"):\");\n print(\"---------------\");\n for j in range(0, len(weights[i])):\n for k in range(0, len(weights[i][j])):\n print(\"%.6f \" % weights[i][j][k], end=\"\");\n print();\n print(\"---------------\");\n print();\n\ndef fill_dummy_weights():\n w = 0.1;\n for i in range(0, len(weights)):\n for j in range(0, len(weights[i])):\n for k in range(0, len(weights[i][j])):\n weights[i][j][k] = w;\n w = w + 0.1;\n\ndef fill_random_weights(min_limit, max_limit):\n for i in range(0, len(weights)):\n for j in range(0, len(weights[i])):\n for k in range(0, len(weights[i][j])):\n weights[i][j][k] = random.uniform(min_limit, max_limit);\n\ndef init_weights():\n for i in range(0, len(topology)-1):\n weights.append([]);\n for j in range(0, topology[i+1]):\n weights[i].append([]);\n for k in range(0, topology[i]):\n weights[i][j].append(0);\n weights[i][j].append(0);\n\ndef init_error_terms():\n for layer in range(0, len(topology)): \n error_terms.append([]);\n for row in range(0, topology[layer]):\n error_terms[layer].append(0);\n\ndef init_outputs():\n for layer in range(0, len(topology)): \n outputs.append([]);\n for row in range(0, topology[layer]):\n outputs[layer].append(0);\n \ndef plot_sigmoid():\n x_list = np.arange(-8, 8, 0.1);\n y_list = [];\n for x in x_list:\n y_list.append(sigmoid(x));\n\n plt.plot(x_list, y_list);\n plt.show();\n \ndef sigmoid(x):\n\n #avoid overflow fuckups\n if x < -100:\n x = -100;\n \n res = 1/(1+(math.exp(-x)));\n return res;\n \ndef output_function(x):\n return sigmoid(x) + 4.5;\n\ndef calculate_output(input_sample):\n return output_function(calculate_net(len(topology)-1, 0, input_sample));\n\ndef print_nets(input_sample):\n print(\"***** NETS *****\");\n for layer in range(0, len(topology)):\n print(\"Layer \" + str(layer) + \":\");\n for row 
in range(0, topology[layer]):\n print(\"%0.2f \" % calculate_net(layer, row, input_sample), end = \"\");\n print();\n print();\n\ndef print_outputs():\n print(\"***** OUTPUTS *****\");\n for layer in range(0, len(topology)):\n print(\"Layer \" + str(layer) + \":\");\n for row in range(0, topology[layer]):\n print(\"%0.20f \" % outputs[layer] [row], end = \"\");\n print();\n print();\n \ndef print_error_terms():\n print(\"***** ERROR TERMS *****\");\n for layer in range(0, len(topology)):\n print(\"Layer \" + str(layer) + \":\");\n for row in range(0, topology[layer]):\n print(\"%0.6f \" % error_terms[layer] [row], end = \"\");\n print();\n print();\n \ndef read_input():\n file = open(\"Data_Training.txt\");\n file_lines = file.readlines();\n file.close();\n \n for line in file_lines:\n temp = line.split();\n data_sample_strings = [temp[1], temp[18], temp[19], temp[21]];\n data_sample_numbers = [];\n \n for s in data_sample_strings:\n data_sample_numbers.append(float(s));\n \n data_training.append(data_sample_numbers);\n\n file = open(\"Data_Test.txt\");\n file_lines = file.readlines();\n file.close();\n \n for line in file_lines:\n temp = line.split();\n data_sample_strings = [temp[1], temp[18], temp[19], temp[21]];\n data_sample_numbers = [];\n \n for s in data_sample_strings:\n data_sample_numbers.append(float(s));\n \n data_test.append(data_sample_numbers);\n\n random.shuffle(data_training);\n \ndef partition_data():\n global partition_size;\n partition_size = math.floor(len(data_training)/partition_num);\n\n print(\"Total data: \" + str(len(data_training)));\n print(\"Partition size: \" + str(partition_size));\n \n for i in range(0, partition_size*partition_num, partition_size):\n data_sets.append(data_training[i:(i+partition_size)]);\n\ndef examine_input():\n\n a = [];\n b = [];\n c = [];\n d = [];\n for data_sample in data_training:\n a.append(data_sample[0]);\n b.append(data_sample[1]);\n c.append(data_sample[2]);\n d.append(data_sample[3]);\n \n 
exit();\n\ndef scale_training_data():\n for data_sample in data_training:\n for i in range(0, topology[0]):\n data_sample[i] = (data_sample[i] - scalings[i]['min']) / (scalings[i]['max'] - scalings[i]['min']);\n\ndef scale_test_data():\n for data_sample in data_test:\n for i in range(0, topology[0]):\n data_sample[i] = (data_sample[i] - scalings[i]['min']) / (scalings[i]['max'] - scalings[i]['min']);\n\ndef scale_data():\n scale_training_data();\n scale_test_data();\n\ndef init():\n read_config();\n read_input();\n scale_data();\n init_weights();\n fill_random_weights(weight_min, weight_max);\n init_error_terms();\n init_outputs();\n partition_data();\n \ndef calculate_output_error_term(target_output, calculated_output):\n return (target_output - calculated_output) * calculated_output * (1 - calculated_output);\n\ndef calculate_net(layer, row):\n result = 0;\n for i in range(0, topology[layer-1]):\n result = result + outputs[layer-1][i] * weights[layer-1][row][i];\n result = result + (1 * weights[layer-1][row][-1]);\n return result;\n\ndef calculate_outputs(input_sample):\n for input_node in range(0, topology[0]):\n outputs[0][input_node] = input_sample[input_node];\n \n for layer in range(1, len(topology)):\n for row in range(0, topology[layer]):\n outputs[layer][row] = sigmoid(calculate_net(layer, row));\n\ndef calculate_error_term(layer, row):\n result = 0;\n for row_from_next_layer in range(0, topology[layer+1]):\n result = result + error_terms[layer+1][row_from_next_layer] * weights[layer][row_from_next_layer][row];\n result = result * outputs[layer][row] * (1 - outputs[layer][row]);\n return result\n\ndef calculate_error_terms(target_output):\n error_terms[-1][0] = calculate_output_error_term(target_output, outputs[-1][0]);\n for layer in reversed(range(1, len(topology)-1)):\n for row in range(0, topology[layer]):\n error_terms[layer][row] = calculate_error_term(layer, row);\n\ndef update_weights():\n for layer in range(0, len(topology)-1):\n for 
destination_row in range(0, topology[layer+1]):\n for source_row in range(0, topology[layer]):\n delta_weight = learning_rate * error_terms[layer+1][destination_row] * outputs[layer][source_row];\n weights[layer][destination_row][source_row] = weights[layer][destination_row][source_row] + delta_weight;\n weights[layer][destination_row][-1] = weights[layer][destination_row][-1] + learning_rate * error_terms[layer+1][destination_row] * 1;\n \n \n \ndef iterate_once(data_list):\n squared_errors = [];\n \n for data_sample in data_list:\n calculate_outputs(data_sample[0:3]);\n target_result = data_sample[3] - result_offset;\n squared_errors.append((target_result - outputs[-1][0])**2);\n calculate_error_terms(target_result);\n update_weights();\n \n mean_squared_error = sum(squared_errors)/float(len(squared_errors));\n return mean_squared_error;\n\ndef temp_test():\n data_sample = data_training[0];\n print_weights();\n \n for i in range(0, 10000):\n calculate_outputs(data_sample[0:3]);\n target_result = data_sample[3] - result_offset;\n calculate_error_terms(target_result);\n update_weights();\n print_weights();\n\ndef get_mean_error(data_list):\n squared_errors = [];\n \n for data_sample in data_list:\n calculate_outputs(data_sample[0:3]);\n target_result = data_sample[3] - result_offset;\n squared_errors.append((target_result - outputs[-1][0])**2);\n calculate_error_terms(target_result);\n \n mean_squared_error = sum(squared_errors)/float(len(squared_errors));\n return mean_squared_error;\n\ndef calculate_iteration_num(training, validation):\n\n fill_random_weights(weight_min, weight_max);\n error_old = get_mean_error(validation);\n\n consecutive_worse_num = 0;\n iterations = 0;\n while True:\n\n iterate_once(training);\n iterations = iterations + 1;\n error_new = get_mean_error(validation);\n #print(\"Iteration = \" + str(iterations) + \", error = \" + str(error_new));\n \n if error_new > error_old:\n consecutive_worse_num = consecutive_worse_num + 1;\n if 
consecutive_worse_num == 10:\n break;\n else:\n consecutive_worse_num = 0;\n \n error_old = error_new;\n \n return iterations;\n\ndef train_network(number_of_iterations):\n errors = []\n for i in range(0, number_of_iterations):\n errors.append(iterate_once(data_training));\n return errors;\n\n \ndef estimate_iteration_num():\n best_iterations = [];\n \n for i in range(0, partition_num):\n \n validation = data_training[ (i*partition_size) : ((i+1)*partition_size) ];\n \n if i == 0:\n training = data_training[ (i+1)*partition_size : partition_num*partition_size ];\n if i == (partition_num-1):\n training = data_training[0:partition_size*(partition_num-1)];\n else:\n training = data_training[0:i*partition_size] + data_training[(i+1)*partition_size:partition_num*partition_size];\n\n #print(\"Training = \" + str(training));\n #print(\"Validation = \" + str(validation));\n print(\"Performing K-fold cross validation... %2d%%\" % int(i*100*partition_size/(partition_num*partition_size)));\n iteration_number = calculate_iteration_num(training, validation);\n best_iterations.append(iteration_number);\n\n average_iterations = int(sum(best_iterations)/len(best_iterations));\n print(\"Best iterations:\" + str(best_iterations));\n print(\"Average best iterations: \" + str(average_iterations));\n return average_iterations;\n\ndef estimate_and_train():\n\n global weights;\n \n all_errors = [];\n errors = [];\n weight_sets = [];\n \n number_of_iterations = estimate_iteration_num();\n \n for i in range(0, 10):\n print(\"Running training network, cycle \" + str(i));\n fill_random_weights(weight_min, weight_max);\n all_errors.append(train_network(number_of_iterations));\n errors.append(get_mean_error(data_training));\n print(\"Error on whole training data set: \" + str(errors[-1]));\n weight_sets.append(weights);\n\n weights = weight_sets[errors.index(min(errors))];\n plt.plot(all_errors[errors.index(min(errors))]);\n plt.show()\n\n test_error = get_mean_error(data_test);\n print(\"Test 
data error is \" + str(test_error));\n\n \n#main\ninit();\nestimate_and_train();\n" }, { "alpha_fraction": 0.5320505499839783, "alphanum_fraction": 0.5558131337165833, "avg_line_length": 23.372880935668945, "blob_id": "2915c885572964a23b924b6fe3741a6954d19f56", "content_id": "5a793e8889dda76c8787bba28e2dd2165af58633", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8627, "license_type": "permissive", "max_line_length": 81, "num_lines": 354, "path": "/a5/reinforced_learning.py", "repo_name": "mishless/LearningSystems", "src_encoding": "UTF-8", "text": "# Reinforcment learning for balancing pole on a cart system.\n#\n# Authors: Vukan Turkulov & Mihaela Stoycheva\n# Licence: Ask Mihaela\n\nimport math\nimport matplotlib.pyplot as plt\nimport random\nimport sys\nfrom matplotlib.backends.backend_pdf import PdfPages\n\n#constants\nF1 = 10\nF2 = -10\ninitial_state = (0.0, 0.0, 0.0, 0.0);\n\nFAIL_PENALTY = -1000;\nX_BOUNDARY_LOW = -2.4;\nX_BOUNDARY_HIGH = 2.4;\nTHETA_BOUNDARY_LOW = -0.20944;\nTHETA_BOUNDARY_HIGH = 0.20944;\n\nstates_info = {};\niteration_num = 10000;\ncoeff = 1.1025;\nv_exp_guard = math.floor(math.log(sys.float_info.max/2, coeff));\nv_max = 1000;\nchain_states = [initial_state];\nchain_other_states = [initial_state];\nchain_chances = [1.0];\nmax_depth = 0;\nplot_graphs = False\n\ndef simulate_movement(force):\n floats = initial_state;\n xses = [0];\n xdots = [0];\n thetas = [0];\n theta_dots = [0];\n\n for f in force:\n floats = simulate(f, floats);\n xses.append(floats[0]);\n xdots.append(floats[1]);\n thetas.append(floats[2]);\n theta_dots.append(floats[3]);\n\n return (xses, thetas, xdots, theta_dots);\n\ndef simulate(force, floats):\n\n GRAVITY=9.8;\n MASSCART=1.0;\n MASSPOLE=0.1;\n TOTAL_MASS=MASSPOLE + MASSCART;\n LENGTH=0.5; \n POLEMASS_LENGTH=MASSPOLE * LENGTH;\n STEP=0.02; \n FOURTHIRDS=1.3333333333333;\n\n x = floats[0];\n x_dot = floats[1];\n theta = floats[2];\n theta_dot = 
floats[3];\n\n costheta = math.cos(theta);\n sintheta = math.sin(theta);\n\n temp = ((force + POLEMASS_LENGTH * theta_dot *theta_dot * sintheta)/\n TOTAL_MASS);\n\n thetaacc = ((GRAVITY * sintheta - costheta* temp)/\n (LENGTH * (FOURTHIRDS - MASSPOLE * costheta * costheta/ TOTAL_MASS)));\n\n xacc = temp - POLEMASS_LENGTH * thetaacc* costheta / TOTAL_MASS;\n\n y0= x+STEP*x_dot;\n y1=x_dot+STEP*xacc;\n y2=theta+STEP*theta_dot;\n y3=theta_dot+STEP*thetaacc;\n\n return(y0, y1, y2, y3);\n\ndef is_state_valid(s):\n x = s[0];\n theta = s[2];\n\n if(x < X_BOUNDARY_LOW or x > X_BOUNDARY_HIGH):\n return False;\n if(theta < THETA_BOUNDARY_LOW or theta > THETA_BOUNDARY_HIGH):\n return False;\n\n return True;\n\ndef get_initial_info():\n x = random.random()\n\n for i in reversed(range(0, len(chain_states))):\n if x < chain_chances[i]:\n starting_state = chain_other_states[i]\n path = chain_states[0:i] + [starting_state];\n return{'state':starting_state, 'path':path};\n\ndef get_next_state(s, force):\n\n if force != F1 and force != F2:\n print(\"Invalid force! 
Terminating.\");\n exit();\n\n return simulate(force, s);\n\ndef chances_add(state):\n global_chances\n\ndef calculate_chance(v1, v2):\n if v1 > v_exp_guard:\n v1 = v_exp_guard;\n if v2 > v_exp_guard:\n v2 = v_exp_guard;\n\n if v1 >= v_max:\n return 1;\n if v2 >= v_max:\n return 0;\n\n return coeff**v1/(coeff**v1 + coeff**v2);\n\ndef get_v(state):\n global states_info;\n if state in states_info:\n return states_info[state]['v'];\n else:\n return 0;\n\ndef update_states_info(path):\n global states_info;\n\n i = -1;\n\n # Update v function backwards\n for state in reversed(path[0:-1]):\n if states_info[state]['v'] >= v_max:\n break;\n v1 = get_v(states_info[state]['s1']);\n v2 = get_v(states_info[state]['s2']);\n\n if states_info[state]['v'] == 1 + max(v1, v2):\n break;\n else:\n i -= 1;\n states_info[state]['v'] = min(v_max, 1 + max(v1, v2));\n\n # Update chances forward\n for state in path[i-1 : -1]:\n v1 = get_v(states_info[state]['s1']);\n v2 = get_v(states_info[state]['s2']);\n states_info[state]['chance'] = calculate_chance(v1, v2)\n\n if i == -len(path):\n i = 1;\n\n # If needed, update helper data structs\n if path[i-1] in chain_states:\n update_chain(path[i-1]);\n\ndef update_chain(starting_state):\n global chain_states;\n global chain_chances;\n\n i = chain_states.index(starting_state);\n del chain_states[i+1:];\n del chain_chances[i+1:];\n del chain_other_states[i+1:];\n s = chain_states[i];\n\n while True:\n if not s in states_info:\n break;\n\n chance = states_info[s]['chance'];\n\n if chance > 0.5:\n chain_states.append(states_info[s]['s1']);\n chain_other_states.append(states_info[s]['s2']);\n next_state = states_info[s]['s1'];\n else:\n chain_states.append(states_info[s]['s2']);\n chain_other_states.append(states_info[s]['s1']);\n next_state = states_info[s]['s2'];\n chance = 1 - chance;\n\n chain_chances.append(chain_chances[-1]*chance);\n\n s = next_state;\n\ndef info_add(state, prev_state):\n global states_info;\n\n states_info[state] = {'v' : 0,\n 
'chance' : 0.5,\n 's1' : get_next_state(state, F1),\n 's2' : get_next_state(state, F2),\n 'prev' : prev_state};\n\ndef info_set_fail_state(state):\n global states_info;\n states_info[state]['v'] = FAIL_PENALTY;\n\ndef get_next_state_exp(state):\n\n x = random.random();\n if x < states_info[state]['chance']:\n return states_info[state]['s1'];\n else:\n return states_info[state]['s2'];\n\ndef train_system_once():\n global max_depth;\n\n initial_info = get_initial_info();\n state = initial_info['state'];\n chosen_states = initial_info['path'];\n\n depth = len(chosen_states);\n\n while True:\n\n if not state in states_info:\n info_add(state, chosen_states[-1]);\n\n if(is_state_valid(state) == False):\n info_set_fail_state(state);\n break;\n\n state = get_next_state_exp(state);\n chosen_states.append(state);\n\n if not state in states_info:\n info_add(state, chosen_states[-1]);\n\n depth += 1;\n\n update_states_info(chosen_states);\n\n if depth > max_depth:\n max_depth = depth;\n\ndef train_system(iterations):\n\n for i in range(0, iterations):\n if (i % 100) == 0:\n print(\"\\n*** Running iteration %5d *** depth = %d\" % (i, max_depth));\n train_system_once();\n\ndef run_system():\n state = initial_state;\n i = 0;\n f = [];\n\n xses = [0];\n xdots = [0];\n thetas = [0];\n theta_dots = [0];\n\n while True:\n if(is_state_valid(state) == False):\n break;\n\n if states_info[state]['chance'] > 0.5:\n state = states_info[state]['s1'];\n f.append(F1);\n else:\n state = states_info[state]['s2'];\n f.append(F2);\n\n xses.append(state[0]);\n xdots.append(state[1]);\n thetas.append(state[2]);\n theta_dots.append(state[3]);\n i += 1;\n\n print(\"Until fail: \" + str(i));\n return {'forces': f, 'movement': (xses, xdots, thetas, theta_dots)};\n\ndef plot_results(data):\n\n pp = PdfPages('Movement.pdf')\n\n plt.figure(1);\n plt.plot(data[0]);\n plt.title(\"Cart Position\");\n plt.xlabel(\"time[steps]\");\n plt.ylabel(\"position[meters]\");\n if plot_graphs == True:\n 
plt.show(block=False);\n pp.savefig();\n\n\n plt.figure(2);\n plt.plot(data[1]);\n plt.title(\"Cart Velocity\");\n plt.xlabel(\"time[steps]\");\n plt.ylabel(\"velocity[?]\");\n pp.savefig();\n if plot_graphs == True:\n plt.show(block=False);\n\n plt.figure(3);\n plt.plot(data[2]);\n plt.title(\"Pole Angle\");\n plt.xlabel(\"time[steps]\");\n plt.ylabel(\"angle[radians]\");\n pp.savefig();\n if plot_graphs == True:\n plt.show(block=False);\n\n plt.figure(4);\n plt.plot(data[3]);\n plt.title(\"Pole Angle Velocity\");\n plt.xlabel(\"time[steps]\");\n plt.ylabel(\"angle velocity[?]\");\n pp.savefig();\n if plot_graphs == True:\n plt.show(block=False);\n\n pp.close();\n\ndef init():\n global states_info;\n\n states_info[initial_state] = {'v' : 0,\n 'chance' : 0.5,\n 's1' : get_next_state(initial_state, F1),\n 's2' : get_next_state(initial_state, F2),\n 'prev' : -1};\n\ndef do_what_has_to_be_done():\n train_system(iteration_num);\n\n run_system_results = run_system();\n\n forces_used = run_system_results['forces'];\n file = open(\"forces_used.txt\", \"w\");\n file.write(str(forces_used));\n file.close();\n\n print(\"States encountered: \" + str(len(states_info)));\n movement = run_system_results['movement'];\n print(\"Steps = \" + str(len(forces_used)));\n plot_results(movement);\n\n#main\ninit();\ndo_what_has_to_be_done();" }, { "alpha_fraction": 0.5937219858169556, "alphanum_fraction": 0.6100448369979858, "avg_line_length": 38.260562896728516, "blob_id": "6a7abefef4158e9448edf84388f1522afd9c2844", "content_id": "381d65dbcd1f5449b588fc3c4e731e5bab7483cb", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5577, "license_type": "permissive", "max_line_length": 143, "num_lines": 142, "path": "/a2/FC.py", "repo_name": "mishless/LearningSystems", "src_encoding": "UTF-8", "text": "#!/usr/local/bin/python2.7\n# encoding: utf-8\n'''\nmdh.FC is a fuzzy classifier that classifies iris data into three classes provided 
with membership \nfunctions for all terms of linguistic variables and a database of fuzzy rules.\n\nIt defines classes_and_methods\n\n@author: Vukan Turkolov <vukant@gmail.com>\n Mihaela Stoycheva <mihaela.stoycheva@gmail.com\n\n@copyright: 2016 IDT, Mälardalen Högskola. All rights reserved.\n'''\n\nimport sys\nimport os\nimport re\nfrom operator import itemgetter\nfrom argparse import ArgumentParser\nfrom argparse import RawDescriptionHelpFormatter\n\ndef normalize_formula(x, min, max):\n normalized = [((x[i] - min[i]) / (max[i] - min[i])) for i in range(0, len(x) - 1)]\n normalized += (x[4],)\n return normalized\n\ndef normalize_data(data_set):\n maximum_tuples = (max(data_set, key=itemgetter(0))[0],\n max(data_set, key=itemgetter(1))[1],\n max(data_set, key=itemgetter(2))[2],\n max(data_set, key=itemgetter(3))[3])\n minimum_tuples = (min(data_set, key=itemgetter(0))[0],\n min(data_set, key=itemgetter(1))[1],\n min(data_set, key=itemgetter(2))[2],\n min(data_set, key=itemgetter(3))[3])\n data_set = [normalize_formula(item, minimum_tuples, maximum_tuples) for item in data_set]\n return data_set\n\ndef short(value):\n return abs((1 - (value / 0.6)) * (value < 0.6))\n\ndef middle(value):\n return abs((value / 0.6) * (value <= 0.6) + (2.5 - 2.5 * value) * (value > 0.6));\n\ndef calc_long(value):\n return abs((value * 2.5 - 1.5) * (value > 0.6));\n\ndef rule_one(sho, mid, lon, union_op, intersecion_op):\n result = intersecion_op(union_op(sho[0], lon[0]), union_op(mid[1], lon[1]),\n union_op(mid[2], lon[2]), mid[3]);\n return (result, 'Iris-versicolor');\n\ndef rule_two(sho, mid, lon, union_op, intersecion_op):\n result = intersecion_op(union_op(sho[2], mid[2]), sho[3]);\n return (result, 'Iris-setosa');\n\ndef rule_three(sho, mid, lon, union_op, intersecion_op):\n result = intersecion_op(union_op(sho[1], mid[1]),\n lon[2], lon[3])\n return (result, 'Iris-virginica')\n\ndef rule_four(sho, mid, lon, union_op, intersecion_op):\n result = intersecion_op(mid[0], 
union_op(sho[1],\n mid[1]), sho[2],\n lon[3])\n return (result, 'Iris-versicolor')\n\ndef probor(*args):\n if (len(args) == 2):\n return ((args[0] + args[1]) - (args[0] * args[1]))\n else:\n sys.exit(\"Probor operator works only with two arguments. ;)\")\n\ndef prod(*args):\n result = 1\n for arg in args:\n result *= arg\n return result;\n\ndef classify(short, middle, calc_long, union_op, intersection_op):\n result = []\n result.append(rule_one(short, middle, calc_long, union_op, intersection_op))\n result.append(rule_two(short, middle, calc_long, union_op, intersection_op))\n result.append(rule_three(short, middle, calc_long, union_op, intersection_op))\n result.append(rule_four(short, middle, calc_long, union_op, intersection_op))\n return max(result, key=lambda item:item[0])[1]\n\ndef fuzzify_and_eval(data_set, union_op, intersection_op):\n correct = 0;\n for data_item in data_set:\n membership_short = list([short(data_item[i]) for i in range(0, len(data_item) - 1)])\n membership_middle = list([middle(data_item[i]) for i in range(0, len(data_item) - 1)])\n membership_long = list([calc_long(data_item[i]) for i in range(0, len(data_item) - 1)])\n if (data_item[4] == classify(membership_short, membership_middle, membership_long, union_op, intersection_op)):\n correct += 1\n return correct / len(data_set);\n\ndef process_input(argv=None):\n if argv is None:\n argv = sys.argv\n else:\n sys.argv.extend(argv)\n # Setup argument parser\n parser = ArgumentParser()\n parser.add_argument(\"-d\", \"--data-set\", dest=\"data_set\", help=\"the file containing the data set\", required=True)\n parser.add_argument(\"-u\", \"--union-operator\", dest=\"union_operator\", choices=['max', 'probor'],\n help=\"specify the operator to be used for union; possible values: max, probors\", required=True)\n parser.add_argument(\"-i\", \"--intersection-operator\", dest=\"intersection_operator\", choices=['min', 'prod'],\n help=\"specify the operator to be used for intersection; possible 
values: min, prod\", required=True)\n\n # Process arguments\n args = parser.parse_args()\n\n data_set_path = args.data_set\n union_operator = args.union_operator\n intersection_operator = args.intersection_operator\n \n with open(data_set_path, 'r') as f:\n data_set = [(float(value[0]),\n float(value[1]),\n float(value[2]),\n float(value[3]),\n value[4]) for value in [line.split(',') for line in [re.sub('\\s+', ' ', line).strip() for line in f.read().splitlines()]]]\n f.closed\n return {'data-set': data_set, 'union-op': union_operator, 'intersection-op': intersection_operator}\n\ndef main():\n try:\n input_data = process_input()\n data_set = normalize_data(input_data['data-set'])\n result = fuzzify_and_eval(data_set, eval(input_data['union-op']), eval(input_data['intersection-op']))\n print(\"CORRECTLY CLASSIFIED: {}%\".format(result))\n return 0\n except KeyboardInterrupt:\n ### handle keyboard interrupt ###\n return 0\n except Exception as e:\n sys.stderr.write(repr(e))\n return 2\n\nif __name__ == \"__main__\":\n sys.exit(main())\n" }, { "alpha_fraction": 0.615119457244873, "alphanum_fraction": 0.62109375, "avg_line_length": 34.6721305847168, "blob_id": "0691f15811f5e3152f1ef243d17cedc7957d295d", "content_id": "6ffe5766dc043d2a040f4534525b2e198b81290c", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4352, "license_type": "permissive", "max_line_length": 153, "num_lines": 122, "path": "/a4/DP.py", "repo_name": "mishless/LearningSystems", "src_encoding": "UTF-8", "text": "# encoding: utf-8\n'''\nImplementation of value iteration algorithm for finding the optimal value for all states in a graph;\nUsing the optimal value the optimal policy can be easily constrcuted and thus the shortest path is found;\nResults are validated using Bellman-Ford shortest path algorithm;\n\nAuthors: Mihaela Stoycheva <mihaela.stoycheva@gmail.com> & Vukan Turkulov <vukant@gmail.com>\n\n'''\n\nimport sys\nimport 
random\nfrom argparse import ArgumentParser\n\ndef value_iterate_algorithm(graph, destination, eps):\n optimal_values = dict()\n d = dict()\n for node in graph:\n optimal_values[node] = 0\n d[node] = 0\n search = True \n while search:\n for node in graph:\n if node == destination:\n continue\n old_optimal_value = optimal_values[node]\n optimal_values[node] = max([optimal_values[dest] - int(graph[node][dest]) for dest in graph[node]])\n d[node] = abs(old_optimal_value - optimal_values[node])\n search = False\n for node in graph:\n if (d[node] > eps):\n search = True\n return optimal_values\n\ndef calculate_policy(graph, optimal_values):\n policy = dict()\n for node in graph:\n policy[node] = max([(optimal_values[dest] - int(graph[node][dest]), dest) for dest in graph[node]], key=lambda e:e[0])[1]\n return policy\n\ndef find_shortest_paths(graph, destination, policy):\n shortest_paths = dict()\n for node in graph:\n shortest_paths[node] = 0\n current_node = node;\n while current_node is not destination:\n random_policy = random.choice(policy[current_node])\n shortest_paths[node] += int(graph[current_node][random_policy])\n current_node = random_policy\n return shortest_paths\n\ndef bellman_ford(graph, destination):\n shortest_paths = dict()\n edges = 0;\n for node in graph:\n shortest_paths[node] = sys.maxsize\n shortest_paths[destination] = 0\n for i in range(0, len(graph)):\n for node in graph:\n for edge in graph[node]:\n if (shortest_paths[node] + int(graph[node][edge]) < shortest_paths[edge]):\n shortest_paths[edge] = shortest_paths[node] + int(graph[node][edge])\n return shortest_paths\n\ndef compare_shortest_paths(bellman_ford_shortest_path, value_iteration_shorest_path):\n for path in bellman_ford_shortest_path:\n if (bellman_ford_shortest_path[path] != value_iteration_shorest_path[path]):\n raise Exception(\"The two distances are not the same: Bellman-Ford shortest path for {} is {} and value iteration shortest path for {} is {}.\"\n .format(path, 
bellman_ford_shortest_path[path], path, value_iteration_shorest_path[path]))\n\ndef create_graph(paths):\n graph = dict()\n for path in paths:\n path = path.split(\" \")\n if path[0] not in graph:\n graph[path[0]] = dict()\n if path[1] not in graph:\n graph[path[1]] = dict()\n graph[path[0]][path[1]] = path[2]\n graph[path[1]][path[0]] = path[2]\n return graph\n\ndef process_input(argv=None):\n if argv is None:\n argv = sys.argv\n else:\n sys.argv.extend(argv)\n # Setup argument parser\n parser = ArgumentParser()\n parser.add_argument(\"-f\", \"--cities-file\", dest=\"file\", help=\"the file containing the cities information\", required=True)\n\n # Process arguments\n args = parser.parse_args()\n\n file = args.file\n paths = []\n with open(file, 'r') as f:\n paths = f.read().splitlines()\n f.closed\n return paths\n\ndef main(argv=None):\n try:\n paths = process_input(argv)\n graph = create_graph(paths)\n optimal_values = value_iterate_algorithm(graph, \"F\", 0.001)\n policy = calculate_policy(graph, optimal_values)\n shortest_paths = find_shortest_paths(graph, \"F\", policy)\n bellman_ford_shortest_paths = bellman_ford(graph, \"F\")\n compare_shortest_paths(bellman_ford_shortest_paths, shortest_paths)\n print(\"Optimal values: {}\".format(optimal_values))\n print(\"Shortest paths from every node to F: {}\".format(shortest_paths))\n return 0\n except KeyboardInterrupt:\n ### handle keyboard interrupt ###\n return 0\n except Exception as e:\n sys.stderr.write(repr(e))\n return 2\n\nif __name__ == \"__main__\":\n sys.exit(main())\n" }, { "alpha_fraction": 0.590111494064331, "alphanum_fraction": 0.6016267538070679, "avg_line_length": 25.177034378051758, "blob_id": "57015c59d788045523619a609a0b5927220f234b", "content_id": "c746ec91b306e818609b2388a6f07e590b53157d", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 10942, "license_type": "permissive", "max_line_length": 77, "num_lines": 418, "path": 
"/a3/ga.py", "repo_name": "mishless/LearningSystems", "src_encoding": "UTF-8", "text": "# Genetic Algorithm for solving the Traveling Salesman problem\n# Authors: Mihaela Stoycheva, Vukan Turkulov\n\n# Includes\nimport configparser\nimport math\nimport matplotlib.pyplot as plt\nimport numpy\nimport random\nimport sys\nfrom operator import itemgetter\n\n#Global variables(yay!)\n# Configuration variables(read from config.txt)\nmutation_rate = 0;\npopulation_size = 0;\nelitism_rate = 0;\ntournament_rate = 0;\nmax_iterations = 0;\ninput_file_name = \"\";\nparent_rate = 0;\n\n# General global variables\ncities = {};\nnumber_of_cities = 0;\nparent_number = 0;\ntournament_size = 0;\nelite_number = 0;\ncrossover_number = 0;\n\ndef read_config():\n global mutation_rate;\n global elitism_rate;\n global tournament_rate;\n global population_size;\n global input_file_name;\n global max_iterations;\n global parent_rate;\n global parent_number;\n global tournament_size;\n global elite_number;\n global crossover_number;\n\n config = configparser.ConfigParser();\n config.read(\"config.txt\");\n\n mutation_rate = float(config['general']['mutation_rate']);\n population_size = int(config['general']['population_size']);\n elitism_rate = float(config['general']['elitism_rate']);\n tournament_rate = float(config['general']['tournament_rate']);\n max_iterations = int(config['general']['max_iterations']);\n parent_rate = float(config['general']['parent_rate']);\n input_file_name = config['general']['input_file_name'];\n\n parent_number = int(population_size * parent_rate);\n elite_number = int(population_size * elitism_rate);\n tournament_size = int(population_size * tournament_rate);\n crossover_number = population_size - elite_number;\n\ndef print_config():\n print(\"***** CONFIGURATION *****\");\n print_var(\"Population size\", population_size);\n print_var(\"Elitism rate\", elitism_rate);\n print_var(\"Tournament rate\", tournament_rate);\n print_var(\"Mutation rate\", 
mutation_rate);\n print_var(\"Parent rate\", parent_rate);\n print_var(\"Iteration number\", max_iterations);\n print(\"\");\n print_var(\"Tournament size\", tournament_size);\n print_var(\"Parent number\", parent_number);\n print_var(\"Elite number\", elite_number);\n print_var(\"Crossover number\", crossover_number);\n print(\"\");\n\ndef read_input_file():\n global number_of_cities;\n\n file = open(input_file_name, \"r\");\n file_lines = file.readlines();\n file.close();\n\n for file_line in file_lines:\n temp = file_line.split(); \n cities[int(temp[0])] = {'x' : float(temp[1]), 'y' : float(temp[2])};\n\n number_of_cities = len(cities);\n\ndef get_distance(city1, city2):\n return math.sqrt( ((city1['x']-city2['x'])**2) +\n ((city1['y']-city2['y'])**2));\n\ndef print_cities():\n print(\"***** CITIES *****\");\n\n for key, city in cities.items():\n print(\"#\" + \"%2s\" % str(key) + \": (\" +\n \"%6s\" % str(city['x']) + ', ' +\n \"%6s\" % str(city['y']) + ')');\n\n print(\"\");\n\ndef print_var(name, var):\n print(name + \":\" + \" \"*(17-len(name)) + str(var));\n\ndef init():\n read_config();\n read_input_file(); \n print_config();\n\ndef create_random_individual():\n individual = [];\n\n # We must begin at first city\n individual.append(1);\n\n # Create list of city indexes\n indexes = list(range(2,number_of_cities+1));\n\n while len(indexes) > 0:\n picked_index = random.choice(indexes);\n indexes.remove(picked_index);\n individual.append(picked_index);\n\n # We must end at first city\n individual.append(1);\n\n return individual;\n\ndef print_population(population, name):\n print(\"***** POPULATION: \" + name + \" *****\");\n\n print(\"Population size = \" + str(len(population)));\n\n i = 0;\n for individual in population:\n print(\"IND #\" + str(i) + \": \" + str(individual));\n i += 1;\n\ndef print_population_2(population, name):\n print(\"***** POPULATION: \" + name + \" *****\");\n\n print(\"Population size = \" + str(len(population)));\n i = 0;\n for 
individual in population:\n print(\"IND #\" + str(i) + \" distance = \" +\n str(evaluate_individual(individual)));\n i += 1;\n\n print(\"\");\n\ndef print_population_3(population, name):\n print(\"***** POPULATION: \" + name + \" *****\");\n\n print(\"Population size = \" + str(len(population)));\n for individual in population:\n print(str(individual) + \": distance = \" +\n str(evaluate_individual(individual)));\n\n print(\"\");\n\ndef create_random_population(population_size):\n population = [];\n\n for i in range(0, population_size):\n population.append(create_random_individual());\n\n return population;\n\ndef evaluate_individual(individual):\n distance_traveled = 0;\n \n for i in range(0, len(individual)-1): \n distance_traveled = (distance_traveled +\n get_distance(cities[individual[i]], cities[individual[i+1]]));\n\n return distance_traveled;\n\ndef evaluate_population(population):\n evaluations = [];\n\n for individual in population:\n evaluations.append((evaluate_individual(individual), individual));\n\n return evaluations;\n\ndef select_tournament_pool(data):\n tournament_pool = [];\n\n indexes = list(range(0, len(data)));\n\n for i in range(0, tournament_size):\n chosen_index = random.choice(indexes);\n tournament_pool.append(data[chosen_index]);\n indexes.remove(chosen_index);\n\n return tournament_pool;\n\ndef best_solution(pool):\n best_individual = {'eval' : sys.float_info.max};\n\n for individual in pool:\n if individual['eval'] < best_individual['eval']:\n best_individual = individual; \n \n return best_individual;\n\ndef run_tournament(pool):\n return best_solution(pool);\n\ndef merge_popul_and_eval(population, evaluations):\n data = [];\n for i in range(0, len(population)):\n data.append({'ind' : population[i],\n 'eval' : evaluations[i]});\n\n return data;\n\ndef select_parent_pool(population, evaluations):\n parent_pool = [];\n data = merge_popul_and_eval(population, evaluations);\n\n for i in range(0, parent_number):\n tournament_pool = 
select_tournament_pool(data);\n parent = run_tournament(tournament_pool);\n parent_pool.append(parent['ind']);\n data.remove(parent);\n\n return parent_pool;\n\ndef is_individual_valid(individual):\n\n if(len(individual) != (number_of_cities+1)):\n print(\"INVALID \" + str(individual));\n return False;\n\n if(individual[0] != 1):\n print(\"INVALID \" + str(individual));\n return False;\n\n if(individual[-1] != 1):\n print(\"INVALID \" + str(individual));\n return False;\n\n for city in individual:\n if city == 1:\n if individual.count(city) != 2:\n print(\"INVALID \" + str(individual));\n return False;\n else:\n if individual.count(city) != 1:\n print(\"INVALID \" + str(individual));\n return False;\n\n return True;\n\ndef is_population_valid(population):\n for individual in population:\n if is_individual_valid(individual) == False:\n return False;\n\n return True;\n\ndef create_child(parent1, parent2):\n l = len(parent1);\n x = random.randint(1, l-1);\n y = random.randint(x, l-1);\n\n child = [];\n extract = parent1[x:y];\n \"\"\"print_var(\"P1\", parent1);\n print_var(\"P2\", parent2);\n print_var(\"x\", x);\n print_var(\"y\", y);\n print_var(\"Extract\", extract);\"\"\"\n\n i = 0;\n\n for j in range(0, x):\n\n while(parent2[i] in extract):\n i += 1;\n\n child.append(parent2[i]);\n i += 1;\n\n child.extend(extract);\n\n for j in range(y, l):\n\n while(parent2[i] in extract):\n i += 1;\n\n child.append(parent2[i]);\n i += 1;\n\n return child;\n\ndef generate_children(parent_pool, child_num):\n children = [];\n\n for i in range(0, child_num):\n parent1 = random.choice(parent_pool);\n parent_pool.remove(parent1);\n parent2 = random.choice(parent_pool);\n parent_pool.append(parent1);\n new_child = create_child(parent1, parent2);\n children.append(new_child);\n\n return children;\n\ndef generate_elites(population, evaluations, number):\n data = merge_popul_and_eval(population, evaluations);\n elites = [];\n\n for i in range(0, number):\n best = best_solution(data);\n 
elites.append(best['ind']);\n data.remove(best);\n\n return elites;\n\ndef mutate_individual(individual):\n\n i = random.randint(1, len(individual)-2);\n j = i;\n\n while j == i:\n j = random.randint(1, len(individual)-2);\n\n individual[i], individual[j] = individual[j], individual[i];\n\ndef mutate_population(population):\n for individual in population:\n if random.random() < mutation_rate:\n mutate_individual(individual);\n\ndef test_stuff():\n \"\"\"\n p1 = \"abcdefg\";\n p2 = \"1234567\";\n for i in range(0,10):\n print(create_child(p1,p2));\n \n\n ind = [1,2,3,4,5,6];\n print(\"Before\", ind);\n mutate_individual(ind);\n print(\"After\", ind);\n exit();\"\"\"\n\ndef perform_GA():\n best_solutions = [];\n best_individuals = [];\n best_solution = None;\n\n #print(\"***** ALGORITHM START *****\");\n\n population = create_random_population(population_size);\n\n iteration_counter = 1;\n\n while True:\n #print(\"Running iteration \" + str(iteration_counter) + \":\");\n\n evaluations = evaluate_population(population);\n\n best_solution = min(evaluations, key=lambda evaluation:evaluation[0])\n best_solutions.append(best_solution[0]);\n best_individuals.append(best_solution[1]);\n\n evaluations = [evaluation[0] for evaluation in evaluations]\n\n if iteration_counter == max_iterations:\n break; \n\n parent_pool = select_parent_pool(population, evaluations);\n children = generate_children(parent_pool, crossover_number);\n mutate_population(children);\n\n elites = generate_elites(population, evaluations, elite_number);\n \n # Prepare population for the next iteration\n population = children + elites;\n iteration_counter += 1;\n\n if is_population_valid(population) == False:\n break;\n\n return (best_solutions, best_individuals);\n\ndef do_what_needs_to_be_done():\n results = [];\n bests = [];\n\n print(\"***** ALGORITHM START *****\");\n sys.stdout.flush()\n\n for i in range(0, 10):\n print(\"Starting cycle \" + str(i+1));\n results.append(perform_GA());\n 
bests.append((results[i][0][-1], results[i][1][-1]));\n\n best_ind = bests.index(min(bests, key=lambda best:best[0]));\n\n print(str(best_ind));\n print(\"***** RESULTS *****\");\n print(\"Best result is \" + str(bests[best_ind][0]));\n print(\"Best result is \" + str(bests[best_ind][1]));\n\n plt.plot(results[best_ind][0]);\n plt.show();\n\n \n\n\n#main\ninit();\ndo_what_needs_to_be_done()\n" } ]
6
chauhanprakhar/api_example
https://github.com/chauhanprakhar/api_example
4d82e8fbf1727e0bb04fee2d9f1dc03adbefd77c
754ecf74e79c4db29efe729d4d7bd394055575a5
2500bb6420136d4127812a3aeaa72880e898efc4
refs/heads/master
2020-12-14T16:09:32.449247
2020-01-19T08:23:45
2020-01-19T08:23:45
234,801,992
2
0
null
null
null
null
null
[ { "alpha_fraction": 0.6325300931930542, "alphanum_fraction": 0.6927710771560669, "avg_line_length": 19.875, "blob_id": "fee5fd3d3693ffd50cf46abfd8f49d3c3f9a524a", "content_id": "d15b8faa148a706c6212105f46c77957a3c9f103", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 166, "license_type": "no_license", "max_line_length": 68, "num_lines": 8, "path": "/api_example/send.py", "repo_name": "chauhanprakhar/api_example", "src_encoding": "UTF-8", "text": "#import requests\n\n#headers = {}\n#headers['Authorization'] = 'Bearer #token code'\n\n#r =requests.get('http://127.0.0.1:8000/paradigms', headers=headers)\n\n#print(r.text)" }, { "alpha_fraction": 0.8191489577293396, "alphanum_fraction": 0.8191489577293396, "avg_line_length": 22.5, "blob_id": "12f39d00f084e7f65c553f9d429730a2b4f3a1fc", "content_id": "ef2ec1aef336e4d7793b16eefdfff877afae225c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 94, "license_type": "no_license", "max_line_length": 32, "num_lines": 4, "path": "/languages/admin.py", "repo_name": "chauhanprakhar/api_example", "src_encoding": "UTF-8", "text": "from django.contrib import admin\nfrom . models import Language\n\nadmin.site.register(Language)\n" }, { "alpha_fraction": 0.7749999761581421, "alphanum_fraction": 0.7749999761581421, "avg_line_length": 19, "blob_id": "bb41ebd6040968eba49f26dbcda65c6fdd22df51", "content_id": "77264392653170c74b723d678ca8c1b0c381f897", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 40, "license_type": "no_license", "max_line_length": 25, "num_lines": 2, "path": "/README.md", "repo_name": "chauhanprakhar/api_example", "src_encoding": "UTF-8", "text": "# api_example\n api using rest framework\n" } ]
3
AdrianoDiDio/KMeansClustering
https://github.com/AdrianoDiDio/KMeansClustering
89f69c44b03f70ee2b045c6751657279df4b5131
6d292811a2b064f1b1ae6cbb2b720be03b0d6ded
8b2c87d92e99a784d8ff1ff53a3f1155569f6fcc
refs/heads/main
2023-03-05T21:15:15.406358
2021-02-13T09:35:20
2021-02-13T09:35:20
336,522,917
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.5247772932052612, "alphanum_fraction": 0.5345627665519714, "avg_line_length": 25.263591766357422, "blob_id": "8755b781579c368649591b1361239a045618d940", "content_id": "348c2d538e3933ec811ddfe8d87d4ed3a82729f9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 15942, "license_type": "no_license", "max_line_length": 125, "num_lines": 607, "path": "/src/c/KMeansClustering.c", "repo_name": "AdrianoDiDio/KMeansClustering", "src_encoding": "UTF-8", "text": "#include \"KMeansClustering.h\"\n\nint StartSeconds = 0;\nint SysMilliseconds()\n{\n struct timeval tp;\n int CTime;\n\n gettimeofday(&tp, NULL);\n\n if ( !StartSeconds ){\n StartSeconds = tp.tv_sec;\n return tp.tv_usec/1000;\n }\n\n CTime = (tp.tv_sec - StartSeconds)*1000 + tp.tv_usec / 1000;\n\n return CTime;\n}\n\nvoid CreateDirIfNotExists(char *DirName) {\n struct stat FileStat;\n\n if (stat(DirName, &FileStat) == -1) {\n#ifdef _WIN32\n mkdir(DirName);\n#else\n mkdir(DirName, 0700);\n#endif\n }\n}\n\nvoid PointSubtract(float *PointA,float *PointB,float *PointOut,int Stride)\n{\n int i;\n for( i = 0; i < Stride; i++ ) {\n PointOut[i] = PointA[i] - PointB[i];\n }\n}\n\nfloat PointDistanceSquared(float *PointA,float *PointB,int Stride)\n{\n float Sum;\n int i;\n Sum = 0.f;\n for( i = 0; i < Stride; i++ ) {\n Sum += (PointB[i] - PointA[i]) * (PointB[i] - PointA[i]);\n }\n return Sum;\n}\nint IsAlpha(char c)\n{\n if( !c ){\n return 0;\n }\n\n if( ( c >= 'A' && c <= 'Z' ) ||\n ( c >= 'a' && c <= 'z' ) ){\n return 1;\n }\n\n return 0;\n}\n\nint IsNumber(char c)\n{\n if( !c ){\n return 0;\n }\n\n if( ( c >= '0' && c <= '9' ) ){\n return 1;\n }\n\n return 0;\n}\n\nchar *StringCopy(const char *From)\n{\n char *Dest;\n\n Dest = malloc(strlen(From) + 1);\n\n if ( !Dest ) {\n return NULL;\n }\n\n strcpy(Dest, From);\n return Dest;\n}\nvoid DPrintf(char *Fmt, ...)\n{\n char Temp[1000];\n va_list arglist;\n\n va_start(arglist, Fmt);\n vsnprintf(Temp, 
sizeof( Temp ), Fmt, arglist);\n#ifdef _DEBUG\n fputs(Temp, stdout);\n#endif\n va_end(arglist);\n}\n\nint StringToInt(char *String)\n{\n char *EndPtr; \n long Value;\n \n Value = strtol(String, &EndPtr, 10);\n \n if( errno == ERANGE && Value == LONG_MIN ) {\n DPrintf(\"StringToInt %s (%lu) invalid...underflow occurred\\n\",String,Value);\n return 0;\n } else if( errno == ERANGE && Value == LONG_MAX ) {\n DPrintf(\"StringToInt %s (%lu) invalid...overflow occurred\\n\",String,Value);\n return 0;\n }\n return Value;\n}\n\nfloat StringToFloat(char *String)\n{\n char *EndPtr; \n float Value;\n \n Value = strtof(String, &EndPtr);\n \n if( errno == ERANGE && Value == -HUGE_VALF ) {\n DPrintf(\"StringToFloat %s (%f) invalid...underflow occurred\\n\",String,Value);\n return 0;\n } else if( errno == ERANGE && Value == HUGE_VALF) {\n DPrintf(\"StringToFloat %s (%f) invalid...overflow occurred\\n\",String,Value);\n return 0;\n }\n return Value;\n}\n\nint GetFileLength(FILE *Fp)\n{\n int Length;\n int CurrentPosition;\n\n if ( !Fp ) {\n return -1; //Must be a valid file\n }\n\n CurrentPosition = ftell(Fp);\n fseek(Fp, 0, SEEK_END);\n Length = ftell(Fp);\n fseek(Fp, CurrentPosition, SEEK_SET);\n\n return Length;\n}\n\nchar *ReadTextFile(char *File,int Length)\n{\n FILE *Fp;\n int FileSize;\n char *Result;\n int Ret;\n \n Fp = fopen(File,\"r\");\n \n if( !Fp ) {\n DPrintf(\"File %s not found.\\n\",File);\n return NULL;\n }\n FileSize = Length != 0 ? 
Length : GetFileLength(Fp);\n Result = malloc(FileSize + 1);\n Ret = fread(Result,1, FileSize,Fp);\n if( Ret != FileSize ) {\n DPrintf(\"Failed to read file %s\\n\",File);\n return NULL;\n }\n Result[Ret] = '\\0';\n fclose(Fp);\n return Result;\n}\n\nvoid FlowerPrint(Flower_t *Flower)\n{\n if( Flower == NULL ){\n return;\n }\n DPrintf(\"Species:%s\\n\",Flower->Species);\n DPrintf(\"Sepal Length:%f\\n\",Flower->SepalLength);\n DPrintf(\"Sepal Width:%f\\n\",Flower->SepalWidth);\n DPrintf(\"Petal Length:%f\\n\",Flower->PetalLength);\n DPrintf(\"Petal Length:%f\\n\",Flower->PetalWidth);\n}\nFlower_t *LoadIrisDatasetOld()\n{\n\n Flower_t *FlowerDataset;\n Flower_t Iterator;\n FILE *CSVDatasetFile;\n int RetValue;\n \n CSVDatasetFile = fopen(\"Dataset/iris.csv\",\"r\");\n if( CSVDatasetFile == NULL ) {\n DPrintf(\"LoadIrisDataset:Couldn't open dataset...\\n\");\n return NULL;\n }\n FlowerDataset = malloc(sizeof(Flower_t));\n FlowerDataset->Next = NULL;\n char Head1[256];\n char Head2[256];\n char Head3[256];\n char Head4[256];\n char Head5[256];\n RetValue = fscanf(CSVDatasetFile,\"%s,%s,%s,%s,%s\\n\",Head1,Head2,Head3,Head4,Head5);\n while( 1 ) {\n RetValue = fscanf(CSVDatasetFile,\"%f,%f,%f,%f,%s\\n\",&Iterator.SepalLength,&Iterator.SepalWidth,&Iterator.PetalLength,\n &Iterator.PetalWidth,Iterator.Species);\n if( RetValue != 5 ) {\n DPrintf(\"Done or found an invalid line...\\n\");\n break;\n }\n DPrintf(\"Loaded %f;%f;%f;%f; Species %s\\n\",Iterator.SepalLength,Iterator.SepalWidth,Iterator.PetalLength,\n Iterator.PetalWidth,Iterator.Species);\n }\n return FlowerDataset; \n}\nchar *CSVGetNumberFromBuffer(char *Buffer,float *Value)\n{\n int i = 0;\n char String[256];\n if( Value == NULL ) {\n return Buffer;\n }\n do {\n if( *Buffer == '\\r' ) {\n Buffer++;\n continue;\n }\n if( *Buffer == '\\n' ) {\n break;\n }\n if( *Buffer == ',' ) {\n Buffer++;\n break;\n }\n String[i] = *Buffer;\n Buffer++;\n i++;\n } while ( IsNumber(*Buffer) || *Buffer == '.' 
|| *Buffer == ',' || *Buffer == '\\n' || *Buffer == '\\r');\n String[i] = '\\0';\n *Value = StringToFloat(String);\n return Buffer;\n}\n\nchar *CSVGetStringFromBuffer(char *Buffer,char *Value)\n{\n int i = 0;\n char String[256];\n\n do {\n if( *Buffer == '\\n' ) {\n break;\n }\n if( *Buffer == ',' ) {\n Buffer++;\n break;\n }\n String[i] = *Buffer;\n Buffer++;\n i++;\n } while ( IsAlpha(*Buffer) || *Buffer == '.' || *Buffer == '-' || *Buffer == ',' || *Buffer == '\\n');\n String[i] = '\\0';\n if( Value == NULL ) {\n Value = StringCopy(String);\n } else {\n strcpy(Value,String);\n }\n return Buffer;\n}\n\nchar *CSVSkipLine(char *Buffer,int *NumColumns)\n{\n int LocalNumColumns;\n LocalNumColumns = -1;\n do {\n if( IsAlpha(*Buffer) ) {\n if( LocalNumColumns == -1 ) {\n LocalNumColumns = 1;\n }\n }\n if( *Buffer == ',' ) {\n LocalNumColumns++;\n }\n if( *Buffer == '\\n' ) {\n break;\n }\n Buffer++;\n } while( *Buffer );\n if( NumColumns != NULL ) {\n *NumColumns = LocalNumColumns;\n }\n return Buffer;\n}\n\nFlower_t *LoadIrisDataset()\n{\n Flower_t *FlowerDataset;\n Flower_t Iterator;\n char *Buffer;\n\n// char *Line;\n// int RetValue;\n// int i;\n \n\n Buffer = ReadTextFile(\"Dataset/iris.csv\",0);\n if( Buffer == NULL ) {\n DPrintf(\"Couldn't read file\\n\");\n return NULL;\n }\n FlowerDataset = malloc(sizeof(Flower_t));\n FlowerDataset->Next = NULL;\n \n int LineNumber = 0;\n while( *Buffer ) {\n //Buffer a line\n if( LineNumber == 0 ) {\n Buffer = CSVSkipLine(Buffer,NULL);\n } else {\n Buffer = CSVGetNumberFromBuffer(Buffer,&Iterator.SepalLength);\n Buffer = CSVGetNumberFromBuffer(Buffer,&Iterator.SepalWidth);\n Buffer = CSVGetNumberFromBuffer(Buffer,&Iterator.PetalLength);\n Buffer = CSVGetNumberFromBuffer(Buffer,&Iterator.PetalWidth);\n Buffer = CSVGetStringFromBuffer(Buffer,Iterator.Species);\n FlowerPrint(&Iterator);\n }\n if( *Buffer == '\\n' ) {\n LineNumber++;\n }\n Buffer++;\n }\n DPrintf(\"File has %i lines\\n\",LineNumber);\n return FlowerDataset; 
\n}\n\nvoid PrintPoint(float *Position,int Stride)\n{\n int i;\n DPrintf(\"Point: \");\n for( i = 0; i < Stride; i++ ) {\n DPrintf(\"%f;\",Position[i]);\n }\n DPrintf(\"\\n\");\n}\n\nvoid DumpClusters(PointArrayList_t *Dataset,Centroid_t *Centroids,int NumCentroids,int Stride,int Pass)\n{\n FILE *OutCentroidCSV;\n FILE *OutDatasetCSV;\n char OutFile[256];\n int BaseChar;\n int i;\n int j;\n \n //Write 2 CVS 1 for the centroids 1 for the dataset\n if( !Dataset ) {\n DPrintf(\"DumpClusters:Invalid Dataset.\\n\");\n return;\n }\n if( !Centroids ) {\n DPrintf(\"DumpClusters:Invalid Centroids data.\\n\");\n return;\n }\n if( NumCentroids <= 0 ) {\n DPrintf(\"DumpClusters:Invalid number of centroids.\\n\");\n return;\n }\n CreateDirIfNotExists(\"Out\");\n sprintf(OutFile,\"Out/out_centroids_%i.csv\",Pass);\n OutCentroidCSV = fopen(OutFile,\"w\");\n sprintf(OutFile,\"Out/out_dataset_%i.csv\",Pass);\n OutDatasetCSV = fopen(OutFile,\"w\");\n \n BaseChar = 'x';\n for ( i = 0; i < Stride - 1; i++ ) {\n fprintf(OutCentroidCSV,\"%c,\",BaseChar + i);\n }\n fprintf(OutCentroidCSV,\"%c\\n\",BaseChar + (Stride - 1));\n for( i = 0; i < NumCentroids; i++ ) {\n for( j = 0; j < Stride - 1; j++ ) {\n fprintf(OutCentroidCSV,\"%f,\",Centroids[i].Position[j]);\n }\n fprintf(OutCentroidCSV,\"%f\\n\",Centroids[i].Position[Stride - 1]);\n }\n \n BaseChar = 'x';\n for ( i = 0; i < Stride; i++ ) {\n fprintf(OutDatasetCSV,\"%c,\",BaseChar + i);\n }\n fprintf(OutDatasetCSV,\"centroidIndex\\n\");\n for( i = 0; i < Dataset->NumPoints; i++ ) {\n for( j = 0; j < Stride; j++ ) {\n fprintf(OutDatasetCSV,\"%f,\",Dataset->Points[i].Position[j]);\n }\n fprintf(OutDatasetCSV,\"%i\\n\",Dataset->Points[i].CentroidIndex);\n }\n \n fclose(OutCentroidCSV);\n fclose(OutDatasetCSV);\n}\nvoid KMeansClustering(PointArrayList_t *Dataset,int NumCentroids,int Stride)\n{\n Centroid_t *Centroids;\n float Min;\n float Distance;\n float ClusterSize;\n float *Sum;\n float *Delta;\n int i;\n int j;\n int k;\n int NumStep;\n 
int HasToStop;\n int NumClustersSet;\n int Start;\n int End;\n \n if( !Dataset ) {\n DPrintf(\"KMeansClustering:Invalid Dataset.\\n\");\n return;\n }\n if( NumCentroids <= 0 ) {\n DPrintf(\"KMeansClustering:Invalid Number of centroids %i\\n\",NumCentroids);\n return;\n }\n \n if( Stride <= 0 ) {\n DPrintf(\"KMeansClustering:Invalid Stride %i\\n\",Stride);\n return;\n }\n Centroids = malloc(sizeof(Centroid_t) * NumCentroids );\n \n printf(\"Selected KMeans Algorithm with a dataset of size %i and %i centroids.\\n\",Dataset->NumPoints,NumCentroids);\n \n // 1) Selects K (NumCentroids) random centroids from the dataset.\n Start = SysMilliseconds();\n srand(time(0)); \n for( i = 0; i < NumCentroids; i++ ) {\n Centroids[i].Position = malloc(Stride * sizeof(float));\n// DPrintf(\"Centroid %i: \",i);\n for( j = 0; j < Stride; j++ ) {\n// Index = (rand() % (Dataset->NumPoints + 1)); \n Centroids[i].Position[j] = Dataset->Points[/*Index*/i].Position[j];\n// DPrintf(\"%f;\",Centroids[i].Position[j]);\n }\n// DPrintf(\"\\n\");\n Centroids[i].Stride = Stride;\n }\n //Test with 5 steps...\n HasToStop = 0;\n NumStep = 0;\n Sum = malloc( Stride * sizeof(float));\n Delta = malloc( Stride * sizeof(float));\n while( !HasToStop ) {\n // 2) Assign each points of the dataset to the nearest centroid.\n for( i = 0; i < Dataset->NumPoints; i++ ) {\n Min = INFINITY;\n for( j = 0; j < NumCentroids; j++ ) {\n Distance = PointDistanceSquared(Dataset->Points[i].Position,Centroids[j].Position,Stride);\n if( Distance < Min ) {\n Dataset->Points[i].CentroidIndex = j;\n Min = Distance;\n }\n }\n }\n// DumpClusters(Dataset,Centroids,NumCentroids,0);\n // 3) Recalculate centroid position based on the new clusters.\n NumClustersSet = 0;\n for( i = 0; i < NumCentroids; i++ ) {\n memset(Sum,0,Stride * sizeof(float));\n ClusterSize = 0;\n for( j = 0; j < Dataset->NumPoints; j++ ) {\n if( Dataset->Points[j].CentroidIndex != i ) {\n continue;\n }\n for( k = 0; k < Stride; k++ ) {\n Sum[k] += 
Dataset->Points[j].Position[k];\n }\n ClusterSize++;\n }\n if( ClusterSize == 0.f ) {\n continue;\n }\n for( k = 0; k < Stride; k++ ) {\n Sum[k] /= ClusterSize;\n }\n PointSubtract(Sum,Centroids[i].Position,Delta,Stride);\n// DPrintf(\"ClusterSize %f Old Position %f;%f | New Position: %f;%f | Delta: %f;%f\\n\",ClusterSize,\n// Sum.x,Sum.y,Centroids[i].Position.x,\n// Centroids[i].Position.y,Delta.x,Delta.y\n// );\n for( k = 0; k < Stride; k++ ) {\n if( fabsf(Delta[k]) > KMEANS_ALGORITHM_TOLERANCE) {\n break;\n }\n }\n if( k == Stride ) {\n NumClustersSet++;\n }\n memcpy(Centroids[i].Position,Sum,Stride * sizeof(float));\n// DPrintf(\"New Centroid Position for %i is %f;%f or %f;%f\\n\",i,Sum[0],Sum[1],\n// Centroids[i].Position[0],Centroids[i].Position[1]\n// );\n }\n if( NumClustersSet == NumCentroids ) {\n break;\n }\n if( NumStep < 3 ) {\n DumpClusters(Dataset,Centroids,NumCentroids,Stride,NumStep);\n }\n NumStep++;\n }\n End = SysMilliseconds();\n printf(\"KMeansAlgorithm has finished...took %i steps to complete %i ms elapsed\\n\",NumStep,End-Start);\n DumpClusters(Dataset,Centroids,NumCentroids,Stride,NumStep);\n for( i = 0; i < NumCentroids; i++ ) {\n free(Centroids[i].Position);\n }\n free(Centroids);\n free(Sum);\n free(Delta);\n}\n\nPointArrayList_t *LoadPointsDataset(char *File,int *Stride)\n{ \n PointArrayList_t *PointList;\n Point_t Iterator;\n char *Buffer;\n char *Temp;\n int LineNumber;\n int LocalStride;\n int i;\n \n if( !File ) {\n printf(\"LoadPointsDataset:Invalid file\\n\");\n return NULL;\n }\n Buffer = ReadTextFile(File,0);\n if( Buffer == NULL ) {\n DPrintf(\"Couldn't read file\\n\");\n return NULL;\n }\n PointList = malloc(sizeof(PointArrayList_t));\n PointArrayListInit(PointList,64); Temp = Buffer;\n LineNumber = 0;\n LocalStride = 0;\n \n while( *Temp ) {\n if( LineNumber == 0 ) {\n Temp = CSVSkipLine(Temp,&LocalStride);\n assert(LocalStride != -1);\n } else {\n Iterator.Stride = LocalStride;\n Iterator.Position = malloc( Iterator.Stride * 
sizeof(float));\n for( i = 0; i < LocalStride; i++ ) {\n Temp = CSVGetNumberFromBuffer(Temp,&Iterator.Position[i]);\n }\n PointArrayListAdd(PointList,Iterator);\n }\n if( *Temp == '\\n' ) {\n LineNumber++;\n }\n Temp++;\n }\n#if 0 /*_DEBUG*/\n// int i;\n for( i = 0; i < PointList->NumPoints; i++ ) {\n PrintPoint(PointList->Points[i].Position,LocalStride);\n }\n DPrintf(\"Read %i points || %i lines\\n\",PointList->NumPoints,LineNumber);\n#endif\n if( Stride != NULL ) {\n *Stride = LocalStride;\n }\n free(Buffer);\n return PointList; \n}\nint main(int argc,char** argv)\n{\n PointArrayList_t *PointList;\n int NumClusters;\n int Stride;\n\n if( argc != 3 ) {\n printf(\"Usage:%s <Dataset File> <Number of Clusters>\\n\",argv[0]);\n return -1;\n }\n\n PointList = LoadPointsDataset(argv[1],&Stride);\n \n if( PointList == NULL ) {\n DPrintf(\"Couldn't load point dataset.\\n\");\n return -1;\n }\n NumClusters = StringToInt(argv[2]);\n KMeansClustering(PointList,NumClusters,Stride);\n PointArrayListCleanUp(PointList);\n free(PointList);\n return 1;\n}\n" }, { "alpha_fraction": 0.7168367505073547, "alphanum_fraction": 0.7265306115150452, "avg_line_length": 33.385963439941406, "blob_id": "7409b1e6415a3a8a232e9bf37d1f0c3eca464f92", "content_id": "8fc33e47a27018c548d75d286e91f83dd2485ef6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1960, "license_type": "no_license", "max_line_length": 113, "num_lines": 57, "path": "/tools/2d/BlobGenerator/PyBlobGenerator.py", "repo_name": "AdrianoDiDio/KMeansClustering", "src_encoding": "UTF-8", "text": "import matplotlib.pyplot as pyplot\nimport numpy as np\nfrom numpy.random import Generator,PCG64\nimport itertools\nimport csv\nimport argparse\nimport os\nfrom pathlib import Path\n\ndef checkAndSetArgument(argObject,defaultValue):\n if argObject is not None:\n return argObject\n else:\n return defaultValue\nparser = argparse.ArgumentParser(description='Generate an uniform 
distribution of 2D points.')\nparser.add_argument(\"--width\",\"-w\",type=int,help=\"sets the upper bound of the X axis.\")\nparser.add_argument(\"--height\",\"-he\",type=int,help=\"sets the upper bound of the Y axis.\")\nparser.add_argument(\"--showPlot\",\"-s\",action=\"store_true\",help=\"show generated points in a plot.\")\nparser.add_argument(\"--outFile\",\"-o\",type=str,help=\"set the name of the output file (default is data_blob.csv).\")\n\nargs = parser.parse_args()\n\nwidth = checkAndSetArgument(args.width,128)\nheight = checkAndSetArgument(args.height,64)\n\nprint(\"Selected Upper X Bound: \" + str(width))\nprint(\"Selected Upper Y Bound: \" + str(height))\n\nN = width * height // 4\n\nprint(\"Generating \" + str(N) + \" samples\")\n\nrg = Generator(PCG64())\n\nx = rg.uniform(0,width,N)\nroundX = [round(number, 3) for number in x]\ny = rg.uniform(0,height,N)\nroundY = [round(number, 3) for number in y]\n\npointList = tuple(map(list, zip(roundX, roundY)))\n\nuniquePointList = np.unique(pointList, axis=0)\n\nuniqueX,uniqueY = zip(*uniquePointList)\n\nif args.showPlot is not False:\n pyplot.scatter(uniqueX, uniqueY,c='g', alpha=0.6, lw=0)\n pyplot.show()\n \ncsvOutFile = checkAndSetArgument(args.outFile,'data_blob.csv')\ncsvOutFilePath = os.path.dirname(os.path.realpath(csvOutFile))\nPath(csvOutFilePath).mkdir(parents=True, exist_ok=True)\nwith open(csvOutFile, newline='',mode='w') as dataBlobFile:\n dataBlobWriter = csv.writer(dataBlobFile,delimiter=',', quotechar='\"', quoting=csv.QUOTE_MINIMAL)\n dataBlobWriter.writerow(['x','y'])\n for row in uniquePointList:\n dataBlobWriter.writerow(row)\n" }, { "alpha_fraction": 0.6480447053909302, "alphanum_fraction": 0.6480447053909302, "avg_line_length": 16.899999618530273, "blob_id": "ec17f61bcea4ef53304a37b1cfa10df1a79dc7c8", "content_id": "023a6e935cd924b73bd6f3ee5aabd357cff5fe67", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 179, "license_type": 
"no_license", "max_line_length": 27, "num_lines": 10, "path": "/doc/src/codes/Sequential/KMeansClustering.h", "repo_name": "AdrianoDiDio/KMeansClustering", "src_encoding": "UTF-8", "text": "typedef struct Point_s {\n float *Position;\n int CentroidIndex;\n int Stride;\n} Point_t;\n\ntypedef struct Centroid_s {\n float *Position;\n int Stride;\n} Centroid_t;\n" }, { "alpha_fraction": 0.6610169410705566, "alphanum_fraction": 0.6709870100021362, "avg_line_length": 16.910715103149414, "blob_id": "a0ac414262c18fb152de35c211844063fae52350", "content_id": "08258085eb379e9c44e0538f3b2b072c4a7fd6f2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 1003, "license_type": "no_license", "max_line_length": 64, "num_lines": 56, "path": "/src/openmp/KMeansClustering.h", "repo_name": "AdrianoDiDio/KMeansClustering", "src_encoding": "UTF-8", "text": "#ifndef __KMEANSCLUSTERING_H_\n#define __KMEANSCLUSTERING_H_\n\n#include <stdio.h>\n#include <stdlib.h>\n#include <string.h>\n#include <stdarg.h>\n#include <assert.h>\n#include <errno.h>\n#include <math.h>\n#include <time.h>\n#include <limits.h>\n#include <sys/time.h>\n#include <sys/stat.h>\n#include <sys/types.h>\n#include <omp.h>\n\n#include \"PointArrayList.h\"\n\n#ifdef __GNUC__\n#define Attribute(x) __attribute__(x)\n#else\n#define Attribute(x)\n#endif\n\n#define KMEANS_ALGORITHM_TOLERANCE 0.00001f\n\ntypedef struct Flower_s {\n float SepalWidth;\n float SepalLength;\n float PetalWidth;\n float PetalLength;\n char *Species;\n \n struct Flower_s *Next;\n} Flower_t;\n\ntypedef struct Vec2_s {\n float x;\n float y; \n} Vec2_t;\ntypedef struct Point_s {\n float *Position;\n int CentroidIndex;\n int Stride;\n} Point_t;\n\ntypedef struct Centroid_s {\n float *Position;\n int Stride;\n} Centroid_t;\n\n\nvoid DPrintf(char *Fmt, ...) 
Attribute((format(printf,1,2)));\n\n#endif //__KMEANSCLUSTERING_H_\n" }, { "alpha_fraction": 0.5349071621894836, "alphanum_fraction": 0.5434686541557312, "avg_line_length": 26.293813705444336, "blob_id": "31c8fa53599f5ec753159a680328ff9c061d0b63", "content_id": "2c28948fb34d9c6a17a0d0a08e80b78c4980379d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 15885, "license_type": "no_license", "max_line_length": 128, "num_lines": 582, "path": "/src/openmp/KMeansClustering.c", "repo_name": "AdrianoDiDio/KMeansClustering", "src_encoding": "UTF-8", "text": "#include \"KMeansClustering.h\"\n\nint StartSeconds = 0;\nint SysMilliseconds()\n{\n struct timeval tp;\n int CTime;\n\n gettimeofday(&tp, NULL);\n\n if ( !StartSeconds ){\n StartSeconds = tp.tv_sec;\n return tp.tv_usec/1000;\n }\n\n CTime = (tp.tv_sec - StartSeconds)*1000 + tp.tv_usec / 1000;\n\n return CTime;\n}\n\nvoid CreateDirIfNotExists(char *DirName) {\n struct stat FileStat;\n\n if (stat(DirName, &FileStat) == -1) {\n#ifdef _WIN32\n mkdir(DirName);\n#else\n mkdir(DirName, 0700);\n#endif\n }\n}\n\nvoid PointSubtract(float *PointA,float *PointB,float *PointOut,int Stride)\n{\n int i;\n for( i = 0; i < Stride; i++ ) {\n PointOut[i] = PointA[i] - PointB[i];\n }\n}\n\nfloat PointDistanceSquared(float *PointA,float *PointB,int Stride)\n{\n float Sum;\n int i;\n Sum = 0.f;\n for( i = 0; i < Stride; i++ ) {\n Sum += (PointB[i] - PointA[i]) * (PointB[i] - PointA[i]);\n }\n return Sum;\n}\nint IsAlpha(char c)\n{\n if( !c ){\n return 0;\n }\n\n if( ( c >= 'A' && c <= 'Z' ) ||\n ( c >= 'a' && c <= 'z' ) ){\n return 1;\n }\n\n return 0;\n}\n\nint IsNumber(char c)\n{\n if( !c ){\n return 0;\n }\n\n if( ( c >= '0' && c <= '9' ) ){\n return 1;\n }\n\n return 0;\n}\n\nchar *StringCopy(const char *From)\n{\n char *Dest;\n\n Dest = (char *) malloc(strlen(From) + 1);\n\n if ( !Dest ) {\n return NULL;\n }\n\n strcpy(Dest, From);\n return Dest;\n}\nvoid DPrintf(char *Fmt, ...)\n{\n 
char Temp[1000];\n va_list arglist;\n\n va_start(arglist, Fmt);\n vsnprintf(Temp, sizeof( Temp ), Fmt, arglist);\n#ifdef _DEBUG\n fputs(Temp, stdout);\n#endif\n va_end(arglist);\n}\n\nfloat StringToFloat(char *String)\n{\n char *EndPtr; \n float Value;\n \n Value = strtof(String, &EndPtr);\n \n if( errno == ERANGE && Value == -HUGE_VALF ) {\n DPrintf(\"StringToFloat %s (%f) invalid...underflow occurred\\n\",String,Value);\n return 0;\n } else if( errno == ERANGE && Value == HUGE_VALF) {\n DPrintf(\"StringToFloat %s (%f) invalid...overflow occurred\\n\",String,Value);\n return 0;\n }\n return Value;\n}\n\nint StringToInt(char *String)\n{\n char *EndPtr; \n long Value;\n \n Value = strtol(String, &EndPtr, 10);\n \n if( errno == ERANGE && Value == LONG_MIN ) {\n DPrintf(\"StringToInt %s (%lu) invalid...underflow occurred\\n\",String,Value);\n return 0;\n } else if( errno == ERANGE && Value == LONG_MAX ) {\n DPrintf(\"StringToInt %s (%lu) invalid...overflow occurred\\n\",String,Value);\n return 0;\n }\n return Value;\n}\n\nint GetFileLength(FILE *Fp)\n{\n int Length;\n int CurrentPosition;\n\n if ( !Fp ) {\n return -1; //Must be a valid file\n }\n\n CurrentPosition = ftell(Fp);\n fseek(Fp, 0, SEEK_END);\n Length = ftell(Fp);\n fseek(Fp, CurrentPosition, SEEK_SET);\n\n return Length;\n}\n\nchar *ReadTextFile(char *File,int Length)\n{\n FILE *Fp;\n int FileSize;\n char *Result;\n int Ret;\n \n Fp = fopen(File,\"r\");\n \n if( !Fp ) {\n DPrintf(\"File %s not found.\\n\",File);\n return NULL;\n }\n FileSize = Length != 0 ? 
Length : GetFileLength(Fp);\n Result = (char *) malloc(FileSize + 1);\n Ret = fread(Result,1, FileSize,Fp);\n if( Ret != FileSize ) {\n DPrintf(\"Failed to read file %s\\n\",File);\n return NULL;\n }\n Result[Ret] = '\\0';\n fclose(Fp);\n return Result;\n}\n\nchar *CSVGetNumberFromBuffer(char *Buffer,float *Value)\n{\n int i = 0;\n char String[256];\n if( Value == NULL ) {\n return Buffer;\n }\n do {\n if( *Buffer == '\\r' ) {\n Buffer++;\n continue;\n }\n if( *Buffer == '\\n' ) {\n break;\n }\n if( *Buffer == ',' ) {\n Buffer++;\n break;\n }\n String[i] = *Buffer;\n Buffer++;\n i++;\n } while ( IsNumber(*Buffer) || *Buffer == '.' || *Buffer == ',' || *Buffer == '\\n' || *Buffer == '\\r');\n String[i] = '\\0';\n *Value = StringToFloat(String);\n return Buffer;\n}\n\nchar *CSVGetStringFromBuffer(char *Buffer,char *Value)\n{\n int i = 0;\n char String[256];\n\n do {\n if( *Buffer == '\\n' ) {\n break;\n }\n if( *Buffer == ',' ) {\n Buffer++;\n break;\n }\n String[i] = *Buffer;\n Buffer++;\n i++;\n } while ( IsAlpha(*Buffer) || *Buffer == '.' 
|| *Buffer == '-' || *Buffer == ',' || *Buffer == '\\n');\n String[i] = '\\0';\n if( Value == NULL ) {\n Value = StringCopy(String);\n } else {\n strcpy(Value,String);\n }\n return Buffer;\n}\n\nchar *CSVSkipLine(char *Buffer,int *NumColumns)\n{\n int LocalNumColumns;\n LocalNumColumns = -1;\n do {\n if( IsAlpha(*Buffer) ) {\n if( LocalNumColumns == -1 ) {\n LocalNumColumns = 1;\n }\n }\n if( *Buffer == ',' ) {\n LocalNumColumns++;\n }\n if( *Buffer == '\\n' ) {\n break;\n }\n Buffer++;\n } while( *Buffer );\n if( NumColumns != NULL ) {\n *NumColumns = LocalNumColumns;\n }\n return Buffer;\n}\n\nvoid PrintPoint(float *Position,int Stride)\n{\n int i;\n DPrintf(\"Point: \");\n for( i = 0; i < Stride; i++ ) {\n DPrintf(\"%f;\",Position[i]);\n }\n DPrintf(\"\\n\");\n}\n\nvoid DumpClusters(PointArrayList_t *Dataset,float *Centroids,int NumCentroids,int *Clusters,int Stride,int Pass)\n{\n FILE *OutCentroidCSV;\n FILE *OutDatasetCSV;\n char OutFile[256];\n int BaseChar;\n int i;\n int j;\n \n //Write 2 CVS 1 for the centroids 1 for the dataset\n if( !Dataset ) {\n DPrintf(\"DumpClusters:Invalid Dataset.\\n\");\n return;\n }\n if( !Centroids ) {\n DPrintf(\"DumpClusters:Invalid Centroids data.\\n\");\n return;\n }\n if( NumCentroids <= 0 ) {\n DPrintf(\"DumpClusters:Invalid number of centroids.\\n\");\n return;\n }\n CreateDirIfNotExists(\"Out\");\n sprintf(OutFile,\"Out/out_centroids_%i.csv\",Pass);\n OutCentroidCSV = fopen(OutFile,\"w\");\n sprintf(OutFile,\"Out/out_dataset_%i.csv\",Pass);\n OutDatasetCSV = fopen(OutFile,\"w\");\n \n BaseChar = 'x';\n for ( i = 0; i < Stride - 1; i++ ) {\n fprintf(OutCentroidCSV,\"%c,\",BaseChar + i);\n }\n fprintf(OutCentroidCSV,\"%c\\n\",BaseChar + (Stride - 1));\n for( i = 0; i < NumCentroids; i++ ) {\n for( j = 0; j < Stride - 1; j++ ) {\n fprintf(OutCentroidCSV,\"%f,\",Centroids[i * Stride + j]);\n }\n fprintf(OutCentroidCSV,\"%f\\n\",Centroids[i * Stride + (Stride -1)]);\n }\n \n BaseChar = 'x';\n for ( i = 0; i < Stride; i++ ) 
{\n fprintf(OutDatasetCSV,\"%c,\",BaseChar + i);\n }\n fprintf(OutDatasetCSV,\"centroidIndex\\n\");\n for( i = 0; i < Dataset->NumPoints; i++ ) {\n for( j = 0; j < Stride; j++ ) {\n fprintf(OutDatasetCSV,\"%f,\",Dataset->Points[i * Stride + j]);\n }\n fprintf(OutDatasetCSV,\"%i\\n\",Clusters[i]);\n }\n \n fclose(OutCentroidCSV);\n fclose(OutDatasetCSV);\n}\ntypedef struct MinComparator_s\n{\n float Value;\n int Index;\n} MinComparator_t;\n#pragma omp declare reduction(NearestCentroid : MinComparator_t : omp_out = omp_in.Value < omp_out.Value ? omp_in : omp_out) \\\n initializer(omp_priv = {9999999, -1})\nvoid KMeansClustering(PointArrayList_t *Dataset,int NumCentroids,int Stride)\n{\n float *Centroids;\n float *Distances;\n int DistancesSize;\n int *Clusters;\n int ClusterSize;\n int *ClusterCounter;\n int ClusterCounterSize;\n float *ClusterMeans;\n int ClusterMeansSize;\n int Sum;\n int Start;\n int End;\n// int i;\n// int j;\n// int k;\n int Step;\n MinComparator_t Comparator;\n int MaxThreadNumber;\n \n\n if( !Dataset ) {\n DPrintf(\"KMeansClustering:Invalid Dataset.\\n\");\n return;\n }\n if( NumCentroids <= 0 ) {\n DPrintf(\"KMeansClustering:Invalid Number of centroids %i\\n\",NumCentroids);\n return;\n }\n \n if( Stride <= 0 ) {\n DPrintf(\"KMeansClustering:Invalid Stride %i\\n\",Stride);\n return;\n }\n \n Centroids = (float *) malloc(NumCentroids * Stride * sizeof(float));\n MaxThreadNumber = omp_get_max_threads();\n printf(\"Selected KMeans Algorithm with a dataset of size %i and %i centroids Max Threads:%i.\\n\",\n Dataset->NumPoints,NumCentroids,MaxThreadNumber\n );\n \n Start = SysMilliseconds();\n // 1) Selects K (NumCentroids) random centroids from the dataset.\n// srand(time(0));\n #pragma omp parallel for shared(Centroids) \\\n num_threads(MaxThreadNumber) schedule(static, (NumCentroids*Stride)/MaxThreadNumber)\n for( int i = 0; i < NumCentroids; i++ ) {\n for( int j = 0; j < Stride; j++ ) {\n #pragma omp atomic write\n Centroids[i * Stride + j] = 
Dataset->Points[i * Stride +j];\n }\n }\n// for( j = 0; j < NumCentroids; j++ ) {\n// DPrintf(\"Centroid %i at %f;%f\\n\",j,Centroids[j * Stride],Centroids[j * Stride +1]);\n// }\n\n// HasToStop = 0;\n// NumStep = 0;\n// Sum = malloc( NumCentroids * Stride * sizeof(float));\n// ClustersCounter = malloc( NumCentroids * sizeof(int));\n// Delta = malloc( Stride * sizeof(float));\n DistancesSize = Dataset->NumPoints * NumCentroids * sizeof(float);\n Distances = (float *) malloc(DistancesSize);\n \n ClusterCounterSize = NumCentroids * sizeof(int);\n ClusterCounter = (int *) malloc(ClusterCounterSize);\n \n ClusterSize = Dataset->NumPoints * sizeof(int);\n Clusters = (int *) malloc(ClusterSize);\n \n ClusterMeansSize = NumCentroids * Stride *sizeof(float);\n ClusterMeans = (float *) malloc(ClusterMeansSize);\n\n Step = 0;\n// Comparator = malloc(sizeof(MinComparator_t));\n while( 1 ) {\n// 2) Assign each points of the dataset to the nearest centroid.\n// First get the distances...\n memset(Distances,0,DistancesSize);\n #pragma omp parallel for firstprivate(Centroids,Dataset,Stride) \\\n schedule(static, (NumCentroids*Stride)/MaxThreadNumber)\n for( int i = 0; i < Dataset->NumPoints; i++ ) {\n for( int j = 0; j < NumCentroids; j++ ) {\n float LocalDistance = 0.f;\n for( int k = 0; k < Stride; k++ ) {\n float LocalCentroid = Centroids[j * Stride + k];\n float LocalPoint = Dataset->Points[i * Stride + k];\n LocalDistance += (LocalCentroid - LocalPoint) * (LocalCentroid - LocalPoint);\n }\n Distances[i * NumCentroids + j] = LocalDistance;\n }\n }\n memset(ClusterCounter,0,ClusterCounterSize);\n memset(Clusters,0,ClusterSize);\n\n// #pragma omp parallel firstprivate(ClusterCounter,Clusters)\n #pragma omp parallel for schedule(guided) \\\n \\\n firstprivate(Dataset,Stride) shared(Distances,ClusterMeans,ClusterCounter) /*private(j)*/\n for( int i = 0; i < Dataset->NumPoints; i++ ) {\n float Min = INFINITY;\n int Index = -1;\n float LocalDistance = 0.f;\n// #pragma omp parallel 
for reduction(NearestCentroid:Comparator)\n for( int j = 0; j < NumCentroids; j++ ) {\n LocalDistance = Distances[i * NumCentroids + j];\n if( LocalDistance < Min ) {\n Min = LocalDistance;\n Index = j;\n }\n }\n #pragma omp atomic write\n Clusters[i] = Index;\n #pragma omp atomic\n ClusterCounter[Index]++;\n }\n \n memset(ClusterMeans,0,ClusterMeansSize);\n\n #pragma omp parallel for firstprivate(ClusterCounter) shared(ClusterMeans)\n for( int i = 0; i < Dataset->NumPoints * Stride; i++ ) {\n int PointIndex = i / Stride;\n int StrideIndex = i % Stride;\n int CentroidIndex = Clusters[PointIndex];\n int LocalAddValue = Dataset->Points[PointIndex * Stride + StrideIndex];\n #pragma omp atomic\n ClusterMeans[CentroidIndex * Stride + StrideIndex] += LocalAddValue;\n }\n// break;\n #pragma omp parallel for firstprivate(ClusterCounter) shared(ClusterMeans)\n for( int i = 0; i < NumCentroids * Stride; i++ ) {\n int CentroidIndex = i / Stride;\n int StrideIndex = i % Stride;\n int NumClusters = ClusterCounter[CentroidIndex]; \n if( NumClusters == 0 ) {\n continue;\n }\n #pragma omp atomic\n ClusterMeans[CentroidIndex * Stride + StrideIndex] /= (float) NumClusters;\n }\n \n Sum = 0;\n #pragma omp parallel for reduction(+: Sum)\n for( int i = 0; i < NumCentroids * Stride; i++ ) {\n float Delta;\n int Value;\n int CentroidIndex = i / Stride;\n int StrideIndex = i % Stride;\n Delta = fabsf(ClusterMeans[CentroidIndex * Stride + StrideIndex] - Centroids[CentroidIndex * Stride + StrideIndex]);\n Value = Delta < KMEANS_ALGORITHM_TOLERANCE ? 
1 : 0;\n// #pragma omp atomic\n Sum = Sum + Value;\n }\n if( Sum == NumCentroids * Stride ) {\n break;\n }\n memcpy(Centroids,ClusterMeans,ClusterMeansSize);\n Step++;\n// break;\n }\n End = SysMilliseconds();\n printf(\"Took %i steps to complete %i ms elapsed\\n\",Step,End-Start);\n DumpClusters(Dataset,Centroids,NumCentroids,Clusters,Stride,0);\n free(Centroids);\n free(Distances);\n free(ClusterCounter);\n free(Clusters);\n free(ClusterMeans);\n}\n\nPointArrayList_t *LoadPointsDataset(char *File,int *Stride)\n{ \n PointArrayList_t *PointList;\n float *Point;\n char *Buffer;\n char *Temp;\n int LineNumber;\n int LocalStride;\n int i;\n \n if( !File ) {\n printf(\"LoadPointsDataset:Invalid file.\\n\");\n return NULL;\n }\n \n Buffer = ReadTextFile(File,0);\n if( Buffer == NULL ) {\n DPrintf(\"LoadPointsDataset:Couldn't read file %s\\n\",File);\n return NULL;\n }\n\n Temp = Buffer;\n LineNumber = 0;\n LocalStride = 0;\n PointList = NULL;\n \n while( *Temp ) {\n if( LineNumber == 0 ) {\n Temp = CSVSkipLine(Temp,&LocalStride);\n assert(LocalStride != -1);\n if( PointList == NULL ) {\n PointList = (PointArrayList_t *) malloc(sizeof(PointArrayList_t));\n PointArrayListInit(PointList,64,LocalStride);\n }\n \n } else {\n Point = (float *) malloc( LocalStride * sizeof(float));\n for( i = 0; i < LocalStride; i++ ) {\n Temp = CSVGetNumberFromBuffer(Temp,&Point[i]);\n }\n PointArrayListAdd(PointList,Point);\n free(Point);\n }\n if( *Temp == '\\n' ) {\n LineNumber++;\n }\n Temp++;\n }\n#if 0 /*_DEBUG*/\n// int i;\n for( i = 0; i < PointList->NumPoints; i++ ) {\n PrintPoint(PointList->Points[i].Position,LocalStride);\n }\n DPrintf(\"Read %i points || %i lines\\n\",PointList->NumPoints,LineNumber);\n#endif\n if( Stride != NULL ) {\n *Stride = LocalStride;\n }\n free(Buffer);\n return PointList; \n}\nint main(int argc,char** argv)\n{\n PointArrayList_t *PointList;\n int NumClusters;\n int Stride;\n\n if( argc != 3 ) {\n printf(\"Usage:%s <Dataset File> <Number of 
Clusters>\\n\",argv[0]);\n return -1;\n }\n PointList = LoadPointsDataset(argv[1],&Stride);\n \n if( PointList == NULL ) {\n DPrintf(\"Couldn't load point dataset.\\n\");\n return -1;\n }\n NumClusters = StringToInt(argv[2]);\n KMeansClustering(PointList,NumClusters,Stride);\n PointArrayListCleanUp(PointList);\n free(PointList);\n}\n" }, { "alpha_fraction": 0.6165933609008789, "alphanum_fraction": 0.6209930777549744, "avg_line_length": 28.462963104248047, "blob_id": "dffb6a85dacb8c7535676f0fbe2dfee39f4c527a", "content_id": "d11b73485f1d2f5f7e81925e2c630f1591afed6f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 1591, "license_type": "no_license", "max_line_length": 118, "num_lines": 54, "path": "/src/openmp/PointArrayList.c", "repo_name": "AdrianoDiDio/KMeansClustering", "src_encoding": "UTF-8", "text": "#include \"KMeansClustering.h\"\n\n\nvoid PointArrayListAdd(PointArrayList_t *PointList, float *Point) {\n int Base;\n int i;\n if( PointList == NULL ) {\n DPrintf(\"PointArrayListAdd:Failed PointList is NULL.\\n\");\n return;\n }\n if( PointList->NumPoints == PointList->Size ) {\n //Grow\n PointList->Size *= 2;\n PointList->Points = (float *) realloc(PointList->Points, PointList->Size * sizeof(float) * PointList->Stride);\n }\n Base = PointList->NumPoints * PointList->Stride;\n for( i = 0; i < PointList->Stride; i++ ) {\n PointList->Points[Base + i] = Point[i];\n }\n PointList->NumPoints++;\n}\n\nvoid PointArrayListCleanUp(PointArrayList_t *PointList)\n{\n if( PointList == NULL ) {\n DPrintf(\"PointArrayListCleanUp:Failed PointList is NULL.\\n\");\n return;\n }\n free(PointList->Points);\n PointList->Points = NULL;\n PointList->NumPoints = 0;\n PointList->Size = 0;\n}\n\nvoid PointArrayListInit(PointArrayList_t *PointList,int InitialSize,int Stride)\n{\n if( PointList == NULL ) {\n DPrintf(\"PointArrayListInit:Failed PointList is NULL.\\n\");\n return;\n }\n if( InitialSize <= 0 ) {\n 
DPrintf(\"PointArrayListInit:Failed Invalid InitialSize (%i)\\n\",InitialSize);\n return;\n }\n \n if( Stride <= 0 ) {\n DPrintf(\"PointArrayListInit:Failed Invalid Stride (%i)\\n\",Stride);\n return;\n }\n PointList->Points = (float *) malloc(sizeof(Point_t) * InitialSize * Stride);\n PointList->Stride = Stride;\n PointList->NumPoints = 0;\n PointList->Size = InitialSize;\n}\n" }, { "alpha_fraction": 0.5035426616668701, "alphanum_fraction": 0.5146538019180298, "avg_line_length": 20.33333396911621, "blob_id": "a7e8b80efbd20e7acfd02056763e5622a4fda2cc", "content_id": "0aa163f90e889c65b41f14d60b17b5c79a973547", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 6210, "license_type": "no_license", "max_line_length": 114, "num_lines": 291, "path": "/src/cuda/Utils.c", "repo_name": "AdrianoDiDio/KMeansClustering", "src_encoding": "UTF-8", "text": "#include \"KMeansClustering.h\"\n\nint StartSeconds = 0;\n\nint SysMilliseconds()\n{\n struct timeval tp;\n int CTime;\n\n gettimeofday(&tp, NULL);\n\n if ( !StartSeconds ){\n StartSeconds = tp.tv_sec;\n return tp.tv_usec/1000;\n }\n\n CTime = (tp.tv_sec - StartSeconds)*1000 + tp.tv_usec / 1000;\n\n return CTime;\n}\n\nvoid CreateDirIfNotExists(char *DirName) {\n struct stat FileStat;\n\n if (stat(DirName, &FileStat) == -1) {\n#ifdef _WIN32\n mkdir(DirName);\n#else\n mkdir(DirName, 0700);\n#endif\n }\n}\n\nint IsAlpha(char c)\n{\n if( !c ){\n return 0;\n }\n\n if( ( c >= 'A' && c <= 'Z' ) ||\n ( c >= 'a' && c <= 'z' ) ){\n return 1;\n }\n\n return 0;\n}\n\nint IsNumber(char c)\n{\n if( !c ){\n return 0;\n }\n\n if( ( c >= '0' && c <= '9' ) ){\n return 1;\n }\n\n return 0;\n}\n\nchar *StringCopy(const char *From)\n{\n char *Dest;\n\n Dest = malloc(strlen(From) + 1);\n\n if ( !Dest ) {\n return NULL;\n }\n\n strcpy(Dest, From);\n return Dest;\n}\n\nint StringToInt(char *String)\n{\n char *EndPtr; \n long Value;\n \n Value = strtol(String, &EndPtr, 10);\n \n if( errno == ERANGE && 
Value == LONG_MIN ) {\n DPrintf(\"StringToInt %s (%lu) invalid...underflow occurred\\n\",String,Value);\n return 0;\n } else if( errno == ERANGE && Value == LONG_MAX ) {\n DPrintf(\"StringToInt %s (%lu) invalid...overflow occurred\\n\",String,Value);\n return 0;\n }\n return Value;\n}\n\nfloat StringToFloat(char *String)\n{\n char *EndPtr; \n float Value;\n \n Value = strtof(String, &EndPtr);\n \n if( errno == ERANGE && Value == -HUGE_VALF ) {\n DPrintf(\"StringToFloat %s (%f) invalid...underflow occurred\\n\",String,Value);\n return 0;\n } else if( errno == ERANGE && Value == HUGE_VALF) {\n DPrintf(\"StringToFloat %s (%f) invalid...overflow occurred\\n\",String,Value);\n return 0;\n }\n return Value;\n}\n\nint GetFileLength(FILE *Fp)\n{\n int Length;\n int CurrentPosition;\n\n if ( !Fp ) {\n return -1; //Must be a valid file\n }\n\n CurrentPosition = ftell(Fp);\n fseek(Fp, 0, SEEK_END);\n Length = ftell(Fp);\n fseek(Fp, CurrentPosition, SEEK_SET);\n\n return Length;\n}\n\nvoid DPrintf(char *Fmt, ...)\n{\n char Temp[1000];\n va_list arglist;\n\n va_start(arglist, Fmt);\n vsnprintf(Temp, sizeof( Temp ), Fmt, arglist);\n#ifdef _DEBUG\n fputs(Temp, stdout);\n#endif\n va_end(arglist);\n}\n\nchar *ReadTextFile(char *File,int Length)\n{\n FILE *Fp;\n int FileSize;\n char *Result;\n int Ret;\n \n Fp = fopen(File,\"r\");\n \n if( !Fp ) {\n DPrintf(\"File %s not found.\\n\",File);\n return NULL;\n }\n FileSize = Length != 0 ? 
Length : GetFileLength(Fp);\n Result = malloc(FileSize + 1);\n Ret = fread(Result,1, FileSize,Fp);\n if( Ret != FileSize ) {\n DPrintf(\"Failed to read file %s\\n\",File);\n return NULL;\n }\n Result[Ret] = '\\0';\n fclose(Fp);\n return Result;\n}\n\nchar *CSVGetNumberFromBuffer(char *Buffer,float *Value)\n{\n int i = 0;\n char String[256];\n if( Value == NULL ) {\n return Buffer;\n }\n do {\n if( *Buffer == '\\r' ) {\n Buffer++;\n continue;\n }\n if( *Buffer == '\\n' ) {\n break;\n }\n if( *Buffer == ',' ) {\n Buffer++;\n break;\n }\n String[i] = *Buffer;\n Buffer++;\n i++;\n } while ( IsNumber(*Buffer) || *Buffer == '.' || *Buffer == ',' || *Buffer == '\\n' || *Buffer == '\\r');\n String[i] = '\\0';\n *Value = StringToFloat(String);\n return Buffer;\n}\n\nchar *CSVGetStringFromBuffer(char *Buffer,char *Value)\n{\n int i = 0;\n char String[256];\n\n do {\n if( *Buffer == '\\n' ) {\n break;\n }\n if( *Buffer == ',' ) {\n Buffer++;\n break;\n }\n String[i] = *Buffer;\n Buffer++;\n i++;\n } while ( IsAlpha(*Buffer) || *Buffer == '.' 
|| *Buffer == '-' || *Buffer == ',' || *Buffer == '\\n');\n String[i] = '\\0';\n if( Value == NULL ) {\n Value = StringCopy(String);\n } else {\n strcpy(Value,String);\n }\n return Buffer;\n}\n\nchar *CSVSkipLine(char *Buffer,int *NumColumns)\n{\n int LocalNumColumns;\n LocalNumColumns = -1;\n do {\n if( IsAlpha(*Buffer) ) {\n if( LocalNumColumns == -1 ) {\n LocalNumColumns = 1;\n }\n }\n if( *Buffer == ',' ) {\n LocalNumColumns++;\n }\n if( *Buffer == '\\n' ) {\n break;\n }\n Buffer++;\n } while( *Buffer );\n if( NumColumns != NULL ) {\n *NumColumns = LocalNumColumns;\n }\n return Buffer;\n}\n\nvoid DumpClusters(float *Points,int NumPoints,float *Centroids,int NumCentroids,int *Clusters,int Stride,int Pass)\n{\n FILE *OutCentroidCSV;\n FILE *OutDatasetCSV;\n char OutFile[256];\n int i;\n int j;\n \n //Write 2 CVS 1 for the centroids 1 for the dataset\n if( !Points ) {\n DPrintf(\"DumpClusters:Invalid Dataset.\\n\");\n return;\n }\n if( !Centroids ) {\n DPrintf(\"DumpClusters:Invalid Centroids data.\\n\");\n return;\n }\n if( !Clusters ) {\n DPrintf(\"DumpClusters:Invalid Cluster List data.\\n\");\n return;\n }\n if( NumCentroids <= 0 ) {\n DPrintf(\"DumpClusters:Invalid number of centroids.\\n\");\n return;\n }\n CreateDirIfNotExists(\"Out\");\n sprintf(OutFile,\"Out/out_centroids_%i.csv\",Pass);\n OutCentroidCSV = fopen(OutFile,\"w\");\n sprintf(OutFile,\"Out/out_dataset_%i.csv\",Pass);\n OutDatasetCSV = fopen(OutFile,\"w\");\n \n fprintf(OutCentroidCSV,\"x,y\\n\");\n for( i = 0; i < NumCentroids; i++ ) {\n for( j = 0; j < Stride; j++ ) {\n fprintf(OutCentroidCSV,\"%f,\",Centroids[i * Stride + j]);\n }\n fprintf(OutCentroidCSV,\"\\n\");\n }\n \n fprintf(OutDatasetCSV,\"x,y,centroidIndex\\n\");\n for( i = 0; i < NumPoints; i++ ) {\n for( j = 0; j < Stride; j++ ) {\n fprintf(OutDatasetCSV,\"%f,\",Points[i * Stride + j]);\n }\n fprintf(OutDatasetCSV,\"%i\\n\",Clusters[i]);\n }\n \n fclose(OutCentroidCSV);\n fclose(OutDatasetCSV);\n}\n \n" }, { "alpha_fraction": 
0.5062845945358276, "alphanum_fraction": 0.5145801901817322, "avg_line_length": 30.322834014892578, "blob_id": "ac2571e7276f4e96b65905abe11ac975921ba729", "content_id": "18cfb9d77602cf911ca58d89ac30633e2998ebc1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 3978, "license_type": "no_license", "max_line_length": 118, "num_lines": 127, "path": "/doc/src/codes/Sequential/KMeansClustering.c", "repo_name": "AdrianoDiDio/KMeansClustering", "src_encoding": "UTF-8", "text": "void PointSubtract(float *PointA,float *PointB,float *PointOut,int Stride)\n{\n int i;\n for( i = 0; i < Stride; i++ ) {\n PointOut[i] = PointA[i] - PointB[i];\n }\n}\n\nfloat PointDistanceSquared(float *PointA,float *PointB,int Stride)\n{\n float Sum;\n int i;\n Sum = 0.f;\n for( i = 0; i < Stride; i++ ) {\n Sum += (PointB[i] - PointA[i]) * \n (PointB[i] - PointA[i]);\n }\n return Sum;\n}\n\nvoid KMeansClustering(PointArrayList_t *Dataset,int NumCentroids,int Stride)\n{\n Centroid_t *Centroids;\n float Min;\n float Distance;\n float ClusterSize;\n float *Sum;\n float *Delta;\n int i;\n int j;\n int k;\n int NumStep;\n int HasToStop;\n int NumClustersSet;\n \n if( !Dataset ) {\n DPrintf(\"KMeansClustering:Invalid Dataset.\\n\");\n return;\n }\n if( NumCentroids <= 0 ) {\n DPrintf(\"KMeansClustering:Invalid Number of centroids %i\\n\",NumCentroids);\n return;\n }\n \n if( Stride <= 0 ) {\n DPrintf(\"KMeansClustering:Invalid Stride %i\\n\",Stride);\n return;\n }\n \n printf(\"Selected KMeans Algorithm with a dataset of size %i and %i centroids.\\n\",Dataset->NumPoints,NumCentroids);\n \n // 1) Selects K (NumCentroids) random centroids from the dataset.\n srand(time(0)); \n Centroids = malloc(sizeof(Centroid_t) * NumCentroids );\n for( i = 0; i < NumCentroids; i++ ) {\n Centroids[i].Position = malloc(Stride * sizeof(float));\n for( j = 0; j < Stride; j++ ) {\n Centroids[i].Position[j] = Dataset->Points[i].Position[j];\n }\n Centroids[i].Stride = 
Stride;\n }\n //Test with 5 steps...\n HasToStop = 0;\n NumStep = 0;\n Sum = malloc( Stride * sizeof(float));\n Delta = malloc( Stride * sizeof(float));\n while( !HasToStop ) {\n // 2) Assign each points of the dataset to the nearest centroid.\n for(i = 0; i < Dataset->NumPoints; i++) {\n Min = 99999;\n for(j = 0; j < NumCentroids; j++) {\n Distance = PointDistanceSquared(Dataset->Points[i].Position,Centroids[j].Position,Stride);\n if( Distance < Min ) {\n Dataset->Points[i].CentroidIndex = j;\n Min = Distance;\n }\n }\n }\n// DumpClusters(Dataset,Centroids,NumCentroids,0);\n // 3) Recalculate centroid position based on the new clusters.\n NumClustersSet = 0;\n for( int i = 0; i < NumCentroids; i++ ) {\n memset(Sum,0,Stride * sizeof(float));\n ClusterSize = 0;\n for( j = 0; j < Dataset->NumPoints; j++ ) {\n if( Dataset->Points[j].CentroidIndex != i ) {\n continue;\n }\n for( k = 0; k < Stride; k++ ) {\n Sum[k] += Dataset->Points[j].Position[k];\n }\n ClusterSize++;\n }\n if( ClusterSize == 0.f ) {\n continue;\n }\n for( k = 0; k < Stride; k++ ) {\n Sum[k] /= ClusterSize;\n }\n PointSubtract(Sum,Centroids[i].Position,Delta,Stride);\n for( k = 0; k < Stride; k++ ) {\n if( fabsf(Delta[k]) > KMEANS_ALGORITHM_TOLERANCE) {\n break;\n }\n }\n if( k == Stride ) {\n NumClustersSet++;\n }\n memcpy(Centroids[i].Position,Sum,Stride * sizeof(float));\n }\n if( NumClustersSet == NumCentroids ) {\n break;\n }\n if( NumStep < 3 ) {\n DumpClusters(Dataset,Centroids,NumCentroids,Stride,NumStep);\n }\n NumStep++;\n }\n printf(\"KMeansAlgorithm has finished...took %i steps to complete\\n\",NumStep);\n DumpClusters(Dataset,Centroids,NumCentroids,Stride,NumStep);\n for( i = 0; i < NumCentroids; i++ ) {\n free(Centroids[i].Position);\n }\n free(Centroids);\n free(Sum);\n free(Delta);\n}\n" }, { "alpha_fraction": 0.5525760650634766, "alphanum_fraction": 0.5583006739616394, "avg_line_length": 26.429752349853516, "blob_id": "f261423dde071dad957dd4a9d972a0029f60f21d", "content_id": 
"c64009140772e3f16840eb0ac5d4c0fe0ce13baa", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 3319, "license_type": "no_license", "max_line_length": 110, "num_lines": 121, "path": "/src/cuda/PointArrayList.c", "repo_name": "AdrianoDiDio/KMeansClustering", "src_encoding": "UTF-8", "text": "#include \"KMeansClustering.h\"\n\n\nPointArrayList_t *LoadPointsDataset(char *File,int *Stride)\n{ \n PointArrayList_t *PointList;\n float *Point;\n char *Buffer;\n char *Temp;\n int LineNumber;\n int LocalStride;\n int i;\n \n if( !File ) {\n printf(\"LoadPointsDataset:Invalid File\\n\");\n return NULL;\n }\n \n Buffer = ReadTextFile(File,0);\n \n if( Buffer == NULL ) {\n DPrintf(\"Couldn't read file\\n\");\n return NULL;\n }\n\n PointList = NULL;\n Temp = Buffer;\n LineNumber = 0;\n LocalStride = 0;\n \n while( *Temp ) {\n if( LineNumber == 0 ) {\n Temp = CSVSkipLine(Temp,&LocalStride);\n assert(LocalStride != -1);\n if( PointList == NULL ) {\n PointList = malloc(sizeof(PointArrayList_t));\n PointArrayListInit(PointList,64,LocalStride);\n }\n } else {\n Point = malloc( LocalStride * sizeof(float));\n for( i = 0; i < LocalStride; i++ ) {\n Temp = CSVGetNumberFromBuffer(Temp,&Point[i]);\n }\n PointArrayListAdd(PointList,Point);\n free(Point);\n }\n if( *Temp == '\\n' ) {\n LineNumber++;\n }\n Temp++;\n }\n#if 0 /*_DEBUG*/\n int i,j,base;\n for( i = 0; i < PointList->NumPoints; i++ ) {\n base = i * PointList->Stride;\n DPrintf(\"Point at \");\n for( j = 0; j < PointList->Stride; j++ ) {\n DPrintf(\"%i;\",base+j);\n }\n DPrintf(\"\\n\");\n// PrintVec2(PointList->Points[i].Position);\n }\n DPrintf(\"Read %i points || %i lines\\n\",PointList->NumPoints,LineNumber);\n#endif\n if( Stride != NULL ) {\n *Stride = LocalStride;\n }\n free(Buffer);\n return PointList; \n}\n\nvoid PointArrayListAdd(PointArrayList_t *PointList, float *Point) {\n int Base;\n int i;\n if( PointList == NULL ) {\n DPrintf(\"PointArrayListAdd:Failed PointList is 
NULL.\\n\");\n return;\n }\n if( PointList->NumPoints == PointList->Size ) {\n //Grow\n PointList->Size *= 2;\n PointList->Points = realloc(PointList->Points, PointList->Size * sizeof(Point_t) * PointList->Stride);\n }\n Base = PointList->NumPoints * PointList->Stride;\n for( i = 0; i < PointList->Stride; i++ ) {\n PointList->Points[Base + i] = Point[i];\n }\n PointList->NumPoints++;\n}\n\nvoid PointArrayListCleanUp(PointArrayList_t *PointList)\n{\n if( PointList == NULL ) {\n DPrintf(\"PointArrayListCleanUp:Failed PointList is NULL.\\n\");\n return;\n }\n free(PointList->Points);\n PointList->Points = NULL;\n PointList->NumPoints = 0;\n PointList->Size = 0;\n}\nvoid PointArrayListInit(PointArrayList_t *PointList,int InitialSize,int Stride)\n{\n if( PointList == NULL ) {\n DPrintf(\"PointArrayListInit:Failed PointList is NULL.\\n\");\n return;\n }\n if( InitialSize <= 0 ) {\n DPrintf(\"PointArrayListInit:Failed Invalid InitialSize (%i)\\n\",InitialSize);\n return;\n }\n \n if( Stride <= 0 ) {\n DPrintf(\"PointArrayListInit:Failed Invalid Stride (%i)\\n\",Stride);\n return;\n }\n PointList->Points = malloc(sizeof(Point_t) * InitialSize * Stride);\n PointList->Stride = Stride;\n PointList->NumPoints = 0;\n PointList->Size = InitialSize;\n}\n" }, { "alpha_fraction": 0.625948429107666, "alphanum_fraction": 0.6305007338523865, "avg_line_length": 28.288888931274414, "blob_id": "af1d808d1e4fd817072d9d9b3a9e55c4d57e8198", "content_id": "10d9c238880e9de689f4963eb26d299b992c9cb5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 1318, "license_type": "no_license", "max_line_length": 90, "num_lines": 45, "path": "/src/c/PointArrayList.c", "repo_name": "AdrianoDiDio/KMeansClustering", "src_encoding": "UTF-8", "text": "#include \"KMeansClustering.h\"\n\n\nvoid PointArrayListAdd(PointArrayList_t *PointList, Point_t Point) {\n if( PointList == NULL ) {\n DPrintf(\"PointArrayListAdd:Failed PointList is NULL.\\n\");\n return;\n 
}\n if( PointList->NumPoints == PointList->Size ) {\n //Grow\n PointList->Size *= 2;\n PointList->Points = realloc(PointList->Points, PointList->Size * sizeof(Point_t));\n }\n PointList->Points[PointList->NumPoints++] = Point;\n}\n\nvoid PointArrayListCleanUp(PointArrayList_t *PointList)\n{\n int i;\n if( PointList == NULL ) {\n DPrintf(\"PointArrayListCleanUp:Failed PointList is NULL.\\n\");\n return;\n }\n for( i = 0; i < PointList->NumPoints; i++ ) {\n free(PointList->Points[i].Position);\n }\n free(PointList->Points);\n PointList->Points = NULL;\n PointList->NumPoints = 0;\n PointList->Size = 0;\n}\nvoid PointArrayListInit(PointArrayList_t *PointList,int InitialSize)\n{\n if( PointList == NULL ) {\n DPrintf(\"PointArrayListInit:Failed PointList is NULL.\\n\");\n return;\n }\n if( InitialSize <= 0 ) {\n DPrintf(\"PointArrayListInit:Failed Invalid InitialSize\\n\");\n return;\n }\n PointList->Points = malloc(sizeof(Point_t) * InitialSize);\n PointList->NumPoints = 0;\n PointList->Size = InitialSize;\n}\n" }, { "alpha_fraction": 0.5458715558052063, "alphanum_fraction": 0.5507001280784607, "avg_line_length": 39.60784149169922, "blob_id": "68708ae8bd4bbefee15b77750a851e2a51d2b790", "content_id": "25ddf88c50ed74eab7c02623fc3866437e40cd1f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 4142, "license_type": "no_license", "max_line_length": 128, "num_lines": 102, "path": "/doc/src/codes/Parallel/openmp/KMeansClustering.c", "repo_name": "AdrianoDiDio/KMeansClustering", "src_encoding": "UTF-8", "text": "void KMeansClustering(PointArrayList_t *Dataset,int NumCentroids,int Stride)\n{\n\n Centroids = (float *) malloc(NumCentroids * Stride * sizeof(float));\n DistancesSize = Dataset->NumPoints * NumCentroids * sizeof(float);\n Distances = (float *) malloc(DistancesSize);\n \n ClusterCounterSize = NumCentroids * sizeof(float);\n ClusterCounter = (float *) malloc(ClusterCounterSize);\n \n ClusterSize = Dataset->NumPoints * 
sizeof(int);\n Clusters = (int *) malloc(ClusterSize);\n \n ClusterMeansSize = NumCentroids * Stride *sizeof(float);\n ClusterMeans = (float *) malloc(ClusterMeansSize);\n \n MaxThreadNumber = omp_get_max_threads();\n\n #pragma omp parallel for private(j) num_threads(MaxThreadNumber) schedule(static, (NumCentroids*Stride)/MaxThreadNumber)\n for( i = 0; i < NumCentroids; i++ ) {\n for( j = 0; j < Stride; j++ ) {\n Centroids[i * Stride + j] = Dataset->Points[i * Stride +j];\n }\n }\n \n while( 1 ) {\n memset(Distances,0,DistancesSize);\n \n #pragma omp parallel for private(j,k) num_threads(MaxThreadNumber) \\\n schedule(static, (Dataset->NumPoints*NumCentroids)/MaxThreadNumber)\n for( i = 0; i < Dataset->NumPoints; i++ ) {\n for( j = 0; j < NumCentroids; j++ ) {\n float LocalDistance = 0.f;\n for( k = 0; k < Stride; k++ ) {\n LocalDistance += (Centroids[j * Stride + k]\n - Dataset->Points[i * Stride + k]) * \n (Centroids[j * Stride + k] - Dataset->Points[i * Stride + k]);\n }\n Distances[i * NumCentroids + j] = LocalDistance;\n }\n }\n \n memset(ClusterCounter,0,ClusterCounterSize);\n\n #pragma omp parallel for schedule(guided) \\\n shared(Distances,Clusters,ClusterCounter) private(j)\n for( i = 0; i < Dataset->NumPoints; i++ ) {\n float Min = INFINITY;\n int Index = -1;\n for( j = 0; j < NumCentroids; j++ ) {\n float LocalDistance = Distances[i * NumCentroids + j];\n if( LocalDistance < Min ) {\n Min = LocalDistance;\n Index = j;\n }\n }\n #pragma omp atomic write\n Clusters[i] = Index;\n #pragma omp atomic\n ClusterCounter[Index]++;\n }\n \n memset(ClusterMeans,0,ClusterMeansSize);\n #pragma omp parallel for firstprivate(ClusterCounter) shared(ClusterMeans)\n for( int i = 0; i < Dataset->NumPoints * Stride; i++ ) {\n int PointIndex = i / Stride;\n int StrideIndex = i % Stride;\n int CentroidIndex = Clusters[PointIndex];\n int LocalAddValue = Dataset->Points[PointIndex * Stride + StrideIndex];\n #pragma omp atomic\n ClusterMeans[CentroidIndex * Stride + StrideIndex] 
+= LocalAddValue;\n }\n \n #pragma omp parallel for firstprivate(ClusterCounter) shared(ClusterMeans)\n for( int i = 0; i < NumCentroids * Stride; i++ ) {\n int CentroidIndex = i / Stride;\n int StrideIndex = i % Stride;\n int NumClusters = ClusterCounter[CentroidIndex]; \n if( NumClusters == 0 ) {\n continue;\n }\n #pragma omp atomic\n ClusterMeans[CentroidIndex * Stride + StrideIndex] /= (float) NumClusters;\n }\n \n Sum = 0.f;\n #pragma omp parallel for shared(ClusterMeans,Centroids) reduction(+: Sum)\n for( i = 0; i < NumCentroids * Stride; i++ ) {\n float Delta;\n float Value;\n int CentroidIndex = i / Stride;\n int StrideIndex = i % Stride;\n Delta = fabsf(ClusterMeans[CentroidIndex * Stride + StrideIndex] - Centroids[CentroidIndex * Stride + StrideIndex]);\n Value = Delta < KMEANS_ALGORITHM_TOLERANCE ? 1.f : 0.f;\n Sum = Sum + Value;\n }\n if( Sum == NumCentroids * Stride ) {\n break;\n }\n memcpy(Centroids,ClusterMeans,ClusterMeansSize);\n }\n}\n" }, { "alpha_fraction": 0.7359667420387268, "alphanum_fraction": 0.7359667420387268, "avg_line_length": 25.72222137451172, "blob_id": "52c3dbccd88d718a0d9276cf9f37551f0f2bb2cf", "content_id": "36edacabb4120c3d6282abdd40747d49884950d5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 481, "license_type": "no_license", "max_line_length": 80, "num_lines": 18, "path": "/src/openmp/PointArrayList.h", "repo_name": "AdrianoDiDio/KMeansClustering", "src_encoding": "UTF-8", "text": "#ifndef __POINTARRAYLIST_H_\n#define __POINTARRAYLIST_H_\n\nstruct Point_s;\ntypedef struct Point_s Point_t;\n\ntypedef struct PointArrayList_s {\n float *Points;\n int NumPoints;\n int Size;\n int Stride;\n} PointArrayList_t;\n\nvoid PointArrayListInit(PointArrayList_t *PointList,int InitialSize,int Stride);\nvoid PointArrayListAdd(PointArrayList_t *PointList, float *Point);\nvoid PointArrayListCleanUp(PointArrayList_t *PointList);\n\n#endif //__POINTARRAYLIST_H_\n" }, { "alpha_fraction": 
0.6617449522018433, "alphanum_fraction": 0.672035813331604, "avg_line_length": 38.91071319580078, "blob_id": "3af8c8c8d34d682672b029083d09d109139d35ca", "content_id": "30f8b00e43c0611e133c28b6fda1c31b796d18f5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2235, "license_type": "no_license", "max_line_length": 119, "num_lines": 56, "path": "/tools/2d/PlotGenerator/PyPlotGenerator.py", "repo_name": "AdrianoDiDio/KMeansClustering", "src_encoding": "UTF-8", "text": "import matplotlib.pyplot as pyplot\nimport matplotlib.cm as cm\nimport numpy as np\nimport csv\nimport argparse\n\nparser = argparse.ArgumentParser(description='''Show the results of the KMean Clustering algorithm \n generated by KMeansAlgorithm. It will load up to NumPhases cvs files.''')\nparser.add_argument(\"numphases\",type=int,help=\"sets the number of phases.\")\n\nargs = parser.parse_args()\n\npyplot.gcf().canvas.set_window_title(\"KMeansAlgorithm\")\n\nfor phase in range(args.numphases):\n try:\n with open(\"In/out_centroids_\" + str(phase) + \".csv\",newline='') as csvFile:\n centroidsReader = csv.reader(csvFile, delimiter=',')\n next(centroidsReader, None)\n centroidsRow = [[float(row[0]), float(row[1])] for row in centroidsReader if row]\n centroidList = list(centroidsRow)\n except IOError:\n continue\n\n datasetPointList = []\n try:\n with open(\"In/out_dataset_\" + str(phase) + \".csv\",newline='') as csvFile:\n datasetReader = csv.reader(csvFile, delimiter=',')\n next(datasetReader, None)\n datasetRows = [[float(row[0]), float(row[1]), int(row[2])] for row in datasetReader if row]\n datasetPointList = list(datasetRows)\n except IOError:\n continue\n \n markerCentroidSize = 25\n markerCentroidSizeOffset = 2\n centroidX,centroidY = zip(*centroidList)\n centroidColors = np.array(np.random.choice(range(256), size=len(centroidList)))\n centroidColors = cm.rainbow(np.linspace(0, 1, len(centroidList)))\n \n subPlot = 
pyplot.subplot(1,args.numphases,phase + 1)\n subPlot.title.set_text('Step ' + str(phase))\n subPlot.set_xlabel(\"x\")\n subPlot.set_ylabel(\"y\")\n \n #print(centroidColors)\n subPlot.scatter(centroidX, centroidY,c=centroidColors,alpha=0.6, s= markerCentroidSize ** markerCentroidSizeOffset,\n marker='D',zorder=1)\n\n markerDatasetSize = 2\n markerDatasetOffset = 2\n datasetX,datasetY,centroidIndex = zip(*datasetPointList)\n datasetColors = [centroidColors[x] for x in centroidIndex]\n subPlot.scatter(datasetX, datasetY,c=datasetColors,alpha=0.6,s= markerDatasetSize ** markerDatasetOffset,zorder=2)\n\npyplot.show()\n" }, { "alpha_fraction": 0.7511110901832581, "alphanum_fraction": 0.7511110901832581, "avg_line_length": 25.47058868408203, "blob_id": "adb6564d572208fda994e7e02be29dc71eb82f53", "content_id": "de9382d4b3b91dd65cdbbd7c707bf7687e365d0c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 450, "license_type": "no_license", "max_line_length": 69, "num_lines": 17, "path": "/src/c/PointArrayList.h", "repo_name": "AdrianoDiDio/KMeansClustering", "src_encoding": "UTF-8", "text": "#ifndef __POINTARRAYLIST_H_\n#define __POINTARRAYLIST_H_\n\nstruct Point_s;\ntypedef struct Point_s Point_t;\n\ntypedef struct PointArrayList_s {\n Point_t *Points;\n int NumPoints;\n int Size;\n} PointArrayList_t;\n\nvoid PointArrayListInit(PointArrayList_t *PointList,int InitialSize);\nvoid PointArrayListAdd(PointArrayList_t *PointList, Point_t Point);\nvoid PointArrayListCleanUp(PointArrayList_t *PointList);\n\n#endif //__POINTARRAYLIST_H_\n" }, { "alpha_fraction": 0.7003154754638672, "alphanum_fraction": 0.7034700512886047, "avg_line_length": 14.800000190734863, "blob_id": "2de99dd1629959f59fe2bcf53fbd6ecba7bb4ca5", "content_id": "286a55f07b1ae063ef35460a0eac5e74fdec4b3f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Makefile", "length_bytes": 317, "license_type": "no_license", "max_line_length": 
41, "num_lines": 20, "path": "/src/openmp/Makefile", "repo_name": "AdrianoDiDio/KMeansClustering", "src_encoding": "UTF-8", "text": "\nCC= gcc\nCFLAGS=-Wall -fopenmp\n\nObj = PointArrayList.o KMeansClustering.o\nLDFLAGS =\n\nall: release\n\ndebug: CFLAGS +=-D_DEBUG -g\ndebug: KMeansClustering\n\nrelease: CFLAGS += -O3\nrelease: KMeansClustering\n\nKMeansClustering: $(Obj)\n\t$(CC) $(CFLAGS) -o $@ $^ $(LDFLAGS)\n\n.PHONY: clean\nclean:\n\trm -f $(Obj) KMeansClustering\n" }, { "alpha_fraction": 0.5521885752677917, "alphanum_fraction": 0.5959596037864685, "avg_line_length": 21.769229888916016, "blob_id": "4c26bdf3bfd9be442fd8340c6425392467979c4a", "content_id": "c00cbf0d2c53d75c4118a0f3163bcccad2d0bdff", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 297, "license_type": "no_license", "max_line_length": 64, "num_lines": 13, "path": "/doc/src/codes/metrics.c", "repo_name": "AdrianoDiDio/KMeansClustering", "src_encoding": "UTF-8", "text": "int StartSeconds = 0;\nint SysMilliseconds()\n{\n struct timeval tp;\n int CTime;\n gettimeofday(&tp, NULL);\n if ( !StartSeconds ){\n StartSeconds = tp.tv_sec;\n return tp.tv_usec/1000;\n }\n CTime = (tp.tv_sec - StartSeconds)*1000 + tp.tv_usec / 1000;\n return CTime;\n} \n" }, { "alpha_fraction": 0.7364341020584106, "alphanum_fraction": 0.7364341020584106, "avg_line_length": 26.157894134521484, "blob_id": "1215fcfd26f78fd70d76b304135894a327e58258", "content_id": "884af114d93e9622c7308008114c871bc527ec82", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 516, "license_type": "no_license", "max_line_length": 80, "num_lines": 19, "path": "/src/cuda/PointArrayList.h", "repo_name": "AdrianoDiDio/KMeansClustering", "src_encoding": "UTF-8", "text": "#ifndef __POINTARRAYLIST_H_\n#define __POINTARRAYLIST_H_\n\n\ntypedef struct PointArrayList_s {\n float *Points;\n int Stride;\n int NumPoints; // Number of points\n int Size;\n} 
PointArrayList_t;\n\nPointArrayList_t *LoadPointsDataset(char *File,int *Stride);\n\nvoid PointArrayListInit(PointArrayList_t *PointList,int InitialSize,int Stride);\nvoid PointArrayListAdd(PointArrayList_t *PointList, float *Point);\nvoid PointArrayListCleanUp(PointArrayList_t *PointList);\n\n\n#endif //__POINTARRAYLIST_H_\n" }, { "alpha_fraction": 0.7614678740501404, "alphanum_fraction": 0.7614678740501404, "avg_line_length": 35.27777862548828, "blob_id": "f6836cef47b7246413e2902e75864a2314e59231", "content_id": "ea243472abcb7ccc009b1b07c59b18c9c4b1fa7f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 654, "license_type": "no_license", "max_line_length": 118, "num_lines": 18, "path": "/src/cuda/Utils.h", "repo_name": "AdrianoDiDio/KMeansClustering", "src_encoding": "UTF-8", "text": "#ifndef __UTILS_H_\n#define __UTILS_H_ \n\nint SysMilliseconds();\nvoid CreateDirIfNotExists(char *DirName);\nint IsAlpha(char c);\nint IsNumber(char c);\nchar *StringCopy(const char *From);\nint StringToInt(char *String);\nfloat StringToFloat(char *String);\nint GetFileLength(FILE *Fp);\nvoid DPrintf(char *Fmt, ...);\nchar *ReadTextFile(char *File,int Length);\nchar *CSVGetNumberFromBuffer(char *Buffer,float *Value);\nchar *CSVGetStringFromBuffer(char *Buffer,char *Value);\nchar *CSVSkipLine(char *Buffer,int *NumColumns);\nvoid DumpClusters(float *Points,int NumPoints,float *Centroids,int NumCentroids,int *ClusterList,int Stride,int Pass);\n#endif //__UTILS_H_ \n" } ]
18
fabifer/tarea1
https://github.com/fabifer/tarea1
8b70fa8f5eff2130641384ec414fd23d5a01206b
1938da1d68e954ba66dd8c4594bee99262fd4b44
242cf4f714ab5a6ed39128ba51c086617b4c0c5d
refs/heads/master
2018-12-29T19:50:46.345577
2011-04-06T05:06:56
2011-04-06T05:06:56
1,546,747
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.5816023945808411, "alphanum_fraction": 0.5994065403938293, "avg_line_length": 15.850000381469727, "blob_id": "ff30ce93dd476ffd1d6d3b42ad58020902c3e749", "content_id": "e79fca070e7259bd891375a7b531d349e31e7350", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 337, "license_type": "no_license", "max_line_length": 50, "num_lines": 20, "path": "/primos.py", "repo_name": "fabifer/tarea1", "src_encoding": "UTF-8", "text": "# -*- coding: utf_8 -*-\n# primos.py: muestra los numeros primos hasta n\n\nfrom math import sqrt\n\nn = int(raw_input (\"Ingrese un numero natural: \"))\n\nprint (\"Primos hasta \" + str(n) + \":\")\n\nfor x in range(2,n+1):\n\n\tes_primo = True\n\n\tfor y in range(2,int(sqrt(x))+1):\n\t\tif x % y == 0:\n\t\t\tes_primo = False\n\t\t\tbreak\n\n\tif es_primo:\n\t\tprint(x)\n" }, { "alpha_fraction": 0.5344352722167969, "alphanum_fraction": 0.581267237663269, "avg_line_length": 13.520000457763672, "blob_id": "6b223814d0b5f6635d439cd4cce2397e4b53eca5", "content_id": "41a3812ae68ea09c0b68038a43bca5c1115656e3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 363, "license_type": "no_license", "max_line_length": 56, "num_lines": 25, "path": "/fibonacci.py", "repo_name": "fabifer/tarea1", "src_encoding": "UTF-8", "text": "# -*- coding: utf_8 -*-\n# fibonacci.py: muestra los numeros de fibonacci hasta n\n\nfib1 = 0\nfib2 = 1\ntemp = 0\n\n\nn = int(raw_input(\"Ingrese un numero natural: \"))\n\nprint (\"Serie de Fibonacci hasta \" + str(n) + \": \")\n\nif n >= 0:\n\n\tprint (fib1)\n\n\tif n >= 1:\n\n\t\tprint(fib2)\n\n\t\tfor x in range(2, n+1):\n\t\t\ttemp = fib2 + fib1\n\t\t\tprint(temp)\n\t\t\tfib1 = fib2\n\t\t\tfib2 = temp\n" } ]
2
KimDH94/TIL
https://github.com/KimDH94/TIL
3954941402a5112b340cb9de9fa3bd3c327ac446
b92eba9c27593997fdd2c0c432aca7ddb88301a1
4f41aa5172908f07629ca70b67fa8656a4a580b3
refs/heads/master
2020-12-10T03:13:13.438153
2018-07-05T09:59:09
2018-07-05T09:59:09
95,427,269
0
1
null
2017-06-26T08:50:44
2017-07-31T13:58:18
2018-01-17T07:46:21
Jupyter Notebook
[ { "alpha_fraction": 0.5032154321670532, "alphanum_fraction": 0.5080385804176331, "avg_line_length": 19.064516067504883, "blob_id": "b271fbe20d5cc913a17210023b2abf0a6419f3fa", "content_id": "4ba3c8d371771883b985297747a383990a58cf9e", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 622, "license_type": "permissive", "max_line_length": 50, "num_lines": 31, "path": "/HackerRank/Implement/26.Circular_Array_Rotation.py", "repo_name": "KimDH94/TIL", "src_encoding": "UTF-8", "text": "#!/bin/python3\n\nimport sys\n\ndef circularArrayRotation(a, m):\n # Complete this function\n\n s = k%n\n\n for i in range(len(a)):\n a.append(a[i])\n\n tmp_list = a[n-k:(n-k)+n+1]\n\n result = []\n for num in m:\n result.append(tmp_list[num])\n\n return result\n\nif __name__ == \"__main__\":\n n, k, q = input().strip().split(' ')\n n, k, q = [int(n), int(k), int(q)]\n a = list(map(int, input().strip().split(' ')))\n m = []\n m_i = 0\n for m_i in range(q):\n m_t = int(input().strip())\n m.append(m_t)\n result = circularArrayRotation(a, m)\n print (\"\\n\".join(map(str, result)))\n" }, { "alpha_fraction": 0.6851851940155029, "alphanum_fraction": 0.6925926208496094, "avg_line_length": 41.1875, "blob_id": "a36f98a66d7bc5bb5d5d3f18c3e0b61ccdd0cdf4", "content_id": "7c969e3ffc4ced31951fdf716e551d2a41eb2103", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 3084, "license_type": "permissive", "max_line_length": 195, "num_lines": 32, "path": "/ADsP/test01_.md", "repo_name": "KimDH94/TIL", "src_encoding": "UTF-8", "text": "# 오답노트\n## 주관식\n\n### 개인의 사생활 침해를 방지하고 통계 응답자의 비밀사하은 보호하면서 통계자료의 유용성을 최대한 확보 할 수 있는 데이터변환 방법은 무엇인가?\n- 마스킹(Masking)\n\n### 인터넷 서비스 사용자와 광고주를 연결하는 비즈니스에서 가장 중요한 것은 사용자의 특성을 보다 정교하게 파악해 광고주가 도달하고자 하는 정확한 고객군을 만들어 내는 것이다. 이 목표를 위해 활용되기 시작한 것은 무엇인가?\n- 사용자 로그\n\n### 데이터 분석 기획을 위해서 데이터 분석 수준진단이 필요하다. 
분석 준비도와 분석 성숙도를 통해 데이터 분석 수준을 진단하게 되는데, 분석준비도 6개의 영역 중 2가지를 적으시오\n- 분석 업무, 분석 인력/조직, 분석 기법, 분석 데이터, 분석 문화, 분석 인프라\n\n### 정보기술 또는 정보시스템을 전략적으로 활용하기 위하여 조직 내*외부 환경을 분석하여 기회나 문제점을 도출하고 시스템 구축 우선순위를 결정하는 등 중장비 마스터 플랜을 수립하는 절차는?\n- 정보전략계획\n\n### 이것은 데이터 안의 두 변수 간의 관계를 알아보기 위해 사용하는 값이다. 두 변수간의 공분산으로는 음과 양의 관계를 파악할 수 있으나 관계 정도를 확인하기는 힘들다. 그래서 각변수의 표준편차를 곱하여 공분산을 나누어 -1에서 1사이 값으로 표준화하여 두 변수 간의 관계 정도를 확인 할 수 있도록 수치화 한 이것을 활용한다. 이것은 무엇인가?\n- 상관계수\n\n### A반과 B반 학생들이 동일한 과목을 들었다고 하자. A반과 B반 학생 모두를 대상으로 과목별 성적의 평균을 구하려고 할 때, A반 학생 데이터와 B반 학생 데이터를 class라는 변수를 기준으로 합치려고 한다. R로 프로그램을 작성하시오\n- merge(A,B,by = \"class\")\n\n### 우리는 모집단을 조사하기 위해 추출한 모집단의 일부 원소를 이용한다. 통계자료의 획득방법 중 모집단을 조사하기 위해 추출한 집단을 무엇이라 하는가?\n- 샘플\n\n### 어떤 객체가 불량인지 우량인지 또는 생존하느냐 못하느냐와 같이 0과 1로 구분하는데 활용되거나 A,B,C,D 또는 1등급, 2등급, 3등급 중에 어느 등급에 속하는지와 같이 정햊ㄴ 범주로 분류하는데 사용되는 데이터마이닝 분석방법은 무엇인가?\n- 분류\n\n### 빈도가 높고 핵심어 일수록 큰 글씨로 중심부에 표현되며, 어떤 말을 하고 있는지 한 눈에 볼 수 있도록 단어들이 구름처럼 만든 비주얼 분석도구는 무엇인가?\n- 워드 클라우드\n\n### 주성분분석을 통해 얻은 R프로그래밍 결과가 아래와 같이 나왔다. 3개의 변수를 활용할 경우 전체 데이터의 몇 %를 설명할 수 있는지 쓰시오\n- 전체데이터의 설명력은 Cumulative Proportion 을 본다\n" }, { "alpha_fraction": 0.5883306264877319, "alphanum_fraction": 0.5883306264877319, "avg_line_length": 13.348836898803711, "blob_id": "f5c2ef246bf7aba2e0cb4b343efc5f9715c55c07", "content_id": "fa7b02cb6018c5d97d6fef7f952deb7e8a035533", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1235, "license_type": "permissive", "max_line_length": 47, "num_lines": 43, "path": "/Linux/Directory_&_File.md", "repo_name": "KimDH94/TIL", "src_encoding": "UTF-8", "text": "### ls\n- 현재 디렉토리의 파일 목록을 출력하는 명령어. 
(ls -l)은 자세히 보기\n\n### ls -la\n- 현재 디렉토리의 파일 목록중 숨김파일까지 보여주는 명령어\n\n### ls -Sla\n- 현재 디렉토리의 파일 목록중 숨김파일까지 파일크기순으로 정렬해서 보여주는 명령어\n\n### pwd\n- 현재 위치하고 있는 디렉토리를 알려주는 명령어\n\n### mkdir 파일명\n- 새로 생설할 디렉토리 명 \n\n### cd 디렉토리명 (tap키 활용)\n- 디렉토리명으로 이동\n\n### cd ..\n- 현재 디렉토리의 위치를 기준으로 부모가 되는 디렉토리로 이동하게 해줌 (상대경로)\n\n### cd /최상위/상위\n- 현재 디렉토리가 무엇이건 절대적인 경로로 이동하게 해주는 것 (절대경로)\n\n### rm 파일명\n- 파일 삭제\n\n### rm -r 디렉토리명\n- 디렉토리 삭제 (사용시 주의 폴더 자체가 삭제되므로...)\n\n### 명령어 --help\n- 명령어에 대한 사용 설명서\n\n### man 명령어 (맥 사용자 유용)\n- 명령어에 대한 mannual을 보여줌\n\n### cp <원본파일> <복사할파일의위치>\n- 파일 복사\n\n### mv <원본파일> <파일이이동될 위치>\n- 파일 이동 / 파일이름 변경\n\n###\n" }, { "alpha_fraction": 0.6011644601821899, "alphanum_fraction": 0.6011644601821899, "avg_line_length": 24.884614944458008, "blob_id": "7c2379e07a317133b563117e1551397508a4098e", "content_id": "cd0486d9009cb051512f3cfc0e1030e2a1be719d", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1233, "license_type": "permissive", "max_line_length": 73, "num_lines": 26, "path": "/ADsP/Day09.md", "repo_name": "KimDH94/TIL", "src_encoding": "UTF-8", "text": "# 편리한 기능\n\n- R의 작업환경 설정 : R 단축아이콘 우측클릭 > 속성 > 바로가기 > 시작위치에 현재 작업위치를 입력 > 저장\n- 프로그램에서 작업환경 설정 : setwd(\"작업디렉토리\")\n- 도움말 : help(함수), ?함수, RSitSearch(\"함수명\")\n- 히스토리 : history(), savehistory(file = \"파일명\"), loadhistory(file = \"파일명\")\n- 콘솔 청소 : Ctrl + L\n\n# 스크립트 사용하기\n\n- 한줄 실행 : Ctrl + R\n- 여러줄 실행 : 드래그해서 Ctrl + R\n- 주석처리 : #\n\n# 패키지\n\n- 패키지 : R함수, 데이터 및 컴파일 코드의 모임\n- 패키지 자동설치 : install.package(\"패키지명)\n- 패키지 수동설치 : install.package(\"패키지명\", \"패키지 위치\")\n\n# 배치 실행\n\n- 매일 돌아가야하는 프로그램을 시스템에서 프로세스에서 자동으로 구동하는 작업\n- 배치파일 실행 명령 : 윈도우 창에서 batch.R 실행파일이 있는 위치에서 R CMD BATCH batch.R\n- Path 지정 : 내컴퓨터에 오른쪽 마우스를 클릭 > 속성 > 고급시스템 설정 > 환경변수 클릭 > 변수명이 path를 클릭 >\n R프로그램의 실행파일의 위치를 찾아서 추가 > 저장\n \n" }, { "alpha_fraction": 0.7332015633583069, "alphanum_fraction": 0.7332015633583069, "avg_line_length": 30.4375, "blob_id": 
"7891def75f4c120a0c88af0e9c4c83ca9e869b4f", "content_id": "f4877d77c52b3ffa34285cebc92e117f132f725c", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 822, "license_type": "permissive", "max_line_length": 74, "num_lines": 16, "path": "/Linux/Internet.md", "repo_name": "KimDH94/TIL", "src_encoding": "UTF-8", "text": "# Internet\n- Client와 Server의 대화(request와 response)\n- IP address 와 domain name ( 전화기의 전화번호부에서 이름과 그와 매치되는 핸드폰 번호 ) \n- DNS server : 이 세상의 domain name의 IP주소를 저장하고 있는 거대한 서버\n\n# IP\n- ip addr 명령어를 치고 inet에 해당되는 주소가 컴퓨터의 ip address이다.\n- curl ipinfo.io/ip 와 ip addr 로 알아낸 ip주소가 다르다. \n- ip addr 은 private address 확인이고, curl ipinfo.io/ip 는 public address 확인이다.\n\n# Web Server\n- server computer에서도 설치해야 함. 마치 우리가 chrome이나 firefox를 웹브라우져로 사용하는 것처럼\nApache와 같은 소프트웨어를 설치해야 함\n- 웹서버는 사용자가 요청한 파일을 어디에서 읽어오는걸까?\n\n# apache \n\n\n" }, { "alpha_fraction": 0.6195651888847351, "alphanum_fraction": 0.6231883764266968, "avg_line_length": 15.235294342041016, "blob_id": "7e960c331729d7d2fc901b05731b4b0c4acd0272", "content_id": "66812fd91e4d137876e924b2b94918884b5dfb48", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 416, "license_type": "permissive", "max_line_length": 37, "num_lines": 17, "path": "/Linux/File_edit.md", "repo_name": "KimDH94/TIL", "src_encoding": "UTF-8", "text": "### sudo (super user do)\n- 관리자권한과 비슷한 느낌\n\n### nano 파일명 (초급용, vi = 중/고급용)\n- ^ = control키 이다.\n\n### ^6\n- mark set\n\n### Package manager\n- 휴대폰의 스토어앱과 유사함\n\n### brew 명령어\n- 맥사용자는 homebrew를 통해 brew를 사용하는 것이 좋다\n- brew install, brew uninstall 등등\n- 현재 깔려있는 htop의 경우 \n- $ sudo htop이 유용\n" }, { "alpha_fraction": 0.5, "alphanum_fraction": 0.5129310488700867, "avg_line_length": 22.200000762939453, "blob_id": "e076ab18b10791ead34bcb36f85ffaebc1b6f2f9", "content_id": "018df43d8e81b71120bac5b61c80bbe8d1f468b1", "detected_licenses": [ "MIT" ], "is_generated": false, 
"is_vendor": false, "language": "Python", "length_bytes": 464, "license_type": "permissive", "max_line_length": 47, "num_lines": 20, "path": "/HackerRank/Implement/07.Divisible_Sum_Paris.py", "repo_name": "KimDH94/TIL", "src_encoding": "UTF-8", "text": "#!/bin/python3\n\nimport sys\n\ndef divisibleSumPairs(n, k, ar):\n # Complete this function\n result = 0\n\n for i in range(len(ar)-1):\n for j in range(i + 1, len(ar)):\n if (ar[i] + ar[j]) % k == 0:\n result += 1\n else:\n continue\n return result\nn, k = input().strip().split(' ')\nn, k = [int(n), int(k)]\nar = list(map(int, input().strip().split(' ')))\nresult = divisibleSumPairs(n, k, ar)\nprint(result)\n" }, { "alpha_fraction": 0.35777124762535095, "alphanum_fraction": 0.44281524419784546, "avg_line_length": 21, "blob_id": "c348bd884029bc4e7a62070bf6f5f15746f506f8", "content_id": "7e6ec8ed2e99c31a0e79752aec26d80fe1010c11", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 682, "license_type": "permissive", "max_line_length": 48, "num_lines": 31, "path": "/HackerRank/Implement/31.Library_Fine.py", "repo_name": "KimDH94/TIL", "src_encoding": "UTF-8", "text": "#!/bin/python3\n\nimport sys\n\ndef libraryFine(d1, m1, y1, d2, m2, y2):\n # Complete this function\n if y1 > y2:\n return 10000\n\n else:\n if y1 < y2:\n return 0\n elif m1 > m2:\n return 500 * (m1 - m2)\n elif m1 == m2:\n if d1 > d2:\n return 15 * (d1 - d2)\n else:\n return 0\n else:\n return 0\n\n\n\nif __name__ == \"__main__\":\n d1, m1, y1 = input().strip().split(' ')\n d1, m1, y1 = [int(d1), int(m1), int(y1)]\n d2, m2, y2 = input().strip().split(' ')\n d2, m2, y2 = [int(d2), int(m2), int(y2)]\n result = libraryFine(d1, m1, y1, d2, m2, y2)\n print(result)\n" }, { "alpha_fraction": 0.5188679099082947, "alphanum_fraction": 0.5256064534187317, "avg_line_length": 31.2608699798584, "blob_id": "9b4d19b49c4d266136089c221c843c41c73658db", "content_id": "b7845835a2df007b88b12dc896e58df8d80d128d", 
"detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 742, "license_type": "permissive", "max_line_length": 97, "num_lines": 23, "path": "/HackerRank/Implement/02.Apple_and_Orange.py", "repo_name": "KimDH94/TIL", "src_encoding": "UTF-8", "text": "#!/bin/python3\n\nimport sys\n\ndef countApplesAndOranges(s, t, a, b, apples, oranges):\n # Complete this function\n\n larry = [int(1) if ((a + apples[i]) >= s and (a + apples[i]) <= t) else 0 for i in range(m)]\n\n rob = [int(1) if ((b + oranges[j]) >= s and (b + oranges[j]) <= t) else 0 for j in range(n)]\n\n print(sum(larry))\n print(sum(rob))\nif __name__ == \"__main__\":\n s, t = input().strip().split(' ')\n s, t = [int(s), int(t)]\n a, b = input().strip().split(' ')\n a, b = [int(a), int(b)]\n m, n = input().strip().split(' ')\n m, n = [int(m), int(n)]\n apple = list(map(int, input().strip().split(' ')))\n orange = list(map(int, input().strip().split(' ')))\n countApplesAndOranges(s, t, a, b, apple, orange)\n" }, { "alpha_fraction": 0.47165533900260925, "alphanum_fraction": 0.48752835392951965, "avg_line_length": 18.173913955688477, "blob_id": "e423d2a6a2af4948d2afad4158bfd72520570cd2", "content_id": "b23e7c4b0913e22d0d1a279e1d62165c9e5e994c", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 441, "license_type": "permissive", "max_line_length": 46, "num_lines": 23, "path": "/HackerRank/Implement/06.Birthday_Chocolate.py", "repo_name": "KimDH94/TIL", "src_encoding": "UTF-8", "text": "#!/bin/python3\n\nimport sys\n\ndef solve(n, s, d, m):\n # Complete this function\n result = 0\n if n == 1 and s[0] == d:\n return 1\n else:\n for i in range(m,n+1):\n if sum(s[i-m:i]) == d:\n result += 1\n return result\n\n\n\nn = int(input().strip())\ns = list(map(int, input().strip().split(' ')))\nd, m = input().strip().split(' ')\nd, m = [int(d), int(m)]\nresult = solve(n, s, d, m)\nprint(result)\n" }, { 
"alpha_fraction": 0.6244944930076599, "alphanum_fraction": 0.6533795595169067, "avg_line_length": 27.850000381469727, "blob_id": "3bf58cda64007ac2e901f72c02afaf389fb4be65", "content_id": "367e99a9d8be038780f25d0dde0e4b31f48fae46", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1731, "license_type": "permissive", "max_line_length": 198, "num_lines": 60, "path": "/HackerRank/Warmup/02.Compare_the_Triplets.py", "repo_name": "KimDH94/TIL", "src_encoding": "UTF-8", "text": "# Alice and Bob each created one problem for HackerRank. A reviewer rates the two challenges, awarding points on a scale from to for three categories: problem clarity, originality, and difficulty.\n#\n# We define the rating for Alice's challenge to be the triplet , and the rating for Bob's challenge to be the triplet .\n#\n# Your task is to find their comparison points by comparing with , with , and with .\n#\n# If , then Alice is awarded point.\n# If , then Bob is awarded point.\n# If , then neither person receives a point.\n# Comparison points is the total points a person earned.\n#\n# Given and , can you compare the two challenges and print their respective comparison points?\n#\n# Input Format\n#\n# The first line contains space-separated integers, , , and , describing the respective values in triplet .\n# The second line contains space-separated integers, , , and , describing the respective values in triplet .\n#\n# Constraints\n#\n# Output Format\n#\n# Print two space-separated integers denoting the respective comparison points earned by Alice and Bob.\n#\n# Sample Input\n#\n# 5 6 7\n# 3 6 10\n# Sample Output\n#\n# 1 1\n\n#!/bin/python3\n\nimport sys\n\ndef solve(a0, a1, a2, b0, b1, b2):\n # Complete this function\n tmp_a = [a0, a1, a2]\n tmp_b = [b0, b1, b2]\n\n alice = 0\n bob = 0\n\n for i in range(len(tmp_a)):\n if tmp_a[i] > tmp_b[i]:\n alice += 1\n elif tmp_a[i] < tmp_b[i]:\n bob += 1\n else:\n continue\n return alice, 
bob\n\n\na0, a1, a2 = input().strip().split(' ')\na0, a1, a2 = [int(a0), int(a1), int(a2)]\nb0, b1, b2 = input().strip().split(' ')\nb0, b1, b2 = [int(b0), int(b1), int(b2)]\nresult = solve(a0, a1, a2, b0, b1, b2)\nprint (\" \".join(map(str, result)))\n" }, { "alpha_fraction": 0.480211079120636, "alphanum_fraction": 0.5233069658279419, "avg_line_length": 18.947368621826172, "blob_id": "057c5b604f902852a3863b17985500f52fc15037", "content_id": "f4dfb595c58fcccfd569952614e28a939cf5ce08", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1137, "license_type": "permissive", "max_line_length": 111, "num_lines": 57, "path": "/HackerRank/Warmup/09.Time_Conversion.py", "repo_name": "KimDH94/TIL", "src_encoding": "UTF-8", "text": "# Given a time in -hour AM/PM format, convert it to military (-hour) time.\n#\n# Note: Midnight is on a -hour clock, and on a -hour clock. Noon is on a -hour clock, and on a -hour clock.\n#\n# Input Format\n#\n# A single string containing a time in -hour clock format (i.e.: or ), where and .\n#\n# Output Format\n#\n# Convert and print the given time in -hour format, where .\n#\n# Sample Input\n#\n# 07:05:45PM\n# Sample Output\n#\n# 19:05:45\n\n#!/bin/python3\n\nimport sys\n\ndef timeConversion(s):\n # Complete this function\n\n result = \"\"\n if s[-2:] == \"PM\":\n hour = int(s[:2]) + 12\n if hour == 24:\n result = s[:-2]\n else:\n result = str(hour) + s[2:-2]\n\n else:\n hour = int(s[:2]) + 12\n if hour == 24:\n result = \"00\" + s[2:-2]\n else:\n hour = s[:2]\n result = str(hour) + s[2:-2]\n\n return result\n\n# best solution\n\ns = input().strip()\nresult = timeConversion(s)\nprint(result)\n\ns = raw_input()\nzn = s[-2:]\nif zn == \"PM\" and s[:2] != \"12\":\n s = str(12 + int(s[:2])) + s[2:]\nif zn == \"AM\" and s[:2] == \"12\":\n s = \"00\" + s[2:]\nprint s[:-2]\n" }, { "alpha_fraction": 0.6694265007972717, "alphanum_fraction": 0.671445906162262, "avg_line_length": 
39.925621032714844, "blob_id": "159aaebff864031fb60b6b50769f478f38251a19", "content_id": "ceaa8973b72666639b904236bd3d35425b0f9f63", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 11288, "license_type": "permissive", "max_line_length": 148, "num_lines": 121, "path": "/ADsP/Point_1.md", "repo_name": "KimDH94/TIL", "src_encoding": "UTF-8", "text": "# 출제포인트\n\n### 정성적 데이터와 정량적 데이터이 차이점\n- 정성적 데이터는 그 형태와 형식이 정해져 있지 않아서 저장, 검색, 분석하는 데 많은 비용과 기술적 투자가 수반 된다. \n - (ex) 주관식 응답, SNS에 올린 글 등 형태와 형식이 정해져 있지 않은 데이터\n- 정량 데이터는 데이터의 양이 크게 증가 하더라도 저장, 검색, 분석하여 활용하기 용이하다. \n - (ex) 지역별 온도, 풍속, 강우량과 같이 수치로 표현되는 데이터\n\n### DIKW 각각의 정의를 묻는 문제, 예시에 대한 문제가 자주 출제 됨\n- 데이터 (Data) : 존재형식을 불문하고, 타 데이터와의 상관 관계가 없는 가공하기 전의 순수한 수치나 기호를 의미\n - (ex) A 마트는 100원에 B마트는 200원에 연필을 판매한다\n- 정보 (Information) : 데이터의 가공 및  상관관계간 이해를 통해 패턴을 인식하고 그 의미를 부여한 데이터\n - (ex) A 마트의 연필이 더 싸다\n- 지식 (Knowledge) : 상호 연결된 정보 패턴을 이해하여 이를 토대로 예측한 결과물\n - (ex) 상대적으로 저렴한 A마트에서 연필을 사야겠다\n- 지혜 (Wisedom) : 근본 원리에 대한 깊은 이해를 바탕으로 도출되는 창의적 아이디어\n - (ex) A마트의 다른 상품들도 B마트보다 쌀 것이라고 판단한다\n \n### 데이터베이스의 일반적인 특징은 자주 출제가 된다\n- 통합된 데이터 : 동일한 내용의 데이터가 중복되어 있지 않다는 것을 의미\n- 저장된 데이터 : 컴퓨터가 접근할 수 있는 저장 매체에 저장되는 것을 의미\n- 공용 데이터 : 여러 사용자가 서로 다른 목적으로 데이터를 공동으로 이용한다는 것을 의미\n- 변화되는 데이터 : 항상 변화하면서도 항상 현재의 정확한 데이터를 유지해야 함\n\n### 데이터베이스 활용의 약어의 의미를 잘못 설명한 보기를 찾는 문제가 출제된다\n- OLTP : 여러 단말에서 보내온 메시지에 따라 호스트 컴퓨터가 데이터 베이스를 엑세스하고, 바로 처리 결과를 돌려보내는 형태를 말한다. 데이터 베이스의 데이터를 수시로 갱신하는 프로세싱을 의미한다.\n- OLAP : 정보 위주의 분석 처리를 의미하며, 다양한 비즈니스 관점에서 쉽고 빠르게 다차원적인 데이터에 접근하여 의사 결정에 활용할 수 있는 정보를 얻을 수 있게 해주는 기술이다.\n- CRM : '고객관계관리'라고 한다. 기업이 고객과 관련된 내외부 자료를 분석/통합해 고객 중심 자원을 극대화하고이를 토대로 고객특성에 맞게 마케팅 활동을 계획/지원/평가하는 과정이다.\n- SCM : 기업에서 원재료의 생산/유통 등 모든 공급망 단계를 최적화해 수요자가 원하는 제품을 원하는 시간과 장소에 제공하는 '공급망 관리'를 뜻한다\n- ERP : 인사/재무/생산 등 기업의 전 부문에 걸쳐 독립적으로 운영되던 각종 관리시스템의 경영자원을 하나의 통합 시스템으로 재구축함으로써 생산성을 극대화하려는 경영혁신기법을 의미한다. 
'전사적 자원관리' 라고 한다.\n- RTE : 회사의 주요 경영정보를 통합관리하는 실시간 기업의 새로운 기업경영시스템이다.\n- BI : 비즈니스 인텔리전스란 기업이 보유하고 있는 수많은 데이터를 정리하고 분석해 기업의 의사결정에 활용하는 일련의 프로세스를 말한다. 다양한 분석과 그 결과를 공유할 수 있는 기반 시스템을 구축하고 고급 정보를 이용해 신속하고 정확한 의사결정을 하도록 한다.\n- EAI : 기업 내 상호 연관된 모든 애플리케이션을 유기적으로 연동하여 필요한 정보를 중앙 집중적으로  관리, 사용할 수 있는 환경을 구현하는 것이다\n- KMSI : 기업의 환경이 물품을 주로 생산하던 산업사회에서, 지적 재산의 중요성이 커지는 지식사회로 급격히 이동함에 따라, 기업 경영을 지식이라는 관점에서 새롭게 조명하는 접근방식이다.\n\n### 3V의 용어와 정의를 정확히 이해하고 넘어가야 함\n- 양 (Volume)\n - 데이터의 규모 측면\n- 다양성 (Variety)\n - 데이터의 유형과 소스 측면\n- 속도 (Velocity)\n - 데이터의 수집과 처리 측면\n\n### 데이터의 범주가 '데이터의 변화 > 기술변화 > 인재, 조직의 변화' 로 점점 확대되고 있음을 알고 그 내용이 무엇인지 알아야 한다\n- 데이터의 변화\n - 규모(Volume), 형태(Variety), 속도(Velocity)\n- 기술 변화\n - 새로운 데이터 처리, 저장, 분석기술 및 아키텍쳐, 클라우드 컴퓨팅 활용\n- 인재, 조직 변화\n - Data Scientist 같은 새로운 인재 필요, 데이터 중심 조직\n- 기존 방식으로는 얻을 수 없었던 통찰 및 가치 창출\n- 사업방식, 시장, 사회, 정부 등에서 변화와 혁신 주도\n\n### 빅데이터 출현 배경 3가지 각각의 내용을 충분히 숙지\n- 산업계\n - 고객 데이터 축적 (산업계에서 일어난 변화를 보면 빅데이터 현상은 양질 전환 법칙으로 설명이 가능하다)\n- 학계\n - 거대 데이터 활용 과학 확산 (학계에서 대표적인 사례로는 인간 게놈 프로젝트를 들 수 있다. 거대 데이터를 다루는 학문 분야가 늘어나면서 필요한 기술 아키텍쳐 및 통계 도구들도 지속적으로 발전했다.\n- 관련기술발전(디지털화, 저장기술, 인터넷 보급, 모바일 혁명, 클라우드 컴퓨팅)\n - 기술발전 측면에서 보면 디지털화의 급진전, 저장 기술의 발전과 가격 하락, 인터넷의 발전과 모바일 시대의 진전에 따른 클라우드 컴퓨팅 보편화 등이 모두 빅데이터 출현과 관련된다.\n \n### 사용자 로그 정보의 정의를 정확히 이해해야 한다. 
또한 빅데이터 출현에 따른 변화가 무엇인지 객관식 문제로 나올 수 있다\n- 사용자 로그(log) 정보 (사용자가 인터넷에 접속한 시간과 정보, 검색기록 등) 에 대한 프로파일링이 이뤄지기 시작하면서 아이덴티티가 뚜렷해지고 사용자와 광고를 매칭하는 정확도도 향상된다.\n\n### 빅데이터에서 중요시 여기는 부분이 과거에서 현재로 어떻게 변화되었는지 헷갈리지 않게 체크하자\n- 사전처리 > 사후처리\n - 필요한 정보만 수집하고 필요하지 않는 정보를 버리는 시스템에서 가능한 한 많은 데이터를 모으고 그 데이터를 다양한 방식으로 조합해 숨은 정보를 찾아 낸다.\n- 표본조사 > 전수조사\n - 데이터 수집 비용의 감소와 클라우드 컴퓨팅 기술의 발전으로 데이터 처리비용의 감소로 표본을 조사하는 기존의 지식발견의 방식이 전수조사를 통해 샘플링이 주지 못하는 패턴이나 정보를 제공해 주게 된다.\n- 질 > 양\n - 데이터가 지속적으로 추가될 때 양질의 정보가 오류 정보보다 많아져 전체적으로 좋은 결과산출에 긍정적인 영향을 미친다는 추론에 그 바탕을 두고 변화 된다\n- 인관관계 > 상관관계\n - 상관관계를 통해 특정 현상의 발생 가능성이 포착되고, 그에 상응하는 행동을 하도록 추천되는 일이 점점 늘어나 데이터 기반의 상관관계 분석이 주는 인사이트가 인과관계에 의해 미래예측을 점점 압도해 가는 시대가 도래하게 될 것으로 전망된다.\n \n### 빅데이터를 활용 기본 테크닉 7가지가 어떤 기술인지, 어떻게 활용되고 있는지는 반드시 숙지하자\n- 연관 규칙 학습\n -  어떤 변인들 간에 주목할 만한 상관관계가 있는지를 찾아내는 방법\n - 커피를 구매하는 사람이 탄산음료를 더 많이 사는가?\n- 유형 분석\n - 문서를 분류하거나 조직을 그룹으로 나눌 때, 혹은 온라인 수강생들을 특성에 따라 분류할 때 사용한다\n - 이 사용자는 어떤 특성을 가진 집단에 속하는가?\n- 기계 학습\n - 훈련 데이터로부터 학습한 알려진 특성을 활용해 예측하는 방법이다\n - 기존의 시청 기록을 바탕으로 시청자가 현재 보유한 영화 중에서 어떤 것을 가장 보고 싶어할까?\n- 회귀 분석\n - 독립변수를 조작하며, 종속변수가 어떻게 변하는지를 보면 두 변인의 관계를 파악할 때 사용한다\n - 구매자의 나이가 구매 차량의 타입에 어떤 영향을 미치는가?\n- 감정 분석\n - 특정 주제에 대해 말하거나 글을 쓴 사람의 감정을 분석한다\n - 새로운 환불 정책에 대한 고객의 평가는 어떤가?\n- 소셜 네트워크 분석\n - 특정인과 다른 사람이 몇 촌정도의 관계인가를 파악할 때 사용하고 영향력 있는 사람을 찾아낼 때 사용한다\n- 유전자분석\n - 최적화가 필요한 문제의 해결책을 자연선택, 돌연변이 등과 같은 메커니즘을 통해 점진적으로 진화(evolve)시켜 나가는 방법이다\n - 최대의 시청률을 얻으려면 어떤 프로그램을 어떤 시간대에 방송해야 하는가?\n\n### 주관식으로 출제될 수 있으니 위기요인 3가지는 반드시 외우자\n- 사생활 침해\n - 여행 사실을 트위트 한 사람의 집을 강도가 노리는 고전적 사례 발생 - 익명화 기술발전 필요\n- 책임 원칙 훼손\n - 영화 마이너리티 리포트\n- 데이터 오용\n - 베트남 전쟁때 사망자 수가 과장돼 보고되는 경향\n\n### 데이터 사이언스에 대해 묻는 문제가 출제될 수 있으니 숙지하자\n- 데이터 사이언스란 데이터로부터 의미 있는 정보를 추출해내는 학문이다\n- 데이터 사이언스는 정형 또는 비정형을  휴대전화, 감시용 카메라 등에서 생성되는 숫자와 문자, 영상 정보 등 다양한 유형의 데이터를 대상으로 한다\n- 데이터 사이언스는 분석 뿐 아니라 이를 효과적으로 구현하고 전달하는 과정까지를 포함한 포괄적 개념이다\n- 데이터 사이언스는  수학, 통계학, 컴퓨터공학, 시각화, 해커의 사고방식, 해당분야의 전문 지식을 종합한 학문이다\n- 데이터 사이언티스트는 비즈니스의 성과를 좌우하는 핵심 이슈에 답을 하고, 사업의 성과를 견인해 나갈 수 있어야 한다. 
중요한 역량중 하나인 소통력이 필요한 이유이다\n\n### 데이터사이언티스트에게 요구되는 인문학적 사고에 대해 과거 > 현재 > 미래로 정리하여 이해하자\n- 과거\n  - 정보 : 무슨 일이 일어났는가? 리포팅(보고서 작성 등)\n - 통찰력 : 어떻게, 왜 일어났는가? (모델링, 실험설계)\n- 현재\n - 정보 : 무슨일이 일어나고 있는가? 경고\n - 통찰력 : 차선 행동은 무엇인가 권고\n- 미래\n - 정보 : 무슨 일이 일어날 것인가?\n - 통찰력 : 최악 또는 최선의 상황은 무엇인가? 예측, 최적화, 시뮬레이션\n" }, { "alpha_fraction": 0.6540173888206482, "alphanum_fraction": 0.6576213836669922, "avg_line_length": 26.911243438720703, "blob_id": "8712d70826c2d9760504c007462b5d1485f4562b", "content_id": "1b96da0b7aeec48c52542b64b292fd650a32c706", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 10207, "license_type": "permissive", "max_line_length": 112, "num_lines": 169, "path": "/ADsP/test04.md", "repo_name": "KimDH94/TIL", "src_encoding": "UTF-8", "text": "# 오답노트\n## 객관식\n\n### 데이터베이스의 특성 중 적절한 것은?\n- 정보기술 발전의 측면에서 정보처리, 검색 관리 소프트웨어, 관련 하드웨어, 정보 전송을 위한 네트워크 기술 등의 발전을 견인 할 수 있다\n- 정보의 축적 및 전달 측면에서 대량의 정보를 일정한 형식에 따라 정보처리기기가 읽고 쓰고 검색할 수 있도록 하는 기계가독성을 갖는다\n\n### 다음 중 데이터베이스의 특징과 가장 거리가 먼 것은?\n- 응용프로그램 종속성 x\n- 데이터의 무결성 유지 o\n- 프로그래밍 생산성 향상 o\n- 데이터 중복성 최소화 o \n\n### 다음 중 NoSQL 데이터베이스가 아닌 것은?\n- MySQL x\n- MongoDB o\n- Hbase o \n- Redis o\n\n### 빅데이터의 중요성과 빅데이터 시대의 빅데이터의 역할 등을 고려할 떄 아래의 (ㄱ) 안에 들어갈 말로 가장 적절한 것은?\n 빅데이터의 핵심은 빅데이터 자체가 아닌 분석을 통한 (ㄱ) 이다.\n- 의사결정 최적화\n\n### 비즈니스 모델 분석의 톱다운(Top Down) 접근방식에서 분석 기회 발굴을 위한 비즈니스 모델 상세화 과정에 대한 설명으로 가장 부적절한 것은?\n- 비즈니스 컨텍스트 분석을 통한 환경변화 요소 파악 x\n- 기업 전략과 실행 계획간 관계 분석 o\n- 비즈니스 운영 시나리오 상세화 o\n- 기업 전략 테마를 실현하기 위한 실행요소 연결데이터 지연시간의 최소화 방법 o\n\n### 전사적 관점에서 데이터 분석을 수행할 경우 고려할 사항과 가장 거리가 먼 것은?\n- 업무 영역별 분석 x\n- 분석 내재화 프로세스 o\n- 분석 패턴 서비스 아키텍쳐 o\n- 분석 간 선순환 관계 o\n\n### 분석 준비도(Readiness)는 기업의 데이터 분석 도입의 수준을 파악하기 위한 진단방법으로 6가지 영역을 대상으로 파악한다. 
6가지 영역 중 하나인 분석기법 영역에 해당하지 않는 것은?\n- 예측 분석 업무 개선 정도 x\n- 분석 업무 도입 방법론 o\n- 분석기법 라이브러리 o\n- 분석기법 효과성 평가 o\n\n### 분석준비도 6가지\n- 분석업무파악\n - 발생한 사실 분석 업무\n - 예측 분석 업무\n - 시뮬레이션 분석 업무\n - 최적화 분석 업무\n - 분석 업무 정기적 개선\n- 인력 및 조직\n - 분석 전문가 직무 존재\n - 분석 전문가 교육 훈련 프로그램\n - 관리자들의 기본적 분석 능력\n - 전사 분석업무 총괄 조직 존재\n - 경영진 분석 업무 이해 능력\n- 분석 기법\n - 업무별 적합한 분석기법 사용\n - 분석 업무 도입 방법론\n - 분석기법 라이브러리\n  - 분석기법 효과성 평가\n - 분석기법 정기적 개선\n- 분석 데이터\n - 분석업무를 위한 데이터 충분성\n - 분석업무를 위한 데이터 신뢰성\n - 분석업무를 위한 데이터 적시성\n - 비구조적 데이터 관리\n - 외부 데이터 활용 체계\n - 기준 데이터 관리 (MDM)\n- 분석 문화\n - 사실에 근거한 의사결정\n - 관리자의 데이터 중시\n - 회의 등에서 데이터 활용\n - 경영진의 직관보다 데이터\n - 데이터 공유 및 협업 문화\n- IT 인프라\n - 운영시스템 데이터 통합\n - EAI, ETL 등 데이터유통체계\n - 분석 전용 서버 및 스토리지\n - 빅데이터 분석 환경\n - 통계 분석 환경\n - 비쥬얼 분석 환경\n \n### 기업에서의 분석 수준은 도입단계로부터 활용단계, 확산단계, 최적화 단계까지 점차 진화한다. 분석 수준이 활용단계에 도달했을 때의 설명으로 가장 부적절한 것은?\n- 전사 차원에서 분석을 관리 하고 공유 x\n- 운영 데이터 기반의 모델링 수행 o\n- 전문 부서에서 분석 업무를 담당 o\n- 미래 결과 예측 및 시뮬레이션 결과 방영 o\n\n### example(solve)에 대한 설명으로 가장 적절한 것은?\n- 함수 solve의 도움말의 예제에 있는 명령어들을 실행시킨다\n\n### 다음 중 나머지 세 개의 명령과 결과가 다른 것은?\n- z = c(1:3,NA)\n is.na(z)\n- z <- c(1:3,NA)\n is.na(z)\n- z = c(1:3,NA)\n z = NA          x\n- c(1,1,1,2) == 2\n\n### 아래에서 설명하는 표본추출법으로 적절한 것은?\n    모집단이 몇 개의 군집(Clustering) 형태로 구성돼 있고 각 집단에서 원소들에게 일련번호를 부여할 수 있는 경우에 이용된다. 
일부 군집을 랜덤으로 선택하고 선택된 \n 각 군집에서 표본을 선택한다.\n- 집락추출법\n\n### 최적회귀방정식을 선택하기 위한 방법으로 사용되는 변수 선택법(varialble selection)에 대한 설명과 가장 거리가 먼 것은?\n- 최적선택법(Optimum Selection)은 전진선택법에 의해 변수를 추가하면서 기존변수의 중요도가 악화되면 제거하는 등 단계적으로 변수를 추가하고 제거를 반복하는 방법이다 x (= 단계별 방법)\n- 설명면수의 수가 많아지면 관리가 힘들기 때문에 가능한 적은 수의 설명 변수를 선택해야 한다 o\n- 전진선택법(Forward Selection)은 절편만 있는 상수항으로 시작하여 중요하다고 생각되는 설명변수부터 차례로 하나씩 모형에 추가한다 o\n- 후진제거법(Backward Elimination)은 독립변수 후보 모두를 포함한 모형에서 출발해 가장 적은 영향을 주는 변수부터 하나씩 제거하는 방법이다 o\n\n### 연도별, 계절별, 월별, 등 시간의 흐름에 따라 순서대로 관측되는 관측값을 분석하는 시계열 자료 분석 방법에 대한 설명으로 가장 부적절한 것은?\n- 잡음(noise)은 무작위적인 변동이며 일반적으로 분석을 통해 그 원인을 규명할 수 있다 x\n- 시계열자료는 시간의 흐름에 따라 관측되기 때문에 대체로 독립적이지 않다\n- 짧은 기간 동안의 주기적인 패턴을 계절변동이라 한다\n- 시간이 경과함에 따라 관측값이 지속적으로 증가하거나 감소하는 추세를 갖는 변동을 추세요인이라고 한다\n\n### 자료의 특징이나 분포를 한눈에 보기 쉽도록 하는 시각화는 모집단 분포의 모양을 파악하기 위한 기초적인 방법이다. 자료의 도표화에 대한 다음 설명 중 가장 부적절한 것은?\n- 히스토그램은 자료값에 대한 빈도를 나타내며 자료의 분포를 확인할 수 있다 x\n- 서로 다른 특성값에 대한 자료의 개수를 표로 나타낸 것은 도수분포표라 한다 o\n- 산점도는 자료의 선형 또는 비선형 관계의 여부를 파악하기 위한 방법이다 o\n- 상자 그림은 자료의 위치측도와 산포측도에 해당하는 5가지 통계량을 이용하여 자료를 요약 정리하는 방법이다 o\n\n### 추정이란 표본을 이용하여 모집단의 특성치를 추측하는 과정이다. 
다음 중 추정에 대한 설명으로 가장 부적절한 것은?\n- 점추정이란 \"모수가 특정한 값일 것\" 이라고 추정하는 것으로 모집단의 평균은 표본분산을 통해 추정할 수 있다 x (= 점추정은 미지의 모수에 대하여 가장 근사한 단일 값을 구하는 것) x\n- 각각의 확률분포는 분포의 형태를 결정하는 평균, 분산 등의 모수를 갖는다 o\n- 모집단의 특성을 나타내는 모수는 일반적으로 알려져 있지 않으며 표본을 통해 미지의 모수를 추정하게 된다 o\n- 구간추정이란 \"모수가 특정 구간에 있을 것\"이라고 선언하는 것으로 추정량의 분포에 대한 전쩨가 주어져야 한다 o\n\n### 다음 중 비모수적 방법에 대한 설명으로 가장 부적절한 것은?\n- 비모수적 검정은 자료가 추출된 모집단의 분포에 대한 제약은 없으나 이상치가 있는 경우 그의 영향을 많이 받는다는 단점이 있다 x\n- 특정분포를 가정하지 않으므로 관측값이 어떤 분포를 따르더라도 항상 적용할 수 있는 방법이다 o\n- 비모수적 방법에서 흔히 사용하는 도구는 부호와 순위이다 o\n- 모집단의 분포에 관계없이 적용할 수 있는 방법을 분포무관(distribution free) 방법이라 한다 o\n\n### 다음 중 결측치에 대한 설명으로 가장 부적절한 것은?\n- 관측치가 있지만 실상은 default값이 기록된 경우에도 결측치로 처리해야 하는 것이 바람직하다 x\n- 해당 칸이 비어있는 경우 결측치 여부는 알기 쉽다 o\n- 결측치가 있는 경우 다양한 대치(Imputation)방법을 사용하여 완전한 자료로 만든 후 분석을 진행할 수 있다 o\n- 결측치가 20%이상인 경우에는 해당 변수를 제거하고 분석해야 한다 o\n\n### 고객의 여러 소성(나이, 성별, 작업, 과거 구매 형태 등)을 이용하여 해당 고객의 이탈 여부를 예측하기 위한 모형으로 부적절한 것은?\n- ARMA 모형 x\n- 로지스틱 회귀모형 o\n- 의사결정나무 o\n- 랜덤 포레스트 o\n\n### 주성분 분석의 활용에 관해 보기의 설명 중 가장 부적절한 것은?\n- 변수 간의 비선형성을 확인한다 x\n- 변수의 개수를 줄여 축약된 정보를 한 눈에 볼 수 있게 요약한다\n- 주성분의 정규성 검정을 통해 원자료의 정규성을 체크한다\n- 주성분의 산점도를 통해 이상치를 탐색한다\n\n### 다음 중 시계열 변수를 이용하거나 인과관계 모형으로 연속형 값을 예측하는 정형데이터마이닝 분석 기법으로 적절한 것은?\n- 예측분석 o\n- 분류분석 x\n- 군집분석 x\n- 연관분석 x\n\n### R에서 제공하는 텍스트 마이닝 함수와 그 기능이 연결된 것으로 부적절한 것은?\n- Ida() = 단어의 긍정 또는 부정 여부를 판별 x\n- findFreqTerm() = 문서에서 자주 나타나는 단어를 출력\n- findAssocs() = 단어 간의 연관 정도를 출력\n- tm_map() = 공백 제거, 대소문자 변환, stopword 처리 등\n\n### 다음 중 사회연결망분석을 위한 R라이브러리 이름이 아닌 것은?\n- RandomForest x\n- igraph o\n- sna o\n- ergm o\n" }, { "alpha_fraction": 0.3162134885787964, "alphanum_fraction": 0.39274924993515015, "avg_line_length": 24.461538314819336, "blob_id": "5dbc4b6b1d6b2e588e2e2b6776e2359811c1ef9e", "content_id": "e9791e8e056642c4c1f27d660b1ce1b199bb30ab", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 993, "license_type": "permissive", "max_line_length": 67, "num_lines": 39, "path": 
"/HackerRank/Implement/16.Forming_a_Magic_Square.py", "repo_name": "KimDH94/TIL", "src_encoding": "UTF-8", "text": "#!/bin/python3\n\nimport sys\n\ndef formingMagicSquare(s):\n # Complete this function\n\n cost = []\n n = len(s)\n\n magic_array = [[8, 1, 6, 3, 5, 7, 4, 9, 2],\n [6, 1, 8, 7, 5, 3, 2, 9, 4],\n [4, 9, 2, 3, 5, 7, 8, 1, 6],\n [2, 9, 4, 7, 5, 3, 6, 1, 8],\n [8, 3, 4, 1, 5, 9, 6, 7, 2],\n [4, 3, 8, 9, 5, 1, 2, 7, 6],\n [6, 7, 2, 1, 5, 9, 8, 3, 4],\n [2, 7, 6, 9, 5, 1, 4, 3, 8]]\n array = []\n\n for row in range(n):\n for col in range(n):\n array.append(s[row][col])\n\n for mg in magic_array:\n tmp_sum = 0\n for i in range(9):\n tmp_sum += (abs(mg[i] - array[i]))\n cost.append(tmp_sum)\n\n return min(cost)\n\nif __name__ == \"__main__\":\n s = []\n for s_i in range(3):\n s_t = [int(s_temp) for s_temp in input().strip().split(' ')]\n s.append(s_t)\n result = formingMagicSquare(s)\n print(result)\n" }, { "alpha_fraction": 0.5274725556373596, "alphanum_fraction": 0.5621301531791687, "avg_line_length": 23.12244987487793, "blob_id": "e8a6877de829735f788ade04cb9145b881b2227a", "content_id": "6c4cc5e1151564c4e3e0af5025d3c22aa04b0410", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1755, "license_type": "permissive", "max_line_length": 75, "num_lines": 49, "path": "/ADsP/Day14.md", "repo_name": "KimDH94/TIL", "src_encoding": "UTF-8", "text": "# 데이터 프레임 다루기\n\n- 데이터 프레임 만들기 : data.frame(v1,v2,...,f1,f2)\n- 데이터 프레임내 데이터 선택 : dfm[[1]], dfm[[\"name\"]], dfm$name, dfm[1,], dfm[,1]\n- 데이터 프레임 결합 : rbind(), cbind(), merge()\n- 데이터 프레임 추출 : subset()\n\n# 집단으로 분할하기\n\n- 백터 : split(vec, fac) - 벡터값과 팩터값의 길이가 같아야 한다.\n- 데이터프레임 : split(dfm, fac)\n\n# 함수 적용하기\n\n- 행렬 : apply(mtr, 1, func), apply(mtr,2,func)\n- 리스트 : lappy(lst,func), sapply(lst,func)\n- 데이터 프레임 : lappy(dfm,func), sapply(dfm,func), apply(dfm,func) - 모두 동질일 경우만\n\n# 집단별로 함수 적용하기\n\n- tapply(vec, fac, func)\n- by(dfm, fac, func)\n\n# 병렬 벡터들과 리스트들에 
함수 적용하기\n\n- 벡터 : mapply(func,vec1,vec2,vec3,...)\n- 리스트 : mapply(func, lst1, lst2, lst3,...)\n\n# 문자열 다루기\n\n- 문자열 길이 : nchar(\"문자열\")\n- 벡터의 길이 : length(vec)\n- 문자열 연결하기 : paste(\"단어\",\"문장\",scalar)\n- 하위 문자열 추출하기 : substr(\"문자열\", 시작번호, 끝번호)\n- 구분자로 문자열 추출하기 : strsplit(\"문자열\", 구분자)\n- 문자열 대체하기 : sub(\"대상문자열\",\"변경문자열\",s), gsub(\"대상문자열\",\"변경문자열\",s)\n\n# 날짜 다루기\n\n- 문자열 > 날짜 : as.Date(\"2014 - 12 - 25\")\n as.Date(\"12/25/2014\", format = \"%m/%d%Y\")\n- 날짜 > 문자열 : format(Sys.Date(), format = \"%m/%d/%Y\")\n\n- %b : 축약된 월 이름 (\"Jan\")\n- %B : 전체 월 이름 (\"January\")\n- %d : 두자리 숫자로 된 일(\"31\")\n- %m : 두자리 숫자로 된 월(\"12\")\n- %y : 두자리 숫자로 된 년(\"14\")\n- %Y : 네자리 숫자로 된 년(\"2014\")\n\n" }, { "alpha_fraction": 0.6402438879013062, "alphanum_fraction": 0.6509146094322205, "avg_line_length": 31.799999237060547, "blob_id": "d4c81f121ad97ddd862c7eb4995fe9ae326cd756", "content_id": "bc44675bdc212bcb07c132c06aa0f88c1d4046f1", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1170, "license_type": "permissive", "max_line_length": 88, "num_lines": 20, "path": "/Linux/;_&_&&.md", "repo_name": "KimDH94/TIL", "src_encoding": "UTF-8", "text": "# 결론\n- ; 앞의 명령어가 실패해도 다음 명령어가 실행\n- && 앞의 명령어가 성공했을 때 다음 명령어가 실행\n- & 앞의 명령어를 백그라운드로 돌리고 동시에 뒤의 명령어를 실행\n\n# 명령어의 반환값\n- 리눅스(유닉스)의 모든 명령어는 종료할 때 성공 여부를 알려준다\n- 예를들어, test 디렉토리가 없는 곳에서 mkdir test 명령어를 실행시킨 후에 \n- < echo $? >를 치면 0 의 값이 나오고 반대로 실패한 경우 1이 나온다\n- 즉 리눅스에서는 0이 아닌 값은 실패(false)를 의미한다.\n\n# &와 &&는 다르다\n- & 는 명령어를 백그라운드로 동작시킬 때 사용한다\n- 간단한 예로 mkdir test & cd test 를 실행하면 cd : no such file or directory: test 라는 에러메시지가 뜬다\n- 한편 test 디렉토리는 생성이 된다.\n\n# 명령의 그룹핑 {}\n- mkdir test3 && { cd test3 ㅇ&& touch abc; echo 'success!!' } || echo 'there is no dir';\n- mkdir test3가 성공했을 때 cd test3; touch abc 를 실행시키고 sucess!! 
를 출력하도록 한다\n- 실패했을 때 echo 'There is no dir' 를 실행시킨다.\n" }, { "alpha_fraction": 0.6170212626457214, "alphanum_fraction": 0.6276595592498779, "avg_line_length": 25.809524536132812, "blob_id": "f17385b798e1499a353e85450c20bb6a3651a890", "content_id": "4aa2b642b54c6c08b3d99f54ed74bb22f861a284", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2404, "license_type": "permissive", "max_line_length": 84, "num_lines": 42, "path": "/ADsP/Day02.md", "repo_name": "KimDH94/TIL", "src_encoding": "UTF-8", "text": "# 빅데이터\n\n- 빅데이터 정의\n - Mckinsey,(2011): 일반적인 데이터베이스 소프트웨어로 저장, 관리, 분석할 수 있는 범위를 초과하는 규모의 데이터이다.\n - IDC(2011): 다양한 종류의 대규모 데이터로부터 저렴한 비용으로 가치를 추출하고 데이터의 초고속 수집/발굴/분석을 지원하도록 고안된 차세대\n 기술 및 아키텍쳐이다.\n - 가트너 그룹의 더그 래니의 3V = volume, variety, velocity\n- 빅데이터 정의의 범주 및 효과\n - 데이터 변화 (규모, 형태, 속도)\n - 기술변화 (새로운 데이터 처리, 저장, 분석 기술, 클라우드 컴퓨팅 활용)\n - 인재, 조직 변화 (data scientist, 데이터 중심 조직)\n- 빅데이터에 거는 기대의 비유적 표현\n - 산업혁명의 석탄과 철, 21섹의 원유, 렌즈, 플랫폼\n- 빅데이터가 만들어 내는 본질적인 변화\n - 사전처리 > 사후처리\n - 표본조사 > 전수조사\n - 질 > 양\n - 인과관계 > 상관관계\n\n# 빅데이터의 가치와 영향\n\n- 빅데이터의 활용 기본 테크닉 7가지\n - 연관규칙학습 = 어떤 변인들 간에 주목할 만한 상관관계가 있는지 찾는 방법\n - 유형분석 = 문서를 분류, 조직을 그룹으로 나눌 때, 집단의 특성에 따라 분류할 때\n - 기계학습 = 훈련 데이터로부터 학습한 알려진 특성을 활용해 예측하는 방법\n - 회귀분석 = 독립변수를 조작하며, 종속변수가 어떻게 변하는지를 보고 변인의 관계를 파악\n - 감정분석 = 특정 주제에 대해 말하거나 글을 쓴 사람의 감정 분석\n - 소셜네트워크 = 특정인과 다른 사람이 몇 촌정도의 관계인가를 파악 혹은 영향력 있는 사람을 찾을 때\n - 유전자분석 = 최적화가 필요한 문제의 해결책을 자연선택, 돌연변이 등과 같은 메너티즘을 통해 점진적 진화시는 방법\n\n# 위기 요인과 통제방안\n\n- 빅데이터 시대의 위기 요인\n - 사생활 침해 > 동의에서 책임으로\n - 책임 원칙 헤손 > 결과기반 책임 원칙 고수\n - 데이터 오용 > 알고리즘 접근 허용\n \n# 미래의 빅데이터\n\n- 데이터 : 모든 것의 데이터화 (datafication)\n- 기술 : 진화하는 알고리즘, 인공지능\n- 인력 : 데이터 사이언티스트, 알고리즈미스트\n \n" }, { "alpha_fraction": 0.6810631155967712, "alphanum_fraction": 0.6810631155967712, "avg_line_length": 21.259260177612305, "blob_id": "3f91f51e0a557116b0b5bd9cc6fd9239da7eb199", "content_id": "5f0e9c1ea0e74b72a014f7f1b0dea628587dc623", "detected_licenses": [ "MIT" ], 
"is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1114, "license_type": "permissive", "max_line_length": 45, "num_lines": 27, "path": "/Linux/Directory_Structure.md", "repo_name": "KimDH94/TIL", "src_encoding": "UTF-8", "text": "# /bin - User Binaries\n- 실행가능한 프로그램들을 Binary라고 부른다.\n- 사용자가 사용하는 명령어들이 위치하고 있다.\n\n# /sbin - System Binaries\n- 시스템 관리자, 혹은 목적을 가진 사용자들의 프로그램들.\n- 일반 사용자들이 사용하는 것은 잘 없음.\n- root 사용자들이 쓰는 것은 sbin에 있다고 보면 됨.\n\n# /etc - Configuration Files\n- 이미 설치되어있는 프로그램에 대한 설정\n- 어떤 프로그램을 설치하면 그 프로그램이 동작하는 방법의 설정을 바꾸고 싶을 때\n\n# /var = Variable Files\n- 내용이 바뀔 수 있는 특성을 가짐\n- log와 같은\n\n# tmp - Temporary Files\n- 자동으로 삭제가 됨.\n- 영구적으로 저장해야될 데이터를 넣어서는 안된다.\n- 필요에 의해서 임시로 파일을 저장하기에는 최적의 디렉토리임\n\n# /home - Directory\n- < cd ~ >현재 사용자의 home으로 갈 수 있는 명령어\n\n# /opt - Optional add-on Application\n- htop 같은 프로그램을 설치한다 했을 때 자동으로 적절한 디렉토리에 저장시킴\n\n" }, { "alpha_fraction": 0.6602914333343506, "alphanum_fraction": 0.6730418801307678, "avg_line_length": 23.399999618530273, "blob_id": "dd3d0ca8c926f11b763649cd31e79c588d2f47c7", "content_id": "231fe147467a382acb32f2373b94321d9ab95e4b", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1100, "license_type": "permissive", "max_line_length": 176, "num_lines": 45, "path": "/HackerRank/Warmup/08.Birthday_Cake_Candles.py", "repo_name": "KimDH94/TIL", "src_encoding": "UTF-8", "text": "# Colleen is having a birthday! She will have a cake with one candle for each year of her age. 
When she blows out the candles, she’ll only be able to blow out the tallest ones.\n#\n# Find and print the number of candles she can successfully blow out.\n#\n# Input Format\n#\n# The first line contains a single integer, , denoting the number of candles on the cake.\n# The second line contains space-separated integers, where each integer describes the height of candle .\n#\n# Constraints\n#\n# Output Format\n# Print the number of candles the can be blown out on a new line.\n#\n# Sample Input 0\n#\n# 4\n# 3 2 1 3\n# Sample Output 0\n#\n# 2\n# Explanation 0\n#\n# The maximum candle height is 3 and there are two candles of that height.\n\n#!/bin/python3\n\nimport sys\n\ndef birthdayCakeCandles(n, ar):\n # Complete this function\n ar.sort()\n ar.reverse()\n count = 0\n for i in range(n):\n if ar[0] == ar[i]:\n count += 1\n else:\n break\n return count\n\nn = int(input().strip())\nar = list(map(int, input().strip().split(' ')))\nresult = birthdayCakeCandles(n, ar)\nprint(result)\n" }, { "alpha_fraction": 0.6030150651931763, "alphanum_fraction": 0.6105527877807617, "avg_line_length": 21.11111068725586, "blob_id": "b177f7b8f0aab54e130190478d4a6590f0376429", "content_id": "c248fe5a3e3fa377795a10454fadf46bc42af2cd", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 398, "license_type": "permissive", "max_line_length": 52, "num_lines": 18, "path": "/HackerRank/Implement/20.Designer_PDF_Viewer.py", "repo_name": "KimDH94/TIL", "src_encoding": "UTF-8", "text": "#!/bin/python3\n\nimport sys\n\ndef designerPdfViewer(h, word):\n # Complete this function\n\n ord_list = [(ord(alph)-97) for alph in word]\n\n height_list = [h[height] for height in ord_list]\n\n return max(height_list) * len(word)\n\nif __name__ == \"__main__\":\n h = list(map(int, input().strip().split(' ')))\n word = input().strip()\n result = designerPdfViewer(h, word)\n print(result)\n" }, { "alpha_fraction": 0.49369746446609497, "alphanum_fraction": 
0.5, "avg_line_length": 21.66666603088379, "blob_id": "8bb1331c67a8ec94a99a57337e68ce5baaa4f2a6", "content_id": "d9ae04035147f21a799ae8ff9f2bec907e1f338c", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 476, "license_type": "permissive", "max_line_length": 54, "num_lines": 21, "path": "/HackerRank/Implement/22.Angry_Professor.py", "repo_name": "KimDH94/TIL", "src_encoding": "UTF-8", "text": "#!/bin/python3\n\nimport sys\n\ndef angryProfessor(k, a):\n # Complete this function\n\n on_time = [std for std in a if std <= 0 ]\n\n if len(on_time) < k:\n return \"YES\"\n else:\n return \"NO\"\nif __name__ == \"__main__\":\n t = int(input().strip())\n for a0 in range(t):\n n, k = input().strip().split(' ')\n n, k = [int(n), int(k)]\n a = list(map(int, input().strip().split(' ')))\n result = angryProfessor(k, a)\n print(result)\n" }, { "alpha_fraction": 0.7843137383460999, "alphanum_fraction": 0.7843137383460999, "avg_line_length": 50, "blob_id": "383d80795c029f349e57f41652038445e53bb618", "content_id": "cb3a458587b53cb1482e4d785c02ae1d7d7d47bc", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 51, "license_type": "permissive", "max_line_length": 50, "num_lines": 1, "path": "/Udemy_lecture/Go_from_zero_to_hero/Introduction_to_GUIs/Readme.md", "repo_name": "KimDH94/TIL", "src_encoding": "UTF-8", "text": "# You should run it with a Jupyter Notebook Server\n" }, { "alpha_fraction": 0.6284722089767456, "alphanum_fraction": 0.6284722089767456, "avg_line_length": 21.076923370361328, "blob_id": "f7dae4ecc7a0557b19df19f56885f81cbc3cc049", "content_id": "421d94bda0fc26e02f8eeb86fee64c594e9f8238", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 546, "license_type": "permissive", "max_line_length": 49, "num_lines": 13, "path": "/Linux/Shell_script.md", "repo_name": "KimDH94/TIL", 
"src_encoding": "UTF-8", "text": "## Shell Script\n- 자동화된 작업을 처리할 수 있다.\n- 명령어들, 각본을 짜서 저장해 놓은 파일.\n- ./<파일명> 하면 실행된다.\n\n\n## #!/bin/bash\n- 우리가 작성하고 있는 이 파일을 실행시켰을 때 첫줄에 있는 저 #!이라는 기호를 보고\n- 앞으로 작성할 코드들이 bash를 통해서 해석되어야 한다.\n\n## if ! [ -d bak]; then mkdir bak fi\n- 현재 디렉토리에 bak이라는 디렉토리가 없다면 bak 디렉토리를 만들어라\n- if문이 끝날 땐 fi로 닫아준다.\n\n" }, { "alpha_fraction": 0.5398229956626892, "alphanum_fraction": 0.5951327681541443, "avg_line_length": 30.534883499145508, "blob_id": "c41eb5525123ce96c3d49348477caf7f403d7d04", "content_id": "063ee58b69e0b9f79aa69eade602e5543f26ae8a", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2654, "license_type": "permissive", "max_line_length": 85, "num_lines": 43, "path": "/ADsP/Day22.md", "repo_name": "KimDH94/TIL", "src_encoding": "UTF-8", "text": "# 데이터마이닝\n\n- 정의 : 대용량 데이터에서 의미 있는 패턴을 파악하거나 예측하여 의사결정에 활용하는 방법\n- 통게분석과 데이터마이닝의 차이점\n - 통계분석은 가설이나 가정에 따른 분석이나 검증\n - 다양한 수리 알고리즘을 이용해 데이터 베이스의 데이터로부터 의미 있는 정보를 추출\n- 데이터마이닝 활용 : 시각화분석, 분류, 예측, 군집화, 포케스팅\n- 데이터마이닝 방법론 : 인공지능, 의사결정나무, 군집분석, 연관분석, 로지스틱 회귀분석, 로짓분석, 최근접이웃법 등\n\n# 데이터마이닝 학습법\n\n- 지도학습(supervised learning) : 목적변수가 존재하는 분석, 의사결정나무, 인공신경망, 판별분석, 로지스틱 회귀분석, 사례기반추론 등\n- 비지도학습(Unsupervised learning) : 목적변수가 없이 설명을 위한 분석, 연관성분석, 연속규칙, 군집분석\n\n# 데이터마이닝 추진단계\n\n- 목적 설정 : 마이닝을 위한 명확한 목적을 설정한다.\n- 데이터 준비 : 모델링을 위한 다양한 데이터를 준비하고 데이터 정제를 통해 품질을 보장한다.\n- 데이터 가공 : 목적변수를 정의하거나 모델링을 위한 데이터 형식으로 가공한다.\n- 기법 적용 : 데이터마이닝 기법을 적용하여 정보를 추출한다.\n- 검증 : 마이닝으로 추출한 결과를 검증하고 업무에 적용하여 투자대비성과(ROI) 등 기대효과를 전파한다.\n\n# 데이터 분할\n\n- 구축용(training data) - 50%의 데이터를 모델링을 위한 훈련용으로 활용\n- 검정용(validation data) - 30%의 데이터를 구축된 모형의 과대/과소 추정의 판정 목적으로 활용\n- 시험용(test data) - 20%의 데이터를 테스트데이터나 과거 데이터를 활용하여 모델의 성능 평가에 활용\n\n# 모델의 성능 평가\n\n- 은행의 대출 문제로 본다면 연이율이 20%100만원을 100명에게 대출한다고 할때\n- 실제 분류\n- 1모형 A B 2모형 A B\n a 65 5 a 75 15\n b 10 20 b 0 10\n \n- 기대수익\n - 대수익 = (65명 * 20만원) - (5명 * 100만원) = 800만원\n - 기대수익 = (75명 * 20만원) - (15명 * 100만원) = 0원\n- 기대손실비용\n - 
기대손실비용 = (10명 * 20만원) + (5명 * 100만원) = 700만원\n - 기대손실비용 = (0명 * 20만원) + (15명 * 100만원) = 1500만원\n- 결과 : 기대수익과 기대손실비용 면에서 볼 때 1모형이 우수함\n" }, { "alpha_fraction": 0.48651960492134094, "alphanum_fraction": 0.5036764740943909, "avg_line_length": 31.897958755493164, "blob_id": "12b6f6b364bf2a039fd18893d63c1e5bca26f56e", "content_id": "b3a360a831f27c195bfb206319a1b579bd0b1b13", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2776, "license_type": "permissive", "max_line_length": 98, "num_lines": 49, "path": "/ADsP/Day12.md", "repo_name": "KimDH94/TIL", "src_encoding": "UTF-8", "text": "# 데이터 구조의 정의\n\n- 벡터(Vector)\n - 벡터들은 동질적이다. - 한 벡터의 모든 원소는 같은 자료형 또는 같은 모드(mode)를 가진다.\n - 벡터는 위치로 인덱스 된다. - V[2]는 v벡터의 2번째 원소이다.\n - 벡터는 인덱스를 통해 여러 개의 원소로 구성된 하위 벡터를 반환 할 수 있다. - V[c(2,3)]은 v벡터의 2번째, 3번째 원소로 구성된\n 하위벡터이다.\n - 벡터 원소들은 이름을 가질 수 있다. - V <-c(10,20,30); names(v) <-c(\"Moe\", \"Larry\", \"Curly\")\n V[\"Larry\"]\n Larry\n 20\n\n- 리스트(Lists)\n - 리스트는 이질적이다. - 여러 자료형의 원소들이 포함될 수 있다.\n - 리스트는 위치로 인덱스된다. - L[[2]]는 L 리스트의 2번째 원소이다.\n - 리스트에서 하위 리스트를 추출할 수 있다. - L[c(2,3)]은 L 리스트의 2번째, 3번째 원소로 이루어진 하위 리스트이다.\n - 리스트의 원소들은 이름을 가질 수 있다. - L[[\"Moe\"]]와 L$Moe는 둘다 \"Moe\"라는 이름의 원소를 지칭 한다.\n \n- 데이터 프레임(data frames)\n - 데이터 프레임은 강력하고 유연한 구조. 
SAS의 데이터셋을 모방해서 만들어진다.\n - 특징\n - 데이터 프레임의 리스트의 원소는 벡터 또는 요인이다.\n - 그 벡터와 요인은 데이터 프레임의 열이다.\n - 벡터와 요인들은 동일한 길이이다.\n - 동일한 벡터와 요인들은 데이터 프레임을 사각으로 만든다.\n - 열에는 이름이 있어야 한다.\n - 데이터 프레임의 원소에 대한 접근방법\n - b[1] ; b[\"empno\"]\n - b[[i]] ; b[[\"empno\"]]\n - b$empno\n \n# 그 밖의 데이터 구조들\n\n- 단일값(scalars) : R에서는 원소가 하나인 벡터로 인식/처리\n- 행렬(Matrix) : R에서는 차원을 가진 벡터로 인식\n- 배열(Arrays) : 행렬에 3차원 또는 n차원까지 확장된 형태\n 주어진 벡터에 더 많은 차원을 부여하여 배열을 생성\n- 요인(Factors) : 벡터처럼 생겼지만, R에서는 벡터에 있는 고유값(unique value)의 정보를 얻어 내는데, 이 고유값들을 요인의 수준(level)이라고 한다.\n 요인의 두가지 주된 사용처 : 범주형 변수, 집단 분류\n\n# 리스트 다루기\n\n- 리스트 원소 선택 : L[[n]], L[[\"name\"]], L$name\n\n# 행렬 다루기\n\n- 행렬 설정 : dim(vec) <- c(2,3)\n- 행과열 이름 붙이기 : rownames(mtrx) <- c(\"rowname1\", \" rowname2\", ...)\n colnames(mtrx) <- c(\"colname1\",\"colname2\", ...)\n \n" }, { "alpha_fraction": 0.6702930927276611, "alphanum_fraction": 0.6756216883659363, "avg_line_length": 40.321102142333984, "blob_id": "cab3c4fb414c5d93afa8324202b830543b71f90a", "content_id": "caf3c77466ca4870172fcba6ab1704f63abfb8aa", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 9861, "license_type": "permissive", "max_line_length": 171, "num_lines": 109, "path": "/ADsP/Point_2.md", "repo_name": "KimDH94/TIL", "src_encoding": "UTF-8", "text": "# 출제포인트\n\n### 분석 기획을 위해 필요한 3가지 역량과 분석 대상과 방법에 따른 분석 유형으로 출제 가능성이 높다\n- 필요한 3가지 역량\n - 수학/ 통계학적 지식\n - 정보기술(IT기술, 해킹기술, 통신기술 등)\n - 해당 비즈니스에 대한 이해와 전문성을 포함\n- 분석을 기획한다는 것은 해당 문제 영역에 대한 전문성 역량 및 수학/ 통계학적 지식을 활용한 분석역량과 분석의 도구인 데이터 및 프로그래밍 기술 역량에 대한 균형 잡힌 시각을 가지고 방향성 및 계획을 수립해야 한다는 것\n\n### 목표 시점별 분석 방향, 목표, 유형, 접근 방식에 대한 특성을 묻는 문제\n- 당면한 분석 주제의 해결 (과제 단위)\n - 1차 목표 : Speed & Test\n - 과제의 유형 : Quick & Win\n - 접근 방식 : Problem Solving\n- 지속적 분석 문화 내재화 ( 마스터 플랜 단위)\n - 1차 목표 : Accuracy & Deploy\n - 과제의 유형 : Long Term View\n - Problem Definition\n \n### 데이터 분석 기획시 고려사항 3가지, 그리고 데이터의 유형별 예제도 시험에 자주 나옴\n- 1. 
분석의 기본이 되는 데이터에 대한 고려가 필요하다\n - 분석을 위한 데이터의 확보가 우선적이며, 유형에 대한 분석이 선행적으로 이루어져야 함\n- 2. 분석을 통해서 가치가 창출될 수 있는 적절한 활용방안과 활용 가능한 유즈케이스의 탐색이 필요하다\n - \"바퀴를 재발명하지 마라\" 라는 격언처럼 기존에 잘 구현되어 활용되고 있는 유사 분석 시나리오 및 솔루션을 최대한 활용하는 것이 중요하다\n-3. 분석을 수행함에 있어서 발생하는 장애요소들에 대한 사전 계획 수립이 필요하다\n - 일회성 분석으로 그치지 않고 조직의 역량으로 내재화하기 위해서는 충부하고 계속적인 교육 및 활용방안 등의 변화관리가 고려되어야 한다\n \n### 데이터 분석 방법론을 정의할 때 반드시 필요한 내용인 절차, 방법, 도구와 기법, 태플릿과 산출물을 기억하고 적용업무의 특성에 따른 모델들도 기억하자\n- 방법론\n - 폭포수 모델, 나선형 모델, 프로토타입 모델(개발의 일부분만 먼저 개발)\n- 구성\n - 단계 : 최상위 계층으로서 프로세스 그룹을 통하여 완성된 단계별 산출물이 생선된다\n - 태스크 : 단계를 구성하는 단위 활동으로써 물리적 또는 논리적 단위로 품질검토의 항목이 된다\n - 스텝 : 각 단계는 기준선으로 설정되어 관리되어야 하며 버전관리 등을 통하여 통제된다\n \n### 데이터에서 패턴을 찾는 9개의 프로세스와 KDD 분석 절차 5단계는 시험에 자주 출제되는 부분 입니다\n- 데이터에서 패턴을 찾는 9개 프로세스\n - 분석 대상 비즈니스 도메인의 이해\n - 분석 대상 데이터셋 선택과 생성\n - 데이터페 포함되어 있는 노이즈(Noise)와 이상값(Outlier) 등을 제거하는 정제작업이나 전처리 작업 실시\n - 분석 목적에 맞는 변수를 찾고 필요시 데이터의 차원을 축소하는 데이터를 변경\n - 분석 목적에 맞는 데이터마이닝 기법을 선택\n - 분석 목적에 맞는 데이터마이닝 알고리즘을 선택\n - 데이터마이닝 실행\n - 데이터마이닝 결과에 대한 해석\n - 데이터마이닝에서 발견된 지식 활용\n- KDD 분석 절차 5단계\n - 데이터셋 선택(selection)\n   - 데이터셋 선택(selection)선택에 앞서 분석 대상의 도메인에 대한 이해와 프로젝트 목표 설정이 필수\n   - 데이터베이스  또는 원시 데이터에서  분석에 필요한 데이터 선택\n   - 데이터마이닝에 필요한 목표데이터(target data)를 구성\n - 데이터 전처리(Preprocessing)\n   - 추출된 분석 대상용 데이터 셋에 포함되어 있는 잡음과 이상치, 결측치를 식별하고 필요시 제거하거나 의미 있는 데이터로 재처리하여 데이터 셋을 정제\n   - 데이터 전처리 단계에서 축로 요구되는 데이터 셋이 필요한 경우 데이터에 선택 프로세스 재실행\n  - 데이터 변환(Transformation)\n   - 데이터 전처리 과정을 통해 정제된 데이터에 분석 목적에 맞게 변수를 생성, 선택하고 데이터의 차원을 축소하여 효율적으로 데이터마이닝을 할 수 있도록 데이터에 변경\n   - 데이터마이닝 프로세스를 진행하기 위해 학습용 데이터(training data)와 검증용 데이터(test data)로 데이터에 분리\n - 데이터 마이닝(Data Mining)\n   - 학습용 데이터를 이용해서 분석목적에 맞는 데이터마이닝 기법을 선택하고 적절한 알고리즘을 적용하여 데이터마이닝 작업을 실행\n   - 필요에 따라 데이터 전처리와 데이터 변환 프로세스를 추가로 실행하여 최적의 결과를 산출\n - 데이터 마이닝 결과 평가 (Interpretation/Evaluation)\n   - 데이터마이닝 결과에 대한 해석과 평가 그리고 분석 목적과의 일치성을 확인\n   - 데이터마이닝을 통해 발견한 지식을 업무에 활용하기 위한 방안을 마련\n   - 필요에 따라 데이터에 선택 프로세스에서 데이터마이닝 프로세스를 반복 수행\n\n### CRIDP-DM 4가지 레벨과 6단계 그리고 각 단계별 업무내용을 시험에 자주 출제되는 부분이다. 
반드시 기억하자\n- CRISP-DM 프로세스는 6단계로 구성되어 있으며, 각 단계는 일 방향으로 구성되어 있지 않고 단계 간 피드백을 통하여 단계별 완성도를 높이게 되어 있다.\n- 1. 업무이해(Business Understanding)\n - 비즈니스 관점에서 프로젝트의 목적과 요구사항을 이해하기 위한 단계로써 도메인 지식을 데이터 분석을 위한 문제정의로 변경하고 초기 프로젝트 계획을 수립하는 단계이다.\n - 업무 목적 파악, 상황 파악, 데이터 마이닝 목표설정, 프로젝트 계획 수립\n- 2. 데이터 이해 (Data Understanding)\n - 분석을 위한 데이터를 수집하고 데이터 속성을 이해하기 위한 과정으로 데이터 품질에 대한 문제점을 식별하고 숨겨져 있는 인사이트를 발견하는 단계이다.\n - 초기 데이터 수집, 데이터 기술 분석, 데이터 탐색, 데이터 품질 확인\n- 3. 데이터 준비 (Data Preparation)\n - 분석을 위하여 수집된 데이터에서 분석기법에 적합한 데이터에 편성하는 단계로써 많은 시간이 소요될 수 있다.\n - (분석용 데이터 셋 선택, 데이터 정제, 분석용 데이터 셋 편성, 데이터 통합, 데이터 포맷팅)\n- 4. 모델링 (Modeling)\n - 다양한 모델링 기법과 알고리즘을 선택하고 모델링 과정에서 사용되는 파라미터를 최적화해 나가는 단계이다. 모델링 과정에서 데이터 셋이 추가로 필요한 경우 데이터 준비 단계를 반복 수행할 수 있으며, 모델링 결과를 테스트용 데이터 셋으로 평가하여 모델의 과적합(Overfitting)의 문제를 확인한다.\n - 모델링 기법 선택, 모델 테스트 계획 설계, 모델 작성, 모델 평가\n- 5. 평가 (Evaluation)\n - 모델링 결과가 프로젝트 목적에 부합하는지 평가하는 단계로 데이터마이닝 결과를 최종적으로 수용 할 것인지 판단한다.\n - 분석결과 평가, 모델링 과정 평가, 모델 적용성 평가\n- 6. 전개 (Deployment)\n - 모델링과 평가 단계를 통하여 완성된 모델은 실 업무에 적용하기 위한 계획을 수립하고 모니터링과 모델의 유지보수 계획을 마련한다. 모델은 적용되는 비즈니스 도메인 특성, 입력되는 데이터의 품질 편차, 운영모델의 평가기준에 따라 생명주기(Life cycle)가 다양하므로 상세한 전개 계획이 필요하다\n - 전개 계획 수립, 모니터링과 유지보수 계획 수립, 프로젝트 종료보고서 작성, 프로젝트 리뷰\n \n### 빅데이터 분석을 위한 3개의 계층과 내용은 시험에 자주 나온다\n- 빅데이터를  분석하기 위한 방법론은 계층적  프로세스 3계층으로 구성된다\n - 단계(Phase > Task > Step)\n\n### 빅데이터 분석 방법론의 5단계와 각 단계별 주요 업무는 가장 중요하고 시험에도 자주 나오는 부분이다\n- 분석기획\n - 비즈니스 도메인고 문제점을 인식하고 분석 계획 및 프로젝트 수행계획을 수립한다\n- 데이터 준비 단계\n - 비즈니스 요구사항과 데이터 분석에 필요한 원천 데이터를 정의하고 준비하는 단계이다\n- 데이터 분석 단계\n - 원천 데이터를 분석용 데이터 셋으로 편성하고 다양한 분석 기법과 알고리즘을 이용하여 데이터를 분석하는 단계이다. 
분석 단계를 수행하는 과정에서 추가적인 데이터 확보가 필요한 경우 데이터 준비 단계로 피드백하여 두 단계를 반복하여 진행한다\n- 시스템 구현 단계\n - 분석 기획에 맞는 모델을 도출하고 이를 운영중인 가동 시스템에 적용하거나 시스템 개발을 위한 사전 검증으로 프로토타입 시스템을 구현한다\n- 평가 및 전개\n - 데이터 분석 및 시스템 구현 단계를 수행한 후 프로젝트의 성과를 평가하고 정리하거나 모델의 발전 계획을 수립하여 차기 분석 기획으로 전달하고 프로젝트를 종료하는 단계이다\n### 그리고 \n### 각 \n### 단계별 \n### 업무내용을 ㅅㅎ\n##\n### \n - 데이터셋 선택(selection)도메인에 \n - 데이터셋 선택(selection)\n" }, { "alpha_fraction": 0.6578072905540466, "alphanum_fraction": 0.6622369885444641, "avg_line_length": 35.119998931884766, "blob_id": "64329228e82673f17ccae9cdd74fc7c144e6f01c", "content_id": "be9072eb9a7fb497183af616cef44e6dfc789ce8", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1969, "license_type": "permissive", "max_line_length": 199, "num_lines": 25, "path": "/ADsP/test05_.md", "repo_name": "KimDH94/TIL", "src_encoding": "UTF-8", "text": "# 오답노트\n## 주관식\n\n### 아래에서 언급한 이것은 무엇인가?\n- 이것은 데이터베이스의 구조와 제약조건에 관한 전반적인 명세를 의미하는 것으로서, 데이터베이스를 구성하는 데이터 개체(Entity), 속성(Attribute), 관계 (Relationship) 및 데이터 조작 시 데이터 값들이 갖는 제약 조건 등에 관해 전반적으로 정의한다.\n- 스키마\n\n### 아래의 설명은 빅데이터의 어떤 역할과 관련된 설명인가?\n- \"빅데이터 시대에는 다양한 사업자들이 각종 사용자 데이터나 M2M 센서 등에서 수집된 데이터를 가공처리 저장해 두고, 이 데이터에 접근할 수 있도록 API를 공개하고, 다양한 서드파티 사업자들이 비즈니스에 필요한 정보를 추출해 활용하게 될 것이다\"\n- 플랫폼\n\n### 아래는 데이터 분석기획에서 자주 사용되는 용어를 설명한 것이다. ( ) 에 들어갈 용어는?\n- ( ) 란 기업이 전사 또는 각 업무별 주요 의사결정 포인트에 활용할 수 있는 분석의 후보를 의미한다\n- 분석기회\n\n### 분류할 데이터와 주어진 데이터의 모든 거리를 계산하여 가까운 거리의 데이터를 K개 만큼 찾은 후, 그 중에서 가장 빈도수가 높은 클래스로 분류해주는 기법은?\n- K-Nearest-Neighbor\n\n### 최적화방법은 우리 생활과 밀접하게 연관되어 있다. 어떤 물건을 구입할 때 우리는 몇 가지 대안 중에서 재정적인 고려와 할께 구입 이유, 사용시간, 가격 등 여러 조건을 비교 검토 한 후 결정을 내린다. 이러한 결정을 내릴 떄 최대 효과, 최소비용, 최고의 선택 같은 최적화의 개념을 인식하게 된다. 
이러한 최적화 방법 중 가장 많이 사용되는 방법은?\n- 선형계획법\n\n### R에서 다음의 명령을 수행했을 때 출력되는 결과는?\n X <- c(1,2,3,NA)\n Mean(X)\n- NA\n" }, { "alpha_fraction": 0.6717144250869751, "alphanum_fraction": 0.6717144250869751, "avg_line_length": 35.86000061035156, "blob_id": "595479bd5fb0e37522815ee6af92d2a3619ce3a5", "content_id": "d4cbe56d0749900fccfda07d9511c2125d11beca", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 3957, "license_type": "permissive", "max_line_length": 84, "num_lines": 50, "path": "/ADsP/Day26.md", "repo_name": "KimDH94/TIL", "src_encoding": "UTF-8", "text": "# 군집분석 (Clustering Analysis)\n- 각 객체(대상)의 유사성을 측정하여 유사성이 높은 대상집단을 분류하고, 군집에 속한 객체들의 유사성과 서로 다른 군집에 속한\n 객체간의 상이성을 규명하는 분석 방법\n- 특성에 따라 고객을 여러 개의 배타적인 집단으로 나누는 것\n- 군집의 개수나 구조에 대한 가정 없이 데이터로붜 거리를 기준으로 군집화 유도\n\n# 군집분석의 특징\n- 비교사학습법(unsupervised learning)에 해당하며 타겟변수(종속변수)의 정의가 없이 학습이 가능\n- 데이터를 분석의 목적에 따라 적절한 군집으로 분석자가 정의 가능\n- 요인분석과의 차이 : 유사한 변수를 함께 묶어주는 목적이 아니라 각 데이터(객체)를 묶어줌\n- 판별분석과의 차이 : 판별분석은 사전에 집단이 나누어져 있어야 하지만 군집분석은 집단이 없는 상태에서 집단을 구분\n\n# 군집분석의 거리 측정\n- 데이터가 연속형인 경우 : 유클리드 거리, 표준화 거리, 마할라노비스 거리, 체비셔프 거리, 맨하탄 거리, 캔버라 거리, 민코우스키 거리 등을 활용\n- 데이터가 범주형인 경우 : 자카드 거리 활용\n\n# 계층적 군집분석\n- 계측적 군집방법은 n개의 군집으로 시ㅏㄱ해 점차 군집의 개수를 줄여나가는 방법\n - 최단 연결법 (single linkage, nearest neighbor)\n - n * n 거리 행렬에서 거리가 가장 가까운 데이터를 묶어서 군집을 형성\n - 군집과 군집 또는 데이터와의 거리를 계산시 최단거리(min)를 거리로 계산하여 거리행렬 수정\n - 수정된 거리행렬에서 거리가 가까운 데이터 또는 군집을 새로운 군집으로 형성\n - 최장연결법 (complete linkage, farthest neighbor)\n - 군집과 군집 또는 데이터와의 거리를 계산시 최장거리(max)를 거리로 계산하여 거리행렬 수정\n - 평균연결법 (average linkage)\n - 군집과 군집 또는 데이터와의 거리를 계산시 평균거리(mean)를 거리로 계산하여 거리행렬 수정\n - 와드연결법(ward linkage)\n - 군집내 편차들의 제곱합을 고려한 방법\n - 군집 간 정보의 손실을 최소화 하기 위해 군집화를 진행\n \n# 비계층적 군집분석\n- n개의 개체를 g개의 군집으로 나눌 수 있는 모든 가능한 방법을 점검해 최적화한 군집을 형성하는 것\n - K-평균 군집분석(k-means clustering)\n - 원하는 군집의 개수와 초기 값(seed)들을 정해 seed 중심으로 군집을 형성한다\n - 각 데이터를 거리가 가장 가까운 seed가 있는 군집으로 분류한다\n - 각 군집의 seed 값을 다시 계산한다\n - 모든 개체가 군집으로 할당될 때까지 위 과정들을 반복한다\n - 
K-평균 군집분석 특징\n - 거리 계산을 통해 군집화가 이루어지므로 연속형 변수에 활용이 가능하다\n - K 개의 초기 중심값은 임의로 선택이 가능하며 가급적이면 멀리 떨어지는 것이 바람직하며 초기값을 일렬로 선택하지 않은 것이 좋다\n - 초기 중심으로부터의 오차 제곱합을 최소화하는 방향으로 군집이 형성되는 탐욕적(greedy) 알고리즘이므로 안정된 군집은 보장하나 \n 최적이라는 보장은 없다\n- 장점\n - 알고리즘이 단순하며, 빠르게 수행되어 분석 방법 적용이 용이하다\n - 계층적 군집분석에 비해 많은 양의 데이터를 다룰 수 있다\n- 단점\n - 군집의 수, 가중치와 거리 정의가 어렵다\n - 사전에 주어진 목적이 없으므로 결과해석이 어렵다\n - 잡음이나 이상값에 영향을 많이 받는다\n - 볼록한 형태가 아닌(non-convex) 군집이(예를 들어 U형태 의 군집) 존재할 경우에는 성능이 떨어진다\n \n \n" }, { "alpha_fraction": 0.4842105209827423, "alphanum_fraction": 0.5026316046714783, "avg_line_length": 19, "blob_id": "4beb05a49a4bf24bec44fc7c32feaeb4979a5b23", "content_id": "e66ebfa3ef925a4d3d7c80416071368e02f367db", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 380, "license_type": "permissive", "max_line_length": 41, "num_lines": 19, "path": "/HackerRank/Implement/32.Sherlock_and_Squares.py", "repo_name": "KimDH94/TIL", "src_encoding": "UTF-8", "text": "#!/bin/python3\n\nimport sys\nimport math\ndef squares(a, b):\n # Complete this function\n\n start = math.ceil(a**(0.5))\n end = int(b**(0.5))\n\n return end - start + 1\n\nif __name__ == \"__main__\":\n q = int(input().strip())\n for a0 in range(q):\n a, b = input().strip().split(' ')\n a, b = [int(a), int(b)]\n result = squares(a, b)\n print(result)\n" }, { "alpha_fraction": 0.4771505296230316, "alphanum_fraction": 0.4905914068222046, "avg_line_length": 20.257143020629883, "blob_id": "fb66f82fb53701b102ba49d2af77e1bbebea5671", "content_id": "e7cc8a8195798c7ffaea72728eb2d57225e63f12", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 744, "license_type": "permissive", "max_line_length": 59, "num_lines": 35, "path": "/HackerRank/Implement/13.Counting_Valleys.py", "repo_name": "KimDH94/TIL", "src_encoding": "UTF-8", "text": "#!/bin/python3\n\nimport sys\n\ndef countingValleys(n, s):\n # Complete this function\n conv = [1 if 
s[i] == \"U\" else -1 for i in range(n)]\n step_level = []\n step = 0\n\n for i in conv:\n step += i\n step_level.append(step)\n\n is_vall = 0\n tmp_index = 0\n for idx in range(len(step_level)):\n\n if step_level[idx] == 0:\n check_valley = sum(step_level[tmp_index:idx+1])\n if check_valley < 0:\n is_vall += 1\n tmp_index = idx\n else:\n tmp_index = idx\n else:\n continue\n return is_vall\n\n\nif __name__ == \"__main__\":\n n = int(input().strip())\n s = input().strip()\n result = countingValleys(n, s)\n print(result)\n" }, { "alpha_fraction": 0.6583514213562012, "alphanum_fraction": 0.6757050156593323, "avg_line_length": 31.821428298950195, "blob_id": "36b0c030e6d85ace69b628c986e18e0c33a10b8b", "content_id": "a82c4b0792f694c716a8f564be4414888d36b1bd", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1884, "license_type": "permissive", "max_line_length": 88, "num_lines": 28, "path": "/ADsP/Day16.md", "repo_name": "KimDH94/TIL", "src_encoding": "UTF-8", "text": "# 변수의 구가화\n\n- 신용평가모형 또는 고객 세분화 등의 시스템으로 모형을 적용하기 위해서는 각변수들을 구간화하여 점수를 적용하는 방식이 활용\n- binning : 연속형 변수를 범주형 변수로 변환하기 위해 50개 이하의 구간에 동일한 수의 데이터를 할당하여 의미를 파악하면서 구간을 축소하는 방법\n- 의사결정나무 : 모형을 통해 연속형 변수를 범주형 변수로 변환하는 방법\n\n# 결측값 처리\n\n- 변수에 데이터가 비어 있는 경우 NA, ., 9999999, Unknown, Not Answer 등으로 표현\n- 단순 대치법(single Imputation)\n - completes analysis : 결측값의 레코드를 삭제\n - 평균대치법 : 관측 및 실험을 통해 얻어진 데이터의 평균으로 대치\n - 비조건부 평균 대치법 : 관측 데이터의 평균으로 대치\n - 조건부 평균 대치법 : 회귀분석을 통해 데이터를 대치\n - 단순확률 대치법 : 평균대치법에서 추정량 표준 오차의 과소 추정문제를 보완한 방법으로 Hot-deck 방법, nearest neighbor 방법이 있음\n- 다중 대치법 (multiple Imputation) : 단순 대치법을 m번 실시하여 m개의 가상적 자료를 만들어 대치하는 방법\n\n# 이상값 처리\n\n- bad data : 잘못 입력된 값이나 분석 목적에 부합되지 않는 값인 경우로 삭제\n- 이상값 : 의도하지 않은 현상으로 입력된 값이나, 의도된 극단값인 경우 활용\n- 이상값의 인식\n - 평균으로부터 3표준편차 떨어진 값\n - 기하평균보다 2.5 표준편차 이상 떨어진 값\n - 1사분위와 3사분위 값에서 범위보다 2.5배 이상 떨어진 값\n- 이상값의 처리\n - 절단(trimming) : 이상값이 포함된 레코드를 삭제\n - 조정(winsorizing) : 이상값을 상한 또는 하한 값으로 조정\n \n" }, { 
"alpha_fraction": 0.5878467559814453, "alphanum_fraction": 0.5918097496032715, "avg_line_length": 24.233333587646484, "blob_id": "60aba765d763da67a17af46b0136ad0d7a36bcd7", "content_id": "ab11f93ba9c3972d69b06cebcd81bfd41069c555", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 757, "license_type": "permissive", "max_line_length": 112, "num_lines": 30, "path": "/HackerRank/Implement/14.Electronics_Shop.py", "repo_name": "KimDH94/TIL", "src_encoding": "UTF-8", "text": "#!/bin/python3\n\nimport sys\n\ndef getMoneySpent(keyboards, drives, s):\n # Complete this function\n if s < min(keyboards) + min(drives):\n return -1\n\n spend_list = []\n\n for key in keyboards:\n for usb in drives:\n spend = key + usb\n\n if spend <= s:\n spend_list.append(spend)\n\n else:\n continue\n\n return max(spend_list)\n\ns,n,m = input().strip().split(' ')\ns,n,m = [int(s),int(n),int(m)]\nkeyboards = list(map(int, input().strip().split(' ')))\ndrives = list(map(int, input().strip().split(' ')))\n# The maximum amount of money she can spend on a keyboard and USB drive, or -1 if she can't purchase both items\nmoneySpent = getMoneySpent(keyboards, drives, s)\nprint(moneySpent)\n" }, { "alpha_fraction": 0.5769230723381042, "alphanum_fraction": 0.5979021191596985, "avg_line_length": 12, "blob_id": "6335faabd10b04b71178f3ebdad59734b5745899", "content_id": "ee44bb4e65bda589e67faadc24ab72cf0c5a5221", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 532, "license_type": "permissive", "max_line_length": 43, "num_lines": 22, "path": "/ADsP/Readme.md", "repo_name": "KimDH94/TIL", "src_encoding": "UTF-8", "text": "# ADsP\n- Advanced Data Analytics Semi-Professional\n- 자격증 준비\n\n# 시험일\n-2017년 9월 2일\n\n# 목차\n- 데이터 이해\n - 데이터의 이해\n - 데이터의 가치와 미래\n - 가치 창조를 위한 데이터 사이언스와 전략 인사이트\n- 데이터 분석 기획\n - 데이터 분석 기획의 이해\n - 분석 마스터 플랜\n- 데이터 분석\n - 데이터 분석개요\n - R프로그래밍 기초\n - 데이터 마트\n - 통계분석\n 
- 정형 데이터 마이닝\n - 비정형 데이터 마이닝\n" }, { "alpha_fraction": 0.6017699241638184, "alphanum_fraction": 0.6070796251296997, "avg_line_length": 25.904762268066406, "blob_id": "a763f87b41eab2e3322f5010678f3e9ad46981b2", "content_id": "c9facdd2084564ca29b3c84590d62400e2de9f60", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1121, "license_type": "permissive", "max_line_length": 76, "num_lines": 21, "path": "/ADsP/Day10.md", "repo_name": "KimDH94/TIL", "src_encoding": "UTF-8", "text": "# 변수 다루기\n\n- R에서 뱐수명만 선언하고 값을 할당하면 자료형태를 스스로 인식하고 선언한다.\n- 화면에 프린트하고자 할 때, print()를 사용해도 되지만 변수 값만 표현해도 내용을 출력해준다.\n- 변수에 값을 할당할 때는 대입연산자( <-, <<-, =, ->, ->> ) 를 사용할 수 있으나 <- 를 추천한다.\n- 메모리에 불필요한 변수가 있는지 확인하기 위해서는 ls()를 활용하고 삭제는 rm()을 활용한다.\n\n# 기본적인 통계량 계산\n\n- 평균 : mean()\n- 중간값 : median()\n- 표준편차 : sd()\n- 분산 : var()\n- 공분산 : cov()\n- 상관계수 : cor()\n\n# 함수의 생성 및 활용\n\n- R은 함수형 언어이기 때문에 프로그래머가 직접 활용 가능한 함수를 생성하여 활용할 수 있다.\n- 함수는 function(매개변수1, 매개변수2, ...) 
선언하고 표현식이 2줄 이상인 경우는 {}로 묶어서 함수의 범위를 설정한다.\n- 표현식은 변수 할당, 조건문 (if문)과 반복문 (for문, while문, repeat문) 그리고 전달값(return)으로 구성된다.\n" }, { "alpha_fraction": 0.5136986374855042, "alphanum_fraction": 0.5273972749710083, "avg_line_length": 22.052631378173828, "blob_id": "93809274928792e1c5ffb7269a4279286d987e85", "content_id": "710879714c3ac5cc2f370e0ac5748923060ad741", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 438, "license_type": "permissive", "max_line_length": 41, "num_lines": 19, "path": "/HackerRank/Implement/23.Beautiful_Day_at_the_Movies.py", "repo_name": "KimDH94/TIL", "src_encoding": "UTF-8", "text": "#!/bin/python3\n\nimport sys\n\ndef beautifulDays(i, j, k):\n # Complete this function\n\n beautiful_day = 0\n for num in range(i,j+1):\n reverse_num = int(str(num)[::-1])\n if (num - reverse_num) % k == 0:\n beautiful_day += 1\n\n return beautiful_day\nif __name__ == \"__main__\":\n i, j, k = input().strip().split(' ')\n i, j, k = [int(i), int(j), int(k)]\n result = beautifulDays(i, j, k)\n print(result)\n" }, { "alpha_fraction": 0.6818181872367859, "alphanum_fraction": 0.6943573951721191, "avg_line_length": 32.578948974609375, "blob_id": "b44007a3eecd1c42ed270c54c1d8601c5c2bdeab", "content_id": "8e77092e975bff3157af5a701e4d9790779ec764", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1173, "license_type": "permissive", "max_line_length": 81, "num_lines": 19, "path": "/Linux/SHELL_KERNEL.md", "repo_name": "KimDH94/TIL", "src_encoding": "UTF-8", "text": "![Kernel](http://www.tonypickett.com/wp-content/uploads/2013/09/arch_02.jpg)\n\n# 구조\n- Application > Shell > Kernel > Hardware\n- Hardware\n- Kernel : Hardware를 직접적으로 제어하는 운영체제에서 가장 중심이 되는 core\n- Shell : 사용자가 리눅스에서 ls -al하고 앤터를 치면 현재 파일의 목록을 운영체제가 확인하고 출력해주는데 \n          이렇게 입력한 명령이 Shell에게 명령을 입력해주는 것. 
즉 커널을 직접 제어하는 것은 어렵기 때문에 사람이 이해하기\n 쉬운형태의 명령어를 입력하면 프로그램들이 그것을 해석해서 Kernel에게 전달해주는 것이다.\n- Shell은 사용자가 입력하는 명령을 해석하는 프로그램, 여러가지 Shell이 있다. 자기 취향에 맞게 Shell을 선택하고 사용할 수 있다.\n\n# bash vs zsh\n\n- zsh\n - cd tap키 하면 숨김디렉토리는 보여주지 않는다.\n - 디렉토리 파일의 앞글자만 입력해서 tap키를 누르면 자동완성해준다. 조금 더 편의함\n- bash\n - cd tap키 하면 숨김디렉토리까지 보여준다.\n - 사용자가 입력하는 명령\n" }, { "alpha_fraction": 0.6754453778266907, "alphanum_fraction": 0.67983478307724, "avg_line_length": 40.64516067504883, "blob_id": "62f7f83a9dbdec979e141589a71933d5e05df62a", "content_id": "2576f5e26088fe1143886846b89a704094900162", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 8541, "license_type": "permissive", "max_line_length": 152, "num_lines": 93, "path": "/ADsP/test02.md", "repo_name": "KimDH94/TIL", "src_encoding": "UTF-8", "text": "# 오답노트\n## 객관식\n\n### 자기 디스크나 자기 테이프 등과 같이 컴퓨터가 접근할 수 있는 저장매체에 저장되는 것을 의미하는 데이터베이스의 특징으로 알맞은 것은?\n- 데이터베이스는 저장된 데이터이다.\n- 데이터베이스는 변화하는 데이터이다 (= 항상변화하면서도 항상 현재의 정확한 데이터를 유지)\n- 데이터베이스는 공용 데이터이다 (= 여러사용자가 서로 다른 목적으로 데이터를 공용으로 이용)\n- 데이터베이스는 통합된 데이터이다 (= 데이터가 중복되지 않음)\n\n### 빅데이터 회의론을 넘어 가치 패러다임의 변화를 설명한 것 중 설명이 잘못 된 것은? 
(잘 된 것)\n- 과거의 아날로그 세상을 컴퓨터와 워드, 파워포인트와 같은 오피스 프로그램을 통해 디지털화 되면서 새로운 패러다임으로 바뀌었다\n- 현재는 전 세계에 흩어져 있는 다양한 정보를 쉽게 찾을 수 있는 연결의 시대에 있고 구글과 네이버와 같은 서비스를 통해 다양한 정보를 구하고 있다\n- 미래는 필요한 정보를 나 대신 누군가가 찾아서 나에게 서비스해 주는 에이젼트 시대가 도래할 것이다\n- 디지털화(과거) > 연결(현재) > 에이젼트(미래)\n\n### KDD 분석 절차 중 분석 목적에 맞는 변수를 찾고 데이터 차원을 축소하는 과정은?\n- 데이터 변환\n- 데이터 전처리 x (= 데이터속에 있는 잡음, 및 이상치를 필요하면 제거하는 등 관리하는 것)\n\n### 데이터 분석 방법론 중 CRISP-DM에 대한 설명으로 옳지 것은?\n- 1996년 유럽연합의 ESPRIT에서 있었던 프로젝트에서 시작되어 SPSS, NCR, Daimler Chrysler 등이 참여하였다\n- 모델링과정에서 데이터셋이 추가로 필요한 경우 데이터 준비 단계를 반복 수행할 수 있다\n- CRISP-DM은 계층적 프로세스 모델로써 4레벨로 구성되어 있다.\n- 각 단계는 폭포수 모델처럼 구성되어져 있다 x (= 일방향 x, 단계간 피드백을 통해 단계별 완성도를 높인다)\n\n### 분석 과제를 도출하기 위한 상향식 접근방식에 대한 설명으로 옳지 않은 것은?\n- 상향식 접근방식의 데이터 분석은 비지도 학습방법에 의해 수행된다\n- 인과관계로부터 상관관계분석으로의 이동이라는 변화를 만들었다\n- 사물을 있는 그대로 인식하는 'what'관점에서 접근한다\n- 분석적으로 사물을 인식하려는 'why'관점에서 접근한다 x (= 하향식 접근)\n\n### 분석과제의 주요 관리 영역은?\n- Data Size, Data Complexity, Speed, Accuracy & Precision, Analytic Complexity\n\n### 기업의 데이터 분석 수준을 진단하는 과정에서 기업에 필요한 6가지 분석 구성요소를 갖추고 있고, 현재 부분적으로 도입되어 지속적인 확산이 필요한 기업들을 분석 수준을 포트폴리오 사분면으로 정의한다면 어디에 해당하는가?\n- 확산형 기업\n- 준비형 기업 (= 기업에 필요한 데이터, 인력, 조직, 분석업무, 분석기법 등이 적용되어 있지 않아 사전준비가 필요한 기업)\n- 도입형 기업 (= 기업에서 활용하는 분석업무, 기법 등은 부족하지만 적용조직 등 준비도가 높아 바로 도입할 수 있는 기업\n- 정착형 기업 (= 준비도는 낮으나 조직, 인력, 분석업무, 분석기법 등을 기업 내부에서 제한적으로 사용하고 있어 1차적으로 정착이 필요한 기업\n\n### 다음 중 R에서 사용가능한 데이터 오브젝트에 관한 설명으로 가장 부적절한 것은?\n- 데이터프레임은 테이블로 된 데이터 구조로써 행렬로 표현된다 x (= 행렬이 아닌 리스트 구조이다)\n- 차원을 가진 벡터를 행렬이라고 한다 o\n- 리스트에서 원소들은 다른 모드여도 상관없다 o\n- 벡터에서 모든 원소는 같은 모드여야 한다 o \n\n### 다음 중 벡터가 제대로 생성되지 않은 경우는 무엇인가?\n- x <- c(\"seo\", 823, TRUE)\n- s <- c(\"1\", Chloe, Nicole) x (= \"Chloe\", \"Nicole\"로 해줘야함)\n- h <- c(1*pi, 2+pi)\n- w <- c(TRUE, 1,2,3)\n\n### 다음은 데이터의 척도에 관한 설명이다 옳은 것은?\n- 명목척도는 측정 대상이 어느 집단에 속하는지 분류할 때 사용되며 성별, 출생지 정보가 해당된다\n- 순서척도는 측정 대상이 순서를 갖는 자료를 의미하며, 만족도, 선호도, 학년, 신용등급 정보가 해당된다\n- 구간척도는 측정 대상의 순서와 순서사이의 간격이 의미가 있는 자료를 의미하며, 온도, 물가지수, 주가지수 정보가 해당된다\n- 비율척도는 측정대상의 값이 비율로 정의되는 자료를 의미하며, 물가성장율, 흡연감소율 정보가 해당된다 x (= 간격에 대한 
비율이 의미를 가진다. 무게, 나이, 시간, 거리)\n\n### 다음은 확률변수에 관한 설명이다. 설명이 옳은 것은?\n- 확률변수는 특정값이 나타날 가능성이 확률적으로 주어지는 변수이며, 실수값으로 표현된다\n- 이산형 확률변수는 확률변수의 공간이 유한하거나 셀 수 있는 경우를 의미하며, 이항분포, 기하분포, 다항분포가 해당된다 (= 추가적으로 베르누이 확률분포, 포아송분포도 있음)\n- 균일분포는 확률변수의 구간[a,b] 내에서 모든 확률이 동일한 분포를 의미하며 확률은 1/(b-a)가 된다\n- 연속형 확률변수는 확률변수의 공간이 무한한 경우를 의미하며, 베르누이 확률분포, 포아송분포, 정규분포가 해당된다 x (= 연속형 확률분포는 정규분포, T분포, F분포 이다)\n\n### 두 개 이상의 독립벼누를 사용해 하나의 종속변수의 변화를 설명하는 다중회귀분석을 실시할 것이다. 다음 중 모형을 적합 시킨 후, 모형이 적절한 지 확인하기 위해 체크해야 할 사항dms?\n- F-value를 통해 모형이 통계적으로 유의한지 확인하다\n- 모형이 데이터에 잘 적합되어 있는지를 확인한다\n- t-value, p-value를 통해 유의한지 확인한다\n- 상관계수를 통해 모형의 설명력을 확인한다 x (= 회귀분석 이전의 단계에서 확인해야함)\n\n### SQL을 활용하거나 SAS에서 proc sql로 작업하던 사용자들에게 R프로그램을 지원해 주는 패키지는 무엇인가?\n- sqldf (= 표준 SQL에서 사용되는 문장을 모두 활용이 가능)\n\n### 비계층적 군집분석의 장점에 대한 설명은?\n- 주어진 데이터의 내부 구조에 대한 사전 정보가 없어도 의미 있는 결과를 얻을 수 있다\n- 다양한 형태의 데이터의 적용이 가능하다\n- 분석방법의 적용이 용이하다\n- 사전에 주어진 목적이 없으므로 결과 해석이 쉽다 x (= 어려움)\n\n### 데이터마이닝 분석방법 중 연관성분석의 장점이 아닌 것은 무엇인가?\n- 탐색적 기법 : 조건 반응(if-then)으로 표현되는 연관성분석의 결과로 이해가 쉽다\n- 비목적성 분석기법 : 분석 방향이나 목적이 특별히 없는 경우라도 묵적변수만 있으면 분석이 가능하다 (= 목적변수가 없어도 가능)\n- 사용이 편리한 분석 데이터의 형태 : 거래 내용에 대한 데이터를 변환 없이 그 자체로 if-then 이용할 수 있는 간단한 자료구조를 갖는 분석이다\n- 계산의 용이성 : 분석을 위한 계산이 상당히 간단하다\n\n### 텍스트마이닝 패키지인 TM에서 영어 문서 A에 포함된 단어의 띄어쓰기와 시제를 모두 표준화하기 위해 사용하는 R프로그래밍으로 적합한 것은?\n- A <- tm_map(A,removeWords,stopwords(\"english\"))\n\n### 사회연결망 분석(social network analysis)의 네트워크 구조를 파악하는 기법 중 하나로 각 노드 간의 거리를 근거로 중심성을 측정하는 방법으로 연결된 노드 뿐 아니라 간접적으로 연결된 모든 노드 간의 거리를 합산해 중심성을 측정하는 기법은 무엇인가?\n- 근접 중심성\n- 연결정도 중심성 (= 한 점에서 직접적으로 연결된 점들의 합)\n- 매게 중심성 (= 네트워크 내에서 한 점이 담당하는 매개자 혹은 중재자 역할의 정도)\n- 위세 중심성 (= 자신의 연결정도 중심성으로부터 발생하는 영향력과 자신과 연결된 타인의 영향력을 합하여 결정한다)\n" }, { "alpha_fraction": 0.7259100675582886, "alphanum_fraction": 0.7259100675582886, "avg_line_length": 34.92307662963867, "blob_id": "c778e8d7fe1ab2e07dbf914073d8994200243ef6", "content_id": "4b9ee1014d8f87113c4c64e1f9b56635649fdba6", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": 
"Markdown", "length_bytes": 847, "license_type": "permissive", "max_line_length": 78, "num_lines": 13, "path": "/Linux/SSH.md", "repo_name": "KimDH94/TIL", "src_encoding": "UTF-8", "text": "# 원격제어 (SSH)\n- 매우 중요\n- 이 컴퓨터를 통해서 인터넷 너머에 있는 서버컴퓨터를 원격제어를 할 경우 SSH를 사용\n- SSH Client 에서 입력한 명령어가 SSH Server 가 그 명령어를 컴퓨터에 전달하고 그 결과를 SSH Client로 보여준다.\n- 웹브라우저의 client와 server가 주고받는 과정과 매우 비슷하다. 결국 client와 server의 관계\n\n# 설치 과정\n- install openssh-server : 현재 리눅스에다가 openssh라는 구체적인 서버의 이름을 설치\n- +openssh-client\n- install이 끝나면 service ssh start\n- ps aux | grep ssh : 잘 실행되고 있는지 확인\n- openssh-client가 설치되어있는 컴퓨터에서 ssh 사용자이름@<ip주소> 를 입력하면 접속\n- 이제부터 명령을 내리면 원격에 있는 컴퓨터를 제어하게 됨.\n" }, { "alpha_fraction": 0.7023305296897888, "alphanum_fraction": 0.7023305296897888, "avg_line_length": 24.513513565063477, "blob_id": "5621d1f69c8c282595620cabb8cae6c9a8cbe81b", "content_id": "8ebd89642f86bc94349e726ffcbae64424b11039", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2198, "license_type": "permissive", "max_line_length": 81, "num_lines": 37, "path": "/ADsP/Day07.md", "repo_name": "KimDH94/TIL", "src_encoding": "UTF-8", "text": "# 데이터 처리 과정\n\n- 데이터 분석을 위해서는 DW나 DM을 통해 분석데이터를 구성해야 한다.\n- 신규데이터나 DW에 없는 데이터는 기존 운영시스템(legacy)에서 가져오기 보다는 운영시스템에서\n 임시로 데이터를 저장하는 스테이징영역(staging area)에서 데이터를 전처리해서 운영데이터 저장소(ODS)\n 에 저장된 데이터를 DW와 DM와 결합하여 데이터를 구성하도록 한다.\n\n# 시각화 기법\n\n- 가장 낝은 수준의 분석이지만 복잡한 분석을 보다 더 효율적으로 해석할 수 있어 빅데이터 분석에서 필수적인 분석방법이다.\n- 여러 차트형식의 시각화와 트리구조, 다이어그램 맵, 워드클라우드 등이 있다.\n\n# 공간분석\n\n- 공간적 차원과 관련된 속성들을 시각화하는 분석으로 지도위에 관련된 속성들을 생성하고 크기, 모양, 선 굵기 등을 구분하여\n 인사이트를 얻는다.\n \n# 탐색적 자료분석(EDA)\n\n- 다양한 차원과 값을 조합해 가며 특이점이나 의미있는 사실을 도출하고 분석의 최종목적을 달성해가는 과정이다.\n- 빅데이터 시대가 도래하면서 데이터 양이 많아지면서 더욱더 활용이 많아진다.\n\n# 데이터마이닝\n\n- 대용량의 자료로부터 정보를 요약하고 미래에 대한 예측을 목표로 자료에 존재하는 관계, 패턴, 규칙 등을 탐색하고 이를 모형화함으로써\n 이전에 알지 못한 유용한 지식을 추출하는 분석방법이다.\n- 기계학습(인공신경망, 의사결정나무, 클러스터링, SVM), 패턴인식(연관규칙, 장바구니분석)\n\n# 시뮬레이션\n\n- 복잡한 실제상황을 단순화해 컴퓨터상의 모델로 만들어 재현하거나 
변경함으로써 현상을 보다 잘 이해하고 미래의 변화에 따른 결과를 예측하는 데\n 사용하는 고급분석 기법이다.\n \n# 최적화\n\n- 목적함수 값을 최대화 또는 최소화하는 것을 목표로 하는 방법으로 제약조건 하에서 목표값을 개선하는 방식으로 목적함수와 제약조건을 정의해 문제를\n 해결한다.\n" }, { "alpha_fraction": 0.6163522005081177, "alphanum_fraction": 0.6955974698066711, "avg_line_length": 21.08333396911621, "blob_id": "c4da35b8f3863edc8d3283558abcb557a175d614", "content_id": "5d1df1128d04a07d25a03c615f327d1415fc9345", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 795, "license_type": "permissive", "max_line_length": 166, "num_lines": 36, "path": "/HackerRank/Warmup/03.A_Very_Big_Sum.py", "repo_name": "KimDH94/TIL", "src_encoding": "UTF-8", "text": "# You are given an array of integers of size . You need to print the sum of the elements in the array, keeping in mind that some of those integers may be quite large.\n#\n# Input Format\n#\n# The first line of the input consists of an integer . The next line contains space-separated integers contained in the array.\n#\n# Output Format\n#\n# Print a single value equal to the sum of the elements in the array.\n#\n# Constraints\n#\n#\n# Sample Input\n#\n# 5\n# 1000000001 1000000002 1000000003 1000000004 1000000005\n# Output\n#\n# 5000000015\n\n#!/bin/python3\n\nimport sys\n\ndef aVeryBigSum(n, ar):\n # Complete this function\n sum = 0\n for i in range(n):\n sum += ar[i]\n return sum\n\nn = int(input().strip())\nar = list(map(int, input().strip().split(' ')))\nresult = aVeryBigSum(n, ar)\nprint(result)\n" }, { "alpha_fraction": 0.6381842494010925, "alphanum_fraction": 0.6421895623207092, "avg_line_length": 19.80555534362793, "blob_id": "66212d525fe33d1a17322217deb5ce941d0a088d", "content_id": "2fbc4b119b400bb9b26add708ddf2916b3594fc6", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1523, "license_type": "permissive", "max_line_length": 51, "num_lines": 36, "path": "/ADsP/Day18.md", "repo_name": "KimDH94/TIL", 
"src_encoding": "UTF-8", "text": "# 기술통계\n\n- 자료의 특성을 표, 그림, 통계량 등을 사용하여 쉽게 파악할 수 있도록 정리/요약하는 것\n\n# 통계량에 의한 자료 분석\n\n- 중심위치 : 중앙값, 최빈값\n- 산포의 척도 : 분산, 표준편차, 범위, 사분위수범위, 변동계수, 표준오차\n- 분포의 형태 : 왜도, 첨도\n\n# 그래프를 통한 자료 분석\n\n- 범주형자료 : 막대그래프와 파이차트 등\n- 연속형자료 : 히스토그램, 줄기-잎 그림, 상자그림 등\n- 시계열자료 : 꺾은선 그래프\n\n# 연관성 분석\n\n- 종속변수 vs 독립변수\n- 산점도(scatter plot)로 확인할 수 있는 것\n - 두변수 사이의 선형관계가 성립하는가\n - 두변수 사이의 함수관계가 성립하는가\n - 이상값의 존재 여부와 몇 개의 집단으로 구분되는지를 확인\n- 공분산(covariance)\n - 두 확률변수 간의 방향성을 확인\n\n# 상관분석(correlation analysis)\n\n- 두 변수간의 상관 정도를 상관계수를 통해 확인할 수 있음\n- 상관계수는 -1에서 1사이의 값으로 양수는 양의 상관, 음수는 음의 상관을 표현\n- 상관계수가 0이면 데이터 간의 상관이 없다\n- 피어스 상관계수 - 등간척도 이상으로 측정된 두 변수들의 상관관계 측정\n- 스피어만 순서상관계수 - 순서 또는 서열 척도인 두 변수들 간의 상관관계를 측정\n- R프로그램\n - cor(x,y,method = \"spearman\")\n - rcorr(as.matrix(data명), type = \"spearman\")\n" }, { "alpha_fraction": 0.6278659701347351, "alphanum_fraction": 0.6419752836227417, "avg_line_length": 24.200000762939453, "blob_id": "f2c35137fe6535499938a8f49b81e4a7efe3d32b", "content_id": "cbde256a58017b9b7705e69566bc2a4e187fb70f", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1134, "license_type": "permissive", "max_line_length": 244, "num_lines": 45, "path": "/HackerRank/Warmup/07.Mini_Max_Sum.py", "repo_name": "KimDH94/TIL", "src_encoding": "UTF-8", "text": "# Given five positive integers, find the minimum and maximum values that can be calculated by summing exactly four of the five integers. Then print the respective minimum and maximum values as a single line of two space-separated long integers.\n#\n# Input Format\n#\n# A single line of five space-separated integers.\n#\n# Constraints\n#\n# Each integer is in the inclusive range .\n# Output Format\n#\n# Print two space-separated long integers denoting the respective minimum and maximum values that can be calculated by summing exactly four of the five integers. 
(The output can be greater than 32 bit integer.)\n#\n# Sample Input\n#\n# 1 2 3 4 5\n# Sample Output\n#\n# 10 14\n\n#!/bin/python3\n\nimport sys\n\ndef miniMaxSum(arr):\n # Complete this function\n tmp_max = 0\n tmp_min = 0\n\n for i in arr:\n if min(arr) == max(arr):\n tmp_max = i*4\n tmp_min = i*4\n\n if int(i) > min(arr):\n tmp_max += i\n\n if int(i) < max(arr):\n tmp_min += i\n\n\n print(\"{} {}\".format(tmp_min, tmp_max))\nif __name__ == \"__main__\":\n arr = list(map(int, input().strip().split(' ')))\n miniMaxSum(arr)\n" }, { "alpha_fraction": 0.6462196707725525, "alphanum_fraction": 0.6576319336891174, "avg_line_length": 33.19512176513672, "blob_id": "dfd69c11b4f3c62e4ae971f16d73687edccd4d1d", "content_id": "9601652a3b9d35d0b6608716a84664383cb2e77a", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 3048, "license_type": "permissive", "max_line_length": 90, "num_lines": 41, "path": "/ADsP/Day01.md", "repo_name": "KimDH94/TIL", "src_encoding": "UTF-8", "text": "# 데이터\n\n- 데이터의 정의\n - 존재적 트성 = 객관적 사실\n - 당위적 특성 = 추론, 예측, 전망, 추정을 위한 근거\n- epdlxj\n- 데이터의 특징\n\n - 정성적 데이터: 언어, 문자 (ex: 회사 매출이 증가)\n - 정량적 데이터: 수치, 도형, 기호 (ex: 나이, 몸무게, 주가 등)\n\n- 지식\n - 암묵지: 학습과 경험을 통해 개인에 체화된 지식 (ex: 김치담그기, 자전거타기) 공유와 전달이 어려움, 내면화 >> 공통화 필요\n - 형식지: 문서나 메뉴얼처럼 형식화된 지식 (ex: 교과서, 비디오, DB) 공유와 전달이 용이 >> 연결화\n \n# 데이터와 정보 (DIKW)\n\n- 데이터: 개별 데이터 자체로 의미가 중요하지 않은 객관적 사실\n- 정보: 데이터의 가공, 처리와 데이터간 연관관계 속에서 의미가 도출\n- 지식: 정보를 구조화하여 유의미한 정보를 분류하고 개인적 경험을 결합하여 내재화한 것\n- 지혜: 지식의 축적과 아이디어가 결합된 창의적 산물\n\n# 데이터베이스\n\n- 용어\n - 1950년대 미국의 군대의 데이터의 기지라는 뜻으로 데이터베이스 탄생\n - 1963년 미국 SDC가 개최한 심포지엄에서 공식용어로 사용\n - 1970년대 초반 유럽에서 데이터베이스라는 단일어로 일반화 됨\n - 1975년 국내에서 미국의 CAC가 KORSTIC을 통해 처음으로 서비스 됨\n- 정의\n - EU: 체계적이거나 조직적으로 정리되고 전자식 또는 기타 수단으로 개별적으로 접근할 수 있는 독립된 저작물, 데이터 또는 기타 소재의 수집물\n - 국내 저작권법: 소재를 체계적으로 배열 또는 구성한 편집물로 개별적으로 그 소재에 접근하거나 그 소재를 검색할 수 있도록 한 것\n - 국내 컴퓨터 용어사전: 동시에 복수의 적용 업무를 지원할 수 있도록 복수 이용자의 요구에 대응해서 데이터를 받아들이고 저장, 공급하기 위하여\n 일정한 
구조에 따라서 편성된 데이터의 집합\n- 특징\n - 통합된 데이터(integrated data): 동일한 내용의 데이터가 중복되어 있지 않다는 것을 의미함. 데이터 중복은 관리상의 복잡한 부작용을 초래\n - 저장된 데이터(stored data): 자기 디스크나 자기 테이프 등과 같이 컴퓨터가 접근할 수 있는 저장 매체에 저장되는 것을 의미 함. 데이터베이스는\n 기본적으로 컴퓨터 기술을 바탕으로 한 것.\n - 공용 데이터(shared data): 여러 사용자가 서로 다른 목적으로 데이터를 공동으로 이용한다는 것을 의미 함. 대용량화되고 구조가 복잡한 것이 보통\n - 변화되는 데이터(changeable data): 데이터베이스에 저장된 내용은 곧 데이터베이스의 현재 상태를 나타냄. 다만 이 상태는 새로운 데이터의 삽입,\n 기존 데이터의 삭제, 갱신으로 항상 변화하면서도 항상 현재의 정확한 데이터를 유지해야 함\n" }, { "alpha_fraction": 0.6985998749732971, "alphanum_fraction": 0.6997052431106567, "avg_line_length": 41.40625, "blob_id": "acf407252203c605b4031c13cb61f63a660ccd69", "content_id": "5cda1f9f639df01294785181436d87edf3eb00e5", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 6066, "license_type": "permissive", "max_line_length": 140, "num_lines": 64, "path": "/ADsP/test03.md", "repo_name": "KimDH94/TIL", "src_encoding": "UTF-8", "text": "# 오답노트\n## 객관식\n\n### 일반적인 빅데이터의 정의는?\n- 빅데이터는 일반적인 데이터베이스 소프트웨어로 저장, 관리, 분석 등 범위를 초과하는 규모의 데이터이다\n- 빅데이터는 대규모 데이터로부터 저렴한 비용으로 가치를 추출하고 데이터의 초고속 수집, 발굴, 분석을 지원하도록 고안된 차세대 기술 및 아키텍쳐이다\n- 빅데이터는 데이터의 양(volume), 데이터 유형과 소스의 다양성(variety), 데이터 수립과 처리 츠견에서 속도(velocity)가 급격히 증가하면서 나타났다.\n\n### 정보위주의 처리를 의미하며, 다양한 비즈니스 관점에서 쉽고 빠르게 다차원적인 데이터에 접근하여 의사결정에 활용할 수 있는 정보를 얻게 해주는 기술을 의미하는 시스템으로 알맞은 것은?\n- OLAP 시스템\n- OLTP 시스템 : 여러 단말에서 보내온 메시지에 따라 호스트 컴퓨터가 데이터 베이스를 액세스하고, 바로 처리 결과를 돌려보내는 형태를 말함\n- ERP 시스템 : 인사, 재무. 생산 등 기업의 전 부문에 걸쳐 독립적으로 운영되던 각종 관리시스템의 경영자원을 하나의 통합 시스템으로 재구축함으로써 생산성을 극대화하려는 경영혁신기법이다\n\n### 다음 중 용어와 의미가 잘못 연결된 것을 모두 고르시오\n- Business Intelligence = 기업이 보유하고 있는 수많은 데이터를 정리하고 분석해 기업의 의사결정에 활용하는 일련의 프로세스를 말한다 (맞는말)\n\n### 법률 전문가인 변호인, 금전 거래에 정통한 회계사처럼 컴퓨터와 수학 나아가 통계학이나 비즈니스에 두루 깊은 지식을 갖춘 직업으로 알고리즘에 부당함으로 피해 받는 사람들을 구제할 수 있는 능력을 가진 전문가로 적절한 것은?\n- 알고리즈미스트\n\n### 빅데이터 분석 방법론에서 데ㅣ터 분석 단계는 분석용 데이터를 준비해서 텍스트 분석과 탐색적 분석 그리고 모델링 과정을 수행하게 된다. 
이때, 모델링 과정에서 수행하는 업무로 적절한 것은?\n- 비정형 데이터 분석결과를 통합적으로 활용하여 프로젝트 목적에 맞는 통합 모델링을 수행한다\n- 분석 데이터셋을 훈련용 데이터와 모델 검증력을 위한 테스트 데이터로 분할한다\n- 훈련용 데이터를 활용하여 분류, 예측, 군집 등의 모델을 만들어 가동 중인 운영 시스템에 적용한다\n- 데이터의 기초 통계량(평균, 분석, 표준편차, 최대값, 최소값 등)을 산출한다 x (= 탐색적 분석에서 실시)\n\n### A반과 B반 학생들이 동일한 과목을 들었다고 하자. A반과, B반 학생 모두를 대상으로 과목별 성적의 평균을 구하기 위해 A반 학생 데이터와 B반 학생 데이터를 합치려고 한다. 어떠한 함수를 사용하여야 할까?\n- rbind()\n\n### 다음은 시계열 분석에 대한 설명이다.\n- 시계열분석에서 사용되는 모형은 크게 자기회기모형(AR모형)과 이동평균모형(MA모형)으로 나눈다\n- 자기회귀모형에서 자기상관함수(ACF)는 빠르게 감소하고, 부분자기함수(PACF)는 어느시점에서 절단점을 갖게된다\n- 이동평균모형에서 자기상관함수(ACF)는 절단점을 갖고, 부분자기상관함수(PACF)가 빠르게 감소함을 볼 수 있다\n- 자기회귀누적이동평균모형(ARIMA)은 정상시계열모형으로 차분이나 변환을 통해 AR모형이나 MA모형으로, 둘을 합친 ARMA모형으로 비정상화 할 수 있다 x (= 정상화)\n\n### ARIMA모형에 대한 설명으로 옳은 것은?\n- 자기회귀누적이동평균은 비정상시계열이다\n- 차분이나 변환을 통해 AR모형이나 MA모형으로 정상시계열 모형으로 바꿀 수 있다\n- 차분을 할 필요 없는 시계열은 ARMA(p,q) 모형을 적용할 수 있다\n\n### 데이터를 분리하고 처리한 다음, 다시 결합하는 등 가장 필수적인 데이터 처리기능을 제공하는 패키지로 apply함수를 기반으로 데이터와 출력변수를 동시에 배열로 치환하여 처리하는 패키지는 무엇인가?\n- plyr\n\n### 데이터마이닝 분석 기법 중 의사결정나무의 활용 예가 아닌 것은?\n- 세분화 (Segmentation)\n- 분류 (Classification)\n- 예측(Prediction)\n- 연관성분석 x\n\n### 계층적 군집분석 방법론 중 군집을 생성할 때 거리를 정의하는 방법이 아닌것은?\n- 최단연결법, 최장연결법, 평균연결법, 와드연결법\n\n### 연관성분석의 측도 중 \"P(A교집합B) = (A와 B가 동시에 포하된 거래수) / 전체 거래수\" 를 나타내는 측도는 무엇인가?\n- 지지도(Support)\n- 신뢰도 (Confidnece) (= (A와 B가 동시에ㅔ 포함도니 거래수) / (A 를 포함하는 거래수))\n- 향상도 (Lift) (= A와 B가 동시에 포함된 거래수 / A를 포함하는 거래수 x B를 포함하는 거래수 )\n\n### 텍스트마이닝 패키지인 TM에서 문서별 단어간의 사용여부 또는 빈도수를 빈도표로 표현하고자 할 때, 사용하는 R프로그래밍으로 적합한 것은 무엇인가?\n- A <- DocumentTermMatrix(A)\n\n### 연관성분석의 측도로 향상도에 대해 올바른 것은?\n- A가 주어지지 않았을 때의 품목 B의 확률에 비해 A가 주어졌을 때의 품목 B의 확률의 증가 비율\n\n### 최적화를 성공적으로 수행하기 위한 단계 중 문제를 이해하고 최적화를 이용해 분석하려는 문제점이 무엇인지를 충분히 이햏는 단계는 몇 단계인가?\n- 계층적 군집분석에서 군집을 묶는 순서는 거리가 가장 짧은 노드들을 하나의 군집으로 선택하고 다음에 나머지 노드들을 최장거리순 혹은 최단거리순으로 묶을지 결정을 하고 묶어 나간다. 
두 지점의 거리가 가장 짧은 노드는 d와 e이며 거리는 5.00 이다\n" }, { "alpha_fraction": 0.6392045617103577, "alphanum_fraction": 0.6448863744735718, "avg_line_length": 36.05263137817383, "blob_id": "7d44bf6625f17ecdec589d139a2ca3915b7d6beb", "content_id": "8ff283abd36dcddd0d0c0629411cf941745c134c", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1218, "license_type": "permissive", "max_line_length": 78, "num_lines": 19, "path": "/ADsP/Day11.md", "repo_name": "KimDH94/TIL", "src_encoding": "UTF-8", "text": "# 데이터 입력과 출력\n\n- R에서는 텍스트 데이터 뿐만 아니라 데이터베이스와 다양한 통계프로그램에서 작성된 데이터를 불러들여서 적절한 데이터 분석을 수행할 수 있음\n- R에서는 부동소수점 표현시 7자리 수를 기본으로 세팅되어 있으며 option()함수, digit=\"숫자\"를 지정해서 자리수를 변경\n- 파일에 문자를 출력하고자 할 때 : cat(\"출력할 내용\", file = \"파일명\")\n- R에서는 역슬래쉬(\\)를 인식하지 못하므로 슬래쉬(/) 또는 역슬래쉬 쌍(\\\\)으로 파일의 경로를 지정\n\n# 외부 파일 입력과 출력\n\n- 고정자리 변수 파일 : read.fwf(\"파일명\", width = c(w1, w2, ...))\n- 구분자 변수 파일 : read.table(\"파일명\", sep = \"구분자\")\n- csv 파일 읽기 : read.csv(\"파일명\", header = T) <- 1행이 변수인 경우\n- csv 파일 출력 : write.csv(데이터 프레임, \"파일명\")\n\n# 웹 페이지(web page)에서 데이터 읽어 오기\n\n- 파일 다운로드 : read.cvc(http://www.example.com/download.data.csv)\n- ftp에서 파일 다운로드 : read.csv(ftp://ftp.example.com/download/data/csv)\n- html에서 테이블 : library(XML); readHTMLTable(\"url\")\n" }, { "alpha_fraction": 0.6012784838676453, "alphanum_fraction": 0.6132640838623047, "avg_line_length": 43.69643020629883, "blob_id": "1cb40bf5e14348ebd87dddf611d32a9d684db731", "content_id": "e806fe04d4a827baf524db273f5ebb3cdb882fab", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 5033, "license_type": "permissive", "max_line_length": 104, "num_lines": 56, "path": "/ADsP/Day25.md", "repo_name": "KimDH94/TIL", "src_encoding": "UTF-8", "text": "# 인공신경망(Artificial Neural Network) 연구\n- 1943년 매컬릭(McCulloch)과 피츠(Pitts) : 인간의 뇌를 수많은 신경세포가 연결되 하나의 디지털 네트워크 모형으로 간주하고\n 신경세포의 신호처리 과정을 모형화 하여 단순 패턴분류 모형을 개발\n- 헵(Hebb) : 신경세포(뉴런) 
사이의 연결강도(weight)를 조정하여 학습규칙을 개발\n- 로젠블럿(Rosenblatt, 1955): 퍼셉트콘(Perceptron)이라는 인공세포 개발\n 비선형성의 한계점 발생 -XOR(Exclusive OR) 문제\n- 홉필드(Hopfild), 러멜하트(Rumelhart), 맥클랜드(McClelland) : 역전 파알고리즘 (Backpropaga-tion)을 활용하여 비선형성을 극복한\n 다계층 퍼셉트론으로 새로운 인공신경망 모형 등장\n- 생물학적 신경망과 인공신경망의 유사점\n- 생물학적 신경망 / 인공 신경망\n 세포체 뉴런\n 수상돌기 입력\n 축색돌기 출력\n 시냅스 가중치\n\n# 뉴런(Neuran)\n- 인공신경망은 뉴런이라는 아주 단순하지만 복잡하게 연결된 프로세스로 이루어져 있음\n- 뉴런은 가중치가 있는 링크들ㄹ 연결됨\n- 뉴런은 여러 개의 입력신호를 받아 하나의 출력신호를 생성\n- 뉴런은 전이함수, 즉 활성함수(activation function)을 사용\n - 뉴런은 입력 신호의 가중치 합을 계산하여 임계값과 비교\n - 가중치 합이 임계값보다 작으면 뉴런의 출력은 -1, 같거나 크면 +1을 출력함\n \n# 신경망모형 구축 시 고려사항\n- 입력변수 : 신경망 모형은 그 복잡성으로 인해 입력자료의 선택에 매우 민감\n - 범주형 변수(각 범주의 빈도가 일정수준 이상이고 각 범주의 빈도가 일정할 때 활용)\n - 가변수화하여 적용(성별[남,녀] 남성[1,0], 여성[1,0])\n - 연속형 변수(입력 값의 범위가 변수들간에 큰 차이가 없을 떄 활용)\n - 평균을 중심으로 분포가 댗ㅇ이 아니면 좋지 않은 결과가 산출됨\n - 변환 또는 범주화를 통해 활용하는 것이 적절\n- 가중치 초기값 (역전파 알고리즘의 경우, 초기값에 따라 결과가 많이 달라져 초기값 선택이 매우 중요)\n - 가중치가 0이면 시그모이드 함수는 선형이되고 신경망 모형은 선형모형이 됨\n - 초기값은 0 근처의 랜덤 값으로 선정하고\n - 초기에는 선형모형에서 가중치가 증가하면서 비선형으로 변경 됨\n- 신경망 모형은 비용함수 R(x)는 비볼록함수이고 여러 개의 국소 최소값들(local minima)를 가짐\n - 랜덤하게 선택된 여러 개의 초기값에 대한 신경망을 적합한 후 얻은 해들을 비교하여 가장 오차가 작은 것을 선택하여 최종 예측값을 얻거나\n 평균 (또는 최빈값)을 구하여 최종 예측값으로 선정\n - 훈련자료에 대하여 배깅(bagging)을 적용하여 최종 예측치를 선정\n- 학습률 -상수값을 사용하며, 처음에는 큰 값으로 정하고 반복이 진행되어 해가 가까울 수록 0에 수렴\n - 은닉층과 은닉노드가 많으면 -가중치가 많아져서 과대적합 문제 발생\n - 은니층과 은닉노드가 적으면 -과소적합 문제가 발생\n - 은닉층 수 결정 : 은닉층이 하나인 신경망을 범용근사자(universal approximator)이므로 가급적이면 하나로 선정\n - 은닉노드 수 결정 : 적절히 큰 값으로 결정하고 가중치를 감소하면서 모수에 대한 벌점화 적용\n- 과대 적합 문제 (신경망이 많은 가중치를 추정해야 하므로 과대적합 문제가 빈번)\n - 해결방법\n - 조기종료 : 모형이 적합하는 과정에서 검증오차가 증가하기 시작하면 반복을 중지\n - 선형모형의 능형회귀와 유사한 가중치 감소라는 벌점화 기법 활용\n\n# 로지스틱 회귀분석\n- 반응변수가 범주형인 경우에 적용되는 회귀분석 모형이다\n- 새로운 설명변수(또는 예측변수)가 주어질 때 반응변수의 각 범주(또는 집단)에 속할 확률이 얼마인지를 추정(예측모형)하여, 추정확률을 기준치에 따라\n 분류하는 목적(분류모형)으로 활용된다\n- 이때 모형의 적합을 통해 추정된 확률을 사후확률(Posterior probability)이라고 한다\n- glm() 함수를 활용하여 로지스틱 회귀분석을 실행한다\n- 표현 : glm(종속변수 ~ 독립변수1+ ''' + 독립변수k, family=binomial, data = 데이터셋명)\n- 로지스틱 회귀분석의 결과, B의 추정값이 5.14 
이면, 독립변수의 단위가 증가함에 따라 종속변수가 0에서 1로 바뀔 오즈(Odds)가 exp(5.140) = 170배 증가한다는 의미\n" }, { "alpha_fraction": 0.5664335489273071, "alphanum_fraction": 0.5734265446662903, "avg_line_length": 46.66666793823242, "blob_id": "aeb23fd2cad962ddc25d53a4ee206c2c3e72c1a9", "content_id": "1b758b9a6455a2bf047dfd44f350d4911776b172", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1046, "license_type": "permissive", "max_line_length": 94, "num_lines": 12, "path": "/Linux/Background.md", "repo_name": "KimDH94/TIL", "src_encoding": "UTF-8", "text": "# 학습내용\n- Ctrl + z : 실행중인 프로그램을 백그라운드로 보내는 단축기. 이 기능을 실행하면 명령어가 일시 정지 됨\n- jobs를 치면 실행중인 프로그램을 보여준다. ( background 에 존재하는 프로그램들 )\n- +가 있는 것은 fg를 눌렀을 때 바로 작업화면으로 보여주는 첫번째 순위를 보여주는 것. - 는 두번째\n- fg %2 를 누르면 -표시가 되어있는 프로그램들을 보여준다.\n- kill %번호 를 누르면 종료시키고 싶은 프로그램들을 종료 시킨다. \n- kil -9 %4 좀 더 강력한 종료. \n\n# 실행 할 때부터 프로그램을 background로 보내고 싶다면?\n- ls -R / > result.txt 2> error.log & : 이걸 하게 되면 내용들을 result.txt로 담고 에러가 생길 경우 error.log로 보내라.\n &를 붙이면 이것을 background에서 자동적으로 실행시킬수 있도록 함.\n 즉, 사용자는 다른 업무를 할 수 있게됨.\n" }, { "alpha_fraction": 0.5704971551895142, "alphanum_fraction": 0.6120619177818298, "avg_line_length": 18.79032325744629, "blob_id": "f6cb96088d6fbf3ff2c80879abaae54acb69f7bd", "content_id": "c68f0aee4eee817c67759b8e24c04343b53d1494", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1227, "license_type": "permissive", "max_line_length": 163, "num_lines": 62, "path": "/HackerRank/Warmup/04.Diagonal_Difference.py", "repo_name": "KimDH94/TIL", "src_encoding": "UTF-8", "text": "# Given a square matrix of size , calculate the absolute difference between the sums of its diagonals.\n#\n# Input Format\n#\n# The first line contains a single integer, . 
The next lines denote the matrix's rows, with each line containing space-separated integers describing the columns.\n#\n# Constraints\n#\n# Output Format\n#\n# Print the absolute difference between the two sums of the matrix's diagonals as a single integer.\n#\n# Sample Input\n#\n# 3\n# 11 2 4\n# 4 5 6\n# 10 8 -12\n# Sample Output\n#\n# 15\n# Explanation\n#\n# The primary diagonal is:\n#\n# 11\n# 5\n# -12\n# Sum across the primary diagonal: 11 + 5 - 12 = 4\n#\n# The secondary diagonal is:\n#\n# 4\n# 5\n# 10\n# Sum across the secondary diagonal: 4 + 5 + 10 = 19\n# Difference: |4 - 19| = 15\n\n#!/bin/python3\n\nimport sys\n\ndef diagonalDifference(a):\n # Complete this function\n tmp_1 = 0\n tmp_2 = 0\n\n for x in range(len(a)):\n tmp_1 += a[x][x]\n tmp_2 += a[x][-(x+1)]\n\n res = tmp_1 - tmp_2\n return abs(res)\n\nif __name__ == \"__main__\":\n n = int(input().strip())\n a = []\n for a_i in range(n):\n a_t = [int(a_temp) for a_temp in input().strip().split(' ')]\n a.append(a_t)\n result = diagonalDifference(a)\n print(result)\n" }, { "alpha_fraction": 0.6341991424560547, "alphanum_fraction": 0.6616161465644836, "avg_line_length": 22.89655113220215, "blob_id": "109bee2977e9fd5dbf635fac5d122f7c8db9051a", "content_id": "bf392e29a8cf77f13d0d55956dbcb5058614566d", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1392, "license_type": "permissive", "max_line_length": 209, "num_lines": 58, "path": "/HackerRank/Implement/01.Gradning_Students.py", "repo_name": "KimDH94/TIL", "src_encoding": "UTF-8", "text": "# At HackerLand University, a passing grade is any grade 40 points or higher on a 100 point scale. 
Sam is a professor at the university and likes to round each student’s grade according to the following rules:\n#\n# If the difference between the grade and the next higher multiple of 5 is less than 3, round to the next higher multiple of 5\n# If the grade is less than 38, don’t bother as it’s still a failing grade\n# Automate the rounding process then round a list of grades and print the results.\n#\n# Input Format\n#\n# The first line contains a single integer denoting (the number of students).\n# Each line of the subsequent lines contains a single integer, , denoting student 's grade.\n#\n# Constraints\n#\n# Output Format\n#\n# For each of the grades, print the rounded grade on a new line.\n#\n# Sample Input 0\n#\n# 4\n# 73\n# 67\n# 38\n# 33\n# Sample Output 0\n#\n# 75\n# 67\n# 40\n# 33\n\n#!/bin/python3\n\nimport sys\n\ndef solve(grades):\n # Complete this function\n result = []\n for i in grades:\n tmp = i // 5 + 1\n if i < 38:\n result.append(i)\n\n elif (5*tmp-i) < 3:\n result.append(5*tmp)\n\n else:\n result.append(i)\n return result\n\nn = int(input().strip())\ngrades = []\ngrades_i = 0\nfor grades_i in range(n):\n grades_t = int(input().strip())\n grades.append(grades_t)\nresult = solve(grades)\nprint (\"\\n\".join(map(str, result)))\n" }, { "alpha_fraction": 0.6209150552749634, "alphanum_fraction": 0.6274510025978088, "avg_line_length": 18.125, "blob_id": "e4becb71b03618d2a2781afb92ba9970f1780c31", "content_id": "10a5f7f304e8fe561a07956cd3431efcc94ced57", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 315, "license_type": "permissive", "max_line_length": 56, "num_lines": 8, "path": "/Linux/Cron.md", "repo_name": "KimDH94/TIL", "src_encoding": "UTF-8", "text": "# CRON\n- 정기적으로 해야할 일 (ex: 정기적으로 데이터를 백업, 정기적으로 누군가에게 메일을 보내는 일)\n\n# crontab -e\n- 내가 하고자 하는 일을 정의할 수 있음.\n\n# 2>&\n- 표준에러를 표준출력화 시키는 것. 
즉 에러가 있을 때 표준출력으로 간다.\n" }, { "alpha_fraction": 0.6413690447807312, "alphanum_fraction": 0.6979166865348816, "avg_line_length": 28.217391967773438, "blob_id": "433db5cf145246fe427eb27f6eb9f1a98a03a768", "content_id": "25c06b0984cffaa443d624f381b25434c1b2dc21", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 940, "license_type": "permissive", "max_line_length": 75, "num_lines": 23, "path": "/README.md", "repo_name": "KimDH94/TIL", "src_encoding": "UTF-8", "text": "# Today I Learned\n- 시작일: 2017년 6월 27일 \n\n# 작성규칙\n- 언어나 기술명으로 폴더를 만든다.\n- python의 경우 ipynb으로 생성, python이 아닌 다른 언어라면 md파일 권장\n- README.md에 추가하기(index처럼)\n\n# INDEX\n- [ADsP](https://github.com/KimDH94/TIL/tree/master/ADsP)\n- [Python](https://github.com/KimDH94/python_study)\n- [Review_Python](https://github.com/KimDH94/TIL/tree/master/Review_Python)\n- [Linux](https://github.com/KimDH94/TIL/tree/master/Linux)\n- [Tensorflow](https://github.com/KimDH94/Tensorflow)\n\n# 10000시간의 법칙\n### Goal ( 18/02/02 ~ )\n- Be Awesom Data_Scientist\n- 한 분야의 전문가가 되기 위해서는 10000시간의 투자가 필요하다고 한다. 
\n- 오직 앉아서 데이터 사이언스에 대해 공부한 시간만 (시간:분) 단위로 업데이트하자.\n- 공부 내용은 간략하게 txt파일로 따로 업데이트\n\n#### 343 : 31 (HHHH : MM)\n" }, { "alpha_fraction": 0.4489361643791199, "alphanum_fraction": 0.457446813583374, "avg_line_length": 19.434782028198242, "blob_id": "9548ad282e5b4b194e82d6eaa5aa99c621f56da7", "content_id": "eb781597db3805d63e649850661d1a7f1b7e5eaa", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 470, "license_type": "permissive", "max_line_length": 32, "num_lines": 23, "path": "/HackerRank/Implement/29.Find_Digits.py", "repo_name": "KimDH94/TIL", "src_encoding": "UTF-8", "text": "#!/bin/python3\n\nimport sys\n\ndef findDigits(n):\n # Complete this function\n result = []\n for x in str(n):\n if int(x) != 0:\n if n % int(x) == 0:\n result.append(x)\n else:\n continue\n else:\n continue\n return len(result)\n\nif __name__ == \"__main__\":\n t = int(input().strip())\n for a0 in range(t):\n n = int(input().strip())\n result = findDigits(n)\n print(result)\n" }, { "alpha_fraction": 0.6624301075935364, "alphanum_fraction": 0.6644636392593384, "avg_line_length": 35.425926208496094, "blob_id": "408c301f78f8aaeac227ac61447f63a9e7155703", "content_id": "3da34ba443966f7b78f4fcea7007bcacd2d098e2", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 3813, "license_type": "permissive", "max_line_length": 93, "num_lines": 54, "path": "/ADsP/Day23.md", "repo_name": "KimDH94/TIL", "src_encoding": "UTF-8", "text": "# 분류분석 (classification) vs 예측분석 (prediction)\n- 공통점 : 레코드의 특정 속성의 값을 미리 알아 맞히는 점\n- 차이점\n - 분류 : 레코드(튜플)의 범주형 속성의 값을 알아 맞히는 것\n - 예측 : 레코드(튜플)의 연속형 속성의 값을 알아 맞히는 것\n- 분류의 예\n - 신용평가모형, 사기방지모형, 이탈모형, 고객세분화\n- 분류기법\n - 회귀분석 - 로지스틱 회귀분석(Logistic regression)\n - 의사결정나무(Decision tree), CART(Classfication and Regression Tree), C5.0\n - 베이지안 분류(Bayesian classification), naive Bayesian\n - 인공신경망(Artificial neural network)\n - 지지도벡터기계(Support vector 
machine)\n - k 최근법 이웃(k-nearest neighborhood)\n - 규칙기바늬 분류와 사례기반추론(Case based reasoning)\n\n# 의사결정나무(decision tree)의 특징\n- 분류함수를 의사결정 규칙으로 이뤄진 나무 모양으로 그리는 방법\n- 의사결정 문제를 시각화해 의사결정이 이뤄지는 시점과 성과를 한 눈에 볼 수 있게 함\n- 계산 결과가 의사결정나무에 직접 나타나게 돼 분석이 간편함\n- 분류 정확도가 좋음\n- 계산이 복잡하지 않아 대용량데이터에서도 빠르게 만들 수 있음\n- 비정상 잡음 데이터에 대해서도 민감함이 없이 분류\n- 한 변수와 상관성이 높은 다른 불필요한 변수가 있어도 크게 영향 받지 않음\n\n# 의사결정나무의 활용\n- 세분화(segmentation) : 데이터를 비슷한 특성을 갖는 몇 개의 ㄱ룹으로 분할해 그룹별 특성을 발견\n- 분류(classification) : 관측개체를 여러 예측변수들에 근거해 목표변수의 범주를 몇 개의 등급으로 분류하고자 하는 경우\n- 예측(prediction) : 자료에서 규칙을 찾아내고 이를 이용해 미래의 사건을 예측하고자 하는 경우\n- 차원축소 및 변수선택(data reduction and variable selection) : 매우 많은 수의 예측변수 중에서 목표변수에 큰 영향을\n 미치는 변수들을 골라내고자 하는 경우\n- 교호작용효과의 파악(interaction effect identification) : 여러 개의 예측변수들을 결합해 목표 변수에 작용하여 파악하고자 하는 경우\n- 범주의 병합 또는 연속형 변수의 이산화(bining) : 범주형 목표변수의 범주를 소수의 몇개로 병합하거나 연속형 목표변수를 몇 개의 등급으로 이산화\n 하고자 하는 경우\n\n# 의사결정분석의 분석\n- 분석 단계 : 성장 단계 >> 가지치기 단계 >> 타당성 평가 단계 >> 해석 및 예측 단계\n- 나무의 가지치기\n - 너무 큰 나무모형은 자료를 과대적합하고 너무 작은 나무모형은 과소적합 할 위험이 있어 마디에 속한 자료가 일정 수 이하일 경우, 분할을 정지하고 가지치기를 실시\n- 불순도에 따른 분할 측도\n - 카이제곱 통계량\n - 지니지수\n - 엔트로피지수\n\n# 의사결정분석의 종류\n- CART (Classification and Regression Tree)\n - 목적변수가 범주형인 경우 지니지수, 연속형인 경우 분산을 이용해 이진분리를 사용\n - 개별 입력변수 뿐만 아니라 입력변수들의 선형결합들 중 최적의 분리를 찾을 수 있음\n- C4.5\n - 다지분리(multiple split)가 가능하고 범주형 입력 변수의 범주 수만큼 분리 가능\n - 불순도의 측도로 엔트로피 지수 사용\n- Chaid\n - 가지치기를 하지 않고 적당한 크기에서 나무모형의 성장을 중지하며 입력변수가 반드시 범주형 변수여야 함\n - 불순도의 측도로 카이제곱 통계량 사용\n" }, { "alpha_fraction": 0.6875584721565247, "alphanum_fraction": 0.6969130039215088, "avg_line_length": 41.7599983215332, "blob_id": "1e7dd445bbf354a8ce811cd135db608660d201d9", "content_id": "d29ee209068f85f90dfb2b50e8ed8ed85cc5e22d", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1705, "license_type": "permissive", "max_line_length": 104, "num_lines": 25, "path": "/Linux/IO_Redirection.md", "repo_name": "KimDH94/TIL", "src_encoding": "UTF-8", "text": "# IO = 
Input/Output\n\n# Outut\n- ls -l > result.txt (목록을 result.txt라는 새로운 텍스트파일에 붙여넣기 하는 것)\n- (>) 이 기호가 redirection 역할. Standard Output을 redirection한 것이지 Standard error를 redirection한게 아님\n- 즉, 프로그래밍 언어(Unix Process)를 통해 처리한 결과를 redirection해서 저장하는 것.(standard output)\n- ( 2> ) : standard output이 아니라 standard error라고 하는 정보의 출력에 대한 redirection의 의미를 가짐.\n- ( 1> ) : redirection을 의미하는데 standard output(표준출력을 의미함)\n- rm rename2.txt 1> result.txt 2> error.log => 출력하는 결과가 있다면 rename2.txt에 저장이 될 것이고 만약 에러가 있다면 error.log로\n 별도로 저장이 될 것이다. \n\n# Input\n- cat만 쳐주면 대기상태로 되고, 입력하면 그대로 출력해준다. 대기상태를 나오려면 (control+d)를 누르면 된다. 즉 사용자가 키보드를 통해서 입력하는 정보를\n  받는다. 정보를 standard input으로 바꾸는 것.\n- cat < hello.txt : 이렇게 해주면  hello.txt파일의 내용을 입력으로 받는다. cat hello.txt의 경우 인자로 받는 것.\n- head linux.txt : 기본 10줄만 출력해서 보여준다. \n  head -n1 linux.txt : linux.txt의 첫줄만 보여준다. \n  head -n1 < linux.txt > one.txt : linux.txt의 첫번째 내용을 one.txt 파일에 저장한다.\n \n# IO Stream\n- 데이터가 input되고 output으로 흘러가는 것을 뜻함.\n\n# IO append\n- ls -al >> result.txt 이것은 >>를 함으로써 추가를 하는 것.(append) > 이것만 사용할 경우 덮어쓰기\n-( << ) 이거는 합쳐서 input하는 것.\n" }, { "alpha_fraction": 0.5, "alphanum_fraction": 0.5, "avg_line_length": 9, "blob_id": "7a65c26591e6718bf20997d76493af9273227c61", "content_id": "efc2f10ddba4f25a56ebd925aa6e87fcf14cb0d2", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 34, "license_type": "permissive", "max_line_length": 14, "num_lines": 2, "path": "/Linux/README.md", "repo_name": "KimDH94/TIL", "src_encoding": "UTF-8", "text": "# 생활코딩 (Linux)\r\n- 참고\r\n" }, { "alpha_fraction": 0.46292948722839355, "alphanum_fraction": 0.47920432686805725, "avg_line_length": 20.269229888916016, "blob_id": "aecc85a97f49ffac0b751a60352f03d569bc0ea8", "content_id": "195bbc76e7cccab9992580debf46f468bb90aa98", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 553, "license_type": "permissive", "max_line_length": 42, 
"num_lines": 26, "path": "/HackerRank/Implement/12.Drawing_Book.py", "repo_name": "KimDH94/TIL", "src_encoding": "UTF-8", "text": "#!/bin/python3\n\nimport sys\n\ndef solve(n, p):\n # Complete this function\n num_list = [num for num in range(n+1)]\n\n if n / 2 >= p:\n idx = num_list.index(p)\n return idx // 2\n\n else:\n if n % 2 == 0:\n num_list.append(n+1)\n num_list.reverse()\n idx = num_list.index(p)\n return (idx) // 2\n else:\n num_list.reverse()\n idx = num_list.index(p)\n return (idx) // 2\nn = int(input().strip())\np = int(input().strip())\nresult = solve(n, p)\nprint(result)\n" }, { "alpha_fraction": 0.7161778807640076, "alphanum_fraction": 0.7199621796607971, "avg_line_length": 36.75, "blob_id": "b5c1ab9c3e5ed828f8357270bdb39f2abb79114a", "content_id": "f986e749419813c54e072cc0c1a1bb9500f867fa", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2075, "license_type": "permissive", "max_line_length": 90, "num_lines": 28, "path": "/ADsP/Day29.md", "repo_name": "KimDH94/TIL", "src_encoding": "UTF-8", "text": "# 사회연결망 분석\n- 개인과 집단들 간의 관계를 노드와 링크로서 모델링해 그 것의 위상구조와 확산 및 진화과정을 계량적으로 분석하는 방법론\n- 개인의 인간관계가 인터넷으로 확대된 사람 사이의 네트워크 분석\n- 사회연결망의 개념은 제이곱 마리노(Jacob Mareno)가 처음 발표하였으나 바르네스(Barnes)가 1954년 \n처음으로 \"사회연결망\" 이라는 용어를 사용\n\n# 사회연결망의 분석 방식\n- 집합론적 방법 - 객체들 간의 관계를 쌍으로 표시\n- 그래프 이론을 이용한 방법 - 객체는 점(노드)로 표시하고 두 객체 간의 연결을 선으로 표시\n- 행렬을 이용한 방법 - 각 객체를 행렬의 행과 열에 대칭적으로 배치하여 표시\n\n# 네트워크 구조를 파악하기 위한 기법\n- 연결정도 중심성(Degree centrality) - 한점에 직접적으로 연결된 점들의 합으로 결정\n- 근접 중심성(Closeness centrality) - 각 노드 간의 거리를 근거로 중심성을 측정하는 방법\n- 매개 중심성(Betweenness centrality) - 네트워크 내에서 한 점이 담당하는 매개자 혹은 중재자 역할ㅇ릐 정도로서 중심성을 측정\n- 위세 중심성(Eigenvector centrality) - 연결된 노드의 중요성에 가중치를 둬 노드의 중심성을 측정\n\n# 네트워크 레벨 통계량\n- degree, shortest paths, reachability, destiny, reciprocity, transitivity, triad census 등\n\n# 커뮤니티의 수를 측정하는 방법 (community detection)\n- Walktrap\n - 일련의 random walk 과정을 통해 커뮤니티를 발견\n - 각 버텍스를 하나의 커뮤니티로 취급하고 시작해서 점차 더 큰 
그룹을 병합하면서 클러스터링\n- Edge Betweenness\n - 그래프에 존재하는 최단거리(shortest path) 중 몇 ㄱ ㅐ가 그 에지를 거쳐가는지를 측정\n - 높은 edge betweenness 점수를 갖는 에지가 클러스터를 분리하는 속성을 가진다는 가정\n - 각 에지의 betweenness를 기반으로 클러스터링\n" }, { "alpha_fraction": 0.5438066720962524, "alphanum_fraction": 0.5679758191108704, "avg_line_length": 17.38888931274414, "blob_id": "0a4d8d0c768937e8c52d5f11cfe3bdfdd4b354c6", "content_id": "6b9a13279ac1da67ea6773fc24a9ac8720387c21", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 331, "license_type": "permissive", "max_line_length": 32, "num_lines": 18, "path": "/HackerRank/Implement/24.Viral_Advertising.py", "repo_name": "KimDH94/TIL", "src_encoding": "UTF-8", "text": "#!/bin/python3\n\nimport sys\n\ndef viralAdvertising(n):\n # Complete this function\n start = 5\n result = 0\n for num in range(1,n+1):\n result += int(start/2)\n\n start = (int(start/2)*3)\n\n return result\nif __name__ == \"__main__\":\n n = int(input().strip())\n result = viralAdvertising(n)\n print(result)\n" }, { "alpha_fraction": 0.6547861695289612, "alphanum_fraction": 0.6568228006362915, "avg_line_length": 40.787235260009766, "blob_id": "e2ee90e2cb3c4e16f403e4ea255ec2c9b73316f9", "content_id": "5c2a3b6413e354d61c626eb46f752af9b1fb4caa", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 4434, "license_type": "permissive", "max_line_length": 87, "num_lines": 47, "path": "/ADsP/Day03.md", "repo_name": "KimDH94/TIL", "src_encoding": "UTF-8", "text": "# 빅데이터 분석과 전략 인사이트\n\n- 빅데이터 회의론 원인\n - 부정적 학습효과 : 과거의 고객관계관리(CRM) = 공포 마케팅, 투자대비 효과 미흡\n - 부적절한 성공사례 : 빅데이터가 필요없는 분석사례, 기존 CRM 분석 성과\n- 싸이월드의 퇴보 원인\n - OLAP과 같은 분석 인프라로 존재했으나 중요한 의사결정에 데이터 분석 활용 못함\n - 웹로그 분석을 통한 일차원적 분석만 집중\n - 소셜 네트워킹 활동 특성과 관련된 분석을 위한 프레임 워크나 평가지표도 없었음\n - 트렌드 변화가 사업모델에 미치는 영향에 대한 전략적 통찰(Insight)을 가지지 못함\n- 전략적 통찰이 없는 ㅂㄴ석의 함정\n - 단순히 일차원적인 분석의 반복ㅇㄴ 해당부서의 업무 영역에서는 효과적이지만 기업의 환경 변화와 고객 변화에 전략적으로 대처하기 힘듬\n - 전략적 
통찰력의 창출에 초점을 맞춰 분석을 활용하면 사업의 중요한 기회를 발굴할 수 있음\n - 최고가 되기 위해서는 일차원적 분석을 통해 분석 경험을 늘리고 작은 성공을 통해 분석의 활용 범위를 넓혀 사업성과를 견인할 수 있는\n 전략적 인사이트를 주는 가치 기반 분석 단계로 발전해야 함\n \n# 데이터 사이언스와 사이언티스트\n\n- 데이터 사이언스\n - 데이터로부터 의미 있는 정보를 추출해내는 학문으로 분석뿐 아니라 이를 효과적으로 구현하고 전달하는 과정까지를 포괄한 개념\n - 데이터 사이언스는 정형 또는 비정형을 막론하고 인터넷, 휴대전화, 감시용 카메라 등에서 생성되는 숫자와 문자, 영상 정보 등\n 다양한 유형의 데이터를 대상으로 함\n - 데이터 사이언스는 데이터 공하, 수학, 통계학, 컴퓨터공학, 시각화, 해커의 사고방식, 해당 분야의 전문 지식을 종합한 학문\n - 데이터 사이언스의 영역은 3개로 분석 분야 IT분야, 비즈니스 분석 분야로 구성 됨.\n- 데이터사이언티스트\n - 데이터 사이언티스트는 데이터 홍수 속에서 헤엄을 치고 데이터 소스를 찾고, 복잡한 대용량 데이터를 구조화, 불완전한 데이터를 서로 연결해야함\n - 데이터 사이언티스트가 갖춰야 할 역량 중 한 가지는 강력한 호기심이며, 호기심이란 문제의 이면을 파고들고, 질문들을 찾고, 검증 가능한 \n 가설을 세우는 능력을 의미\n - 데이터 사이언티스트는 빅데이터에 대한 이론적 지식과 분석 기술에 대한 숙련 기술인 하드 스킬을 가져야함\n - 데이터 사이언티스트는 창의적사고, 호기심, 논리적 비판을 통한 통찰력 있는 분석, 설득력 있는 전달력, 그리고 다 분야간 협력을 위한 커뮤니케이션\n 능력과 같은 소프트 스킬을 가져야 함.\n\n# 빅데이터와 데이터 사이언스의 미래\n\n- 외부 환경적 측면의 인문학 열풍\n - 단순 세계화에서 복합한 세계화로 변화: 컨버전스 > 디버전스\n - 비즈니스 중심이 제품생산에서 서비스로 이동: 고장 나지 않는 제품 > 뛰어난 서비스 응대\n - 경제와 산업의 논리가 생산에서 시장 창조로 변화 : 공급자 중심의 기술경쟁 > 무형자산의 경쟁\n- 가치 패러다임의 변화\n - 1단계 : 디지털화(Digitalization) = 과거 PC와 워드프로세스, 이미징 기술의 발달로 분서를 디지털화 함으로써 가치를 형상화 표준화 함\n - 2단계 : 연결(Connection) = 현재 인터넷과 모바일 기술의 발전으로 다양한 디지털 정보를 필요한 사람에게 연결해서 효과적이고 효율적으로\n 정보를 연결 및 제공 함\n - 3단계 : 에이전시(Agency) = 미래 개인과 기기 그리고 사물에 이르는 방대한 정보를 하이퍼 연결을 통해 필요한 정보를 효과적으로 제공하고\n 관리 할 수 있는 시대로 발전할 것으로 예상\n- 데이터 사이언스의 한계와 인문학\n - 분석은 가정에서 시작해 인간의 해석이 개입되는 단계를 거침\n - 분석 결과를 해석하는 인간의 소양과 인문학적 소양을 통해 보다 발전적인 미래 가치를 도출\n" }, { "alpha_fraction": 0.694915235042572, "alphanum_fraction": 0.7012711763381958, "avg_line_length": 28.5, "blob_id": "6765fb41a8a52575205691ff3d2dcb1c5391b7a9", "content_id": "46e802d5e27820a921938d87351beb558cfa2c08", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 472, "license_type": "permissive", "max_line_length": 65, "num_lines": 16, "path": "/HackerRank/30_days_of_Code/02.Data_Types.py", "repo_name": "KimDH94/TIL", "src_encoding": 
"UTF-8", "text": "i = 4\nd = 4.0\ns = 'HackerRank'\n\n# Declare second integer, double, and String variables.\nl = int(input())\n# Read and save an integer, double, and String to your variables.\nf = float(input())\n# Print the sum of both integer variables on a new line.\nprint(int(i + l))\n# Print the sum of the double variables on a new line.\nprint(f + d)\n# Concatenate and print the String variables on a new line\nv = str(input())\n# The 's' variable above should be printed first.\nprint(s + v)\n" }, { "alpha_fraction": 0.6373966932296753, "alphanum_fraction": 0.6647727489471436, "avg_line_length": 34.85185241699219, "blob_id": "105fe222f357183ef558fd6036e9c4b1087248b5", "content_id": "2bb6a19c9b140e8a2ae9458eb84f64278c9b82fd", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 4213, "license_type": "permissive", "max_line_length": 144, "num_lines": 54, "path": "/ADsP/test05.md", "repo_name": "KimDH94/TIL", "src_encoding": "UTF-8", "text": "# 오답노트\n## 객관식\n\n### 다음 중 데이터베이스 설계 절차가 가장 적절하게 배치된 것은?\n- 요구사항 분석 -> 개념적 설계 -> 논리적 설계 -> 물리적 설계\n\n### 고객의 니즈를 4가지 가치 유형으로 구분할 수 있다. 4가지 가치 유형에 해당하지 않는 것은?\n- 사회적 가치 x\n- 기능적 가치 o\n- 재무적 가치 o\n- 무형의 가치 o\n\n### 다음 중 비즈니스 운영 시나리오를 상세화하기 위한 도구로 가장 적절한 것은?\n- Causal Loop Diagram\n\n### 전사 차원에서 분석을 관리하고 공유하는 단계, 분석 전담 조직을 운영하고, 조직 내 데이터 사이언팃를 확보, 전사 성과의 실시간 분석이 가능하며, 분석 규칙 및 이벤트를 관리, 분석 전ㅇㅇ 서버 도입 및 빅데이터 관리 환경을 구현\n- 확산단계\n\n### 데이터마이닝 기법은 크게 두 가지로 나눌 수 있다. 하나는 교사학습(Supervised Learning)과 비교사학습기법(Unsupervised Learning)이다. 다음 분석 주제 중 비교사학습법을 사용하여 과제를 해결하기에 가장 적절한 것은?\n- 슈퍼마켓에서 본인과 다른 고객들의 구매이력을 바탕으로 할인 쿠폰을 발행한다.\n- 기존에 알고 있는 네트워크 데이터 패킷들의 문제점을 이용하여 네트워크 데이터 패킷의 문제점 종류를 분류한다. x (교사학습)\n- 기존 파산회사와 파산하지 않은 회사들의 재정상태 데이터를 기반으로 회사의 파산 여부를 예측한다. x (교사학습)\n- 문제가 발생한 항공기를 대상으로 수리 기간을 추정한다. x (교사학습)\n\n### 아래 척도 중 이산형 자료에 해당되는 척도를 모두 고르시오\n     가. 구간척도   나. 명목척도   다. 비율척도   라. 
순서척도\n- 가, 나\n\n### 모집단의 모든 원소들에게 1,2,3,..., N 의 일련번호를 부여하고 이를 순서대로 나열한 후에 K개 (K=N/n) 씩 n개의 구간으로 나누고 첫 구간 (1,2,3, ...,K) 에서 하나를 임의로 선택한 후에 K개씩 띄어서 표본을 추출하는 방법이다\n- 계통추출법\n\n### 두 확률변수 X와 Y의 상관계수가 0.5일 때 다음 중 옳은 것은?\n- X + 0.1과 Y의 상관계수는 0.5이다 o\n- -X와 Y의 상관계수는 0.5이다 x\n- X와 2Y의 상관계수는 1이다 x\n- X + 0.3 과 2Y의 상관계수는 0.5 이다 x\n\n### 다음 중 시계열 분석의 기초가 되는 개념인 정상성에 대한 설명으로 가장 부적절한 것은?\n- 정상성을 만족하지 않는 시계열 자료는 모형화 할 수 없다 x\n- 분산은 시점에 의존하지 안흔다 o\n- 모든 시점에 대해 일정한 평균을 가진다 o\n- 공분산은 단지 시차에만 의존하고 실제 특정 지점 t,s에는 의존하지 않는다 o\n\n### 다음 중 시뮬레이션의 장점으로 가장 부적절한 것은?\n- 많은 시간이 지난 후에 결과를 알 수 있는 문제에 대해서는 예측이 불가능하다 x\n- 복잡한 현실문제는 추리적인 방법으로 해결책을 구하지 못할 수 있으며, 이때는 시뮬레이션만이 유일한 해결책이다 o\n- 시뮬레이션 모형이 만들어지면 여러 가지 대안을 쉽게 비교할 수 있다 o\n- 시뮬레이션 모형을 현실문제와 근접하게 만들 수 있기 때문에 이해와 사용이 편리하며, 문제해결에 대해 의사결정자와 대화가 용이하다 o\n\n### 100개의 원소로 구성된 모집단이 있고 모집단의 각 1번부터 100번까지 번호를 부여하였다. 이 모집단에서 10개의 표본을 비복원 단순랜덤추출법으로 추출하고자 한다. 다음 중 가장 부적절한 것은?\n- 1번 원소와 2번 원소가 동시에 표본에 포함될 확률은 1/100 이다 x\n- 비복원 단순랜덤추출법을 적용할 때, 1번 원소가 표본에 포함될 확률은 1/10 이다 o\n- 1번 원소가 표본에 포함될 확률은 100번 원소가 표본에 포함될 확률과 동일하다 o\n- 1번 원소와 2번 원소가 동시에 표본에 포함될 확률은 99번 100번 원소가 동시에 표본에 포함될 확률과 동일하다 o\n" }, { "alpha_fraction": 0.4862385392189026, "alphanum_fraction": 0.5045871734619141, "avg_line_length": 18.81818199157715, "blob_id": "65c3d14bf8bdf8e380f8ca943883895ba7463b97", "content_id": "ff1ea548fff157732c40d6d369188bbc325fecfc", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 436, "license_type": "permissive", "max_line_length": 50, "num_lines": 22, "path": "/HackerRank/Implement/28.Jumping_on_the_Clouds.py", "repo_name": "KimDH94/TIL", "src_encoding": "UTF-8", "text": "#!/bin/python3\n\nimport sys\n\ndef jumpingOnClouds(c, k):\n # Complete this function\n energy = 100\n\n for jump in range(0,n,k):\n if c[jump] == 1:\n energy -= 3\n else:\n energy -= 1\n\n return energy\n\nif __name__ == \"__main__\":\n n, k = input().strip().split(' ')\n n, k = [int(n), int(k)]\n c = list(map(int, 
input().strip().split(' ')))\n result = jumpingOnClouds(c, k)\n print(result)\n" }, { "alpha_fraction": 0.5302245020866394, "alphanum_fraction": 0.542314350605011, "avg_line_length": 24.173913955688477, "blob_id": "42be1fdb87aa245821bdb452ad0c9a12c5caa2ce", "content_id": "4907b7106c46b1ac66e74dcc9ef2dfcfdb68d0cc", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 579, "license_type": "permissive", "max_line_length": 64, "num_lines": 23, "path": "/HackerRank/Implement/27.Sequence_Equation.py", "repo_name": "KimDH94/TIL", "src_encoding": "UTF-8", "text": "#!/bin/python3\n\nimport sys\n\ndef permutationEquation(p):\n # Complete this function\n y = [i for i in range(1, n+1)]\n# result = []\n\n# for x in range(1,n+1):\n# tmp_idx = p.index(x)\n# tmp_y = y[tmp_idx]\n# tmp_idx = p.index(tmp_y)\n# result.append(y[tmp_idx])\n# return result\n\n result = [ y[p.index(y[p.index(x)])] for x in range(1, n+1)]\n return result\nif __name__ == \"__main__\":\n n = int(input().strip())\n p = list(map(int, input().strip().split(' ')))\n result = permutationEquation(p)\n print (\"\\n\".join(map(str, result)))\n" }, { "alpha_fraction": 0.6461538672447205, "alphanum_fraction": 0.6542510390281677, "avg_line_length": 28.404762268066406, "blob_id": "c176be68fe58875411938e9db1f531d5e67cb953", "content_id": "7ba5d84958d7eebef3df6c1cadf82a8f6a9ab0b8", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2605, "license_type": "permissive", "max_line_length": 81, "num_lines": 42, "path": "/ADsP/Day19.md", "repo_name": "KimDH94/TIL", "src_encoding": "UTF-8", "text": "# 회귀분석(regression Analysis)\n\n- 하나 또는 그 이상의 독립변수들이 종속변수에 미치는 영향을 추정할 수 있는 통계기법\n - 단순선형회귀분석 : 독립변수가 하나인 경우\n - 다중선형회귀분석 : 독립변수가 둘이상인 경우\n \n# 회귀분석의 검정\n\n- 회귀식(모형)에 대한 검증 : F검정\n- 회귀계수들에 대한 검증 : t 검정\n- 모형의 설명력 : 결정계수 R^2 = 회귀제곱합/전체제곱합 = SSR/SST 0 <= R^2 <= 1\n - 단순회귀분석의 결정계수는 상관계수 r의 제곱과 같음\n- 선형회귀분석의 가정 (데이터가 
전제로 하는 가정)\n - 선형성 : 입력변수와 출력변수의 관계가 선형관계가 있음\n\n- 선형 입력변수와 출력변수의 산점도로 확인\n - 독립성 : 잔차와 독립변인의 값이 관련이 없어야 함\n - 등분산성 : 독립변인의 모든 값에 대한 오차들의 분산이 일정\n - 비정상성 : 관측치들의 잔차들끼리 상관이 없어야 함\n - 정상성 : 잔차항이 정규분포를 이뤄야 함\n \n- 잔차와 출력변수의 산점도로 확인\n\n# 다중선형회귀석\n\n- 다중공선성(multicollinearity)\n - 다중회귀 분석에서 설명변ㅅ들 사이에 선형관계가 존재하면 회귀계수의 정확한 추정이 곤란\n \n- 다중공선성 검사방법\n - 분산팽창요인(VIF) : 10보다 크면 심각한 문제\n - 상태지수 : 10이상이면 문제가 있다고 30보다 크면 심각\n - 선형관계가 강한 변수 제거\n \n# 변수선택법 (variable selection)\n\n- 모든 가능한 조합 : 모든 가능한 독립변수들의 조합에 대한 회귀모형을 분석해 가장 적합한 모형 선택\n- 전진선택법(forward selection) : 절편만 있는 상수모형으로부터 시작해 중요하다고 생각되는 설명변수부터 차례로 모형에 추가\n - 이해쉬움, 많은 변수에서 활용 변수값의 작은 변동에 결과가 달라져 안정성이 부족\n- 후진소거법(backward selection) : 독립변수 후보 모두를 포함한 모형에서 가장 적은 영향을 주는 변수부터 하나씩 제거\n - 전체 변수들의 정보를 이용 가능, 변수가 많은 경우 활요이 어려움, 안전성 부족\n- 단계별방법(stepwise method) : 전진선택법에 의해 변수를 추가하면서 새롭게 추가된 변수에 기인해 기존 변수가 그 중요도가 약화되면\n 해당변수를 제거하는 등 단계별로 추가 또는 삭제되는 변수를 검토해 더 이상 없을 떄 중단\n" }, { "alpha_fraction": 0.4369247555732727, "alphanum_fraction": 0.44017326831817627, "avg_line_length": 26.984848022460938, "blob_id": "b81d89ee96bfc6691f3ed46d59c5f03183ae6cbb", "content_id": "bdcbad8d4fbb63a3fa0d860dc8a509eae4206756", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2851, "license_type": "permissive", "max_line_length": 85, "num_lines": 66, "path": "/ADsP/Day04.md", "repo_name": "KimDH94/TIL", "src_encoding": "UTF-8", "text": "# 분석기획 방향성 도출\n- 분석기획 : 실제 분석을 수행하기 전에 분석과제를 정의하고 의도했던 결과를 도출할 수 있도록 적절하게 관리할 수 있는\n 방안을 사전에 계획하는 일련의 작업\n \n 분석의 대상(What)\n \n Known Un-known\n --------------------------- \n Known | Optimization | Insight |\n 분석의 방법(HOW) | | |\n Un-known | Solution | Discovery|\n ----------------------------\n \n- 목표 시점 별 분석 기획 방안의 차이\n\n 당면한 분석 주제의 해결 지속적 분석 문화 내재화\n (과제 단위) (마스터 플랜 단위)\n Speed & Test < 1차 목표 > Accuracy & Deploy\n Quick & Win < 과제의 유형 > Long Term View\n Problem Solving < 접근 방식 > Problem Definition\n \n- 분석 기획 시 고려사항\n - 분석의 기본이 되는 데이터에 대한 고려 > 데이터 데이터 유형에 따른 
선행 분석\n (Ex)데이터 유형 : 정형 데이터(DB), 비정형 데이터 (보고서, 이메일, 소셜데이터), 반정형 데이터 (센서를 통한 스트리밍되는 머신데이터)\n - 분석을 통해 가치 창출되는 적절한 활용방안과 유즈케이스 탐색\n - 분석 수행시 발생 가능한 장애요소와 대책에 대한 사전 계획 수립\n \n# 분석 방법론\n\n- KDD 분석 방법론\n - 데이터셋 선택 (Selection)\n - 데이터 전처리 (Preprocessing)\n - 데이터 변환 (Transformation)\n - 데이터 마이닝 (Datamining)\n - 결과 평가 (Interpretation / evaluation)\n\n- CRISP-DM 방법론\n - 업무 이해 (Business understanding)\n - 데이터 이해 (Data understanding)\n - 데이터 준비 (Data preparation)\n - 모델링 (Modeling)\n - 평가 (Evaluation)\n - 전개 (Deplyment)\n\n- 빅데이터 분석 방법론\n 1. 분석기획\n - 비즈니스 이해 및 범위 설정\n - 프로젝트 정의 및 계획 수립\n - 프로젝트 위험계획 수립\n 2. 데이터 준비\n - 필요 데이터 정의\n - 데이터 스토어 설계\n - 데이터 수집 및 정합성 점검\n 3. 데이터 분석\n - 분석용 데이터 준비\n - 텍스트 분석\n - 탐색적 분석\n - 모델링\n - 모델 평가 및 검증\n - 모델 적용 및 운영방안 수립\n 4. 시스템 구현\n - 설계 및 구현\n - 시스템 테스트 및 운영\n 5. 평가 및 전개\n - 모델 발전계획 수립\n - 프로젝트 평가 및 보고\n" }, { "alpha_fraction": 0.47611203789711, "alphanum_fraction": 0.4827018082141876, "avg_line_length": 19.233333587646484, "blob_id": "d40b01c992d367638d60a048fb69a144a561f32a", "content_id": "781cdae27f6ec401a7283a4c00a00a5e1aad72ca", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 607, "license_type": "permissive", "max_line_length": 52, "num_lines": 30, "path": "/HackerRank/Implement/33.Cut_the_sticks.py", "repo_name": "KimDH94/TIL", "src_encoding": "UTF-8", "text": "#!/bin/python3\n\nimport sys\n\ndef cutTheSticks(arr):\n # Complete this function\n result = [len(arr)]\n\n while len(arr) > 0:\n tmp_list = []\n for i in arr:\n tmp = i - min(arr)\n\n if tmp != 0:\n tmp_list.append(tmp)\n else:\n continue\n\n if len(tmp_list) != 0:\n result.append(len(tmp_list))\n\n arr = tmp_list\n\n return result\n\nif __name__ == \"__main__\":\n n = int(input().strip())\n arr = list(map(int, input().strip().split(' ')))\n result = cutTheSticks(arr)\n print (\"\\n\".join(map(str, result)))\n" }, { "alpha_fraction": 0.5796178579330444, "alphanum_fraction": 0.6310282349586487, "avg_line_length": 
50.11627960205078, "blob_id": "7fccbad5663b2741b721dcf8084b02b1b7688e78", "content_id": "3ab45f3b32717448c9d1768205762ec5532e7d69", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 4284, "license_type": "permissive", "max_line_length": 101, "num_lines": 43, "path": "/ADsP/Day21.md", "repo_name": "KimDH94/TIL", "src_encoding": "UTF-8", "text": "# 다차원척도법 (MDS : Multi Dimensional Scaling)\n\n- 다차원 척도법은 군집분석과 같이 개체들을 대상으로 변수들을 측정한 후, 개체들 사이의 유사성/비유사성을 측정하여 개체들을 2차원 또는 3차원\n 공간상에 점으로 표현하는 분석 방법이다.\n- 목적: 군집분석은 개체들 간의 비유사성을 이용하여 동일한 그룹들로 분류하는 것이 목적인 반면, 다차원척도법은 개체들의 비유사성을 이용하여\n 2차원 공간상에 점으로 표시하고 개체들 사이의 집단화를 시각적으로 표현하는 것이 목적이다.\n- 종류\n - 계량적 MDS : 데이터가 구간척도나 비율척도인 경우 활용한다.(전통적인 다차원척도법, classical MDS). N 개의 케이스에 대해서 p개의 특성변수가 있는 경우,\n 각 개체들 간의 유클리드 거리행렬을 계산하고 개체들 간의 비유사성 S(거리제곱 행렬의 선형함수)를 공간상에 표현한다.\n - 비계량적 MDS : 데이터가 순서척도인 경우 활용. 개체들간의 거리가 순서로 주어진 경우에는 순서척도를 거리의 속성과 같도록 변환(monotone transformation)\n 하여 거리를 생성한 후 적용한다.\n- stress 와 적합도 수준 M\n - 개체들을 공간상에 표현하기 위한 방법으로 STRESS 나 S-STRESS 를 부적합도 기준으로 사용한다.\n - 최적모형의 적합은 부적합도를 최소로 하는 방법으로 일정 수준이하로 될 떄까지 반복해서 수행한다.\n \n# 주성분분석 (PCA : Principal Component Analysis)\n\n- 상관관계가 있는 변수들을 결합해 상관관계가 없는 변수로 분산을 극대화하는 변수로, 선형결합을 해 변수를 축약하는데 사용한다.\n- 요인분석 vs 주성분분석\n - 요인분석은 몇개의 요인(잠재된 변수)들을 추출하기 위해서 여러가지 방법이 사용될 수 있고, 그 중 가장 많이 사용되는 방법이 주성분분석이다.\n - 공통점 : 모두 데이터를 축소에 활용 = 많은 변수들을 몇 개의 변수로 축소한다.\n - 차이점\n - 생성된 변수의 수와 이름 : 요인분석은 몇 개ㅏ고 지정할 수 없고(3개 or 4개, 5개) 이름을 붙일 수 있으나 2개를 생성한다.(제 1주성분, 제 2주성분)\n - 생성된 변수들 간의 관계 : 요인분석은 생성된 변수들이 기본적으로 대등한 관계를 가지나 주성분분석은 제 1주성분이 가장 중요하고 제 2성분이 그 다음으로 중요\n - 목표변수와의 관계 : 요인분석은 목표변수를 고려하지 않고 주어진 변수들 간 비슷한 성격들을 묶지만 주성분분석은 목표변수를 고려하여 주성분 변수를 생성한다.\n - 주성분분석의 활용\n - 여러 변수들 간의 상관성, 연관성을 이용해서 주성분차원으로 변수를 축소한다.\n - 회귀분석이나 의사결정나무 등 모형 개발시 다중공선성이 존재할 경우, 상관도가 높은 변수를 축소한다.\n - 연관성이 높은 변수를 축소하여 군집분석을 군집화 결과, 연산속도를 개선한다.\n - 기계에서 나오는 다양한 센서데이터를 주성분분석으로 차원을 축소한 후 시계열로 분포나 추세를 분석하면 고장 징후를 사전에 파악할 수 있다.\n \n- Importance of components\n\n PC1 PC2 PC3 PC4 
PC5\n Standard deviation 1.6618 1.2671 0.7420 0.25311 0.13512\n Proportion of Variance 0.5523 0.3211 0.1101 0.01281 0.00365\n Cumulative Proportion 0.5523 0.8734 0.9835 0.99635 1.00000\n \n- R결과 해석\n - 제1주성분, 제2주성분의 누적 기여율은 87.34%\n - 제1주성분을 통한 기여율(해석율)은 55.23%이며 제2주성분을 통한 기여율(해석율)은 32.11%\n - 주성분의 개수를 정할 떄는 누적 기여율을 가지고 정하거나 고유값(eigenvalue)을 가지고 scree plot을 그려서 고유값 곡선이 수평으로 눕는 주성분에 전단계까지\n 활용한다\n" }, { "alpha_fraction": 0.6388888955116272, "alphanum_fraction": 0.644444465637207, "avg_line_length": 23, "blob_id": "2fdb5ac4eadbb770c9af9d1c879565688ff4aabc", "content_id": "43f4c46a7855f1bc297c3e7709d35451d95002ad", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 360, "license_type": "permissive", "max_line_length": 47, "num_lines": 15, "path": "/HackerRank/Implement/11.Sock_Merchant.py", "repo_name": "KimDH94/TIL", "src_encoding": "UTF-8", "text": "#!/bin/python3\n\nimport sys\nfrom collections import Counter\ndef sockMerchant(n, ar):\n # Complete this function\n colors = Counter(ar)\n result = []\n for idx,val in colors.items():\n result.append(val // 2)\n return sum(result)\nn = int(input().strip())\nar = list(map(int, input().strip().split(' ')))\nresult = sockMerchant(n, ar)\nprint(result)\n" }, { "alpha_fraction": 0.7085811495780945, "alphanum_fraction": 0.7136788368225098, "avg_line_length": 30.810810089111328, "blob_id": "5e34d4a00d1f6d7599b9cd01acf4b65f94522078", "content_id": "430f975d2b494557e53ee033fecd8d944b227237", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2305, "license_type": "permissive", "max_line_length": 91, "num_lines": 37, "path": "/ADsP/Day28.md", "repo_name": "KimDH94/TIL", "src_encoding": "UTF-8", "text": "# 텍스트마이닝\n- 1980년대 급부상하였으나 노동집약적이고 수동적인 방법으로 인해 발전이 더디다가 최근 10년 사이에 급속하게 발전\n- 텍스트로부터 고품질의 정보를 도출하는 과정으로, 입력된 텍스트를 구조화해 그 데이터에서 패턴을 도출한 후 결과를 평가/해석하는\n일련의 과정\n- 텍스트마이닝은 다양한 포맷의 문서로부터 단어의 매트릭스를 만들어 추가 분서기나 
데이터마이닝 기법을 적용해 통찰(insight)\n을 얻어 의사결정을 지원하는 방법\n- 감성분석(sentiment analysis), 워드 클라우드(word cloud), 문서의 요약(summarization), 분류(classification),\n군집(clustering), 특성 추출(feature extraction) 등에 활용\n\n# 정보 검색의 적절성\n- 정확도(precision) : 정답이라고 분석한 결과 중에서 정답인 비율\n- 재현율(recall) : 실제 정답 중 정답이락 분석 한 결과의 비율\n\n# Corpus\n- 텍스트마이닝의 절차 중 텍스트의 정제, 통합, 선택, 변환의 과정을 거친 구조화된 단계로 더 이상 추가적인 절차 없이 텍스트마이닝\n알고리즘 실험에 활용될 수 있는 상태\n- VCorpus - 메모리에서만 유지하는 Corpus\n- PCorpus - R외부의 DB나 파일로 관리되는 Corpus\n\n# R을 활용한 텍스트마이닝\n- tm_map 함수의 인자들\n - as.plainTextDocument ( XML 문서를 text 변환 )\n - stripWhitespace ( 빈칸 제거 )\n - tolower ( 대문자를 소문자로 변환 )\n - stopwords ( 띄워쓰기, 시제 변환 )\n- DocumentTermMatrix ( 문서별 특정 문자의 빈도표 생성 )\n- TermDocumentMatrix ( 단어별 문서의 빈도표 생성)\n\n# 감성분석 (snetment analysis)\n- 문장에서 사용된 단어의 긍정과 부정여부에 따라 얼마나 긍정적인 단어가 많은지 여부로 문서의 긍정/부정을 판단하는 분석이며 opinion mining\n이라고 불리기도 함\n\n# 한글 텍스트마이닝 패키지 (KoNLP)\n- 전희원 씨가 개발하였으며 한글을 통한 텍스트마이닝이 가능\n\n# 워드클라우드\n- 문서에 포함된 단어의 사용 빈도를 효과적으로 보여주기 위해 단어들의 크기, 색 등으로 표현하고 단어들을 구름과 같이 표현하는 방식\n" }, { "alpha_fraction": 0.5674418807029724, "alphanum_fraction": 0.5715762376785278, "avg_line_length": 22.047618865966797, "blob_id": "4b8c987fffe36b8d33f037f9173d3160147f6fe1", "content_id": "c4f89568644f8a2b6ddbef72daed2fa8e9fd6e1e", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1935, "license_type": "permissive", "max_line_length": 69, "num_lines": 84, "path": "/HackerRank/Implement/18.Climbing_the_Leaderboard.py", "repo_name": "KimDH94/TIL", "src_encoding": "UTF-8", "text": "#!/bin/python3\n\nimport sys\n\n\ndef climbingLeaderboard(scores, alice):\n # Complete this function\n result = []\n for i in range(m):\n scores.append(alice[i])\n tmp_list = list(set(scores))\n tmp_list.sort()\n tmp_list.reverse()\n\n result.append(tmp_list.index(alice[i]) + 1)\n return result\n\n\nif __name__ == \"__main__\":\n n = int(input().strip())\n scores = list(map(int, input().strip().split(' ')))\n m = int(input().strip())\n 
alice = list(map(int, input().strip().split(' ')))\n result = climbingLeaderboard(scores, alice)\n print (\"\\n\".join(map(str, result)))\n\n# !/bin/python3\n\nimport sys\nfrom collections import Counter\n\n\ndef climbingLeaderboard(scores, alice):\n # Complete this function\n result = []\n for i in range(m):\n scores.append(alice[i])\n\n s_counter = Counter(scores)\n a_counter = Counter(alice)\n\n tmp_result = s_counter - a_counter\n\n for a in alice:\n tmp = 1\n for x in s_counter:\n if x > a and tmp_result[x] != 0:\n tmp += 1\n else:\n continue\n result.append(tmp)\n return result\n\n\nif __name__ == \"__main__\":\n n = int(input().strip())\n scores = list(map(int, input().strip().split(' ')))\n m = int(input().strip())\n alice = list(map(int, input().strip().split(' ')))\n result = climbingLeaderboard(scores, alice)\n print (\"\\n\".join(map(str, result)))\n\n\n# !/bin/python3\n\nimport sys\nfrom collections import Counter\n\n\ndef climbingLeaderboard(scores, alice):\n # Complete this function\n\n tmp = [len(set([i for i in scores if i > a])) + 1 for a in alice]\n\n return tmp\n\n\nif __name__ == \"__main__\":\n n = int(input().strip())\n scores = list(map(int, input().strip().split(' ')))\n m = int(input().strip())\n alice = list(map(int, input().strip().split(' ')))\n result = climbingLeaderboard(scores, alice)\n print (\"\\n\".join(map(str, result)))" }, { "alpha_fraction": 0.5748730897903442, "alphanum_fraction": 0.5799492597579956, "avg_line_length": 17.325580596923828, "blob_id": "bf95b66c4085b12e18df8d079349d59eb1f27bcc", "content_id": "06bb85bca6228bd81dca2642309b7f1eb552fc96", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 788, "license_type": "permissive", "max_line_length": 150, "num_lines": 43, "path": "/HackerRank/Warmup/06.Staircase.py", "repo_name": "KimDH94/TIL", "src_encoding": "UTF-8", "text": "# Consider a staircase of size :\n#\n# #\n# ##\n# ###\n# ####\n# Observe that its 
base and height are both equal to , and the image is drawn using # symbols and spaces. The last line is not preceded by any spaces.\n#\n# Write a program that prints a staircase of size .\n#\n# Input Format\n#\n# A single integer, , denoting the size of the staircase.\n#\n# Output Format\n#\n# Print a staircase of size using # symbols and spaces.\n#\n# Note: The last line must have spaces in it.\n#\n# Sample Input\n#\n# 6\n# Sample Output\n#\n# #\n# ##\n# ###\n# ####\n# #####\n# ######\n\n#!/bin/python3\n\nimport sys\n\ndef staircase(n):\n # Complete this function\n for i in range(1,n+1):\n print(\"{}{}\".format(\" \"*(n-i),\"#\"*i))\nif __name__ == \"__main__\":\n n = int(input().strip())\n staircase(n)\n" }, { "alpha_fraction": 0.4593939483165741, "alphanum_fraction": 0.4593939483165741, "avg_line_length": 57.92856979370117, "blob_id": "02cb3a9d1cc3a4dd2b6f004c6cda4f9fbad72058", "content_id": "6255c75d363dc53cdb649fec37483e48d1ac38e0", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1289, "license_type": "permissive", "max_line_length": 101, "num_lines": 14, "path": "/Linux/Find_File.md", "repo_name": "KimDH94/TIL", "src_encoding": "UTF-8", "text": "# 파일의 위치를 찾는 방법 (find)\n- find / : root directory로부터 찾겠다\n- find . : 현재 디렉토리부터 하위 디렉토리로 찾겠다\n- < find . -type f -name tecmint.php > : 현재 디렉토리에 있는 파일을 찾는데 파일 이름은 tecmint.php이고\n 파일의 확장자를 지정할 수 있다. f로 하면 file이 되는 것이다.\n 혹시나 tecmint.php가 있더라도 찾는 목록에서 제외 됨\n- < find . -type f -name \"tecmint.txt\" -exec rm -f {} \\; > : 현재 디렉토리에서 파일을 찾는데 파일이여야 하고\n 이름은 tecmint.txt 이고 이 파일에 대해서\n exec는 실행시킨다는 의미인데 rm -f 묻지도 따지지도 말고 삭제하라\n {}는 명령을 통해서 검색한 파일의 이름이 이곳에 위치하게 됨.\n\n# whereis 와 $PATH\n- whereis 실행파일 : 첫번째에는 디렉토리가 있다. 
두번쨰에는 man ls에 대한 정보가 담겨있는 디렉토리를 보여준다\n- $PATH : (환경변수) 전체적인 경로를 보여준다\n" }, { "alpha_fraction": 0.668639063835144, "alphanum_fraction": 0.668639063835144, "avg_line_length": 32.79999923706055, "blob_id": "f0136681e88790aefd9ad185ec53576166f06f3a", "content_id": "4eae3b4504baeed3e8e8766cd6c666b775bd9ef9", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 245, "license_type": "permissive", "max_line_length": 68, "num_lines": 5, "path": "/Linux/Startup_Script.md", "repo_name": "KimDH94/TIL", "src_encoding": "UTF-8", "text": "# 쉘이 시작될 떄 어떤 명령을 실행하는 방법\n- alias l='ls -al' 이렇게 하면 l만 쳐도 'ls -al'이 된다.\n- alias c='clear' 이것도 똑같음\n\n# [관련링크](cyberciti.biz/tips/bash-aliases-mac-centos-linux-unix.html)\n" }, { "alpha_fraction": 0.670113742351532, "alphanum_fraction": 0.672182023525238, "avg_line_length": 33.53571319580078, "blob_id": "552759e79d788c845613aafdae11ab73219f985b", "content_id": "02fe1b8c3e514d9ed27366a13131c1e27c1d6cd9", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2115, "license_type": "permissive", "max_line_length": 175, "num_lines": 28, "path": "/ADsP/test02_.md", "repo_name": "KimDH94/TIL", "src_encoding": "UTF-8", "text": "# 오답노트\n## 주관식\n\n### 인터넷상의 ㅓ버에서 데이터 저장, 처리 네트워크, 콘텐츠 사용 등 서로 다른 물리적인 위치에 존재하는 컴퓨팅 자원을 가상화 기술을 통해 IT 관련 서비스를 한 번에 제공하는 혁신적인 컴퓨팅 기술은 무엇인가?\n- 클라우드 컴퓨팅\n\n### 풀어야 할 문제에 대한 상세한 설명 및 해당 문제를 해결했을 때 발생하는 효과를 명시함으로써 향후 데이터 분석 문제로의 전환 및 적합성 평가에 활용하돍 하는 것은 무엇인가?\n- 분석 유즈 케이스\n\n### 아래의 R 명령의 결과를 쓰시오\n 0/0\n- NaN (Not a Number)\n\n### 통계분석 방법에는 크게 ( A ) 와 ( B ) 가 있는데 ( A )은 수집된 자료를 이용해 대상 집단에 대한 특성값(모수)이 무엇인지를 추측하는 것을 의미하고 ( B )는 수집된 자료를 정리, 요약하기 위해 평균, 표준편차, 중위수, 최빈값 등과 다양한 그래프를 통해 대상집단을 분석하는 방법이다.\n- A : 통계적 추론\n- B : 기술 통계\n\n### 의사결정나무 중 연속형 타깃변수(또는 목표변수)를 예측하는 의사결정나무를 무엇이라고 하는가?\n- CART\n\n### 데이터마이닝 모델링 분석 기법 중 random input에 따른 forest of tree를 이용한 분류방법으로 랜덤한 forest에는 많은 트리들이 생성된다. 
새로운 오브젝트를 분류하기 위해 forest에 있는 트리에 각각 투입해 각각의 트리들이 voting함으로써 분류하는 방식의 R 패키지는 무엇인가?\n- 랜덤포레스트\n\n### 개인과 집단들 간의 관게를 노드와 링크로서 모델링해 그것의 위상구조와 확산 및 진화과정을 계량적으로 분석하는 방법론은 무엇인가?\n- 사회연결망\n\n### 텍스트 마이닝의 절차 중 데이터의 정제, 통합 선택, 변환의 과정을 거친 구조화된 단계로서 더 이상 추가적인 절차 없이 텍스트 마이닝 알고리즘 실험에서 활용될 수 있는 상태를 무엇이라 하는가?\n- corpus\n" }, { "alpha_fraction": 0.670536994934082, "alphanum_fraction": 0.6734397411346436, "avg_line_length": 35.26315689086914, "blob_id": "4c763478c6970b0918bc0fe619a243c0ebb1f121", "content_id": "882d231ec4ac9d8c53f2cc0485b43b6a5aba4288", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1345, "license_type": "permissive", "max_line_length": 73, "num_lines": 19, "path": "/Linux/CLI.md", "repo_name": "KimDH94/TIL", "src_encoding": "UTF-8", "text": "## 왜 배우기 어려운 명령어(CLI)를 사용하는 것일까? 명령어를 사용하는 이유는?\n\n# GUI\n- CLI보다 훨씬 많은 에너지를 사용\n- 쉽다는 장점이 있지만 그만큼 많은 노동이 필요함.\n\n# CLI를 사용했을 떄 얻을 수 있는 강력한 효과 1 ( sequence execution (semicolon ;))\n- mkdir why;cd why 의 경우 why라는 디렉토리를 생성하고 그 디렉토리로 이동함. (동시에 가능)\n- 어떤 명령을 실행했을 때 순차적으로 해야할 일을 적어서 컴퓨터에게 보내주면 순서대로 그 명령들을 실행하고 최종적인 결과만 알려줌\n- 그 중간과정에서 지켜볼 필요가 없다.\n\n# CLI를 사용했을 때 얻을 수 있는 강력한 효과 2 ( pipeline | )\n- 한 곳에서 다른 곳으로 전송한다는 특성을 비유해서 pipeline이라고 함. \n- 하나의 프로그램의 결과를 다른 프로그램의 입력으로. 
\n- cat 파일명 ( 파일의 내용을 화면에 출력 )\n- grep 파일명 찾고싶은내용 (파일안의 찾고싶은 내용의 문자를 보여줌)\n- ls --help | grep sort ( ls의 메뉴얼에 sort문자열이 있는 것만 보여주는 명령어 ) \n- 맥사용자는 man ls | grep sort 로 할 것.\n- 추가적으로 man ls | grep sort | grep file 처럼 교집합을 만들어서 출력도 가능하다.\n" }, { "alpha_fraction": 0.6417713165283203, "alphanum_fraction": 0.6688697934150696, "avg_line_length": 28.096153259277344, "blob_id": "4ac2585550a2097b021852783e04c25afde62627", "content_id": "fdd9a7ff502384101da4a3f4d80bee9e31e3de04", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1513, "license_type": "permissive", "max_line_length": 246, "num_lines": 52, "path": "/HackerRank/Warmup/05.Plus_Minus.py", "repo_name": "KimDH94/TIL", "src_encoding": "UTF-8", "text": "# Given an array of integers, calculate which fraction of its elements are positive, which fraction of its elements are negative, and which fraction of its elements are zeroes, respectively. Print the decimal value of each fraction on a new line.\n#\n# Note: This challenge introduces precision problems. 
The test cases are scaled to six decimal places, though answers with absolute error of up to are acceptable.\n#\n# Input Format\n#\n# The first line contains an integer, , denoting the size of the array.\n# The second line contains space-separated integers describing an array of numbers .\n#\n# Output Format\n#\n# You must print the following lines:\n#\n# A decimal representing of the fraction of positive numbers in the array compared to its size.\n# A decimal representing of the fraction of negative numbers in the array compared to its size.\n# A decimal representing of the fraction of zeroes in the array compared to its size.\n# Sample Input\n#\n# 6\n# -4 3 -9 0 4 1\n# Sample Output\n#\n# 0.500000\n# 0.333333\n# 0.166667\n\n#!/bin/python3\n\nimport sys\n\ndef plusMinus(arr):\n # Complete this function\n plus = 0\n minus = 0\n zero = 0\n\n for i in arr:\n if int(i) > 0:\n plus += 1\n elif int(i) < 0:\n minus += 1\n elif int(i) == 0:\n zero += 1\n print(round((plus/n),6))\n print(round((minus/n),6))\n print(round((zero/n),6))\n\n\nif __name__ == \"__main__\":\n n = int(input().strip())\n arr = list(map(int, input().strip().split(' ')))\n plusMinus(arr)\n" }, { "alpha_fraction": 0.546558678150177, "alphanum_fraction": 0.5627530217170715, "avg_line_length": 14.4375, "blob_id": "d62e26391f106015d5994b10665ee26e7be4b231", "content_id": "fa7154f175308c8824c7d7709ae5136436c6acff", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 247, "license_type": "permissive", "max_line_length": 28, "num_lines": 16, "path": "/HackerRank/Implement/30.Extra_Long_Factorials.py", "repo_name": "KimDH94/TIL", "src_encoding": "UTF-8", "text": "#!/bin/python3\n\nimport sys\n\ndef extraLongFactorials(n):\n # Complete this function\n\n tmp = 1\n\n for i in range(1,n+1):\n tmp = tmp * i\n print(tmp)\n\nif __name__ == \"__main__\":\n n = int(input().strip())\n extraLongFactorials(n)\n" }, { "alpha_fraction": 0.43875277042388916, 
"alphanum_fraction": 0.4532293975353241, "avg_line_length": 33.53845977783203, "blob_id": "6f9e1b1c362b3c63e771b062f2550593cf479f85", "content_id": "eac5b7486d677195316042f6a2d4c778eb45b964", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1242, "license_type": "permissive", "max_line_length": 65, "num_lines": 26, "path": "/ADsP/Day13.md", "repo_name": "KimDH94/TIL", "src_encoding": "UTF-8", "text": "# 데이터 구조\n\n- 벡터 > 리스트 : as.list(vec) \n- 벡터 > 행렬 : \n- (1열짜리 행렬) cbind(vec) 또는 as.matrix(vec)\n (1행짜리 행렬) rbind(vec)\n (n * m 행렬) matrix(vec,n,m)\n- 벡터 > 데이터프레임 : \n- (1열짜리 데이터프레임) as.data.frame(vec\n (1행짜리 데이터프레임) as.data.frame(rbind(vec))\n- 리스트 > 벡터 : unlist(lst)\n- 리스트 > 행렬 : \n- (1열짜리 행렬) as.matirx(lst)\n (1행짜리 행렬) as.matrix(rbind(lst))\n (n * m 행렬) matrix(lst,n,m)\n- 리스트 > 데이터 프레임 : \n (목록 원소들이 데이터의 열이면) as.data.frame(lst)\n (리스트 원소들이 데이터의 행이면) rbind(obs[[1]],obs[[2]]\n- 행렬 > 벡터 : as.vector(ma at)\n- 행렬 > 리스트 : as.list(mat)\n- 행렬 > 데이터 프레임 : as.data.frame(mat)\n- 데이터 프레임 > 벡터 : \n (1열짜리 데이터 프레임) dfm[[1]] or dfm[,1]\n (1행짜리 데이터 프레임) dfm[1,]\n- 데이터 프레임 > 리스트 : as.list(dfm)\n- 데이터 프레임 > 행렬 : as.matrix(dfm)\n" }, { "alpha_fraction": 0.5860214829444885, "alphanum_fraction": 0.5967742204666138, "avg_line_length": 22.25, "blob_id": "0a78ef6975b76d900d77e4942a1e5cb05b6c97f0", "content_id": "dcaf58886ed7ce5292238cabe05662d5f460120b", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 372, "license_type": "permissive", "max_line_length": 56, "num_lines": 16, "path": "/HackerRank/Implement/17.Picking_Numbers.py", "repo_name": "KimDH94/TIL", "src_encoding": "UTF-8", "text": "#!/bin/python3\n\nimport sys\nfrom collections import Counter\ndef pickingNumbers(a):\n # Complete this function\n a = Counter(a)\n result_set = [(a[i] + a[i-1]) for i in range(2,n+1)]\n\n return max(result_set)\n\nif __name__ == \"__main__\":\n n = int(input().strip())\n a = 
list(map(int, input().strip().split(' ')))\n result = pickingNumbers(a)\n print(result)\n" }, { "alpha_fraction": 0.48076921701431274, "alphanum_fraction": 0.48076921701431274, "avg_line_length": 28.1200008392334, "blob_id": "07a56b1812ad82ae02cade7a1aa123d4bc3343d2", "content_id": "9996341abf6f1d1e013a137ac75bc56371436dbb", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1336, "license_type": "permissive", "max_line_length": 69, "num_lines": 25, "path": "/ADsP/Day08.md", "repo_name": "KimDH94/TIL", "src_encoding": "UTF-8", "text": "# R프로그래밍 언어\n\n- R은 통해 계산과 그래픽을 위한 프로그래밍 언어이자 소프트웨어 환경이다.\n- 뉴질랜드 오클랜드 대학의 로스 이하카와 로버트 젠틀맨에 의해 시작되어 현재는 R 코어 팀이 개발하고 있다.\n- R은 GPL (General Public Licence) 하에 배포되는 S프로그래밍 언어로 구현되어 GNU S라고 한다.\n\n# R의 특징\n\n- 표준 플랫폼 (S 언어 기반)\n- 모든 운영체제에서 사용 가능 (맥, 리눅스, 윈도우)\n- 메모리 저장방식\n- 객체지향언어이며 함수형 언어\n- 오픈소스 프로그램으로 무료\n\n# 통계 분석도구의 비교\n\n\n SAS SPSS 오픈소스 R\n\n 프로그램 비용 유료, 고가 유료, 고가 오픈소스, 공짜\n 설치용량 대용량 대용량 모듈화로 간단\n 다양한 모듈 지원 및 비용 별도구매 별도구매 오픈소스\n 최근 알고리즘 및 기술반영 느림 다소느림 매우빠름\n 학습자료 입수의 편의성 유료도서 위주 유료도서 위주 공개 논문 및 자료 많음\n 질의를 위한 공개 커뮤니티 NA NA 매우 활발\n" }, { "alpha_fraction": 0.6096256971359253, "alphanum_fraction": 0.6345810890197754, "avg_line_length": 33, "blob_id": "8d1dc1909b196c61ef5923e814c9d4b5f8d60c2f", "content_id": "b6945f43bf5942077115e5267761a7965e95bf5e", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1783, "license_type": "permissive", "max_line_length": 119, "num_lines": 33, "path": "/Linux/Permission.md", "repo_name": "KimDH94/TIL", "src_encoding": "UTF-8", "text": "# Permission (basic)\n- 파일과 디렉토리에 대해서 읽기, 쓰기, 실행을 할 수 있도록 하는 것.\n- Read, Write, Execute\n- 소유자가 아닌 user가 다른 user의 파일을 수정하려고 할 때 permission denied 가 뜬다.\n- read = cat,nano 명령어, write = echo 문자 > 'file명'\n- -rw- rw- r-- :(access mode라고 함) 첫번쨰는 소유자(owner)의 권한, 두번째는 그룹(group)의 권한, 3번째는 other의 권한\n\n# chmod (권한을 변경하는 방법)\n- 다른 사람들이 읽지 못하도록 하고자 할 
때\n- chmod o-r <파일명> / 그 반대의 경우 chmod o+r <파일명>\n- 다른 사람들이 쓰기 권한을 할 수 있도록 할 때\n- chmod o+w <파일명>\n- 파일의 소유자는 파일을 읽을 수 없게 한다면? \n- chmod u-r <파일명>\n- chmod u+r <파일명>\n\n# 실행의 개념과 권한 설정\n- 어떤 파일에 대해서 실행가능한 파일로 할 것이냐, 실행을 막을 것이냐를 지정하는 것\n- ./<파일명> ( ./ = 현재 디렉토리에 있는 ) : 실행시키다\n- chmod u+x <파일명>\n\n# directory의 권한\n- other에 대해서 읽기의 권한을 없애려면\n- chmod o-r <디렉토리명>\n- 실행권한을 뺀 디렉토리는 들어갈 수 없다\n- 디렉토리안에 디렉토리가 있는 경우 : 한꺼번에 mod를 바꾸고싶다면 chmod -R o+w <디렉토리명> (-R = Recursive)\n\n# chmod 사용법 정리\n- chmod 111 <파일명> 하면  이 파일은 --x --x --x 가 된다. (Octal modes로 인해)\n- Octal modes 첫번째 방법\n- <img src = \"https://i.stack.imgur.com/9fEAm.png\" alt = \"Octal modes\" width = \"240\" height = \"240\">\n- class 두번째 방법\n- <img src = \"https://bpsecblog.files.wordpress.com/2016/09/ci20.jpg?w=670\" alt = \"Class\" width = \"240\" height = \"240\">\n" }, { "alpha_fraction": 0.625268816947937, "alphanum_fraction": 0.6301075220108032, "avg_line_length": 40.33333206176758, "blob_id": "27756796816c4629f35bd8172a344ca862247a94", "content_id": "1fb71b47c3bf73d5e1634649b9d19f421f172478", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 3888, "license_type": "permissive", "max_line_length": 79, "num_lines": 45, "path": "/ADsP/Day27.md", "repo_name": "KimDH94/TIL", "src_encoding": "UTF-8", "text": "# 연관분석(Association Analysis)\n- 기업의 데이터베이스에서 상품의 구매, 서비스 등 일련의 거래 또는 사건들 간의 규칙을 발견하기 위한 분석.\n흔히 장바구니 분석(Market Basket Analysis), 순차분석(Sequence Analysis) 등이 있다.\n- 장바구니 분석 : 장바구니에 무엇이 같이 들어 있는지에 대해 분석\n - 예1.: 주말을 위해 목요일에 기저귀를 사러 온 30대 직장인 고객은 맥주도 함께 사간다.\n - 예2.: 이전에 동일한 제조사의 전자제품을 주로 구매했던 고객은 신제품 구매에서도 동일한 회사의 제품을 구매한다.\n- 순차분석 : 구매이력을 분석해서 A품목을 산 후 추가 B품목을 사는지를 분석\n - 예1.: 가죽 자켓을 구매한 여성은 한달 내에 가죽부츠를 구매한다.\n - 예2.: 휴대폰을 새로 구매한 고객은 한달 내에 휴대폰 케이스를 구매한다.\n \n# 연관규칙 개념\n- 조건과 반응의 형태 (if-then)\n- (Item set A) => (Item set B)\n IF A then B : 만일 A가 일어나면 B가 일어난다.\n\n# 연관분석의 측도\n- 산업의 특성에 따라 지지도, 신뢰도, 향상도 값을 잘 보고 규칙을 선택해야 한다.\n- 지지도(support) : 전체 거래 중 항목 A와 
항목 B를 동시에 포함하는 거래의 비율\n- 지지도 = A 와 B가 동시에 포함된 거래수 / 전체 거래수\n- 신뢰도 (confidence) : 항목 A를 포함한 거래 중에서 항목 A와 항목 B가 같이 포함될 확률. 연관성의 정도를 파악할 수 있다.\n- 신뢰도 = A 와 B가 동시에 포함된 거래수 / A를 포함한 거래수\n- 향상도 (lift) : A가 주어지지 않았을 때의 품목 B의 확률에 비해 A가 주어졌을 때의 품목 B의 확률의 증가 비율이다.\n연관규칙 A => B 는 품목 A와 품목 B의 구매가 서로 관련이 없는 경우에 향상도가 1이 된다.\n- 향상도 = A 와 B가 동시에 포함된 거래수 / A를 포함하는 거래수 X B를 포함하는 거래수\n\n# 연관분석 특징\n- 절차\n - 최소 지지도 (min support)를 선정 - 5% 시작\n - 품목 중 최소 지지도를 넘는 품목을 분류\n - 2가지 품목 집합 생성\n - 반복적으로 수행해 빈발품목 집합을 선정\n- 장점\n - 탐색적인 기법 : 조건반응(if-then)으로 표현되는 연관성분석의 결과를 쉽게 이해할 수 있음\n - 강력한 비목적성 분석기법 : 분석 방향이나 목적이 특별히 없는 경우 목적변수가 없으므로 유용하게 활용 됨\n - 사용이 편리한 분석 데이터의 형태 : 거래 내용에 대한 데이터를 변환 없이 그 자체로 이용\n - 계산의 용이성 : 분석을 위한 계산이 상당히 간단\n- 단점\n - 상당한 수의 계산과정 : 품목수가 증가하면 분석에 필요한 계산은 기하급수적으로 늘어남\n - 유사한 품목을 한 범주로 일반화\n - 연관 규칙의 신뢰도 하한을 새롭게 정의해 실제적으로 데이터에서 드물게 관찰되는 의미가 적은 연관규칙은 제외\n - 적절한 품목의 결정 : 너무 세분화한 품목을 갖고 연관성 규칙을 찾으면 수많은 연관성 규칙들이 발견되겠지만,\n 실제로 발생 비율면에서 의미 없는 분석이 될 수도 있음\n - 적절히 구분되는 큰 범주로 구분해 전체 분석에 포함시킨 후 그 결과 중에서 세부적으로 연관을 찾는 작업을 수행할 수 있음\n - 품목의 비율차이 : 사용될 모든 품목들 자체가 전체자료에서 동일한 빈도를 갖는경우, 연관성 분석은 가장 좋은 결과를 얻음.\n 그러나 거래량이 적은 품목은 당연히 포함된 거래수가 적을 것이고 규칙 발견 과정 중에서 제외되기 쉬움\n" }, { "alpha_fraction": 0.6449671983718872, "alphanum_fraction": 0.6455142498016357, "avg_line_length": 44.70000076293945, "blob_id": "d3335b3dd75d87ee4ae681a46f89c6918afc541a", "content_id": "65e5de550d25146b53ee24bcfed894ec3f1d48b2", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 3426, "license_type": "permissive", "max_line_length": 97, "num_lines": 40, "path": "/ADsP/Day05.md", "repo_name": "KimDH94/TIL", "src_encoding": "UTF-8", "text": "# 분석 과제 발굴\n\n- 하향식 접근 방식 (Top Down Approach) : 문제가 주어지고 이에 대한 해법을 찾기 위하여 각 과정이 체계적으로 단계화 되어 수행하는 방식\n- 상향식 접근 방식 (Bottom Up Approach) : 문제의 정의 자체가 어려운 경우 데이터를 기반으로 문제의 재정 및 해결방안을 탐색하고\n 이를 지속적으로 개선하는 방식\n \n- Optimization , Solution 파트는 Top-Down\n- Insight , Discovery 파트는 Bottom-Up\n\n# 하향식 접근 방식\n\n- Problem Discovery > 
Problem Definition > Solution Searc > Feasibility Study\n- 문제 탐색 (Problem Discovery) \n - 비즈니스 모델 기반 문제 탐색 : 업무(operation), 제품(product), 고객(customer),\n 규제와 감사 (regulation & audit), 지원 인프라(IT & human resource) 등 5가지 영역으로\n 기업의 비즈니스를 분석\n - 거시적 과점의 메가 트렌드에서 해당 산업에 폭넓게 영향을 미치는 사회, 경제적 요인을 STEEP으로 요약하여 사회(Social), 기술(Technological),\n 경제(Economic), 환경(Environment), 정치(Political)으로 폭넓게 분석 기회 도출\n - 경쟁자 확대 관저에서 사업 영역의 직접 경쟁자 및 제품, 서비스 뿐만 아니라 대체재와 신규 진입자 등으로 확대하여 분석 기회 도출\n - 시장의 니즈 탐색 관점에서는 사업에서의 고객 영역, 채널 영역, 영향자들 영역으로 분석 기회를 도출\n - 역량의 재해석 관점에서는 내부 역량(competency)영역, 파트너와 네트워크(partners & network) 영역에서 분석 기회를 도출\n - 외부 참조 모델 기반의 문제 탐색 : 유사, 동종 사례를 벤치마킹을 통해 분석 기회를 발굴\n - 분석 유즈 케이스(Analyics Use Case) 정의\n- 문제 정의 (Problem Definition) : 비즈니스 문제를 데이터와 분석 문제로 변환하여 정의하는 단계\n- 해결방안 탐색 (Solution Search) : 분석역량(Who), 분석기법 및 시스템(How)로 해결방안 탐색\n- 타당성 검토 (Feasibility Study) : 경제적 타당성, 데이터 및 기술적 타당성\n\n# 상향식 접근법\n\n- 기업이 보유하고 있는 다양한 원천 데이터로부터 분석을 통하여 통찰력과 지식을 얻는 접근 방법\n- 다양한 원천 데이터를 대상으로 분석을 수행하여 가치 있는 모든 문제를 도출하는 일련의 과정\n- 기존의 하향식 접근법은 논리적 단계별 접근법으로 최근의 복잡하고 다양한 환경에서 발생하는 문제를 해결하기 어려워 디자인적 사고(Design Thinking)접근법\n 을 통해 Why > What 관점으로 객관적으로 존재하는 데이터 그 자체를 관찰하여 문제를 해결하려는 접근\n- 비지도 학습 방법으로 수행되며, 데이터 자체의 결합, 연관성, 유사성을 중심으로 접근\n- 시행착오를 통한 문제 해결 : 프로토타이핑 접근법\n- 빅데이터 분석 환경에서 프로토타이핑 접근법의 필요성 대두\n\n# 분석과제 정의\n\n- 분석 과제 정의서를 통해 분석별 필요 소스 데이터, 분석 방법, 데이터 입수 및 분석의 난이도, 분석 수행주기, 검증 오너십 상세 분석 과정 등을 정의\n" }, { "alpha_fraction": 0.7223650217056274, "alphanum_fraction": 0.7236503958702087, "avg_line_length": 34.3636360168457, "blob_id": "dc4fae7e628844d3447d7306fd3b97bd0df034c5", "content_id": "560a1446fc941fee25d43595d4fee7563db325d5", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1636, "license_type": "permissive", "max_line_length": 79, "num_lines": 22, "path": "/ADsP/Day24.md", "repo_name": "KimDH94/TIL", "src_encoding": "UTF-8", "text": "# 앙상블기법 (ensemble)\n- 주어진 자료로부터 여러 개의 예측모형들을 만든 후 조합하여 하나의 최종 예측모형을 만드는 방법\n- 다중 모델 
조합(combining multiple models), classifier combination 방법이 있음\n- 학습방법의 불안정성을 해결하기 위해 고안된 기법\n- 가장 불안정성을 가지는 기법은 의사결정나무, 가장 안정성을 가지는 기법은 1 -nearest neighbor\n\n# 배깅(bagging : bootstrap aggregating)\n- 여러 개의 붓스트랩 자료를 생성하고 각 붓스트랩 자료의 예측모형 결과를 결합하여 결과를 선정\n- 배깅은 훈련자료를 모집단으로 생각하고 평균 예측모형을 구한 것과 같아 분산을 줄이고 예측력을 향상시킬 수 있음\n\n# 부스팅(boosting)\n- 예측력이 약한 모형(weak learner)들을 결합하여 강한 예측모형을 만드는 방법\n- 훈련오차를 빨리 그리고 쉽게 줄일 수 있고, 예측오차의 향상으로 배깅에 비해 뛰어난 예측력을 보임\n\n# 랜덤 포레스트(Random forest)\n- 의사결정나무의 특징인 분산이 크다는 점을 고려하여 배깅과 부스팅보다 더 많은 무작위성을 주어 약한 학습기들을 생성한 후 이를 선형 결합하여\n 최종 학습기를 만드는 방법\n- 이론적 설명이나 해석이 어렵다는 단점이 있지만 예측력이 매우 높은 장점이 있음\n- 입력변수가 많은 경우 더 좋은 예측력을 보임\n\n# 스태킹(stacking)\n- 동일한 타입의 모델을 조합하는 배깅, 부스팅과는 달리 다양한 학습 모델을 통해 구성\n" }, { "alpha_fraction": 0.7046728730201721, "alphanum_fraction": 0.7084112167358398, "avg_line_length": 41.79999923706055, "blob_id": "800f04f3fa7c171852a79e487e5326083ede39c8", "content_id": "68a74890d4d40bc4e39b2137a10b64fba1208160", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2482, "license_type": "permissive", "max_line_length": 147, "num_lines": 25, "path": "/ADsP/test03_.md", "repo_name": "KimDH94/TIL", "src_encoding": "UTF-8", "text": "# 오답노트\n## 주관식\n\n### 구글에서 대용량 데이터 처리를 분산 병렬 컴퓨팅에서 처맇기 위한 목적으로 제작하여 2004년 발표한 소프트웨어 프레임워크로 페타바이트 이상의 대용량 데이터를 신뢰도가 낮은 컴퓨터로 구성된 클러스터 환경에서 병렬 처리를 지원하기 위해서 개발된 것은 무엇인가?\n- 맵 리듀스\n\n### 분석 활용 시나리오와 분석 체계를 보다 상세히 나타내는 방법으로서 분석별로 필요한 소스데이터, 분석방법, 데이터 입수 및 분석의 난이도, 분석수행 주기, 분석결과에 대한 검증 오너십, 상세 분석 과정을 정의하는 방법은 무엇인지 기입하시오.\n- 분석정의서\n\n### 사용자가 요구사항이나 데이터를 정확히 규정하기 어렵고 데이터 소스도 명확히 파악하기 어려운 상황에서 일단 분석을 시도해 보고 그 결과를 확인해 가면서 반복적으로 개선해 나가는 방법은 무엇인가?\n- 프로토타이핑 접근법\n\n### 아래의 설명이 나타내는 척도는 무엇인가? \n     자료의 위치를 나타내는 척도의 하나로 관측치를 크기 순으로 배열하였을 때 전체의 중앙에 위치한 수치이다. 평균에 비해 이상치에 의한 영향이 적기 때문에 \n 자료의 분포가 심하게 비대칭인 경우 중심을 파악할 때 합리적인 방법이다\n- 중앙값\n\n### 암 연구소에서 환자들을 대상으로 암을 예측하고자 하는 분류 문제를 해결하기 위해 학습데이터를 활용한 모델을 개발하였다. 
테스트 데이터를 활용하여 모델의 성능을 평가하고자 할 때 아래의 분류표를 활용하여 모델의 정확도를 계산하시오.\n- TT + FF / 전체\n\n### 데이터마이닝의 중심이 되는 학습(learning) 방법 중 자료가 입력변수와 출력변수로 주어지며 입력변수와 출력변수의 함수적 의존 관계를 자료로부터 추정함으로써 예측모형을 얻을 때 사용되는 학습방법은 무엇인가?\n- 지도학습(supervised learning)\n\n### 텍스트마이닝에서 문서에서 문장 내에 포함된 다어들을 어간과 어미로 분리 하여 각 문서마다 사용된 단어의 어간들의 빈도를 표현하는 행렬을 만들 수 있다. R프로그램을 통해 이러한 행렬을 만들고자 할 때 활용하는 함수는 무엇인가?\n- DocumnetTermMatrix()\n" }, { "alpha_fraction": 0.6470588445663452, "alphanum_fraction": 0.6539792418479919, "avg_line_length": 31, "blob_id": "080a7af4d5959d2325a0b6968d2bac82adad6119", "content_id": "5252ab95a08aed503b67a1e6e475368cd3f71d4b", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 473, "license_type": "permissive", "max_line_length": 58, "num_lines": 9, "path": "/Linux/Daemon_service.md", "repo_name": "KimDH94/TIL", "src_encoding": "UTF-8", "text": "# Daemon\n- ls, mkdir, rm 이 데몬은 아니다. 사용자가 필요할 떄 쓰고 안쓰고 함. ( TV 같은 )\n- 데몬은 항상 켜져있다. ( 냉장고 ) (ex : Server) Daemon Service\n\n# 언제나 실행되어야 하는 데몬을 크고 끼는 법\n- sudo service <프로그램명> start\n- ps aux : 현재 실행되고 있는 프로그램의 리스트\n- ps aux | grep apache2 를 치면 apache2에 해당되는 것을 보여줌.\n- sudo service <프로그램명> stop\n\n" }, { "alpha_fraction": 0.6245954632759094, "alphanum_fraction": 0.6294498443603516, "avg_line_length": 31.526315689086914, "blob_id": "c42907306454b50e03713181f49a1a86b2f6cd6b", "content_id": "5e91e822016ae087d22791a9345fbb8bd61a1c80", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2374, "license_type": "permissive", "max_line_length": 95, "num_lines": 38, "path": "/ADsP/Day15.md", "repo_name": "KimDH94/TIL", "src_encoding": "UTF-8", "text": "# 요약변수\n\n- 정의 : 수집된 정보를 분석에 맞게 종합하는 변수, 데이터 마트에서 가장 기본적인 변수로 총구매 금액, 횟수, 구매여부 등이 있으며\n 많은 모델이 공통으로 사용될 수 있어 재활용성이 높다.\n- 기간별 구매 금액, 횟수, 여부/ 위클리 쇼퍼/ 상품별 금액, 횟수, 여부/ 상품별 구매 순서/ 유통 채널별 구매 금액/ 단어 빈도/ 초기 행동변수\n 트랜스 변수/ 결측값과 이상값 처리/ 연속형 변수의 구간화\n \n# 파생변수\n\n- 정의 : 사용자가 특정 조건을 만족하거나 특정 
함수에 의해 값을 만들어 의미를 부여하는 변수로 매우 주관적일 수 있으므로 논리적 타당성을 갖출\n 필요가 있다.\n- 근무시간 구매지수/ 주 구매 매장 변수/ 주 활동 지역 변수/ 주 구매 상품 변수/ 구매상품 다양성 변수/ 선호하는 가격대 변수/ 시즌 선호 고객 변수\n 라이프 스테이지 변수/ 라이프스타일 변수/ 행사민감 변수/ 휴면가망 변수/ 최대가치 변수/ 최적 통화시간 등\n \n# reshape 패키지\n\n- 2개의 핵심 함수로 구성되어 있다.\n - melt() : 데이터를 DB구조로 녹이는 함수\n - cast() : 새로운 구조로 데이터를 만드는 함수\n \n# sqldf 패키지\n\n- R에서 sql 명령ㅇ를 사용가능하게 해주는 패키지로 SAS의 proc sql과 같은 기능이다.\n- head([df]) : sqldf( \"select * from [df] limit 6\" )\n- subset([df], [col] %in% c(\"BF\",\"HF\") : sqldf(\"select * from [df] where [col] in('BF', 'HF')\")\n- merge([df1], [df2]) : sqldf(\"select * from [df1], [df2]\")\n\n# plyr 패키지\n\n- apply 함수를 기반으로 데이터와 출력변수를 동시에 배열로 치환하여 처리하는 패키지로 split - apply - combine 방식으로 데이터를 분리하고 처리한\n 다음, 다시 결합하는 등 필수적인 데이터 처리기능을 제공한다.\n \n# data.table 패키지\n\n- R에서 가장 많이 사용하는 데이터 핸들링 패키지 중 하나로 대용량 데이터의 탐색, 연산, 병합에 유용하다.\n- 기존 data.frame 방식보다 월등히 빠른 속도이다.\n - 특정 column을 key값으로 색인을 지정한 후 데이터를 처리\n - 빠른 그루핑과 ordering, 짧은 문장 지원 측면에서 데이터프레임 보다 유용\n" }, { "alpha_fraction": 0.36894410848617554, "alphanum_fraction": 0.3863354027271271, "avg_line_length": 20.756755828857422, "blob_id": "37c4d7b0e359c98f500cca538290a9cc963d82e8", "content_id": "2b2d0ac89eb9064c606f38530a4e4f28ad79bb40", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 805, "license_type": "permissive", "max_line_length": 50, "num_lines": 37, "path": "/HackerRank/Implement/04.Between_Two_Sets.py", "repo_name": "KimDH94/TIL", "src_encoding": "UTF-8", "text": "#!/bin/python3\n\nimport sys\n\ndef getTotalX(a, b):\n # Complete this function\n result = []\n\n if max(a) < min(b):\n\n for x in range(max(a), min(b)+1):\n tmp = 0\n for i in range(n):\n if x % a[i] != 0:\n tmp += 1\n\n if tmp == 0:\n tmp_2 = 0\n for j in range(m):\n if b[j] % x != 0:\n tmp_2 += 1\n\n\n if tmp_2 == 0:\n result.append(b[j])\n\n return len(result)\n else:\n return 0\n\nif __name__ == \"__main__\":\n n, m = input().strip().split(' ')\n n, m = [int(n), 
int(m)]\n a = list(map(int, input().strip().split(' ')))\n b = list(map(int, input().strip().split(' ')))\n total = getTotalX(a, b)\n print(total)\n" }, { "alpha_fraction": 0.6553672552108765, "alphanum_fraction": 0.6694915294647217, "avg_line_length": 24.285715103149414, "blob_id": "90e07b0eb3732fc5ab32d27c00d1374100b46028", "content_id": "deeb5ecf37f448432eaa1c2ca6e4d95c6484a6aa", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 354, "license_type": "permissive", "max_line_length": 54, "num_lines": 14, "path": "/HackerRank/Implement/08.Migratory_Birds.py", "repo_name": "KimDH94/TIL", "src_encoding": "UTF-8", "text": "#!/bin/python3\n\nimport sys\nfrom collections import Counter\ndef migratoryBirds(n, ar):\n # Complete this function\n counter = Counter(ar)\n result = [counter[idx] for idx in range(1, 5 + 1)]\n\n return result.index(max(result))+1\nn = int(input().strip())\nar = list(map(int, input().strip().split(' ')))\nresult = migratoryBirds(n, ar)\nprint(result)\n" }, { "alpha_fraction": 0.6201257705688477, "alphanum_fraction": 0.6446540951728821, "avg_line_length": 37.75609588623047, "blob_id": "0e63eb2b8bf965ea78c9a090a5c3434966cae5ef", "content_id": "d9da2d00ea799385bd7d7c203ac2ddc06ced1abb", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 3233, "license_type": "permissive", "max_line_length": 316, "num_lines": 41, "path": "/ADsP/test04_.md", "repo_name": "KimDH94/TIL", "src_encoding": "UTF-8", "text": "# 오답노트\n## 주관식\n\n### 아래에서 언급한 이것은 무엇인가?\n- 이것은 인공신경망의 한계를 극복하기 위해 제안된 심화 신경망(Deep Neural Network)를 활용한 기계학습 방법이다. 기존의 인경신경망은 높은 분해 정확도에 비해 속도가 느린 것이 단점이었다. 게다가 과적합(Overfitting)도 웬만해선 해결되지 않는 과제였다. 이 때문에 비교적 오랜 기간 실무에선 외면당하기도 했다. 하지만 최근 들어 이분야를 깊숙이 고민해온 연구자들이 그에 대한 해법을 내놓으면서 다시 각광을 받기 시작했다. 그 핵심에 토론토 대학교의 제프리 힌튼 교수, 뉴욕대학교의 안 리쿤 교수 스탠포드 대학교의 앤드류 융 교수 등이 있다.\n- 딥 러닝\n\n### 1Gbps는 1초에 대략 1기가바이트의 데이터를 전달할 수 있는 속도를 나타낸다. 
1Gbps의 속도를 제공하는 통신망을 통해 1페타바이트 크기의 데이터를 전송하는데 걸리는 시간은 대략 얼마인가. 초단위로 답하시오\n- 1 = 1024페타 >> 1024 * 1024\n\n### 아래에서 설명하고 있는 것은?\n- 가. 기업의 전사 또는 개별 업무별 주요 의사결정 포인트에 활용할 수 있는 분석의 후보들이다\n- 나. Analytics를 적용하였을 때 업무 흐름을 개념적으로 설명한 것으로 일반적으로 '유즈케이스(UseCase)' 라고도 표현한다\n- 다. 비즈니스 모델을 구성하는 이론을 설명한다\n- 라. 하나 이상의 분석을 포함한다\n - 분석 유즈케이스\n \n### 아래의 ( ㄱ )에 들어갈 단어로 적절한 것은?\n- 통계적 추론에서 ( ㄱ ) 검정은 자료와 추출된 모집단의 분포에 대해 아무 제약을 가하지 않고 검정을 실시하는 검정방법으로, 관측된 자료가 특정 분포를 따른다고 가정할 수 없는 경우에 이용된다\n - 비모수\n \n### 상관분석은 데이터 안의 두 변수간의 관계를 알아보기 위해 사용한다. 두 변수간의 상관관계를 알아보기 위해 상관계수를 이용한다. 상관계수 중 서열척도인 변수간의 상관관계를 측정하는데 사용하는 상관계수\n- 스피어만 상관계수\n\n### 시계열자료를 분석하는 목적 중 하나는 과거의 패턴이 유지된다는 가정하에서, 현재까지 수집된 자료를 분석하여 미래에 대한 예측을 하는 것이다. 이를 위해 전체 자료를 이용하는 대신 최근 m개의 관측값들만의 평균을 구하여 직엽적인 변동을 제거하여 장기적인 추세를 쉽게 파악할 수 있는 방법은?\n- 자기회귀모형(AR모형)\n\n### 아래의 트랜젝션에서 추출된 연관규칙 중 하나인 \"BC\"의 신뢰도는?\n Transaction #1 {A,B,C}\n Transaction #2 {A,B,D}\n Transaction #3 {A,B}\n Transaction #4 {B,C}\n Transaction #5 {A,B,C,D}\n Transaction #6 {E}\n- (B와 C가 포함된 거래 수) / (B가 포함된 거래 수) = 3 / 5\n\n### 아래 두 개체 A와 B 사이의 유클리디안 거리(Euclidean Distance)는?\n 개체    변수1   변수2\n A 3 4\n B 6 8\n- 루트{(4-3)^2 + (8-6)^2} = 루트{5}\n\n" }, { "alpha_fraction": 0.5260736346244812, "alphanum_fraction": 0.5368098020553589, "avg_line_length": 24.076923370361328, "blob_id": "95b64a98acaf429956cb6b6039b7998c97915210", "content_id": "c544f350b070797f400e938d50ce0ad376d76dc7", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 652, "license_type": "permissive", "max_line_length": 54, "num_lines": 26, "path": "/HackerRank/Implement/05.Breaking_the_Records.py", "repo_name": "KimDH94/TIL", "src_encoding": "UTF-8", "text": "#!/bin/python3\n\nimport sys\n\ndef breakingRecords(score):\n # Complete this function\n max_score = score[0]\n min_score = score[0]\n max_change = 0\n min_change = 0\n for idx in range(n):\n if score[idx] > max_score:\n max_score = score[idx]\n max_change += 1\n elif score[idx] < min_score:\n 
min_score = score[idx]\n min_change += 1\n else:\n continue\n print(max_change, min_change)\n\nif __name__ == \"__main__\":\n n = int(input().strip())\n score = list(map(int, input().strip().split(' ')))\n result = breakingRecords(score)\n print (\" \".join(map(str, result)))\n" }, { "alpha_fraction": 0.6010638475418091, "alphanum_fraction": 0.6170212626457214, "avg_line_length": 22.5, "blob_id": "f024eff640a36ba4ebf0024b4fc3648db4fecc7d", "content_id": "7c05ebc315faea8d77b40438dd8b4a169d04f918", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 376, "license_type": "permissive", "max_line_length": 65, "num_lines": 16, "path": "/HackerRank/30_days_of_Code/03.Operators.py", "repo_name": "KimDH94/TIL", "src_encoding": "UTF-8", "text": "import sys\ndef Operators():\n meal_cost = float(input().strip())\n tip_percent = int(input().strip())\n tax_percent = int(input().strip())\n\n tmp_tip = meal_cost * tip_percent / 100\n\n tmp_tax = meal_cost * tax_percent/100\n\n\n result = int(round(meal_cost + tmp_tip + tmp_tax))\n\n return result\n\nprint(\"The total meal cost is \" + str(Operators()) + \" dollars.\")\n" }, { "alpha_fraction": 0.43478259444236755, "alphanum_fraction": 0.45652174949645996, "avg_line_length": 18.16666603088379, "blob_id": "3773f7bc6166a473784d46efb57b10f5047a8e27", "content_id": "a81ce7817e505f58d694e4a50926681836cd9177", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 460, "license_type": "permissive", "max_line_length": 33, "num_lines": 24, "path": "/HackerRank/Implement/21.Utopian_Tree.py", "repo_name": "KimDH94/TIL", "src_encoding": "UTF-8", "text": "#!/bin/python3\n\nimport sys\n\ndef utopianTree(n):\n # Complete this function\n start = 1\n\n if n == 0:\n return start\n\n else:\n for i in range(1,n+1):\n if i % 2 == 1:\n start = start * 2\n else:\n start += 1\n return start\nif __name__ == \"__main__\":\n t = 
int(input().strip())\n for a0 in range(t):\n n = int(input().strip())\n result = utopianTree(n)\n print(result)\n" }, { "alpha_fraction": 0.4171322286128998, "alphanum_fraction": 0.5009310841560364, "avg_line_length": 20.479999542236328, "blob_id": "5819b2f2b2881b3f9b6c80612561e2cb46f6c64a", "content_id": "2d4d97bad6fae6ecc20d8901d015667e6c0b9999", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 537, "license_type": "permissive", "max_line_length": 66, "num_lines": 25, "path": "/HackerRank/Implement/09.Day_of_the_Programmer.py", "repo_name": "KimDH94/TIL", "src_encoding": "UTF-8", "text": "#!/bin/python3\n\nimport sys\n\ndef solve(year):\n # Complete this function\n\n if year <= 1917:\n if (year % 4 == 0):\n return \"12.09.{}\".format(year)\n else:\n return \"13.09.{}\".format(year)\n\n if year == 1918:\n return \"26.09.{}\".format(year)\n\n if year > 1918:\n if (year % 4 == 0 and year % 100 != 0) or year % 400 == 0:\n return \"12.09.{}\".format(year)\n else:\n return \"13.09.{}\".format(year)\n\nyear = int(input().strip())\nresult = solve(year)\nprint(result)\n" }, { "alpha_fraction": 0.5617897510528564, "alphanum_fraction": 0.5681818127632141, "avg_line_length": 39.228572845458984, "blob_id": "017778d152c2a3a4cb8fce2a09296d679bcaa9ad", "content_id": "b18513823e5283834dc2a668db17e9fc60ee082f", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1826, "license_type": "permissive", "max_line_length": 67, "num_lines": 35, "path": "/Tensorflow/README.md", "repo_name": "KimDH94/TIL", "src_encoding": "UTF-8", "text": "### 주요 함수\n\n| <center>함수</center> | <center>설명</center> |\n|:--------|:--------:|\n|**tf.add** | <center>덧셈</center> |\n|**tf.subtract** | <center>뺄셈</center> |\n|**tf.multiply** | <center>곱셈</center> |\n|**tf.div** | <center>나눗셈의 몫 (Python2 스타일)</center> |\n|**tf.truediv** | <center>나눗셈의 몫(Python3 스타일)</center> |\n|**tf.mod** | 
<center>나눗셈의 나머지</center> |\n|**tf.abs** | <center>절대값을 리턴</center> |\n|**tf.negative** | <center>음수를 리턴</center> |\n|**tf.sign** | <center>부호를 리턴(음수는 -1, 양수는 1, 0 일땐 0을 리턴)</center> |\n|**tf.reciprocal** | <center>역수를 리턴(3의 역수는 1/3)</center> |\n|**tf.square** | <center>제곱을 계산</center> |\n|**tf.round** | <center>반올림 값을 리턴</center> |\n|**tf.sqrt** | <center>제곱근을 계산</center> |\n|**tf.pow** | <center>거듭제곱 값을 계산</center> |\n|**tf.exp** | <center>지수 값을 계산</center> |\n|**tf.log** | <center>로그 값을 계산</center> |\n|**tf.maximum** | <center>최대값을 리턴</center> |\n|**tf.minimum** | <center>최소값을 리턴</center> |\n|**tf.cos** | <center>코사인 함수 값을 계산</center> |\n|**tf.sin** | <center>사인 함수 값을 계산</center> |\n\n### 행렬 연산을 위한 함수\n\n| <center>함수</center> | <center>설명</center> |\n|:--------|:--------:|\n|**tf.diag** | <center>대각행렬을 리턴</center> |\n|**tf.transpose** | <center>전치행렬을 리턴</center> |\n|**tf.matmul** | <center>두 텐서를 행렬곱셈하여 결과 텐서를 리턴</center> |\n|**tf.matrix_determinant** | <center>정방행렬의 행렬식 값을 리턴</center> |\n|**tf.matrix_inverse** | <center>정방행렬의 역행렬을 리턴</center> |\n|**tf.multiply** | <center>곱셈</center> |\n" }, { "alpha_fraction": 0.6623376607894897, "alphanum_fraction": 0.6623376607894897, "avg_line_length": 29.799999237060547, "blob_id": "5ad530b58d0345fe104baaa94f622423d3d49694", "content_id": "0efd937d2cdf0535b19e05e05e7e9bcec4df36bf", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1056, "license_type": "permissive", "max_line_length": 63, "num_lines": 20, "path": "/Linux/Multi_user.md", "repo_name": "KimDH94/TIL", "src_encoding": "UTF-8", "text": "# 다중 사용자\n- 권한에 대한 체크, 기능이 훨씬 복잡해짐\n- 기본적으로 유닉스는 다중 사용자의 관점이 투영되어 있음\n- 여러사람이 하나의 시스템을 같이 사용한다면 필수적으로 알아 둘 필요가 있다\n\n# id, who\n- uid = 사용자ID\n- who = 현재 이 시스템에 누가 접속했는지를 보여줌\n\n# super(root) user VS user\n- 일반 사용자는 sudo를 사용할 수 없다.\n- su 라는 명령어는 a라는 유저에서 b라는 유저가 되고 싶을 때 혹은 suepr user가 되고싶을 때\n- su - root >> $ > # super user가 되었음을 나타냄.\n- sudo passwd -u root : -u 는 
unlock을 의미함. -l 은 lock을 거는 것.\n- 일반 user는 /home 에 위치 super user는 /root 에 위치\n\n# user 추가\n- useradd -m tom : -m을 붙이면 home 디렉토리를 같이 만들어 줌 (앞에 sudo를 붙일 것)\n- sudo passwd tom : password를 설정해줘야 로그인을 할 수 있다.\n- tom이라는 user가 sudo를 사용하게 하고 싶을 때 : sudo usermod -a -G sudo tom\n" }, { "alpha_fraction": 0.6089078783988953, "alphanum_fraction": 0.6143990159034729, "avg_line_length": 35.021976470947266, "blob_id": "9729ab3f3e87db77ccf9667cc238f7c40ebb0738", "content_id": "6a510d511031929bf3f22190d7008888ba1a6daf", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 6888, "license_type": "permissive", "max_line_length": 103, "num_lines": 91, "path": "/ADsP/Day06.md", "repo_name": "KimDH94/TIL", "src_encoding": "UTF-8", "text": "# 마스터 플랜 수립\n\n1. 비즈니스 관점에서 도출된 다양한 분석과제들을 기어베 적용시키기 위해서는 적용 우선순위를 평가해야 한다.\n - 우선순위 고려요소 : 전략적 중요도, 비즈니스 성과/ROI, 실행 용이성 > 적용 우선순위 설정\n - 적용범위/방식 고려요소 : 업무내재화 적용 수준, 분석데이터 적용 수준, 기술적용 수준 > Analytics구현 로드맵 수립\n - ISP(Information Strategy Planning) : 정보기술 또는 정보시스템을 전략적으로 활용하기 위하여 조직 내/외부 환경을 분석하여\n 기회나 문제점을 도출하고 사용자의 요구사항을 분석하여 시스템 구추 우선순위를 결정하는 등 중장기 마스터 플랜을 수립하는 절차이다.\n - 분석 마스터 플랜 : 일반적인 ISP 방법론을 활용하되 데이터 분석 기획의 특성을 고려하여 수행하고 기업에서 필요한 데이터 분석 과제를\n 빠짐없이 도출한 후 과제의 우선순위를 결정하고 단기 및 중/장기로 나누어 계획을 수립한다.\n - 데이터 분석 과제를 추진할 때 고려해야 하는 우선순위 평가 기준\n - 시급성 : 전략적 중요도와 목표가치에 부합하는지에 따른 시급성이 가장 중요한 기준이다. 시급성의 판단 기준은 전략적 중요도가 핵심이다.\n - 난이도 : 현 시점에서 과제를 추진하는 것이 적용 비용 측면과 범위 측면에서 바로 적용하기 쉬운 것 인지 또는 어려운 것인지에 판단 기준이다.\n \n2. 빅데이터의 특징인 4V를 고려한 우선순위 평가기준을 적용할 수 있어야 한다.\n - ROI 관점에서 빅데이터의 핵심 특징\n - 투자비용 (Investment) 요소 : 크기(Volume), 다양성(Variety), 속도(Velocity)의 3V\n - 비즈니스 효과 (Return) 요소 : 가치(Value)\n - 최근에 새롭게 생성되는 비정형 데이터와 기존의 정형 데이터가 결합(Mesh-Up)되어 분석됨으로써 새로운 가치(Value)가 창출된다는 점에서\n 빅데이터를 4V로 정의하기도 한다.\n\n3. 
기업 전사관점에서의 분석 적용에 대한 단계적 로드맵과 추진 일정계획을 수립하는 것이 필요하다.\n - 로드맵 수립 : 분석 과제에 대한 포트폴리오 사분면(Quadrant) 분석을 통해 과제의 1차적 우선순위를 결정한다.\n - 세부 이행계획 수립 : 데이터 분석체계는 고전적인 폭포수(Water-Fall) 방식도 있으나 반복적이 정련과정을 통하여 프로젝트의 완성도를 높이는\n 방식을 주로 사용한다.\n - 폭포수 모델(Water-Fall) : 순차적인 소프트웨어 개발 프로세스로 개발의 흐름이 마치 폭포숴럼 지속적으로 아래로 향하는 것처럼 보이는 데서 이름이 붙여짐.\n - 반복적인 분석 체계는 모든 단계를 반복하기보다 데이터 수집 및 확보와 분석데이터를 준비하는 단계를 순차적으로 진행하고 모델링 단계는 반복적으로 수행하는\n 혼합형을 많이 적용하며, 이러한 특성을 고려하여 세부적인 일정계획도 수립해야 한다.\n \n# 분석 거버넌스 체계\n\n1. 빅데이터 시대에 진입하면서 기업의 분석 수준 및 목적에 맞게 데이터를 분석하여 적용하는 것이 궁극적으로 기업의 경쟁력이 될 수 있기 때문에 기업 데이터의\n 체계적인 관리가 필수적이다.\n - 마스터 플랜 수립 시점에서 데이터 분석의 지속적인 적용과 확산을 위한 거버넌스 체계는 분석 기획 및 관리를 수행하는 조직(Organization),\n 과제 기획 및 운영 프로세스(Process), 분석 관련 시스템(System), 데이터(Data), 분석 관련 교육 및 마인드 육성 체계(Human Resource)로 구성된다.\n2. 조직 내에 분석을 효율적이고 안정적으로 적용하기 위해서는 기업에 적합한 형태의 데이터 분석 전문조직을 구성하고, 분석 전문 인력을 양성하느 것이 매우 중요하다.\n - 분석 준비도 : 분석업무를 파악하고 인력이나 조직에 대해 평가하고 분석기법, 분석데이터, 분석문화, IT 인프라를 평가함으로서 기업의 분석의 현재 수준이\n 어느정도이고 목표를 어떻게 세울 것인지에 대한 분석 거버넌스 체계를 수립하는데 활용되는 방법론.\n - 분석 준비도 구성 (6가지)\n 1. 분석업무 파악\n - 발생한 사실 분석 업무\n - 예측 분석 업무\n - 시뮬레이션 분석 업무\n - 최적화 분석 업무\n - 분석 업무 정기적 개선\n 2. 인력 및 조직\n - 분석 전문가 직무 존재\n - 분석 전문가 교육 훈련 프로그램\n - 관리자들의 기본적 분석 능력\n - 전사 분석업무 총괄 조직 존재\n - 경영진 분석 업무 이해 능력\n 3. 분석 기법\n - 업무별 적합한 분석기법 사용\n - 분석 업무 도입 방법론\n - 분석기법 라이브러리\n - 분석기법 효과성 평가\n - 분석기법 정기적 개선\n 4. 분석 데이터\n - 분석업무를 위한 데이터 충분성\n - 분석업무를 위한 데이터 신뢰성\n - 분석업무를 위한 데이터 적시성\n - 비구조적 데이터 관리\n - 외부 데이터 활용 체계\n - 기준 데이터 관리(MDM)\n 5. 분석 문화\n - 사실에 근거한 의사결정\n - 관리자의 데이터 중시\n - 회의 등에서 데이터 활용\n - 경영진의 직관보다 데이터\n - 데이터 공유 및 협업 문화\n 6. IT인프라\n - 운영시스템 데이터 통합\n - EAI, ETL 등 데이터유통체계\n - 분석 전용 서버 및 스토리지\n - 빅데이터 분석 환경\n - 통계 분석 환경\n - 비쥬얼 분석 환경\n3. 
데이터 분석을 위한 3가지 조직 구조\n - 집중구조\n - 전사 분석업무를 별도의 분석전담 조직에서 담당\n - 전략적 중요도에 따라 분석조직이 우선순위를 정해서 진행 가능\n - 현업 업무부서의 분석 업무와 이중화/이원화 가능성 높음\n - 기능구조\n - 일반적인 분석 수행 구조\n - 별도 분석조직이 없고 해당 업무부서에서 분석 수행\n - 전사적 핵심분석이 어려우며, 부서 현황 및 실적 통계 등 과거 실적에 국한된 분석 수행 가능성 높음\n - 분산구조\n - 분석조직 인력들을 현업부서로 직접 배치하여 분석업무 수행\n - 전사차원의 우선순위 수행\n - 분석결과에 따른 식속한 Action 가능\n - 베스트 프랙티스 공유 가능\n - 부서 분석업무와 역할 분담 명확히 해야함\n" }, { "alpha_fraction": 0.6479750871658325, "alphanum_fraction": 0.6682242751121521, "avg_line_length": 17.882352828979492, "blob_id": "57e75d9ee906f78ddce6dfb65b0f452bf1237ed5", "content_id": "01952ff28110d59bd36037118f26f77cfde1062f", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 642, "license_type": "permissive", "max_line_length": 87, "num_lines": 34, "path": "/HackerRank/Warmup/01.Simple_Array_Sum.py", "repo_name": "KimDH94/TIL", "src_encoding": "UTF-8", "text": "# Given an array of integers, can you find the sum of its elements?\n#\n# Input Format\n#\n# The first line contains an integer, , denoting the size of the array.\n# The second line contains space-separated integers representing the array's elements.\n#\n# Output Format\n#\n# Print the sum of the array's elements as a single integer.\n#\n# Sample Input\n#\n# 6\n# 1 2 3 4 10 11\n# Sample Output\n#\n# 31\n\n#!/bin/python3\n\nimport sys\n\ndef simpleArraySum(n, ar):\n tmp = 0\n for num in range(n):\n\n tmp += ar[num]\n return tmp\n\nn = int(input().strip())\nar = list(map(int, input().strip().split(' ')))\nresult = simpleArraySum(n, ar)\nprint(result)\n" }, { "alpha_fraction": 0.6341911554336548, "alphanum_fraction": 0.6415441036224365, "avg_line_length": 27.63157844543457, "blob_id": "989a23045a8ff8300fd8617bd788b0345891023b", "content_id": "8a056c4afc9b7c3816f32433e4fcc21e8bce9546", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2160, "license_type": "permissive", "max_line_length": 99, 
"num_lines": 38, "path": "/ADsP/Day20.md", "repo_name": "KimDH94/TIL", "src_encoding": "UTF-8", "text": "# 시계열 자료 (time series)\n\n- 시간의 흐름에 따ㅏ 관찰된 값들\n - 시계열 데이터의 분석 목적\n- 미래의 값을 예측한다\n- 시계열 데이터의 특성 파악(경향, 주기, 계절성, 불규칙성 등)\n\n# 정상성 (3가지를 모두 만족)\n\n- 평균이 일정하다. (모든 시점에서 일정한 평균을 가진다.)\n- 분산도 일정하다.\n- 공분산도 특정시점에서 t,s에 의존하지 않고 일정하다.\n\n# 정상시계열의 특징\n\n- 어떤 시점에서 평균과 분산 그리고 특정한 시차의 길이를 갖는 자기공분산을 측정동일한 값이다.\n- 항상 평균값으로 회귀하려는 경향이 있고 변동은 평균값 주변에서 일정한 폭을 유지한다.\n- 비정상시계열은 특정 기간의 시계열 자료로부터 얻은 정보를 다른 시기로 일반화 할 수 없다.\n\n# 시계열 모형\n\n- 자기회귀 모형 (AR 모형, autoregressive model)\n - AR(1) 모형\n - AR(2) 모형\n - ACF는 빠르게 감소, PACF는 절단점이 존재 > AR(절단점 -1)\n \n- 이동평균 모형 (MA 모형, moving average model)\n - MA(1) 모형\n - MA(2) 모형\n - ACF는 절단점이 존재, PACF는 빠르게 감소\n- 자기회귀누적이동평균 모형(ARIMA(p,d,q))\n  - d = 0 이면 ARMA(p,d,q) 모형이라 부르고, 이 모형은 정상성을 만족한다.\n  - p = 0 이면 IMA(d,d,q) 모형이라고 부르고, d번 차분하면 MA(q) 모형을 따른다.\n- 분해 시계열\n  - 추세요인(Trend factor) : 자료의 그림을 그렸을 때 그 형태가 오르거나 또는 내리는 추세를 따르는 경우, 선형 이차식 형태, 지수적 형태\n  - 계절요인(Seasonal factor) : 요읾마다 반복되거나 일년 중 각 월에 의한 변화, 사분기 잘에서 각 분기에 의한 변화 등 고정된 주기에 따라 자료가 변화할 경우\n  - 순환요인(Cyclical factor) : 명백한 경제적이나 자연적인 이유가 없이 알려지지 않은 주기를 가지고 변화하는 자료\n  - 불규칙요인(Irregual factor) : 이 세 가지의 요인으로 설명할 수 없는 회귀분석에서 오차에 해당하는 요인\n" }, { "alpha_fraction": 0.6669921875, "alphanum_fraction": 0.6728515625, "avg_line_length": 36.925926208496094, "blob_id": "7d351e51d799542051baff005d42ffff542a7c0e", "content_id": "9e73d0f6b4b9e0222ed282375759373b4b86efa3", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 4188, "license_type": "permissive", "max_line_length": 129, "num_lines": 54, "path": "/ADsP/test01.md", "repo_name": "KimDH94/TIL", "src_encoding": "UTF-8", "text": "# 오답노트\n## 객관식\n\n### 빅데이터의 시대가 도래함에 따라 CRM 환경이 바뀌고 있다. 옳은 것은\n- 1. 기업과 고객 간 양방향 소통이 가능한 서비스 플랫폼이 활성화되고 있다.\n- 2. 실시간으로 고객 트랜드를 파악할 수 있게 되었다.\n- 3. 다양한 채널로 CRM 수행이 가능하게 되었다.\n\n### 분석기회 발굴의 범위 중 시장니즈 탐색 관저에서 고객 니즈의 변화에 해당하는 것\n- 1. 고객\n- 2. 채널\n- 3. 
영향자들\n\n### 분석 프로젝트 영역별 주요 관리 항목은?\n- 품질, 시간, 자원, 원가, 통합, 조달, 리스크, 의사소통, 이해 관계자\n\n### 모형을 개발하여 운영상황에서 실제 테스트를 할 때 모형 개발 데이터를 통해서는 높은 적중률을 보이지만 테스트 데이터에서는 적중률이 떨어져 정중률을 유지하지 못하는 것을 무엇이라고 하는가?\n- 과대적합 (= 학습 데이터를 너무 과대하게 학습한 경우)\n\n### 측정대상이 갖고 있는 속성의 양을 측정하는 것으로 측정결과가 숫자로 표현되나 해당 속성이 전혀 없는 상태인 절대적인 영점이 없어 두 관측 값 사이의 비율은 별 의미가 없게 된다. 온도, 지수 등이 해당되는 이척도는 무엇인가?\n- 구간척도 (= 구간이나 구간사이의 가녁이 의미가 있는 자료)\n\n### 다음 함수 x<-c(fee,1*pi,TRUE,3) 의 결과의 값으로 올바른 것은?\n- error (= fee 는 문자형이기 때문에 \"fee\"라고 적어줘야 함)\n\n### 통계분석에서 자료를 수집하고 그 수집된 자료로부터 어떤 정보를 얻고자 하는 경우에는 항상 수집된 자료가 특정한 확률분포를 따른다고 가정한다. 다음 중 연속형 확률분포인 것은?\n- 정규분포, T분포(두 집단의 평균이 동일한 지 알고자 할 때), F분포(두 집단간 분산의 동일성 검정에 사용되는 검정통계량의 분포)\n- 이항분포 (= 베르누이 사항을 n번 반복했을 때, 연속형 확률분포 x)\n\n### 다음 중 비모수검정인 것을 고르시오\n- 윌콕슨의 순위합검증, 윌콕슨의 부호순위합, 만-위트니의 U검정, 스피어만의 순위상관계수, 부호검정\n- 자기상관검증 (= 비모수 검정 x)\n\n### 데이터마이닝 모델링 방법 중 분류(classification) 방법으로 활용되고 있는 R 패키지는?\n- rpart, rpartOrdinal, randomForest, party, tree, marginTree, mapTree\n- kmeans (= 분류 방법으로 활용 x)\n\n### 모형의 성능을 평가할 때 사용되는 방법론 중 사후확률과 각 분류기준값에 의해 오분류 행렬을 만든 다음, 민감도(sensitivity)와 특이도(specificity)를 산출하여 도표에 도식화 하여 평가하는 방식은 무엇인가?\n- ROC (= receive operating characteristics)\n\n### K-means 군집분석과 계층적 군집분석의 차이는?\n- 1. K-means 군집분석은 게층적 군집분석과는 달리 한 개체가 처음 속한 군집에서 다른 군집을 이동해 재배치 될 수 있다.\n- 2. K-means 군집분석은 초기값에 대한 의존이 커서 초기값을 어떻게 하느냐에 따라 군집이 달라질 수 있다.\n- 3. 계층적 군집분석은 동일한 거리계산법을 적용하면 몇 번을 시행해도 동일한 결과가 나온다.\n- 4. K-means 군집분석은 동일한 거래계산법을 적용하면 몇번을 시행해도 동일한 결과가 나온다 x (= 초기값 선택에 따라 다르다)\n\n### 텍스트마이닝 패키지인 TM에서 문서를 관리하는 기본 구조를 Courpus라고 부르는데, 이는 텍스트 문서들의 집합을 의미한다. 
R외부의 DB나 파일로 관리되는 것은 무엇인가?\n- PCorpus (= R외부의 DB나 파일로 관리된다)\n- VCorpus x (= 메모리에서만 유지)\n\n### 텍스트마이닝 패키지인 TM에서 영어문서 A에 포함된 줄바꿈을 제거하기 위해 사용되는 R프로그래밍으로 적합한 것은?\n- A <-tm_map(A, strip Whitespace)\n- A <-tm_map(A, removeWords, stopwords(\"english\")) = 띄어쓰기 시제표준화\n- A <-tm_map(A, tolower) = 대문자 > 소문자 변환\n" }, { "alpha_fraction": 0.6772823929786682, "alphanum_fraction": 0.6878980994224548, "avg_line_length": 32.64285659790039, "blob_id": "2e4ee7fd15031fd4a7904ecfe9b5597ce6f1df91", "content_id": "b5da47ba02bcd4f4a0b9304ba8e92d3b217f8309", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2585, "license_type": "permissive", "max_line_length": 94, "num_lines": 42, "path": "/ADsP/Day17.md", "repo_name": "KimDH94/TIL", "src_encoding": "UTF-8", "text": "# 통계\n\n- 통계: 특정집단을 대상으로 수행한 조사나 실험을 통해 나온 결과에 대한 요약된 형태의 표현\n- 통계자료의 획득 총조사(census)와 표본조사(sampling)\n- 샘플링 기법: 단순랜덤 추출(simple random sampling), 계통추출법(systematic sampling), 집락추출법(cluster sampling)\n 층화추출법(stratified random sampling)\n- 자료의 형태 : 명목척도, 순서척도, 구간척도, 비율척도\n\n# 통계분석\n\n- 통계적 추론 (statistical inference) : 추정, 가설검정, 예측\n- 기술통계 (descriptive statistic) : 평균, 표준편차, 중위수, 최빈값, 그래프의 표현\n\n# 확률 및 확률분포\n\n- 확률변수(random variable) : 특정값이 나타날 가능성이 확률적으로 주어지는 변수\n- 이산형 확률분포(discrete distribution) : 베르누이분포, 이항분포, 기하분포, 다항분포, 포아송분포\n- 연속형 확률분포(continuous distribution) : 균일분포, 정규분포, 지수분포, t분포, F분포, x^2 분포\n\n# 추정 및 가설검정\n\n- 추정 : 표본으로부터 미지의 모수를 추측하는 것\n- 점추정(point estimation)\n - '모수가 특정한 값일 것' 이라고 추정하는 것, 평균, 표준편차, 중앙값 등을 추정\n - 점추정 조건 : 불편성(unbiasedness), 효율성(efficiency), 일치성(consistency), 충족성(sufficient)\n- 구간추정(interval estimation)\n - 점추정을 보완하기 위해 모수가 특정 구간에 있을 것이라고 추정하는 것\n - 모분산을 알거나 대표본의 경우 표준정규분포를 활용\n - 모분산을 모르거나 소표본의 경우 t분포를 활용\n- 가설검정\n - 귀무가설(null hypothesis) vs 대립가설(alternative hypothesis)\n - 1종오류(type 1 error) : 귀무가설이 옳은데 귀무가설을 기각하는 오류\n - 2종오류(type 2 error) : 귀무가설이 옳지 않은데 귀무가설을 채택하는 오류\n - 1종오류의 크기를 0.1, 0.05, 0.01 로 고정시키고 2종오류가 최소가 되도록 기각역을 설정\n \n# 
비모수 검정\n\n- 모집단의 분포에 대한 아무 제약을 가하지 않고 실행하는 검정\n- 분포의 형태가 동일하다 또는 동일하지 않다 라는 식으로 가설 설정\n- 순위나 두 관측값 차이의 부호를 이용해 검정\n- 예 : 부호검정(sign test), 윌콕슨의 순위함검정(rank sum test), 윌콕슨의 부호순위합검정(Wilcoxon signed rank test),\n 만-위트니의 U검정, 런검정(run test), 스피어만의 순위상관계수\n" } ]
102
GabrieleDiFlavio/kub
https://github.com/GabrieleDiFlavio/kub
46bfb12926c4d6f67738d193daa1d6fd68a4c63a
43a14cac11a2a4448fb903f261da3c29b5b55b3a
5d11ee51e9e120bc29abe8d79105014f0c60d51e
refs/heads/master
2020-06-08T12:46:01.222314
2019-06-22T12:32:37
2019-06-22T12:32:37
193,230,702
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.4128078818321228, "alphanum_fraction": 0.44630542397499084, "avg_line_length": 20.55555534362793, "blob_id": "73b39dfde0775600791b1da6fbd4d904f6fb65e9", "content_id": "2d01352de90d741c948f6f57b9ed399a42f931f7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1015, "license_type": "no_license", "max_line_length": 84, "num_lines": 45, "path": "/ku.py", "repo_name": "GabrieleDiFlavio/kub", "src_encoding": "UTF-8", "text": "import sys\r\nimport urllib.request\r\nfrom flask import Flask\r\nfrom flask import request\r\n\r\n\r\n# insert flask annotation here\r\napp = Flask(__name__)\r\n\r\n@app.route('/')\r\ndef main():\r\n \r\n Ns = request.args.get('N',default = 10, type = int)\r\n if not Ns:\r\n Ns=10\r\n N = int(Ns)\r\n my_array=[]\r\n for i in computepi(N):\r\n my_array.append(str(i))\r\n my_array = my_array[:1] + ['.'] + my_array[1:]\r\n big_string = \"\".join(my_array)\r\n \r\n \r\n \r\n return big_string\r\n\r\n\r\ndef computepi(N):\r\n q, r, t, k, m, x = 1, 0, 1, 1, 3, 3\r\n counter = 0\r\n while True:\r\n if 4 * q + r - t < m * t:\r\n yield m\r\n q, r, t, k, m, x = 10*q, 10*(r-m*t), t, k, (10*(3*q+r))//t - 10*m, x\r\n if counter>N-1:\r\n break\r\n else:\r\n counter=counter+1\r\n else:\r\n q, r, t, k, m, x = q*k, (2*q+r)*x, t*x, k+1, (q*(7*k+2)+r*x)//(t*x), x+2\r\n\r\n\r\n\r\nif __name__ == \"__main__\":\r\n app.run(host='0.0.0.0', threaded=True)\r\n" } ]
1
CaMeLCa5e/ObjectOrientedPracticeFiles
https://github.com/CaMeLCa5e/ObjectOrientedPracticeFiles
0bbaf7cf1c54ff6ec9d47896d59e8da7af4bb480
456ef1e8d01e4ce68309a1ca250f4f3848f93f50
f0c70153f7f46de39072dbe506b0141139367dc8
refs/heads/master
2020-06-11T21:44:04.905583
2015-11-13T03:51:47
2015-11-13T03:51:47
25,543,927
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6481481194496155, "alphanum_fraction": 0.6574074029922485, "avg_line_length": 18.454545974731445, "blob_id": "c0c1e863bef55781ed276a3196bf50a64f1d67e7", "content_id": "7eae2e6eb587d4a3f6a5d549bb9fa27d863b13a6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 216, "license_type": "no_license", "max_line_length": 66, "num_lines": 11, "path": "/Exceptions.py", "repo_name": "CaMeLCa5e/ObjectOrientedPracticeFiles", "src_encoding": "UTF-8", "text": "\"\"\"Exception handling\n\"\"\"\n\ndef main():\n\tfilename = sys.argv[1]\n\ttry:\n\t\tfor row in parse_csv(filename):\n\t\t\tprint row\n\texcept IOError:\n\t\tprint >> sys.stderr, \"The given file does not exist: \", filename\n\t\tsys.exit(1)\n\t\t" }, { "alpha_fraction": 0.6636771559715271, "alphanum_fraction": 0.6995515823364258, "avg_line_length": 21.399999618530273, "blob_id": "45915368fcc9da06a39ee5c0b72a5a5afe273949", "content_id": "a8e69fd9051498880f6610ab92a5f7dd43b07c30", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 223, "license_type": "no_license", "max_line_length": 72, "num_lines": 10, "path": "/ExceptionHandling10.23.14.py", "repo_name": "CaMeLCa5e/ObjectOrientedPracticeFiles", "src_encoding": "UTF-8", "text": "def do_stuff_with_number(n):\n\tprint n\n\nthe_list = (1,2,3,4,5)\n\nfor i in range(20):\n\ttry:\n\t\tdo_stuff_with_number(the_list[i])\n\texcept IndexError: #this will show when there is something out of range\n\t\tdo_stuff_with_number(0)" }, { "alpha_fraction": 0.4960629940032959, "alphanum_fraction": 0.5433070659637451, "avg_line_length": 14.875, "blob_id": "1487c000d3493b73c64978a42980999432fac869", "content_id": "4042e64dcb587aaf8a780b3d2ca39e0b80444e80", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 381, "license_type": "no_license", "max_line_length": 69, "num_lines": 24, "path": 
"/Fibonaccinumbers.py", "repo_name": "CaMeLCa5e/ObjectOrientedPracticeFiles", "src_encoding": "UTF-8", "text": "\"\"\"Playing with Fibonacci numbers\n\"\"\"\n\ndef F(n):\n\t#Time 0(2**n), Space 0(n)\n\tif n < 2: return n\n\telse: F(n-1) + F(n-2)\n\n\n#########\n# another way:\n\n# Time 0(n), Space(n)\ndef F(n):\n\ta, b = 0, 1\n\tfor i in range(0, n):\n\t\ta, b = b, a + b\n\treturn a\n\n###\n#yet another way 0(1) for Time and Space\nimport math\ndef F(n):\n\treturn ((1+math.sqrt(5))**n-(1-math.sqrt(5))**n)/(2**n*math.sqrt(5))\n" }, { "alpha_fraction": 0.638671875, "alphanum_fraction": 0.64453125, "avg_line_length": 27.44444465637207, "blob_id": "9114e75f168eac6fda026f33e9f800a2353a63d7", "content_id": "6056d34e033583e4d7b6f3da3dd295789f3b0a87", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 512, "license_type": "no_license", "max_line_length": 53, "num_lines": 18, "path": "/multiplefuncargs.py", "repo_name": "CaMeLCa5e/ObjectOrientedPracticeFiles", "src_encoding": "UTF-8", "text": "#multiple function arguments. \ndef foo(first, second, third, *therest):\n\tprint \"First: %s\" % first\n\tprint \"Second: %s\" % second\n\tprint \"Third: %s\" % third\n\tprint \"and all the rest... 
%s\" % list(therest)\n\n#now try sending words through key word\n\ndef bar(first, second, third, **options):\n\tif options.get('action') == \"sum\":\n\t\tprint \"The sum is: %d\" % (first + second + third)\n\n\tif options.get('number') == 'first':\n\t\treturn first\n\nresult = bar(1,2,3, action = 'sum', number = 'first')\nprint \"Result: %d\" % result\n" }, { "alpha_fraction": 0.6379310488700867, "alphanum_fraction": 0.6637930870056152, "avg_line_length": 20.090909957885742, "blob_id": "1ff660183a10f53543023d8ca018b71cb783ee15", "content_id": "d22ad3b856a84ab0508a06d671ef2d37cfb9c0e2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 232, "license_type": "no_license", "max_line_length": 56, "num_lines": 11, "path": "/sets10.23.14.py", "repo_name": "CaMeLCa5e/ObjectOrientedPracticeFiles", "src_encoding": "UTF-8", "text": "#sets10.23.14.py\n\nprint set('my name is Jack and Jack is my name'.split())\n\na = set(['Jack', 'John', 'Bill'])\nb = set(['John', 'Jill'])\n\nprint a.intersection(b)\nprint a.symmetric_difference(b)\nprint a.difference(b)\nprint a.union(b)\n" }, { "alpha_fraction": 0.7151514887809753, "alphanum_fraction": 0.7220779061317444, "avg_line_length": 22.75, "blob_id": "c0946427eeb8857177267f51a56708e4e1044b01", "content_id": "b8a25a931fede5b53c7d010b6651a78cd454ad18", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1155, "license_type": "no_license", "max_line_length": 107, "num_lines": 48, "path": "/Decorators.10.23.14.py", "repo_name": "CaMeLCa5e/ObjectOrientedPracticeFiles", "src_encoding": "UTF-8", "text": "\n@decorator\ndef funtions(arg):\n\treturn 'Return'\n\n# this is the same as the first statement- \ndef funtion(arg):\n\treturn 'Return'\nfuntion = decorator(function) #passes funtion to decorator and reassigns it to the function. \n\n# a decorator is a function that takes a function and returns one. 
would you call this a nested function? \n\ndef repeater(old_function):\n\tdef new_function(*args, **kwds):\n\t\told_function(*args, **kwds)# old function\n\t\told_function(*args, **kwds) #do it again for illistration \n\treturn new_function \n\n\n@repeater\ndef Multiply(num1, num2):\n\tprint num1*num2\n\n\ndef Double_Out (old_function):\n\tdef new_function(*args, **kwds):\n\t\treturn 2*old_function(*args, **kwds)\n\treturn new_function\n\ndef Double_in(old_function):\n\tdef new_function(arg):\n\t\treturn old_function(arg*2)\n\treturn new_function\n\ndef Check(old_function):\n\tdef new_finction(arg):\n\t\tif arg<0: raise ValueError, \"Negative Argument\"\n\t\told_function(arg)\n\treturn new_function\n\ndef multiply(multiplier):\n\tdef Multiply_Generator(old_function):\n\t\tdef new_function(*args, **kwds):\n\t\t\treturn new_function\t\nreturn Multiply_Generator\n\n@Multiply(3) \ndef Num(num):\n\treturn num\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n" }, { "alpha_fraction": 0.7077244520187378, "alphanum_fraction": 0.7160751819610596, "avg_line_length": 20.727272033691406, "blob_id": "be2c54bf45134e7ba3536aa493b92e2d5ca03f94", "content_id": "f4da52612e11123e4bb07bbc0b0f4c9d666dbfff", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 479, "license_type": "no_license", "max_line_length": 71, "num_lines": 22, "path": "/BankingApplication.py", "repo_name": "CaMeLCa5e/ObjectOrientedPracticeFiles", "src_encoding": "UTF-8", "text": "\"\"\"Banking application for building a checking account. Customers have\nthe following attributes: name and balance. 
\n\"\"\"\nclass Customer(object):\n\n\tdef __init__ (self, name, name:\n\t\tself.name = name\n\t\tself.balance = 0.0\n\n\tdef set_balance(set, balance=0.0)\n\t\tself.balance = balance\n\n\tdef withdraw(self, amount):\n\t\tif amount > self.balance:\n\t\t\traise RuntimeError(\"amount greater than available\")\n\n\tdef deposit(self, amount):\n\t\tself.balance += amount\n\t\treturn self.balance\n\n\nexcept\n\n" }, { "alpha_fraction": 0.6481481194496155, "alphanum_fraction": 0.6759259104728699, "avg_line_length": 17, "blob_id": "e203796e59d2973cfeefc801de9340af328f606f", "content_id": "a9e5bff4ec0e08ab4fd42ffb01c2ed6e9f4768a4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 216, "license_type": "no_license", "max_line_length": 54, "num_lines": 12, "path": "/Json(Vorhees)10.23.14.py", "repo_name": "CaMeLCa5e/ObjectOrientedPracticeFiles", "src_encoding": "UTF-8", "text": "#serialization\n\nimport json\nimport cPickle\n\n\njson_string = json.dumps([1,2,3,'a', 'b', 'c'])\nprint json.loads(json_string)\n\n\npickled_string = cPickle.dumps([1,2,3, 'a', 'b', 'c'])\nprint cPickle.loads(pickled_string)\n" }, { "alpha_fraction": 0.6277843117713928, "alphanum_fraction": 0.6570926308631897, "avg_line_length": 17.72527503967285, "blob_id": "3e212ba04ab9c1920b4a5a040777ca5f91691c53", "content_id": "ba04d12b3dbb65200f381a92514c1a80b734ec4d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1706, "license_type": "no_license", "max_line_length": 65, "num_lines": 91, "path": "/StaticMethods.py", "repo_name": "CaMeLCa5e/ObjectOrientedPracticeFiles", "src_encoding": "UTF-8", "text": "\"\"\"Static methods (car)\"\"\"\nfrom abc import ABCMeta, abstractmethod\nclass Vehicle(object):\n\n\t__metaclass__ = ABCMeta\n\n\t\n\tbase_sale_price = 0\n\twheels = 0 \n\n\tdef __init__(self, wheels, miles, make, model, year, sold_on):\n\t\tself.wheels = wheels\n\t\tself.miles = 
miles\n\t\tself.make = make\n\t\tself.model = model\n\t\tself.year = year\n\t\tself.sold_on = sold_on\n\n\tdef sale_price(self):\n\t\tif self.sold_on is not None:\n\t\t\treturn 0.0\n\t\treturn 5000.0 * self.wheels\n\n\tdef purchase_price(self):\n\t\tif self.sold_on is None:\n\t\t\treturn 0.0 \n\t\treturn self.base_sale_price - (.10 * self.miles)\n\n\n\t\nclass Car(Vehicle):\n\n\t# def __init__(self, wheels, miles, make, model, year, sold_on):\n\t# \tself.wheels = wheels\n\t# \tself.miles = miles\t\n\t# \tself.make = make\n\t# \tself.model = model\n\t# \tself.year = year\n\t# \tself.sold_on = sold_on\n\n\tbase_sale_price = 8000\n\twheels = 4\n\tdef vehicle_type(self):\n\t\treturn 'car'\n\nclass Truck(Vehicle):\n\n\t# def __init__(self, wheels, miles, make, model, year, sold_on):\n\t# \tself.wheels = wheels\n\t# \tself.miles = miles\n\t# \tself.make = make\n\t# \tself.model = model\n\t# \tself.year = year\n\t# \tself.sold_on = sold_on\n\t# \tself.base_sale_price = 10000\n\t\n\tbase_sale_price = 10000\n\twheels = 4\n\n\tdef sale_price(self):\n\t\tif self.sold_on is not None:\n\t\t\treturn 0.0 \n\t\treturn 5000.0 * self.wheels\n\n\tdef purchase_price(self):\n\t\tif self.sold_on is None:\n\t\t\treturn 0.0 \n\t\treturn 8000 - (.10 *self.miles)\n\t\n\tdef vehicle_type(self):\n\t\treturn 'truck'\n\nclass Motorcycle(Vehicle):\n\n\tbase_sale_price = 4000\n\twheels = 2\n\n\tdef vehicle_type(self):\n\t\treturn 'Motorcycle'\n\n@abstractmethod\ndef vehicle_type():\n\tpass\n\n@classmethod\ndef is_motorcycle(cls):\n\treturn cls.wheels == 2\n\n@staticmethod\ndef make_car_sound():\n\tprint 'VROOOOOOOMMMMM'\n\n\n" }, { "alpha_fraction": 0.6310160160064697, "alphanum_fraction": 0.6363636255264282, "avg_line_length": 14, "blob_id": "2e5f86d8ca6e656d6875f41fc174cdc0bf002639", "content_id": "df80d075b44648ee1c8724b91ee498dc8488cd32", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 374, "license_type": "no_license", 
"max_line_length": 49, "num_lines": 25, "path": "/ObjectOrientedPractice.py", "repo_name": "CaMeLCa5e/ObjectOrientedPracticeFiles", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python \n\"\"\"OO walkthrough\n\"\"\"\n\nclass pet:\n\tnumber_of_legs = 0\n\t\n\tdef sleep(self):\n\t\tprint 'zzz'\n\t\t\n\tdef count_legs(self):\n\t\tprint 'Pumba has %s legs' %self.number\n\t\t\nclass dog(pet):\n\tdef bark(self):\n\t\tprint \"Woof\"\n\t\t\n\t\t\n\t\t\npumba = dog()\npumba.bark()\npumba.sleep()\npumba.number_of_legs = 4\n# pumba.count_legs()\nprint \"Pumba has %s legs.\" % pumba.number_of_legs" }, { "alpha_fraction": 0.6601671576499939, "alphanum_fraction": 0.6824512481689453, "avg_line_length": 20.117647171020508, "blob_id": "2070a933cce301ffe8ff82e678bc4459197b3b53", "content_id": "d7665074fc97d6e8902e6990b843595c1e0b23f7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 718, "license_type": "no_license", "max_line_length": 82, "num_lines": 34, "path": "/10.25.14Generators.py", "repo_name": "CaMeLCa5e/ObjectOrientedPracticeFiles", "src_encoding": "UTF-8", "text": "#10.25.14Generators\nimport random\n\ndef get_data():\n\t#return 3 ints between 0 and 9\n\treturn random.sample(range(10),3)\n\ndef consume():\n\t#display a running average \n\trunning_sum = 0\n\tdata_items_seen = 0\n\n\twhile True:\n\t\tdata = yield\n\t\tdata_items_seen += len(data)\n\t\trunning_sum += sum(data)\n\t\tprint ('The running average is {}'.format(running_sum / float(data_items_seen)))\n\ndef produce(consumer):\n\t#produce set and forwards them to predetermined function\n\twhile True:\n\t\tdata = get_data()\n\t\tprint ('Produced {}'.format(data))\n\t\tconsumer.send(data)\n\t\tyield\n\nif __name__ == '__main__':\n\tconsumer = consume()\n\tconsumer.send(None)\n\tproducer = producer(consumer)\n\n\tfor _ in range(10)\n\t\tprint ('Producing...')\n\t\tnext(producer)\n" }, { "alpha_fraction": 0.650632917881012, "alphanum_fraction": 
0.6607595086097717, "avg_line_length": 14.680000305175781, "blob_id": "8b254dfd3ce0d2c8824584ee398efc0bd1cd65ee", "content_id": "2f647495c47377930c3a499e4ebe578b2da306cf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 395, "license_type": "no_license", "max_line_length": 45, "num_lines": 25, "path": "/CharlesTheClassPet.py", "repo_name": "CaMeLCa5e/ObjectOrientedPracticeFiles", "src_encoding": "UTF-8", "text": "\"\"\"Practice in OO\"\"\"\n\nclass pet:\n\tnumber_of_legs = 0 \n\n\tdef sleep(self):\n\t\tprint \"ZzZzZzZzZzZzZ\"\n\n\tdef count_legs(self):\n\t\tprint \"I have %s legs\" %self.number_of_legs\nclass dog(pet):\n\tdef bark(self):\n\t\tprint \"Woof\"\n\ncharles = pet()\ncharles.number_of_legs = 4\ncharles.count_legs()\n\nnemo = pet()\nnemo.number_of_legs = 0\nnemo.count_legs()\n\ndoug = dog()\ndoug.number_of_legs = 4\ndoug.count_legs()\n\n\n\n" }, { "alpha_fraction": 0.6800000071525574, "alphanum_fraction": 0.6872727274894714, "avg_line_length": 22, "blob_id": "9837f7c5d2b607dc7ca956ec66264f4549054aaa", "content_id": "b1b74da2060c0fba5269ea9a89eb7e72a0f7fe6b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 275, "license_type": "no_license", "max_line_length": 82, "num_lines": 12, "path": "/running_sum_generator.py", "repo_name": "CaMeLCa5e/ObjectOrientedPracticeFiles", "src_encoding": "UTF-8", "text": "#running sum generator\n\ndef consume():\n\t#display a running average \n\trunning_sum = 0\n\tdata_items_seen = 0\n\n\twhile True:\n\t\tdata = yield\n\t\tdata_items_seen += len(data)\n\t\trunning_sum += sum(data)\n\t\tprint ('The running average is {}'.format(running_sum / float(data_items_seen)))" }, { "alpha_fraction": 0.7552447319030762, "alphanum_fraction": 0.7762237787246704, "avg_line_length": 19.285715103149414, "blob_id": "153f32ca5376e7dfba1be6e4f3c9201897f341a6", "content_id": "a930baa5e68e80bb645c86a039a279d1ef70f4ed", 
"detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 143, "license_type": "no_license", "max_line_length": 50, "num_lines": 7, "path": "/partialfunctions.py", "repo_name": "CaMeLCa5e/ObjectOrientedPracticeFiles", "src_encoding": "UTF-8", "text": "#partial functions\n\nfrom functools import partial\n\n#create a new function that multiplies things by 2\ndbl = partial(multiply,2)\nprint dbl(4)\n\n" }, { "alpha_fraction": 0.6348039507865906, "alphanum_fraction": 0.6421568393707275, "avg_line_length": 16.7391300201416, "blob_id": "c7707d922c34f16f9806a5136539ba122620d288", "content_id": "48b86d576848c6ad7ba0aaaf5b3461ccad0987fb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 408, "license_type": "no_license", "max_line_length": 73, "num_lines": 23, "path": "/myDogIsAnObject.py", "repo_name": "CaMeLCa5e/ObjectOrientedPracticeFiles", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python \n\"\"\" Object Oriented Code Review\"\"\"\n\nclass Dog:\n\t#What are the attributes of a person? 
\n\tname = \"Pumba\"\n\tage = 0\n\t\n\t#add methods\n\tdef setName(self, x):\n\t\tself.name = x\n\t\t\n\tdef setAge(self, x):\n\t\tself.age = x\n\t\t\n\tdef talk(self):\n\t\tprint \"My dog's name is\", self.name, \"and he is\", self.age \"years old.\"\n\t\t\n\t\nmyObject = Dog()\nmyObject.setName(\"Peter\")\nmyObject.setAge(74)\nmyObject.talk()\n" }, { "alpha_fraction": 0.6822134256362915, "alphanum_fraction": 0.6996047496795654, "avg_line_length": 19.57377052307129, "blob_id": "d4e3cc6764f91b4c1beb57d470a8af12459afe3a", "content_id": "9963bd8c022930ce76c58daf5ae99a84858ba629", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1265, "license_type": "no_license", "max_line_length": 55, "num_lines": 61, "path": "/Practicegenerators.py", "repo_name": "CaMeLCa5e/ObjectOrientedPracticeFiles", "src_encoding": "UTF-8", "text": "#Practice with generators\ndef simple_generator_function():\n\tyield 1\n\tyield 2\n\tyield 3\n\nfor value in simple_generator_function():\n\tprint (value)\n\nour_generator = simple_generator_function()\nprint next(our_generator)\n\n#the next two blocks push the generator to exhaustion. 
\n# def get_primes(number):\n# \twhile True:\n# \t\tif is_prime(number):\n# \t\t\tyield number\n# \t\tnumber += 1\n\n# our_generator = simple_generator_function()\n# for value in our_generator:\n# \tprint value\n# print (next(our_generator))\n\ndef solve_number_10():\n\ttotal = 2\n\tfor next_prime in get_primes(3):\n\t\tif next_prime < 2000000:\n\t\t\ttotal += next_prime\n\t\telse:\n\t\t\tprint (total)\n\t\t\treturn\ndef get_primes(number):\n\twhile True:\n\t\tif is_prime(number):\n\t\t\tyield number\n\t\tnumber += 1\n\n\ndef print_successive_primes(iterations, base = 10):\n\n\tprime_generator = get_primes(base)\n\tfor power in range(iterations):\ndef get_primes(number):\n\twhile True:\n\t\tif is_prime(number):\n\t\t\tnumber = yield number\n\t\tnumber += 1\n\n\ndef get_primes(number):\n\twhile True:\n\t\tif is_prime(number):\n\t\t\tnumber = yield number\n\t\tnumber += 1\n\ndef print_successive_primes(iterations, base = 10):\n\tprime_generator = get_primes(base)\n\tprime_generator.send(None)\n\tfor power in range(iterations):\n\t\tprint(prime_generator.send(base**power))\n\n\t\t\n\n\n\n\n\n\n" } ]
16
christianbos/aircheck
https://github.com/christianbos/aircheck
cdf0d3adf43969f3c1da59d1380d40aef5b2d302
2f2e1466598e0bee6f3bf684f503b51e3eac27ca
e2dfa344ac6689ac16bcb6842e2970954d07a1fe
refs/heads/master
2016-09-13T07:55:56.744619
2016-04-24T16:41:40
2016-04-24T16:41:40
56,938,488
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5289617776870728, "alphanum_fraction": 0.5562841296195984, "avg_line_length": 25.91176414489746, "blob_id": "d4532e06870f22c69d33c5ed9fc158d4bbb403ab", "content_id": "db752b20a1b79576fee8660f57be4c0bf3dc4163", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 915, "license_type": "no_license", "max_line_length": 75, "num_lines": 34, "path": "/measurer/migrations/0002_auto_20160424_1146.py", "repo_name": "christianbos/aircheck", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n# Generated by Django 1.9.5 on 2016-04-24 11:46\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('measurer', '0001_initial'),\n ]\n\n operations = [\n migrations.RemoveField(\n model_name='temperature',\n name='celcius',\n ),\n migrations.AddField(\n model_name='temperature',\n name='celsius',\n field=models.FloatField(blank=True, max_length=30, null=True),\n ),\n migrations.AlterField(\n model_name='gas',\n name='ppm',\n field=models.FloatField(blank=True, null=True),\n ),\n migrations.AlterField(\n model_name='humidity',\n name='rangomed',\n field=models.FloatField(blank=True, max_length=950, null=True),\n ),\n ]\n" }, { "alpha_fraction": 0.7267267107963562, "alphanum_fraction": 0.7417417168617249, "avg_line_length": 24.615385055541992, "blob_id": "8ccd6ecc688bc9e6c44ae0cf6e21aab0071cef79", "content_id": "781a77531d6d2dfedac4ef1df59ac610077c5b94", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 333, "license_type": "no_license", "max_line_length": 71, "num_lines": 13, "path": "/measurer/models.py", "repo_name": "christianbos/aircheck", "src_encoding": "UTF-8", "text": "from django.db import models\n\n\nclass Humidity(models.Model):\n rangomed = models.FloatField(max_length=950, blank=True, null=True)\n\n\nclass Gas(models.Model):\n ppm = 
models.FloatField(max_length=None, blank=True, null=True)\n\n\nclass Temperature(models.Model):\n celsius = models.FloatField(max_length=30, blank=True, null=True)\n" }, { "alpha_fraction": 0.6161825656890869, "alphanum_fraction": 0.6234439611434937, "avg_line_length": 27.352941513061523, "blob_id": "59ee646318505860fbe805d4177bebfc42880a12", "content_id": "dbaa6ff75bde0594b88d48bab45226eafccce06c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 964, "license_type": "no_license", "max_line_length": 56, "num_lines": 34, "path": "/main/views.py", "repo_name": "christianbos/aircheck", "src_encoding": "UTF-8", "text": "from django.shortcuts import render\nfrom django.views.generic import TemplateView\nimport requests\n\n\nclass HomeView(TemplateView):\n def get(self, request):\n template_name = 'index.html'\n return render(request, template_name)\n\n\nclass MedidasView(TemplateView):\n url = \"http://habilita.com.mx/iArduino.txt\"\n template_name = 'index.html'\n\n def getFile(self):\n rq = requests.get(self.url)\n contenido = rq.text\n contenido = contenido.strip()\n contenido = contenido.split(\",\")\n rangomed = float(contenido[0].strip())\n celsius = float(contenido[2].strip())\n\n if rangomed > 225 and celsius > 16:\n gasMetano = True\n return(gasMetano)\n else:\n gasMetano = False\n return(gasMetano)\n\n def get(request, getFile):\n template_name = 'index.html'\n gasmetano = {'gasmetano': getFile}\n return render(request, template_name, gasmetano)\n" }, { "alpha_fraction": 0.739130437374115, "alphanum_fraction": 0.739130437374115, "avg_line_length": 27.75, "blob_id": "753b7c176bfeaec319b4013ec22d626e0294e485", "content_id": "951d96e8321e627d2223e000f5951f622e3e4c4f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 230, "license_type": "no_license", "max_line_length": 45, "num_lines": 8, "path": "/measurer/views.py", "repo_name": 
"christianbos/aircheck", "src_encoding": "UTF-8", "text": "from django.shortcuts import render\nfrom django.views.generic import TemplateView\n\n\nclass MedidorView(TemplateView):\n def get(self, request):\n template_name = 'medidor.html'\n return render(request, template_name)\n" }, { "alpha_fraction": 0.6325088143348694, "alphanum_fraction": 0.6325088143348694, "avg_line_length": 20.769229888916016, "blob_id": "695a921644dca623d054d097b87662ad82545faa", "content_id": "02d15ecba0d031c2109bf3ad5b62e4dc64be8f3d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 283, "license_type": "no_license", "max_line_length": 56, "num_lines": 13, "path": "/nasa/urls.py", "repo_name": "christianbos/aircheck", "src_encoding": "UTF-8", "text": "from django.conf.urls import url, include\nfrom django.contrib import admin\n\nurlpatterns = [\n url(r'^admin/', admin.site.urls),\n\n url(r'^main/',\n include('main.urls', namespace=\"main\")),\n\n url(r'^measurer/',\n include('measurer.urls', namespace=\"measurer\")),\n\n]\n" }, { "alpha_fraction": 0.6466666460037231, "alphanum_fraction": 0.6466666460037231, "avg_line_length": 20.428571701049805, "blob_id": "a704c175db4b274e45d969ce69862734ab0d957e", "content_id": "fb01605e677111db12489ed9d3d4d40bf790db0d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 150, "license_type": "no_license", "max_line_length": 68, "num_lines": 7, "path": "/measurer/urls.py", "repo_name": "christianbos/aircheck", "src_encoding": "UTF-8", "text": "from django.conf.urls import url\nfrom . 
import views\n\nurlpatterns = [\n url(r'^medidor/$', views.MedidorView.as_view(), name='medidor'),\n\n ]\n" }, { "alpha_fraction": 0.5757892727851868, "alphanum_fraction": 0.5899180173873901, "avg_line_length": 36.966888427734375, "blob_id": "49975f5bcb39a31c46ebc2a855a0964b99edcd60", "content_id": "c8c515d61c01f7f0972f964bd7f8be9d8553f43a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "HTML", "length_bytes": 5746, "license_type": "no_license", "max_line_length": 456, "num_lines": 151, "path": "/main/templates/index.html", "repo_name": "christianbos/aircheck", "src_encoding": "UTF-8", "text": "<!DOCTYPE html>\n<html lang=\"es\">\n<head>\n <meta http-equiv=\"Content-Type\" content=\"text/html; charset=UTF-8\"/>\n <meta name=\"viewport\" content=\"width=device-width, initial-scale=1, maximum-scale=1.0, user-scalable=no\"/>\n <title></title>\n\n <!-- CSS -->\n <link href=\"https://fonts.googleapis.com/icon?family=Material+Icons\" rel=\"stylesheet\">\n <link href=\"css/materialize.css\" type=\"text/css\" rel=\"stylesheet\" media=\"screen,projection\"/>\n <link href=\"css/style.css\" type=\"text/css\" rel=\"stylesheet\" media=\"screen,projection\"/>\n\n</head>\n<body>\n\n <div id=\"index-banner\" class=\"parallax-container\">\n <div class=\"section no-pad-bot\">\n <div class=\"container\">\n <br><br>\n <div class=\"header center\">\n <img id=\"testimg\" src=\"logotipo.png\" alt=\"See-d-rone\">\n </div>\n <div class=\"row center\">\n <h5 class=\"header col s12 light\">Agricultura sustentable en suelo no profundo.</h5>\n </div>\n <div class=\"row center\">\n </div>\n <br><br>\n\n </div>\n </div>\n <div class=\"parallax\"><img src=\"background1.jpg\" alt=\"Unsplashed background img 1\"></div>\n </div>\n\n\n <div class=\"container\">\n <div class=\"section\">\n\n <!-- Icon Section -->\n <div class=\"row\">\n <div class=\"col s12 m4\">\n <div class=\"icon-block\">\n <h2 class=\"center brown-text\"><i 
class=\"material-icons\">flash_on</i></h2>\n <h5 class=\"center\">Problemática</h5>\n\n <p class=\"light\">En un mundo de contaminación constante, nos topamos con la incapacidad de producir alimento que no se encuentre contaminado.</p>\n </div>\n </div>\n\n <div class=\"col s12 m4\">\n <div class=\"icon-block\">\n <h2 class=\"center brown-text\"><i class=\"material-icons\">group</i></h2>\n <h5 class=\"center\">Trabajando juntos.</h5>\n\n <p class=\"light\">La idea propuesta es medir la calidad de tierra no profunda en ciudades, con la finalidad de promover agriculturas sustentables en sitios.</p>\n </div>\n </div>\n\n <div class=\"col s12 m4\">\n <div class=\"icon-block\">\n <h2 class=\"center brown-text\"><i class=\"material-icons\">settings</i></h2>\n <h5 class=\"center\">Drones y Web.</h5>\n\n <p class=\"light\">La idea propuesta es medir la calidad de tierra no profunda en ciudades, con la finalidad de promover agriculturas sustentables en sitios urbanos.</p>\n </div>\n </div>\n </div>\n\n </div>\n </div>\n\n\n <div class=\"parallax-container valign-wrapper\">\n <div class=\"section no-pad-bot\">\n <div class=\"container\">\n <div class=\"row center\">\n <h5 class=\"header col s12 light\"> \"La naturaleza no es un lugar para visitar. Es el hogar.\" <i>Gary Snyder.</i></h5>\n </div>\n </div>\n </div>\n <div class=\"parallax\"><img src=\"background2.jpg\" alt=\"Unsplashed background img 2\"></div>\n </div>\n\n <div class=\"container\">\n <div class=\"section\">\n\n <div class=\"row\">\n <div class=\"col s12 center\">\n <h3><i class=\"mdi-content-send brown-text\"></i></h3>\n <h4></h4>\n <p class=\"left-align light\">Proponemos la inspección de sitiios baldíos urbanos, lugares en los que sea posible generar forestación floral o alimenticia para obtener un lugar más verde, así como alimentos sustentables.<br> Promoviendo el bienestar de comunidades a través de posibles jardines urbanos y comunitarios. 
Los usos son múltiples.<br> La aplicación web hace sugerencias de semillas de siembra para la humedad y temperatura obtenida.</p>\n </div>\n </div>\n </div>\n </div>\n\n\n <div class=\"parallax-container valign-wrapper\">\n <div class=\"section no-pad-bot\">\n <div class=\"container\">\n <div class=\"row center\">\n <h5 class=\"header col s12 light\">\"La Tierra es un lugar más bello para nuestros ojos que cualquiera que conozcamos. Pero esa belleza ha sido esculpida por el cambio: el cambio suave, casi imperceptible, y el cambio repentino y violento.\" <i>Carl Sagan.</i></h5>\n </div>\n </div>\n </div>\n <div class=\"parallax\"><img src=\"background3.jpg\" alt=\"Unsplashed background img 3\"></div>\n </div>\n\n <footer class=\"page-footer teal\">\n <div class=\"container\">\n <div class=\"row\">\n <div class=\"col l6 s12\">\n <h5 class=\"white-text\">See-D-RONE</h5>\n <p class=\"grey-text text-lighten-4\">Los datos obtenidos para este sitio web se obtinen de nuestros drones en campo.</p>\n </div>\n </div>\n <div class=\"col l3 s12\">\n <h5 class=\"white-text\">MEDICIONES EN CAMPO</h5>\n <ul>\n <li><a class=\"white-text\" href=\"#!\">Link 1</a></li>\n <li><a class=\"white-text\" href=\"#!\">Link 2</a></li>\n <li><a class=\"white-text\" href=\"#!\">Link 3</a></li>\n <li><a class=\"white-text\" href=\"#!\">Link 4</a></li>\n </ul>\n </div>\n <div class=\"col l3 s12\">\n <h5 class=\"white-text\">Connect</h5>\n <ul>\n <li><a class=\"white-text\" href=\"#!\">Link 1</a></li>\n <li><a class=\"white-text\" href=\"#!\">Link 2</a></li>\n <li><a class=\"white-text\" href=\"#!\">Link 3</a></li>\n <li><a class=\"white-text\" href=\"#!\">Link 4</a></li>\n </ul>\n </div>\n </div>\n </div>\n <div class=\"footer-copyright\">\n <div class=\"container\">\n <p class=\"brown-text text-lighten-3\">Space Apps Abril/2016 by Frijolito-x</p>\n </div>\n </div>\n </footer>\n\n\n <!-- Scripts-->\n <script src=\"https://code.jquery.com/jquery-2.1.1.min.js\"></script>\n <script 
src=\"js/materialize.js\"></script>\n <script src=\"js/init.js\"></script>\n <script src=\"js/DjangoAjax.js\"></script>​\n </body>\n</html>\n" }, { "alpha_fraction": 0.5783699154853821, "alphanum_fraction": 0.5909090638160706, "avg_line_length": 26.7391300201416, "blob_id": "0e22bc20a1e8c7e99c944ee9a50005b5dd01ada9", "content_id": "1429292c91728b15a6be7b8f14a06f1183eb58ca", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 638, "license_type": "no_license", "max_line_length": 77, "num_lines": 23, "path": "/converter.py", "repo_name": "christianbos/aircheck", "src_encoding": "UTF-8", "text": "import requests\n\n\nclass Medidas:\n url = \"http://habilita.com.mx/iArduino.txt\"\n\n def getFile(self):\n rq = requests.get(self.url)\n contenido = rq.text\n contenido = contenido.strip()\n contenido = contenido.split(\",\")\n rangomed = float(contenido[0].strip())\n ppm = float(contenido[1].strip())\n celsius = float(contenido[2].strip())\n\n if rangomed > 225 and celsius > 16:\n return(\"hay gas metano en la zona {} y {}\".format(rangomed))\n else:\n return(\"El ambiente esta bien {} y {}\".format(rangomed, celsius))\n\n\nmedida = Medidas()\nprint(medida.getFile())\n" }, { "alpha_fraction": 0.603715181350708, "alphanum_fraction": 0.6532507538795471, "avg_line_length": 34.88888931274414, "blob_id": "2b59348b3683323ba0f16fedbf2b8b170948eb03", "content_id": "4275796377f8672863d7a8798f1a7a84b30a8290", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 323, "license_type": "no_license", "max_line_length": 70, "num_lines": 9, "path": "/ftpclient.py", "repo_name": "christianbos/aircheck", "src_encoding": "UTF-8", "text": "from ftplib import FTP\n\nfile = open('iArduino.txt', 'wb') # ftp = FTP('189.216.247.89')\nftp = FTP('ftp.habilita.com.mx') # ftp = FTP.connect('189.216.247.89')\nftp.login(user='habilita', 
passwd='8AB77tu6yi')\nftp.cwd(\"/public_html/\")\nftp.retrbinary('RETR iArduino.txt', lambda s, w=file.write: w(s+\"\\n\"))\nfile.close()\nftp.quit()\n" } ]
9
CLTanuki/LineByLine
https://github.com/CLTanuki/LineByLine
3df432297bfe7897861a3e7282e86ea55de15ed6
7717fbeef8d6c4b30bf05d5091a10be3315c1904
219e464d8431bb4bdd917021a8e27a2856f8faf0
refs/heads/master
2020-04-23T09:13:07.095238
2014-05-28T16:53:35
2014-05-28T16:53:35
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5850500464439392, "alphanum_fraction": 0.595644474029541, "avg_line_length": 26.419355392456055, "blob_id": "34b626fbe7bd9f212553003dcd2a8823361f0ee7", "content_id": "5326c0160efae8d3854dec135d2db86d5e2707e7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1699, "license_type": "no_license", "max_line_length": 122, "num_lines": 62, "path": "/main.py", "repo_name": "CLTanuki/LineByLine", "src_encoding": "UTF-8", "text": "__author__ = 'cltanuki'\nimport sys\nimport codecs\nimport re\nfrom os import listdir\nfrom os.path import isfile, join, split\nimport xlsxwriter\n\nif hasattr(sys, 'frozen'):\n basis = sys.executable\nelse:\n basis = sys.argv[0]\n\nbase_folder = split(basis)[0]\n\ntype_path = join(base_folder, \"types\")\ninput_path = join(base_folder, \"input\")\n\ntype_files = [f for f in listdir(type_path) if isfile(join(type_path, f))]\ninput_files = [f for f in listdir(input_path) if isfile(join(input_path, f))]\n\ntype_dict = {}\ninput_dict = {}\ncounter_dict = {}\n\nfor file in type_files:\n type_dict[file] = [x.rstrip().lower() for x in codecs.open(join(type_path, file), 'r', encoding='cp1251').readlines()]\n\nfor file in input_files:\n data = codecs.open(join(input_path, file), 'r', encoding='cp1251').read()\n input_dict[file] = [i.lower() for i in re.sub(\"[^\\w]\", \" \", data).split()]\n counter_dict[file] = 0\n\nfor i in input_dict:\n word_list = input_dict.get(i)\n cat_data = {}\n for k, v in type_dict.items():\n word_data = {}\n type_list = v\n for word in word_list:\n if word in type_list:\n word_data[word] = word_data.get(word, 0) + 1\n cat_data[k] = word_data\n counter_dict[i] = cat_data\n\nworkbook = xlsxwriter.Workbook('result.xlsx')\n\nfor k, v in counter_dict.items():\n worksheet = workbook.add_worksheet(k)\n col = 0\n for i, t in v.items():\n s_col = col + 1\n line = 0\n worksheet.write(line, col, i)\n for a, z in t.items():\n line += 1\n print(col, s_col, 
line, a, z)\n worksheet.write(line, col, a)\n worksheet.write(line, s_col, z)\n col += 2\n\nworkbook.close()" } ]
1
Andvari/Translator
https://github.com/Andvari/Translator
551bd374606d83f25335b05255303e694f7a570c
7f5724b5d00a817fe06ce0b7c1e820c7ef0db066
999e678b92d2f204d20c77bbee7414c54c2b872f
refs/heads/master
2021-01-23T12:16:42.194810
2015-08-16T14:58:04
2015-08-16T14:58:04
6,860,284
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6133682727813721, "alphanum_fraction": 0.6343381404876709, "avg_line_length": 25.771930694580078, "blob_id": "c2e760a8bd35ba1814a42d84fa0ae9f98f340f83", "content_id": "f1338cb9bf5c5d8fddbadfcab25306993b750f11", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1526, "license_type": "no_license", "max_line_length": 165, "num_lines": 57, "path": "/src/Translator.py", "repo_name": "Andvari/Translator", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: UTF-8 -*-\n\n'''\nCreated on Nov 26, 2012\n\n@author: nemo\n'''\n\nimport gtk\nimport httplib\nimport pynotify\nimport os\n\nSRC_LANG = 'en'\nDST_LANG = 'ru'\n\n\nterm = gtk.clipboard_get(gtk.gdk.SELECTION_CLIPBOARD).wait_for_text().encode('utf-8').replace(\"\\n\", \"\").lstrip().strip() # @UndefinedVariable\n\n'''\nconn = httplib.HTTPConnection(\"translate.google.com\")\nconn.request(\"GET\",\"/translate_a/t?client=t&text=\" + term.replace(\" \", \"%20\") + \"&hl=\" + SRC_LANG + \"&tl=\" + DST_LANG + \"&ie=UTF-8&oe=UTF-8\")\ndata = conn.getresponse().read()\n\ntransl = data[data.find('\"') + 1 : ]\ntransl = transl[ : transl.find('\"')]\n\npynotify.init(\"Null\")\nn = pynotify.Notification (term, transl, \"Null\")\nn.show()\n\nos.system(\"cvlc --play-and-exit 'http://translate.google.com/translate_tts?ie=UTF-8&q=\" + term.replace(\" \", \"%20\") + \"&tl=\" + SRC_LANG + \"&total=1&idx=0&textlen=5'\")\nos.system(\"cvlc --play-and-exit 'http://translate.google.com/translate_tts?ie=UTF-8&q=\" + transl + \"&tl=\" + DST_LANG + \"&total=1&idx=0&textlen=5'\")\n'''\n\nimport urllib\nimport urllib2\n\nurl = 'http://www.perevod-online.com/translate.php'\nvalues = {'from' : 'en',\n 'to' : 'ru',\n 'text' : term}\n\ndata = urllib.urlencode(values)\nreq = urllib2.Request(url, data)\nresponse = urllib2.urlopen(req)\npage = response.read()\n\ntransl = page [ page.find('textarea') + 43 : page.rfind('textarea') - 2 ]\ntts = 'espeak -v en -s 
120 \"' + term + '\"'\n\npynotify.init(\"Null\")\nn = pynotify.Notification (term, transl, \"Null\")\nn.show()\n\nos.system(tts)\n" }, { "alpha_fraction": 0.8237179517745972, "alphanum_fraction": 0.8237179517745972, "avg_line_length": 61.400001525878906, "blob_id": "f89949cbf12fe686d0492720325c7646c8041278", "content_id": "1adbc82a131ebb197ace3e0a116fafce67050979", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 312, "license_type": "no_license", "max_line_length": 73, "num_lines": 5, "path": "/install.sh", "repo_name": "Andvari/Translator", "src_encoding": "UTF-8", "text": "mkdir /usr/share/pyshared/Translator\ncp src/Translator.py /usr/share/pyshared/Translator/Translator.py\ncp src/images/icon.png /usr/share/pyshared/Translator/images/icon.png\ncp translator.desktop /usr/share/applications/translator.desktop\nln -s -T /usr/share/pyshared/Translator/Translator.py /usr/bin/Translator\n" } ]
2
fl1d/sudoku-solver
https://github.com/fl1d/sudoku-solver
caf92694c46f64c1f5e2cab32c76ce73e3b6403e
14b33ae978390d2be66c787240313421c8c83d84
f79ad1717f5a95abb63599d3da0bec8e207a34b0
refs/heads/master
2022-11-16T05:10:17.691807
2016-10-22T10:25:29
2016-10-22T10:25:29
71,631,664
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.8064516186714172, "alphanum_fraction": 0.8064516186714172, "avg_line_length": 30, "blob_id": "ea4f43a1e6eaee339b0d66d2d7f7fd862be640b6", "content_id": "9bb6ca056b8debd5b6188be3054cc457274175f1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 62, "license_type": "no_license", "max_line_length": 45, "num_lines": 2, "path": "/README.md", "repo_name": "fl1d/sudoku-solver", "src_encoding": "UTF-8", "text": "# sudoku-solver\na sudoku solver with simple gui using tkinter\n" }, { "alpha_fraction": 0.4506314694881439, "alphanum_fraction": 0.48029085993766785, "avg_line_length": 33.156864166259766, "blob_id": "6c357d7770a0780ce35bca191cec276cf7a95c51", "content_id": "43f2505f9dfca16accf5771c4c74b47704362e4c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5226, "license_type": "no_license", "max_line_length": 98, "num_lines": 153, "path": "/sudoku.py", "repo_name": "fl1d/sudoku-solver", "src_encoding": "UTF-8", "text": "import tkinter as tk\n\n\nclass solver:\n def __init__(self, master=None):\n self.solutions = []\n self.solution_idx = 0\n # window\n if not master:\n master = tk.Tk()\n self.master = master\n master.title('Sodoku Solver')\n master.resizable(width=False, height=False)\n\n # entries\n self.entries = []\n for row in range(9):\n c = []\n for col in range(9):\n en = tk.Entry(master, width=2)\n en.grid(row=row, column=col)\n c.append(en)\n self.entries.append(c)\n\n # buttions\n # solve button\n self.solve_btn = tk.Button(master, text=\"Solve\", command=self.get_solution)\n self.solve_btn.grid(row=0, column=9)\n # close button\n self.close_btn = tk.Button(master, text=\"Close\", command=master.quit)\n self.close_btn.grid(row=8, column=9)\n # next solution buttion\n self.next_btn = tk.Button(master, text='Next solution', command=self.show_next_solution)\n self.next_btn['state'] = 'disable'\n self.next_btn.grid(row=1, 
column=9)\n # clear button\n self.clear_btn = tk.Button(master, text='Clear', command=self.clear_entries)\n self.clear_btn.grid(row=7, column=9)\n\n # label\n self.label = tk.Label(master, text='<=Enter puzzle')\n self.label.grid(row=4, column=9)\n # example puzzle\n self.set_example()\n\n def run(self):\n self.master.mainloop()\n\n def get_puzzle(self):\n puzzle = []\n for row in range(9):\n c = []\n for col in range(9):\n value = self.entries[row][col].get()\n if not value:\n value = '0'\n if not('0'<= value<='9') or len(value)>1:\n self.label['text'] = 'Input valid value!'\n self.label['fg'] = 'red'\n return None\n value = int(value)\n c.append(value)\n puzzle.append(c)\n return puzzle\n\n def sudoku(self, puzzle, unfill=None, solution=None):\n if unfill is None:\n unfill = [[x, y] for x in range(9) for y in range(9) if not puzzle[x][y]]\n if solution is None:\n solution = []\n if not unfill:\n return [puzzle]\n x, y = unfill[0]\n for n in range(1, 10):\n if n in puzzle[x] or n in [puzzle[x2][y] for x2 in range(9)]:\n continue\n x2, y2 = x // 3 * 3, y // 3 * 3\n if n in puzzle[x2][y2:y2 + 3] + puzzle[x2 + 1][y2:y2 + 3] + puzzle[x2 + 2][y2:y2 + 3]:\n continue\n # how to copy this list efficiently?????\n new_puzzle = [[puzzle[x2][y2] for y2 in range(9)] for x2 in range(9)]\n new_puzzle[x][y] = n\n r = self.sudoku(new_puzzle, unfill[1:], solution)\n if r != solution:\n solution.append(r[-1])\n return solution\n\n def set_example(self):\n p = [[9, 0, 6, 0, 7, 0, 4, 0, 3],\n [0, 0, 0, 4, 0, 0, 2, 0, 0],\n [0, 7, 0, 0, 2, 3, 0, 1, 0],\n [5, 0, 0, 0, 0, 0, 1, 0, 0],\n [0, 4, 0, 2, 0, 8, 0, 6, 0],\n [0, 0, 3, 0, 0, 0, 0, 0, 5],\n [0, 3, 0, 7, 0, 0, 0, 5, 0],\n [0, 0, 7, 0, 0, 5, 0, 0, 0],\n [4, 0, 5, 0, 1, 0, 7, 0, 8]]\n self.set_entries(p)\n\n def set_entries(self, values):\n for row in range(9):\n for col in range(9):\n self.entries[row][col].delete(0, 'end')\n self.entries[row][col].insert(0, str(values[row][col]))\n\n def entries_bg(self, p):\n for row in 
range(9):\n for col in range(9):\n if not p[row][col]:\n self.entries[row][col]['bg'] = 'yellow'\n\n def clear_entries(self):\n self.label['text'] = '<= Enter puzzle!'\n self.next_btn['state'] = 'disable'\n for row in range(9):\n for col in range(9):\n self.entries[row][col]['bg'] = 'white'\n self.entries[row][col].delete(0, 'end')\n\n def get_solution(self):\n self.next_btn['state'] = 'disable'\n self.label['fg'] = 'black'\n\n puzzle = self.get_puzzle()\n if not puzzle:\n return\n\n ss = self.sudoku(puzzle)\n self.solutions = ss\n if len(ss) == 0:\n self.label['text'] = 'No solution!'\n self.label['fg'] = 'red'\n else:\n self.entries_bg(puzzle)\n self.solution_idx = 0\n self.show_next_solution()\n if len(ss) == 1:\n self.label['text'] = '1 solution'\n else:\n self.next_btn['state'] = 'normal'\n self.label['text'] = '{} in total {}'.format(1, len(self.solutions))\n\n def show_next_solution(self):\n self.set_entries(self.solutions[self.solution_idx])\n self.label['text'] = '{} in total {}'.format(self.solution_idx + 1, len(self.solutions))\n self.solution_idx += 1\n if self.solution_idx == len(self.solutions):\n self.solution_idx = 0\n self.next_btn['state'] = 'disable'\n\n\ns = solver()\ns.run()\n" } ]
2
chetanmreddy/image-search-engine
https://github.com/chetanmreddy/image-search-engine
caeb945e8d256cae90ed79a17aa4d2b2280483ad
8191cdf9d2d071a43950f65a5e8d4d3f13d0d871
6a8b70347fc28948d6ef4b3a9e076f907a92ae56
refs/heads/master
2020-03-29T00:34:01.481737
2017-06-24T07:51:49
2017-06-24T07:51:49
94,635,169
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5257731676101685, "alphanum_fraction": 0.6134020686149597, "avg_line_length": 13.076923370361328, "blob_id": "5ad2af8c1aa9d724960ce75c164fc4157cc569d9", "content_id": "f3b4497a564a175bb256f1e01fa46802f7c5f115", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 194, "license_type": "no_license", "max_line_length": 35, "num_lines": 13, "path": "/test.py", "repo_name": "chetanmreddy/image-search-engine", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri Jun 9 11:43:58 2017\r\n\r\n@author: chetan\r\n\"\"\"\r\n\r\nimport cv2\r\nimport numpy as np\r\n\r\nimg=cv2.imread('input.jpeg')\r\ncv2.imshow('img',img)\r\ncv2.waitKey(0)" }, { "alpha_fraction": 0.6376811861991882, "alphanum_fraction": 0.6798418760299683, "avg_line_length": 23.299999237060547, "blob_id": "6c5c3d9224020ab3024baa0fdd56af842186f896", "content_id": "2a17706be83b8a6371fb124ef660da0442a5b925", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 759, "license_type": "no_license", "max_line_length": 57, "num_lines": 30, "path": "/threshold.py", "repo_name": "chetanmreddy/image-search-engine", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu Jun 8 11:48:53 2017\r\n\r\n@author: chetan\r\n\"\"\"\r\n\r\nimport argparse\r\nimport cv2\r\n \r\n\r\n \r\n# load the image and convert it to grayscale\r\nimage = cv2.imread('input.jpeg')\r\ngray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\r\n \r\n# initialize the list of threshold methods\r\nmethods = [\r\n\t(\"THRESH_BINARY\", cv2.THRESH_BINARY),\r\n\t(\"THRESH_BINARY_INV\", cv2.THRESH_BINARY_INV),\r\n\t(\"THRESH_TRUNC\", cv2.THRESH_TRUNC),\r\n\t(\"THRESH_TOZERO\", cv2.THRESH_TOZERO),\r\n\t(\"THRESH_TOZERO_INV\", cv2.THRESH_TOZERO_INV)]\r\n \r\n# loop over the threshold methods\r\nfor (threshName, threshMethod) in methods:\r\n\t# threshold the image and show it\r\n\t(T, 
thresh) = cv2.threshold(gray,200, 255, threshMethod)\r\n\tcv2.imshow(threshName, thresh)\r\n\tcv2.waitKey(0)\r\n" }, { "alpha_fraction": 0.8091602921485901, "alphanum_fraction": 0.8091602921485901, "avg_line_length": 64.5, "blob_id": "86d6142c162fa0e64c3730e1ef3fcf9c0f473d3c", "content_id": "794dedb3f3cc8a0a0c242010eb003a83096cdff5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 131, "license_type": "no_license", "max_line_length": 108, "num_lines": 2, "path": "/README.md", "repo_name": "chetanmreddy/image-search-engine", "src_encoding": "UTF-8", "text": "# Image-search-engine\nThis is a basic mechanism for implementing a image search engine.Here i have used OpenCV library and python.\n" }, { "alpha_fraction": 0.6094003319740295, "alphanum_fraction": 0.661264181137085, "avg_line_length": 19.34482765197754, "blob_id": "9e9906cc1292acfb68a1baff34050563a1bb186c", "content_id": "613406d49db7564cb96e7d40770c57c67be45c50", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 617, "license_type": "no_license", "max_line_length": 55, "num_lines": 29, "path": "/gray_hist.py", "repo_name": "chetanmreddy/image-search-engine", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu Jun 8 20:15:34 2017\r\n\r\n@author: chetan\r\n\"\"\"\r\n\r\n# import the necessary packages\r\nfrom matplotlib import pyplot as plt\r\nimport numpy as np\r\nimport cv2\r\n \r\n\r\n \r\n# load the image and show it\r\nimage = cv2.imread('input.jpeg')\r\ncv2.imshow(\"image\", image)\r\n\r\n\r\n# convert the image to grayscale and create a histogram\r\ngray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\r\ncv2.imshow(\"gray\", gray)\r\nhist = cv2.calcHist([gray], [0], None, [256], [0, 256])\r\nplt.figure()\r\nplt.title(\"Grayscale Histogram\")\r\nplt.xlabel(\"Bins\")\r\nplt.ylabel(\"# of Pixels\")\r\nplt.plot(hist)\r\nplt.xlim([0, 256])" }, { "alpha_fraction": 
0.5912508368492126, "alphanum_fraction": 0.6534518003463745, "avg_line_length": 29.17021369934082, "blob_id": "bf8e410a6cb892f19aae29cfdbef030ce61f9ede", "content_id": "a5440b331f74a1388398c18f69c7e5cd6be42ca6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1463, "license_type": "no_license", "max_line_length": 55, "num_lines": 47, "path": "/2d_hist.py", "repo_name": "chetanmreddy/image-search-engine", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri Jun 9 12:03:08 2017\r\n\r\n@author: chetan\r\n\"\"\"\r\nfrom matplotlib import pyplot as plt\r\nimport numpy as np\r\nimport cv2\r\n\r\nimage = cv2.imread('input.jpeg')\r\ncv2.imshow(\"image\", image)\r\nchans = cv2.split(image)\r\ncolors = (\"b\", \"g\", \"r\")\r\n# let's move on to 2D histograms -- I am reducing the\r\n# number of bins in the histogram from 256 to 32 so we\r\n# can better visualize the results\r\nfig = plt.figure()\r\n \r\n# plot a 2D color histogram for green and blue\r\nax = fig.add_subplot(331)\r\nhist = cv2.calcHist([chans[1], chans[0]], [0, 1], None,\r\n\t[32, 32], [0, 256, 0, 256])\r\np = ax.imshow(hist, interpolation = \"nearest\")\r\nax.set_title(\"2D Color Histogram for Green and Blue\")\r\nplt.colorbar(p)\r\n \r\n# plot a 2D color histogram for green and red\r\nax = fig.add_subplot(332)\r\nhist = cv2.calcHist([chans[1], chans[2]], [0, 1], None,\r\n\t[32, 32], [0, 256, 0, 256])\r\np = ax.imshow(hist, interpolation = \"nearest\")\r\nax.set_title(\"2D Color Histogram for Green and Red\")\r\nplt.colorbar(p)\r\n \r\n# plot a 2D color histogram for blue and red\r\nax = fig.add_subplot(333)\r\nhist = cv2.calcHist([chans[0], chans[2]], [0, 1], None,\r\n\t[32, 32], [0, 256, 0, 256])\r\np = ax.imshow(hist, interpolation = \"nearest\")\r\nax.set_title(\"2D Color Histogram for Blue and Red\")\r\nplt.colorbar(p)\r\n \r\n# finally, let's examine the dimensionality of one of\r\n# the 2D histograms\r\nprint 
(\"2D histogram shape: %s, with %d values\" % (\r\n\thist.shape, hist.flatten().shape[0]))" } ]
5
artur1salnik/online_store
https://github.com/artur1salnik/online_store
a51763fa44b83ea2581646da2b7ff65921abb179
2dd9a651b2d107481f19164b990e8ca2626bdbc0
5e56aaa1a323451f8854f7e219e6665833e4b759
refs/heads/master
2022-12-30T21:20:44.393998
2020-10-22T19:25:39
2020-10-22T19:25:39
273,473,468
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5270141959190369, "alphanum_fraction": 0.5327014327049255, "avg_line_length": 38.8301887512207, "blob_id": "d02192b41951f64fbd82dc7702aa1d8ad7f58b0e", "content_id": "fe6b286516101d1de31fa6674d6d5297ccfe64c8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2110, "license_type": "no_license", "max_line_length": 96, "num_lines": 53, "path": "/shop/views.py", "repo_name": "artur1salnik/online_store", "src_encoding": "UTF-8", "text": "from django.shortcuts import render, get_object_or_404\nfrom .models import Category, Product\nfrom cart.forms import CartAddProductForm\nfrom django.core.paginator import Paginator\n\n\ndef landing(request):\n return render(request, 'shop/main_page.html')\n\n\ndef product_list(request, category_slug=None):\n search_query = request.GET.get('search', '')\n if search_query:\n p_list = Product.objects.filter(name__icontains=search_query)\n else:\n p_list = Product.objects.all()\n\n category = None\n categories = Category.objects.all()\n products = Product.objects.filter(available=True)\n if category_slug:\n category = get_object_or_404(Category, slug=category_slug)\n p_list = products.filter(category=category)\n paginator = Paginator(p_list, 20)\n page_number = request.GET.get('page', 1)\n page = paginator.get_page(page_number)\n is_paginated = page.has_other_pages()\n if page.has_previous():\n prev_url = '?page={}'.format(page.previous_page_number())\n else:\n prev_url = ''\n\n if page.has_next():\n next_url = '?page={}'.format(page.next_page_number())\n else:\n next_url = ''\n\n\n return render(request, 'shop/product/list.html', {'category': category,\n 'categories': categories,\n 'products': products,\n 'page_object': page,\n 'is_paginated': is_paginated,\n 'next_url': next_url,\n 'prev_url': prev_url\n })\n\n\ndef product_detail(request, id, slug):\n product = get_object_or_404(Product, id=id, slug=slug, available=True)\n cart_product_form = 
CartAddProductForm()\n return render(request, 'shop/product/detail.html', {'product': product,\n 'cart_product_form': cart_product_form})" }, { "alpha_fraction": 0.6586901545524597, "alphanum_fraction": 0.6681360006332397, "avg_line_length": 35.953487396240234, "blob_id": "8781809742b42b2bfb4cee4b0604437ad2f6c781", "content_id": "2a85bfc5b48cea83d96b5b1cb79a2f79ccb9340b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1700, "license_type": "no_license", "max_line_length": 131, "num_lines": 43, "path": "/shop/models.py", "repo_name": "artur1salnik/online_store", "src_encoding": "UTF-8", "text": "from django.db import models\nfrom django.urls import reverse\n\n\nclass Category(models.Model):\n name = models.CharField(\"Категория\", max_length=150, db_index=True)\n slug = models.SlugField(\"URL\", max_length=150, db_index=True, unique=True)\n\n class Meta:\n ordering = ('name',)\n verbose_name = 'Категория'\n verbose_name_plural = 'Категории'\n\n def __str__(self):\n return self.name\n\n def get_absolute_url(self):\n return reverse('shop:product_list_by_category', args=[self.slug])\n\n\nclass Product(models.Model):\n category = models.ForeignKey(Category, related_name='products', verbose_name=\"Категория\", on_delete=models.SET_NULL, null=True)\n name = models.CharField(\"Название\", max_length=200, db_index=True)\n slug = models.SlugField(\"URL\", max_length=200, db_index=True)\n description = models.TextField(\"Описание\")\n price = models.DecimalField(\"Цена\", max_digits=10, decimal_places=2)\n image = models.ImageField(\"Фото\", upload_to='media/products/', blank=True)\n stock = models.PositiveIntegerField(\"Склад\")\n available = models.BooleanField(\"Активный\", default=True)\n created = models.DateTimeField(\"Дата добавления\", auto_now_add=True)\n updated = models.DateTimeField(\"Дата обновления\", auto_now=True)\n\n class Meta:\n ordering = ('name',)\n index_together = (('id', 'slug'),)\n 
verbose_name = 'Товар'\n verbose_name_plural = 'Товары'\n\n def __str__(self):\n return self.name\n\n def get_absolute_url(self):\n return reverse('shop:product_detail', args=[self.id, self.slug])" }, { "alpha_fraction": 0.5534664988517761, "alphanum_fraction": 0.564629852771759, "avg_line_length": 36, "blob_id": "c3ed7b33e8d3fdeed5d6d42c2a5824a613b126f8", "content_id": "5b9b07bd5cb0ab32bd5a0beb91ba96482aa31c44", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1750, "license_type": "no_license", "max_line_length": 164, "num_lines": 46, "path": "/shop/migrations/0003_auto_20200515_1406.py", "repo_name": "artur1salnik/online_store", "src_encoding": "UTF-8", "text": "# Generated by Django 3.0.6 on 2020-05-15 14:06\n\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('shop', '0002_product_available'),\n ]\n\n operations = [\n migrations.AlterModelOptions(\n name='product',\n options={'ordering': ('name',), 'verbose_name': 'Товар', 'verbose_name_plural': 'Товары'},\n ),\n migrations.RemoveField(\n model_name='product',\n name='image',\n ),\n migrations.AlterField(\n model_name='product',\n name='category',\n field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='products', to='shop.Category', verbose_name='Категория'),\n ),\n migrations.AlterField(\n model_name='product',\n name='description',\n field=models.TextField(verbose_name='Описание'),\n ),\n migrations.CreateModel(\n name='ProductImage',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('image', models.ImageField(upload_to='media_products/')),\n ('is_main', models.BooleanField(default=False)),\n ('is_active', models.BooleanField(default=True)),\n ('product', models.ForeignKey(blank=True, default=None, null=True, on_delete=django.db.models.deletion.CASCADE, 
to='shop.Product')),\n ],\n options={\n 'verbose_name': 'Фотография',\n 'verbose_name_plural': 'Фотографии',\n },\n ),\n ]\n" }, { "alpha_fraction": 0.5461346507072449, "alphanum_fraction": 0.5935162305831909, "avg_line_length": 21.27777862548828, "blob_id": "b3bc972f8d988fae2a7bb882c3a71b2cc89251da", "content_id": "64ea91ab9178b65b483baf805df6d2a4e11cdae0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 409, "license_type": "no_license", "max_line_length": 77, "num_lines": 18, "path": "/shop/migrations/0002_product_available.py", "repo_name": "artur1salnik/online_store", "src_encoding": "UTF-8", "text": "# Generated by Django 3.0.6 on 2020-05-14 12:36\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('shop', '0001_initial'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='product',\n name='available',\n field=models.BooleanField(default=True, verbose_name='Активный'),\n ),\n ]\n" }, { "alpha_fraction": 0.5978260636329651, "alphanum_fraction": 0.6086956262588501, "avg_line_length": 24.18181800842285, "blob_id": "65a46f564221370efc435fa601e952dfec298e58", "content_id": "9daf56e4637621837ba85eac495ba0e0c8c1f1dc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "HTML", "length_bytes": 329, "license_type": "no_license", "max_line_length": 53, "num_lines": 11, "path": "/templates/orders/order/created.html", "repo_name": "artur1salnik/online_store", "src_encoding": "UTF-8", "text": "{% extends \"shop/base.html\" %}\n\n{% block title %}Спасибо!{% endblock %}\n\n{% block content %}\n<div class=\"container text-center mt-3\">\n <h1>Спасибо!</h1>\n <p>Вы успешно оформили заказ. Номер вашего заказа\n <strong>{{ order.id }}</strong>.</p>\n</div>\n{% endblock %}" } ]
5
mancanfly-1/test_hv6
https://github.com/mancanfly-1/test_hv6
033d616133dc4e1c5795fbd667e225d23ebe9e6d
f2c928cf5336c81e9673c570f7ef84e489acd0b2
9f60a2bb132dfe7240cf41aea5de8c8531f412bc
refs/heads/master
2020-03-16T05:39:19.148011
2018-05-09T02:53:50
2018-05-09T02:53:50
132,537,390
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5935483574867249, "alphanum_fraction": 0.6116129159927368, "avg_line_length": 13.351851463317871, "blob_id": "e83eb758df357670fe71d9ecf7bdcb988ece82c5", "content_id": "4e0fd97752500a91b0185baa7d5ea011bd1f6646", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 775, "license_type": "no_license", "max_line_length": 48, "num_lines": 54, "path": "/counter.c", "repo_name": "mancanfly-1/test_hv6", "src_encoding": "UTF-8", "text": "#include <stdio.h>\n#include <limits.h>\n#include <string.h>\n\n#define BOOL int\n#define true 1\n#define false 0\n\n#define bool BOOL\n\nstatic unsigned int counter = 0;\n\nbool iszero()\n{\n\treturn counter >0 ? true : false;\t\n}\n\nint inc()\n{\n\tif(counter >= UINT_MAX){\n\t\treturn -1;\n\t}\n\t//printf (\"%d\", UINT_MAX);\n\tcounter +=1;\n\treturn 0;\n}\n\nunsigned int dec()\n{\n\t/*\n\tif(counter != 0){\n\t\tcounter -= 1;\n\t}\n\treturn counter;*/\n\tif(counter < 1){\n\t\treturn -1;\n\t}\n\tcounter -=1;\n\treturn 0;\n}\n\nint main(int arg, char **args)\n{\n\tprintf(\"do increase...\\n\");\n\tinc();\n\tprintf(\"current counter value: %d\\n\", counter);\n\tprintf(\"do decreas...\\n\");\n\tdec();\n\tprintf(\"current counter value: %d\\n\", counter);\n\tprintf(\"do iszero...\\n\");\n\tiszero();\n\tprintf(\"current counter value: %d\\n\", counter);\n\treturn 0;\n}\n" }, { "alpha_fraction": 0.6722407937049866, "alphanum_fraction": 0.6856187582015991, "avg_line_length": 26.090909957885742, "blob_id": "a4b1ed62d6d2c2e38ecf1363866cca25a3930e2e", "content_id": "55ef4d648de62edcc6fc62140ce29cdc74000a37", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 299, "license_type": "no_license", "max_line_length": 57, "num_lines": 11, "path": "/invariants.py", "repo_name": "mancanfly-1/test_hv6", "src_encoding": "UTF-8", "text": "import z3\nfrom libirpy import util\nimport datatype.datatypes as dt\n\n# only a invariant, counter 
>= 0 && counter < dt.MAX_UNIT\ndef impl_invariants_py(ctx):\n\tconj = []\n\tconj.append(z3.And(\n\t\tutil.global_value(ctx, '@counter') >= 0, \n\t\tutil.global_value(ctx, '@counter') <= dt.MAX_UINT))\n\treturn conj\n\n" }, { "alpha_fraction": 0.6053571701049805, "alphanum_fraction": 0.6196428537368774, "avg_line_length": 27, "blob_id": "adc8ebb01f6335383286346683459ac47748af36", "content_id": "4c516e1935dce5cc5c75aad1c7469ccb7a901d17", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1120, "license_type": "no_license", "max_line_length": 127, "num_lines": 40, "path": "/README.md", "repo_name": "mancanfly-1/test_hv6", "src_encoding": "UTF-8", "text": "# test_hv6\n create a simple Counter case according to hv6\n\n## how to run\n\nI test the test_hv6 at the same environment with hv6\n\nTo compile:\n must run compile first, it includes compile .c --> .ll , then compose any .ll --> hv6.ll, at end, translating\n hv6.ll --> hv6.py using irpy which provided by hv6.\n \n `make`\n \nTo run verfiy:(must complie first)\n running testcase in main.py. it verifies 3 test cases which include increase, decrease and iszero. 
it verifies the equals \n between statemachine and lowwer code(behavior as contex object in python), and add a invariant property(counter never < 0 )\n \n `make verify`\n \n to run app\n \n `make app`\n \n## file structure\n \n- test_hv6\n - counter.c // source code\n - invariant.py // as you see\n - main.py // test case\n - specs.py // specification of three funcation\n - irpy // copy from hv6\n \n- x86_64 // build dir\n \n \n- script // clear undefined behavior in llvm ir.\n\n- datatype // define our state machine.\n\n- libirpy // as the name defined.\n" }, { "alpha_fraction": 0.5862436890602112, "alphanum_fraction": 0.5921087861061096, "avg_line_length": 29.729507446289062, "blob_id": "5efd776d20fd3fa640edd1b52ac1d065b2a1e8a6", "content_id": "5a5dfd38458c187aee32686ae8387f2d122acff8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3751, "license_type": "no_license", "max_line_length": 91, "num_lines": 122, "path": "/main.py", "repo_name": "mancanfly-1/test_hv6", "src_encoding": "UTF-8", "text": "import sys\nimport argparse\nimport copy\nimport unittest\nimport inspect\n\nimport libirpy\nimport libirpy.util as util\nimport libirpy.solver as solver\nimport libirpy.ex as ex\nimport libirpy.itypes as itypes\nfrom libirpy.datatypes import BitcastPointer\n\nimport counter\n\n#import syscall_spec\nimport datatype.datatypes as dt\nimport z3\nimport specs as spec\nimport invariants as inv\nSolver = solver.Solver\nINTERACTIVE = False\nclass BaseTest(unittest.TestCase):\n def _prove(self, cond, pre=None, return_model=False, minimize=True):\n if minimize:\n self.solver.push()\n self.solver.add(z3.Not(cond))\n\n res = self.solver.check()\n if res != z3.unsat:\n if not minimize and not return_model:\n self.assertEquals(res, z3.unsat)\n\n model = self.solver.model()\n if return_model:\n return model\n\n print \"Could not prove, trying to find a minimal ce\"\n assert res != z3.unknown\n if z3.is_and(cond):\n 
self.solver.pop()\n # Condition is a conjunction of some child clauses.\n # See if we can find something smaller that is sat.\n\n if pre is not None:\n ccond = sorted(\n zip(cond.children(), pre.children()), key=lambda x: len(str(x[0])))\n else:\n ccond = sorted(cond.children(), key=lambda x: len(str(x)))\n\n for i in ccond:\n self.solver.push()\n if isinstance(i, tuple):\n self._prove(i[0], pre=i[1])\n else:\n self._prove(i)\n self.solver.pop()\n\n print \"Can not minimize condition further\"\n if pre is not None:\n print \"Precondition\"\n print pre\n print \"does not imply\"\n print cond\n self.assertEquals(model, [])\n\ndef newctx():\n ctx = libirpy.newctx()\n # If we don't need the values of any constants we don't have to\n # initialize them, slightly faster execution time.\n ctx.eval.declare_global_constant = ctx.eval.declare_global_variable\n libirpy.initctx(ctx, counter)\n\n return ctx\n\n\nclass DetailTest(BaseTest):\n\tdef setUp(self):\n\t\t# init LLVM IR context. \n\t\tself.ctx = newctx()\n\t\t#define counter machine state of ourself\n\t\tself.state = dt.CounterState()\n\t\t# instance z3 solver.\n\t\tself.solver = Solver()\n\t\tself.solver.set(AUTO_CONFIG=False)\n\t\t# current ctx state and machine state is equal?\t\t\n\t\tself._pre_state = spec.state_equiv(self.ctx, self.state)\n\n\t\t# we should add our invariants to context.\n\t\tself.ctx.add_assumption(inv.impl_invariants_py(self.ctx))\n\t\t\n\t\t# take the condition pre state equal to solver.\n\t\tself.solver.add(self._pre_state)\n\tdef tearDown(self):\n\t\tif isinstance(self.solver, solver.Solver):\n\t\t\tdel self.solver\n\t'''def test_test1(self):\n\t\tprint \"testOne\"'''\n\tdef _general_test(self, call_name):\n\t\tprint \"starting test_{}....\".format(call_name)\n\t\targs = ()\n\t\tres = self.ctx.call('@' + call_name, *args)\n\t\tcond, newstate = getattr(spec, call_name)(self.state, *args)\n\t\t#print z3.And(spec.state_equiv(self.ctx, newstate), cond == (res == util.i32(0)))\n\t\tmodel = 
self._prove(z3.And(spec.state_equiv(self.ctx, newstate),\n cond == (res == util.i32(0))),\n pre=z3.And(self._pre_state, z3.BoolVal(True)),\n return_model=INTERACTIVE)\t\t\n\tdef test_inc(self):\n\t\tself._general_test('inc')\n\t\t\n\tdef test_dec(self):\n\t\tself._general_test('dec')\n\t\t\n\tdef test_iszero(self):\n\t\tself._general_test('iszero')\n\t\t\n\nif __name__ == \"__main__\":\n\t#t = child()\n\t#print t['a'][1]\n\tunittest.main()\n\n\n" }, { "alpha_fraction": 0.5625473856925964, "alphanum_fraction": 0.5739196538925171, "avg_line_length": 23.867923736572266, "blob_id": "631f91aef69f24a3529aaea1fc2ca09c26340567", "content_id": "c1ce7ce27ca83088d7597919c6672b7075e5d317", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Makefile", "length_bytes": 1319, "license_type": "no_license", "max_line_length": 83, "num_lines": 53, "path": "/Makefile", "repo_name": "mancanfly-1/test_hv6", "src_encoding": "UTF-8", "text": "O\t\t?= x86_64\nCLANGPATH\t?= \"/usr/lib/llvm-5.0/bin/\"\nCFLAGS\t\t+= -fno-PIE\nCFLAGS\t\t+= -ffreestanding -MD -MP\nCFLAGS\t\t+= -Wall\nCFLAGS\t\t+= -g\nCFLAGS\t\t+= -mno-red-zone\nCFLAGS\t\t+= -mno-sse -mno-mmx -mno-sse2 -mno-3dnow -mno-avx\n# CFLAGS += -DCOMMIT_HASH=$(shell git rev-parse --short --verify HEAD)\n# CFLAGS += -DCOMMIT_BRANCH=$(shell git rev-parse --abbrev-ref --verify HEAD)\nCFLAGS\t\t+= -I include -I $(O)/include\nNR_CPUS\t\t= 1\nMKDIR_P\t\t:= mkdir -p\n\nKERNEL_CFLAGS = $(CFLAGS) -DNR_CPUS=$(NR_CPUS) -fwrapv -I include\n\nLLS\t\t:=\t\t\\\n\t\t$(O)/counter.ll\t\\\n\n# generate .ll from .c\n$(O)/%.ll: %.c\n\t$(MKDIR_P) $(@D)\n\t$(CLANGPATH)clang -o $@~ -c -S -emit-llvm $(KERNEL_CFLAGS) -O2 $<\n\t./script/no_undef.sh $@~\n\tmv $@~ $@\n\t@echo 'convert .c --> .ll done!'\t\n\n$(O)/%.py: $(O)/%.ll\n\t$(Q)$(MKDIR_P) $(@D)\n\t@touch $(join $(dir $@), __init__.py)\n\t./irpy \"$<\" > \"$@\"\n\t@echo 'convert .ll --> $@ done!'\n\t\nall: $(LLS) $(O)/counter.py\n\tcp ./main.py ./specs.py ./irpy ./$(O)\n\tcp 
./datatype -rf ./$(O)\n\tcp ./libirpy -rf ./$(O) \n\tcp ./invariants.py ./$(O)\n\t@echo 'copy files to build dir done!'\n\nverify:\n\tpython2 $(O)/main.py\n\t@echo 'executing test case done!'\n\t\n\napp: \n\tllvm-as-5.0 $(O)/counter.ll -o counter.bc\n\tlli-5.0 counter.bc\n#\t$(CLANGPATH)clang -c counter.c -o counter.o\n#\t$(CLANGPATH)clang -o counter counter.o\n\nclean:\n\trm -rf $(O)\n\n" }, { "alpha_fraction": 0.6129032373428345, "alphanum_fraction": 0.6290322542190552, "avg_line_length": 19.66666603088379, "blob_id": "af7b06b8fd7f2302fe88e8b3e426c9d3a4245a00", "content_id": "caa09761a7da20f47ef7ff725e4f647b38387d94", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 620, "license_type": "no_license", "max_line_length": 65, "num_lines": 30, "path": "/specs.py", "repo_name": "mancanfly-1/test_hv6", "src_encoding": "UTF-8", "text": "import z3\nfrom libirpy import util\nimport datatype.datatypes as dt\n\n\n#specification\n\n \ndef inc(old):\n cond = z3.ULT(old.counter, dt.MAX_UINT)\n new = old.copy()\n new.counter += 1\n return cond, util.If(cond, new, old)\n\ndef dec(old):\n cond = z3.UGT(old.counter, 0)\n new = old.copy()\n new.counter -= 1\n return cond, util.If(cond, new, old)\n\ndef iszero(old):\n if old.counter == 0:\n return z3.BoolVal(True), old\n else:\n return z3.BoolVal(False), old\n\ndef state_equiv(ctx, state):\n\tconj = []\n\tconj.append(state.counter == util.global_value(ctx, '@counter'))\n\treturn z3.And(*conj)\n" }, { "alpha_fraction": 0.5733202695846558, "alphanum_fraction": 0.6061794757843018, "avg_line_length": 26.1733341217041, "blob_id": "dd702dde40a9e30c93e12d14ed5258f6e654e681", "content_id": "912e3e4574ea0ac0133040890368b9fc8f90da88", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2039, "license_type": "no_license", "max_line_length": 83, "num_lines": 75, "path": "/datatype/datatypes.py", "repo_name": "mancanfly-1/test_hv6", 
"src_encoding": "UTF-8", "text": "import sys\nimport z3\nimport copy\n\nimport libirpy\nfrom libirpy import util\nfrom base import BaseStruct, Struct, Map, Refcnt, Refcnt2\n\n\ndef _populate_enums():\n module = sys.modules[__name__]\n ctx = libirpy.newctx()\n import counter\n counter._init_metadata(ctx)\n for k, v in ctx.metadata.items():\n if isinstance(v, tuple) and v[0] == 'DICompositeType':\n if v[1].get('tag') == 'DW_TAG_enumeration_type':\n name = v[1].get('name')\n size = v[1].get('size')\n elements = v[1].get('elements')\n\n if name is None or size is None or elements is None:\n continue\n\n setattr(module, name + '_t', z3.BitVecSort(size))\n enum = {}\n\n for element in ctx.metadata.get(elements):\n element = ctx.metadata.get(element)\n assert element[0] == 'DIEnumerator'\n element_name = element[1].get('name')\n element_value = element[1].get('value')\n enum[element_name] = z3.BitVecVal(element_value, size)\n\n setattr(module, name, type(name, (), enum))\n\n\n# These are populated from llvm metadata info\npage_type_t = None\nfile_type_t = None\nproc_state_t = None\nintremap_state_t = None\n\n\n# Fetch the enums from the llvm metadata and populate this module with their values\n_populate_enums()\n\n\n#assert page_type_t is not None\n#assert file_type_t is not None\n#assert proc_state_t is not None\n\nbool_t = z3.BoolSort()\n\nsize_t = z3.BitVecSort(64)\nuint64_t = z3.BitVecSort(64)\nuint32_t = z3.BitVecSort(32)\nuint16_t = z3.BitVecSort(16)\nuint8_t = z3.BitVecSort(8)\n\nssize_t = z3.BitVecSort(64)\nint64_t = z3.BitVecSort(64)\nint32_t = z3.BitVecSort(32)\nint16_t = z3.BitVecSort(16)\nint8_t = z3.BitVecSort(8)\nint = int32_t\nMAX_UINT = z3.BitVecVal(100,32)\n\"\"\"\nGlobal machine state\n\"\"\"\nclass CounterState(object):\n\tdef __init__(self): \t\n \tself.counter = z3.BitVecVal(0,32)\n\tdef copy(self):\n\t\treturn copy.deepcopy(self)\n\n" } ]
7
evacuati/QuizApp
https://github.com/evacuati/QuizApp
89e7f8c312f87f00cbdb154bd97a96409ad96a49
d8cc570493bb8838e832352626c978501d8f7381
b759caecd1b1a470edd0c407fe7f6a5c8c1fd235
refs/heads/master
2023-03-30T22:35:35.622483
2021-04-01T21:01:10
2021-04-01T21:01:10
283,489,670
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.759929895401001, "alphanum_fraction": 0.7622663378715515, "avg_line_length": 49.35293960571289, "blob_id": "6323f001319f4a4c8bdac67b32e370235fc9171f", "content_id": "17b98606413d42c0594134e9b80383dd684c30c4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1712, "license_type": "no_license", "max_line_length": 246, "num_lines": 34, "path": "/README.md", "repo_name": "evacuati/QuizApp", "src_encoding": "UTF-8", "text": "### TODO: Change README.md before exposing the project\n\n# Git fundamentals:\n\n1. Master branch should be the latest stable (tested and working properly) version of the project!\n2. The idea of branches is so that the developers (Karo & Maja) can work parallelley without interrupting each other or easily revert changes if mistakes were made.\nWorkflow:\n* create a branch (copy the state of the master branch)\n* add new features/changes-> test them if everything works fine\n* once you're sure the task is done, works properly and doesn't destroy anything previously done, start a merge/pull request\n* ask your collaborator to test your changes and approve\n```python\ncollaborator = \"Maja\" if my_name()==\"Karolina\" else \"Karolna\"\n```\n**Warning** - you might experience a merge conflict (TODO: Learn what it is, when one could occur, how to resolve).\n3. The green button on GitHub interface exposes https or ssh address which is used to clone the repository and initialise git on your device - go to the directory where you want to have your project and type the following command using a terminal\n```bash\ngit clone {github_url}\n```\nwhen you're in the directory with the project, you can use git commands, for instance:\n```bash\ngit status\n```\n4. 
How to upload (TODO: Learn the relevant commands):\n* Create a branch and switch (use self-explanatory names on what you are going to do)\n* Work on your changes (do the coding)\n* Add files to git\n* Commit the changes (this kinda saves them so you can go back to work from another device)\n* Push the changes\n* Create a merge/pull request\n* Merge to the master branch\n\n\nIf you have any questions, use the Google search. If this doesn't help, you can ask your buddy wiksla :)\n" }, { "alpha_fraction": 0.6816244721412659, "alphanum_fraction": 0.6846327185630798, "avg_line_length": 35.59174346923828, "blob_id": "404af3205b0d35efed5f5ddb85a5a07ac5117e72", "content_id": "9d8c8437409768d47d9a4cd9c0ed424cbc70a5d3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 7978, "license_type": "no_license", "max_line_length": 84, "num_lines": 218, "path": "/script.js", "repo_name": "evacuati/QuizApp", "src_encoding": "UTF-8", "text": "function game() {\n\tlistOfQuestions = createList();\n\tlistOfIndexes = []\n\tnumber = listOfQuestions[0]\n\tmaxPoints = 10\n\tmyPoints = 0\n\tmaxQuestions = 10\n\tmyQuestions = 1\n\tcreateList();\n\tgetQuestion();\n\tgetAnswers();\n\tdocument.getElementById(\"whereAmI\").innerHTML = myQuestions + \"/\" + maxQuestions;\n\tdocument.getElementById(\"score\").innerHTML = myPoints + \"/\" + maxPoints\n\tdocument.getElementById(\"A\").onclick = () => checkA();\n\tdocument.getElementById(\"B\").onclick = () => checkB();\n\tdocument.getElementById(\"C\").onclick = () => checkC();\n\tdocument.getElementById(\"D\").onclick = () => checkD();\n\tdocument.getElementById(\"next\").onclick = () => next();\n\tquiz();\n}\n\nlet listOfQuestions = createList();\nlet listOfIndexes = []\nlet number = listOfQuestions[0]\nlet maxPoints = 10\nlet myPoints = 0\nlet maxQuestions = 10\nlet myQuestions = 1\n\nfunction shuffle(listOfIndexes) {\n\tfor (let i = listOfIndexes.length - 1; i > 0; i--) {\n \tlet j = 
Math.floor(Math.random() * (i + 1));\n \t[listOfIndexes[i], listOfIndexes[j]] = [listOfIndexes[j], listOfIndexes[i]];\n\t}\n}\n\nfunction createList() {\t\n\tlet listOfIndexes = []\n\tlet listOfQuestions = []\n\tfor (i=0; i < questions.length; i++) {\n\t\tlistOfIndexes.push(i)\n\t}\t\n\tshuffle(listOfIndexes);\n\tfor(i=0; i<10; i++) {\n\t\tlistOfQuestions.push(listOfIndexes[i])\n\t}\n\treturn listOfQuestions;\n}\n\nfunction getIndex(){\n\tlet number = listOfQuestions[0]\n}\t\n\nfunction getQuestion(number) {\n\treturn(questions[number]);\n}\nfunction getAnswers(number) {\n\treturn(answers[number]);\n}\nfunction checkA(number, values) {\n\tdocument.getElementById(\"A\").disabled = true;\n\tdocument.getElementById(\"B\").disabled = true;\n\tdocument.getElementById(\"C\").disabled = true;\n\tdocument.getElementById(\"D\").disabled = true;\n\tif (values[number].A === true) {\n \t\tdocument.getElementById(\"userAnswer\").innerHTML = \"You're right!\";\n \t\tdocument.getElementById(\"A\").style.outlineColor = \"green\";\n \t\tmyPoints++\n \t\tdocument.getElementById(\"score\").innerHTML = myPoints + \"/\" + maxPoints\n \t}\n else {\n \t\tdocument.getElementById(\"userAnswer\").innerHTML = \"You're wrong!\";\n \t\tdocument.getElementById(\"A\").style.outlineColor = \"red\";\n \t\tif (values[number].B === true) {\n \t\t\tdocument.getElementById(\"B\").style.outlineColor = \"green\";\n\t\t}\t\n\t\telse if (values[number].C === true) {\n \t\t\tdocument.getElementById(\"C\").style.outlineColor = \"green\";\n\t\t}\n\t\telse if (values[number].D === true) {\n \t\t\tdocument.getElementById(\"D\").style.outlineColor = \"green\";\n\t\t}\n \t}\n}\nfunction checkB(number, values) {\n\tdocument.getElementById(\"A\").disabled = true;\n\tdocument.getElementById(\"B\").disabled = true;\n\tdocument.getElementById(\"C\").disabled = true;\n\tdocument.getElementById(\"D\").disabled = true;\n\tif (values[number].B === true) {\n \t\tdocument.getElementById(\"userAnswer\").innerHTML = \"You're 
right!\";\n \t\tdocument.getElementById(\"B\").style.outlineColor = \"green\";\n \t\tmyPoints++\n \t\tdocument.getElementById(\"score\").innerHTML = myPoints + \"/\" + maxPoints\n \t}\n else {\n\t \tdocument.getElementById(\"userAnswer\").innerHTML = \"You're wrong!\";\n\t \tdocument.getElementById(\"B\").style.outlineColor = \"red\";\n\t \tif (values[number].A === true) {\n\t \t\tdocument.getElementById(\"A\").style.outlineColor = \"green\";\n\t\t}\t\n\t\telse if (values[number].C === true) {\n\t \t\tdocument.getElementById(\"C\").style.outlineColor = \"green\";\n\t\t}\n\t\telse if (values[number].D === true) {\n\t \t\tdocument.getElementById(\"D\").style.outlineColor = \"green\";\n\t\t}\n \t}\n}\nfunction checkC(number, values) {\n\tdocument.getElementById(\"A\").disabled = true;\n\tdocument.getElementById(\"B\").disabled = true;\n\tdocument.getElementById(\"C\").disabled = true;\n\tdocument.getElementById(\"D\").disabled = true;\n\tif (values[number].C === true) {\n \t\tdocument.getElementById(\"userAnswer\").innerHTML = \"You're right!\";\n \t\tdocument.getElementById(\"C\").style.outlineColor = \"green\";\n \t\tmyPoints++\n \t\tdocument.getElementById(\"score\").innerHTML = myPoints + \"/\" + maxPoints\n \t}\n else {\n \t\tdocument.getElementById(\"userAnswer\").innerHTML = \"You're wrong!\";\n \t\tdocument.getElementById(\"C\").style.outlineColor = \"red\";\n \t\tif (values[number].A === true) {\n \t\t\t\tdocument.getElementById(\"A\").style.outlineColor = \"green\";\n\t\t}\t\n\t\telse if (values[number].B === true) {\n\t \t\t\tdocument.getElementById(\"B\").style.outlineColor = \"green\";\n\t\t}\n\t\telse if (values[number].D === true) {\n\t \t\t\tdocument.getElementById(\"D\").style.outlineColor = \"green\";\n\t\t}\n \t}\t\n}\nfunction checkD(number, values) {\n\tdocument.getElementById(\"A\").disabled = true;\n\tdocument.getElementById(\"B\").disabled = true;\n\tdocument.getElementById(\"C\").disabled = true;\n\tdocument.getElementById(\"D\").disabled = 
true;\n\tif (values[number].D === true) {\n \t\tdocument.getElementById(\"userAnswer\").innerHTML = \"You're right!\";\n \t\tdocument.getElementById(\"D\").style.outlineColor = \"green\";\n \t\tmyPoints++\n \t\tdocument.getElementById(\"score\").innerHTML = myPoints + \"/\" + maxPoints\n \t}\n else {\n \t\tdocument.getElementById(\"userAnswer\").innerHTML = \"You're wrong!\";\n \t\tdocument.getElementById(\"D\").style.outlineColor = \"red\";\n \t\t if (values[number].A === true) {\n \t\t\tdocument.getElementById(\"A\").style.outlineColor = \"green\";\n\t\t}\t\n\t\telse if (values[number].B === true) {\n\t \t\tdocument.getElementById(\"B\").style.outlineColor = \"green\";\n\t\t}\n\t\telse if (values[number].C === true) {\n\t \t\t\tdocument.getElementById(\"C\").style.outlineColor = \"green\";\n\t\t}\n \t}\t\n}\ndocument.getElementById(\"start\").onclick = () => createList();\ndocument.getElementById(\"start\").onclick = () => shuffle(listOfIndexes);\ndocument.getElementById(\"start\").onclick = () => (quiz());\n\nfunction quiz() {\n\tdocument.getElementById(\"end-container\").style.display = \"none\";\n\tdocument.getElementById(\"quiz\").style.display = \"\";\n\tdocument.getElementById(\"start\").style.display = \"none\";\n\tdocument.getElementById(\"start-container\").style.display = \"none\";\n\tlet number = listOfQuestions.shift();\n\tgetIndex();\n\n\tdocument.getElementById(\"question\").innerHTML = (getQuestion(number)[\"question\"]);\n\n\tdocument.getElementById(\"A\").innerHTML = (getAnswers(number)[\"A\"]);\n\tdocument.getElementById(\"B\").innerHTML = (getAnswers(number)[\"B\"]);\n\tdocument.getElementById(\"C\").innerHTML = (getAnswers(number)[\"C\"]);\n\tdocument.getElementById(\"D\").innerHTML = (getAnswers(number)[\"D\"]);\t\n\n\tdocument.getElementById(\"A\").onclick = () => (checkA(number, values));\n\tdocument.getElementById(\"B\").onclick = () => (checkB(number, values));\n\tdocument.getElementById(\"C\").onclick = () => (checkC(number, 
values));\n\tdocument.getElementById(\"D\").onclick = () => (checkD(number, values));\n\n\tdocument.getElementById(\"next\").onclick = () => (next());\n} \n//end of quiz function\n\nfunction next() {\n \tif (myQuestions < 10) {\n \t\tdocument.getElementById(\"A\").disabled = false;\n\t\tdocument.getElementById(\"B\").disabled = false;\n\t\tdocument.getElementById(\"C\").disabled = false;\n\t\tdocument.getElementById(\"D\").disabled = false;\n\t \tdocument.getElementById(\"A\").style.outlineColor = \"\";\n\t \tdocument.getElementById(\"B\").style.outlineColor = \"\";\n\t \tdocument.getElementById(\"C\").style.outlineColor = \"\";\n\t \tdocument.getElementById(\"D\").style.outlineColor = \"\";\n\t \tdocument.getElementById(\"userAnswer\").innerHTML = \"What's your answer?\";\n\t \tmyQuestions++\n\t \tdocument.getElementById(\"whereAmI\").innerHTML = myQuestions + \"/\" + maxQuestions;\n\t\tquiz();\n \t}\n \telse {\n \t\tdocument.getElementById(\"quiz\").style.display = \"none\";\n \t\tdocument.getElementById(\"end-container\").style.display = \"\";\n \t\tdocument.getElementById(\"end-score\").innerHTML = myPoints + \"/\" + maxPoints\n \t\tdocument.getElementById(\"A\").disabled = false;\n\t\tdocument.getElementById(\"B\").disabled = false;\n\t\tdocument.getElementById(\"C\").disabled = false;\n\t\tdocument.getElementById(\"D\").disabled = false;\n\t\tdocument.getElementById(\"A\").style.outlineColor = \"\";\n\t \tdocument.getElementById(\"B\").style.outlineColor = \"\";\n\t \tdocument.getElementById(\"C\").style.outlineColor = \"\";\n\t \tdocument.getElementById(\"D\").style.outlineColor = \"\";\n\t \tdocument.getElementById(\"userAnswer\").innerHTML = \"What's your answer?\";\n \t\tdocument.getElementById(\"end\").onclick = () => (game());\n \t}\n } \n" }, { "alpha_fraction": 0.40959998965263367, "alphanum_fraction": 0.42719998955726624, "avg_line_length": 47.153846740722656, "blob_id": "8ccc1df958b8b69182c9c743df345309cdfe7292", "content_id": 
"9873b7ed9049552724eb72cfdfa71b1c3edca2ce", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 625, "license_type": "no_license", "max_line_length": 61, "num_lines": 13, "path": "/values.js", "repo_name": "evacuati/QuizApp", "src_encoding": "UTF-8", "text": "values =\n\t[\n\t\t{\"id\": 1, \"A\": true, \"B\": false, \"C\": false, \"D\": false,},\n\t\t{\"id\": 2, \"A\": false, \"B\": false, \"C\": true, \"D\": false,},\n\t\t{\"id\": 3, \"A\": true, \"B\": false, \"C\": false, \"D\": false,},\n\t\t{\"id\": 4, \"A\": false, \"B\": true, \"C\": false, \"D\": false,},\n\t\t{\"id\": 5, \"A\": false, \"B\": false, \"C\": false, \"D\": true,},\n\t\t{\"id\": 6, \"A\": false, \"B\": false, \"C\": true, \"D\": false,},\n\t\t{\"id\": 7, \"A\": false, \"B\": true, \"C\": false, \"D\": false,},\n\t\t{\"id\": 8, \"A\": true, \"B\": false, \"C\": false, \"D\": false,},\n\t\t{\"id\": 9, \"A\": false, \"B\": false, \"C\": false, \"D\": true,},\n\t\t{\"id\": 10, \"A\": false, \"B\": false, \"C\": true, \"D\": false,},\n\t]" }, { "alpha_fraction": 0.518305242061615, "alphanum_fraction": 0.5310571789741516, "avg_line_length": 32.2253532409668, "blob_id": "de26ec54f6d5396177e570b522fd09002adae9c5", "content_id": "27d575290f3dfcb64fb647337eaa0cba2374afbd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2431, "license_type": "no_license", "max_line_length": 95, "num_lines": 71, "path": "/quizzapp.py", "repo_name": "evacuati/QuizApp", "src_encoding": "UTF-8", "text": "user_points = []\r\n\r\n#Question 1\r\ndef question_1_1():\r\n print(\"Humans and chimpanzees share roughly how much DNA?\")\r\n print(\" A) 70% \\n B) 98% \\n C) 9% \\n D) 38%\")\r\n user_answer=input(\"Your answer: \")\r\n if user_answer==\"B\" or user_answer==\"b\" or user_answer== \"98%\" or user_answer== \"98\":\r\n user_points.append(1)\r\n print(\"You are right!\")\r\n else:\r\n print(\"You are wrong 
:c\")\r\n user_points.append(0)\r\n\r\n\r\ndef question_1_2():\r\n import os\r\n os.chdir(r'C:\\Users\\majab\\Desktop\\QUizapp\\QuizApp-questions\\QuizApp-questions\\questions')\r\n filename = open(\"1.txt\",\"r\")\r\n print(filename.read())\r\n os.chdir(r'C:\\Users\\majab\\Desktop\\QUizapp\\QuizApp-answers\\QuizApp-answers\\answers')\r\n cwd = os.getcwd()\r\n filename = open(\"1.txt\",\"r\")\r\n print(filename.read())\r\n filename = \"1.txt\"\r\n filename.split(\"/n\")\r\n with open(filename) as f_obj:\r\n answers = f_obj.read(1)\r\n answer_to_check = input(\"Your answer: \")\r\n if answer_to_check in answers:\r\n \r\n print(\"You are right!\")\r\n else:\r\n print(\"You are wrong :c\")\r\n\r\n\r\n\r\ndef whers_star():\r\n import os\r\n os.chdir(r'C:\\Users\\majab\\Desktop\\QUizapp\\QuizApp-questions\\QuizApp-questions\\questions')\r\n filename = open(\"1.txt\",\"r\")\r\n print(filename.read())\r\n os.chdir(r'C:\\Users\\majab\\Desktop\\QUizapp\\QuizApp-answers\\QuizApp-answers\\answers')\r\n filename = open(\"1.txt\",\"r\")\r\n print(filename.read())\r\n filename = \"1.txt\"\r\n filename.splitlines(\"/n\")\r\n with open(filename) as f_obj:\r\n answer_to_check = input(\"Your answer: \")\r\n if \"*\" in f_obj.readline():\r\n if answer_to_check in f_obj.readlines(1):\r\n print(\"You are right!\")\r\n else:\r\n print(\"You are wrong!\")\r\n elif \"*\" in f_obj.readlines(2):\r\n if answer_to_check in f_obj.readlines(2):\r\n print(\"You are right!\")\r\n else:\r\n print(\"You are wrong!\")\r\n elif \"*\" in f_obj.readlines(3):\r\n if answer_to_check in f_obj.readlines(3):\r\n print(\"You are right!\")\r\n else:\r\n print(\"You are wrong!\")\r\n else:\r\n if answer_to_check in f_obj.readlines(4):\r\n print(\"You are right!\")\r\n else:\r\n print(\"You are wrong!\")\r\n\r\nwhers_star() \r\n" } ]
4
marcioaug/mutant-context-jdolly
https://github.com/marcioaug/mutant-context-jdolly
9a175b0dd8d78183ebcba9ab619f72245ed4a198
680c7948c8fe9b2916b2a039d031d6cb926a9122
7fe24d377d2db826a869ddda89d23559d9e06292
refs/heads/master
2021-03-30T21:12:15.784336
2018-03-14T16:03:33
2018-03-14T16:03:33
124,832,689
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5443081259727478, "alphanum_fraction": 0.5455009937286377, "avg_line_length": 35.67499923706055, "blob_id": "af54acb376fd9ff45e4d5ac530b62edb996865bf", "content_id": "00908575403456711c793f5ce7ff5b0a4f24c3e8", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5868, "license_type": "permissive", "max_line_length": 129, "num_lines": 160, "path": "/gen/__main__.py", "repo_name": "marcioaug/mutant-context-jdolly", "src_encoding": "UTF-8", "text": "import sys\nimport os\nimport subprocess\nimport shutil\n\n\nINPUT_DIR = sys.argv[1]\nOUTPUT_DIR = sys.argv[2]\nSOOT_JAR = sys.argv[3]\n\ndef compile(classpath, package, java_file, dest_dir):\n classes_dir = os.path.join(dest_dir, 'build', 'classes')\n java_file_path = os.path.join(package, java_file)\n src_dir = os.path.join(dest_dir, 'src', package)\n \n if not os.path.exists(classes_dir):\n os.makedirs(classes_dir)\n\n if not os.path.exists(src_dir):\n os.makedirs(src_dir)\n\n shutil.copy(os.path.join(classpath, java_file_path), src_dir)\n\n mutant_dest = os.path.join(dest_dir, 'mutants', java_file_path.split('.')[0])\n\n if not os.path.exists(mutant_dest):\n os.makedirs(mutant_dest)\n\n command = [\n 'javac', \n '-XMutator:ALL', \n '-J-Dmajor.export.context=true', \n '-J-Dmajor.export.mutants=true', \n '-J-Dmajor.export.directory=' + os.path.join(mutant_dest, 'mutants'),\n '-cp', classpath, '-d', classes_dir, os.path.join(classpath, java_file_path)\n ]\n\n return subprocess.call(command, shell=False, cwd=mutant_dest, \n stdout=subprocess.DEVNULL, \n stderr=subprocess.DEVNULL)\n\n\ndef soot(process_dir):\n soot_jar = os.path.abspath(SOOT_JAR)\n command = ['java', '-jar', soot_jar, '-process-dir', process_dir]\n\n return subprocess.call(command, shell=False, cwd=process_dir, \n stdout=subprocess.DEVNULL, \n stderr=subprocess.DEVNULL)\n\ndef soot_cp(original_dir, process_dir):\n soot_jar = os.path.abspath(SOOT_JAR)\n\n rt_jar = 
'/usr/lib/jvm/default-jvm/jre/lib/rt.jar'\n classpath = original_dir + ':' + rt_jar\n\n command = ['java', '-jar', soot_jar, '-cp', classpath, '-process-dir', process_dir]\n\n return subprocess.call(command, shell=False, cwd=process_dir, \n stdout=subprocess.DEVNULL, \n stderr=subprocess.DEVNULL)\n\n\ndef diff(original_dir, mutant_dir, dest_dir):\n\n is_equivalent = True\n \n for package in os.listdir(mutant_dir):\n\n if package != 'sootOutput':\n original_package_dir = os.path.join(original_dir, package)\n mutant_package_dir = os.path.join(mutant_dir, package)\n\n for class_file in os.listdir(mutant_package_dir):\n original_class_file = os.path.join(original_package_dir, class_file)\n mutant_class_file = os.path.join(mutant_package_dir, class_file)\n command = ['diff', original_class_file, mutant_class_file]\n status = subprocess.call(command, shell=False, \n stdout=subprocess.DEVNULL, \n stderr=subprocess.DEVNULL)\n\n if status != 0:\n is_equivalent = False\n\n return is_equivalent\n\n\ndef tce(data_dir):\n\n all_tce_file = os.path.join(data_dir, 'tce.csv')\n all_csv = open(all_tce_file, 'w', 1)\n all_csv.write(\"program,mutantNo,TCE\\n\")\n\n for program in os.listdir(data_dir):\n test_dir = os.path.join(data_dir, program)\n if os.path.isdir(test_dir):\n src_dir = os.path.join(test_dir, 'src')\n soot(src_dir)\n original_dir = os.path.join(src_dir, 'sootOutput')\n mutants_dir = os.path.join(test_dir, 'mutants')\n for package in os.listdir(mutants_dir):\n package_dir = os.path.join(mutants_dir, package)\n for class_dir in os.listdir(package_dir): \n class_mutant_dir = os.path.join(package_dir, class_dir, 'mutants')\n tce_file = os.path.join(package_dir, class_dir, 'tce.csv')\n csv = open(tce_file, 'w')\n\n csv.write(\"program,mutantNo,TCE\\n\")\n\n for mutant_dir in os.listdir(class_mutant_dir):\n process_dir = os.path.join(class_mutant_dir, mutant_dir)\n soot_cp(original_dir, process_dir)\n mutant_soot_dir = os.path.join(process_dir, 'sootOutput')\n if 
diff(original_dir, mutant_soot_dir, class_mutant_dir):\n csv.write(program + ',' + mutant_dir + ',CONFIRMED\\n')\n all_csv.write(program + ',' + mutant_dir + ',CONFIRMED\\n')\n else:\n csv.write(program + ',' + mutant_dir + ',NO\\n')\n all_csv.write(program + ',' + mutant_dir + ',NO\\n')\n\n csv.close()\n all_csv.close()\n\n\ndef main():\n output_dir = os.path.abspath(OUTPUT_DIR)\n input_dir = os.path.abspath(INPUT_DIR)\n\n if not os.path.exists(output_dir):\n os.makedirs(output_dir)\n\n for file in os.listdir(input_dir):\n list_dir = os.path.join(input_dir, file)\n\n if os.path.isdir(list_dir):\n\n is_valid = True\n dest_dir = os.path.join(output_dir, file)\n\n if os.path.exists(dest_dir):\n shutil.rmtree(dest_dir)\n\n os.makedirs(dest_dir)\n\n for test_dir in os.listdir(list_dir):\n in_dir = os.path.join(input_dir, list_dir, test_dir)\n if os.path.isdir(in_dir) and test_dir == 'in':\n project_dir = in_dir\n for package in os.listdir(project_dir):\n package_dir = os.path.join(project_dir, package)\n for java_file in os.listdir(package_dir):\n return_code = compile(classpath=project_dir, package=package, java_file=java_file, dest_dir=dest_dir)\n\n if return_code != 0:\n is_valid = False\n \n if not is_valid:\n shutil.rmtree(dest_dir)\n\n tce(data_dir=output_dir)\n" }, { "alpha_fraction": 0.510869562625885, "alphanum_fraction": 0.532608687877655, "avg_line_length": 17.600000381469727, "blob_id": "a233daea699740e7000c530e5dbf6f9ae8e54002", "content_id": "444057891602bc6571f36922c69b30eadca02ba1", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 92, "license_type": "permissive", "max_line_length": 34, "num_lines": 5, "path": "/main.py", "repo_name": "marcioaug/mutant-context-jdolly", "src_encoding": "UTF-8", "text": "#!/usr/bin/python3\n\nif __name__ == '__main__':\n from gen.__main2__ import main\n main()" }, { "alpha_fraction": 0.8260869383811951, "alphanum_fraction": 0.8260869383811951, 
"avg_line_length": 23, "blob_id": "612fc803c67b898559962a0606f63e5f9b999d23", "content_id": "e0fc3d1f0ce59b94134dbe52e86aed4e05600ab4", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 23, "license_type": "permissive", "max_line_length": 23, "num_lines": 1, "path": "/README.md", "repo_name": "marcioaug/mutant-context-jdolly", "src_encoding": "UTF-8", "text": "# mutant-context-jdolly" }, { "alpha_fraction": 0.5476843118667603, "alphanum_fraction": 0.5511882901191711, "avg_line_length": 31.48019790649414, "blob_id": "a38a136c649ea275b7eb80d4b078e8c6a21d1d63", "content_id": "2be5502ef6a6c6fcfde02f2d20c83d4b3da42b94", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6564, "license_type": "permissive", "max_line_length": 118, "num_lines": 202, "path": "/gen/__main2__.py", "repo_name": "marcioaug/mutant-context-jdolly", "src_encoding": "UTF-8", "text": "import os\nimport shutil\nimport subprocess\nimport re\n\n\nINPUT_DIR = os.path.join('jdolly_source', '0')\nOUTPUT_DIR = os.path.join('data')\nSOOT_JAR = os.path.join('soot', 'soot.jar')\n\n\ndef sort_files(files):\n return sorted(files, key=lambda x: (int(0 if re.sub(r'[^0-9]+','',x) == '' else re.sub(r'[^0-9]+','',x)),x))\n\n\ndef get_files(path, ext='.java'):\n\n files = []\n\n for node in os.listdir(path):\n node = os.path.join(path, node)\n if os.path.isdir(node):\n files += get_files(node, ext)\n elif os.path.splitext(node)[1] == ext:\n files.append(node)\n \n return files\n\n\ndef get_class_files(path, package='', ext='.class'):\n\n files = []\n\n for node in os.listdir(path):\n node_path = os.path.join(path, node)\n if os.path.isdir(node_path):\n package = os.path.join(package, node)\n files += get_class_files(node_path, package, ext)\n elif os.path.splitext(node_path)[1] == ext:\n files.append(os.path.join(package, node))\n \n return files\n\n\ndef compile(classpath, program_dir, 
files):\n\n command = ['javac', '-cp', classpath]\n\n for input_file in files:\n command.append(os.path.join(program_dir, input_file))\n\n return subprocess.call(command, shell=False, \n stdout=subprocess.DEVNULL, \n stderr=subprocess.DEVNULL)\n\n\ndef exec_major(class_file, classpath, mutants_dir):\n\n dest_dir = os.path.join(classpath, 'build', 'classes')\n\n if os.path.exists(dest_dir):\n shutil.rmtree(dest_dir)\n\n os.makedirs(dest_dir)\n\n command = [\n 'javac', \n '-XMutator:ALL', \n '-J-Dmajor.export.context=true', \n '-J-Dmajor.export.mutants=true', \n '-J-Dmajor.export.directory=' + mutants_dir,\n '-cp', classpath, \n '-d', dest_dir,\n class_file\n ]\n\n return subprocess.call(command, shell=False, cwd=mutants_dir, \n stdout=subprocess.DEVNULL, \n stderr=subprocess.DEVNULL)\n\n\ndef soot_optmizer(classpath, program_dir, java_files):\n soot_jar = os.path.abspath(SOOT_JAR)\n\n return_code = compile(classpath, program_dir, java_files)\n\n for java_file in java_files:\n class_file = java_file.split('.')[0].replace(os.sep, '.')\n java_file = os.path.join(program_dir, java_file)\n\n rt_jar = '/usr/lib/jvm/default-jvm/jre/lib/rt.jar'\n\n classpath = program_dir + ':' + rt_jar + ':' + classpath\n\n command = ['java', '-jar', soot_jar, '-cp', classpath, '-O', class_file]\n\n subprocess.call(command, shell=False, cwd=program_dir, \n stdout=subprocess.DEVNULL, \n stderr=subprocess.DEVNULL)\n \n return return_code\n\n\ndef diff(mutant_dir, original_dir):\n\n opt_mutant_dir = os.path.join(mutant_dir, 'sootOutput')\n opt_original_dir = os.path.join(original_dir, 'sootOutput')\n\n mutants = get_class_files(opt_mutant_dir)\n\n equivalent = True\n\n for mutant in mutants:\n command = ['diff', os.path.join(opt_original_dir, mutant), os.path.join(opt_mutant_dir, mutant)]\n status = subprocess.call(command, shell=False, \n stdout=subprocess.DEVNULL, \n stderr=subprocess.DEVNULL) \n if status != 0:\n equivalent = False\n \n return equivalent\n\n\ndef generate_mutants(test, 
classpath, working_dir):\n print('-' * 80)\n print('> GENERATING MUTANTS AND CHECKING EQUIVALENCE FOR %s...' % test)\n\n full_qualified_java = get_class_files(classpath, ext='.java')\n\n for java_file in full_qualified_java: \n mutants_dir = os.path.join(working_dir, 'mutants', java_file.split('.')[0])\n class_file = os.path.join(classpath, java_file)\n\n if os.path.exists(mutants_dir):\n shutil.rmtree(mutants_dir)\n \n os.makedirs(mutants_dir)\n \n exec_major(class_file, classpath, mutants_dir)\n\n csv = open(os.path.join(mutants_dir, 'tce.csv'), 'w')\n\n total_mutants = len([dir for dir in os.listdir(mutants_dir) if os.path.isdir(os.path.join(mutants_dir, dir))])\n\n print('%s -> %d MUTANTS GENERATED FOR %s' % (test, total_mutants, java_file))\n\n count = 1\n equivante_count = 0\n\n for mutant in sort_files(os.listdir(mutants_dir)): \n mutant_dir = os.path.join(mutants_dir, mutant)\n if os.path.isdir(mutant_dir):\n print('%s -> RUNNING TCE FOR %d OF %d MUTANTS.' % (test, count, total_mutants))\n count += 1\n \n if soot_optmizer(classpath, mutant_dir, [java_file]) == 0:\n if (diff(mutant_dir, classpath)):\n \n print ('!!!! %s -> MUTANT %s (%s) IS EQUIVALENT. %s' % (test, mutant, java_file, ('< ' * 10)))\n \n equivante_count += 1\n csv.write(mutant + ',' + 'TCE_CONFIRMED\\n')\n else:\n csv.write(mutant + ',' + 'NOT_CONFIRMED\\n')\n else:\n print('%s ERROR -> MUTANT %s DONT COMPILE.' % (test, mutant))\n csv.write(mutant + ',' + 'DONT_COMPILE\\n')\n \n csv.close()\n\n print('EQUIVALENCE ANALISIS FOR %s FINISH. 
%d EQUIVALENT(S) OF %d' % (test, equivante_count, total_mutants))\n print('-' * 80)\n\n\ndef copy_original(src, dest, test):\n dest_path = os.path.join(dest, test, 'original')\n\n if not os.path.exists(dest_path):\n shutil.copytree(os.path.join(src, test, 'in'), dest_path)\n return dest_path\n\n return None \n\n\ndef main(): \n input_dir = os.path.abspath(INPUT_DIR)\n output_dir = os.path.abspath(OUTPUT_DIR)\n\n for test_dir in sort_files(os.listdir(input_dir)):\n if os.path.isdir(os.path.join(input_dir, test_dir)): \n working_dir = os.path.join(output_dir, test_dir)\n original_dir = copy_original(input_dir, output_dir, test_dir)\n \n if original_dir != None: \n full_qualified_java = get_class_files(original_dir, ext='.java')\n\n if soot_optmizer(original_dir, original_dir, full_qualified_java) != 0:\n shutil.rmtree(os.path.join(output_dir, test_dir)) \n else:\n generate_mutants(test_dir, original_dir, working_dir)\n else:\n print(\"SKIPING \" + test_dir) " }, { "alpha_fraction": 0.5024201273918152, "alphanum_fraction": 0.5121006965637207, "avg_line_length": 28.514286041259766, "blob_id": "0827aa638471f356a235d76cb8f368667956cf07", "content_id": "38d36d8118f62299823ad1fade7b729f7b2d7c16", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Dockerfile", "length_bytes": 1033, "license_type": "permissive", "max_line_length": 96, "num_lines": 35, "path": "/Dockerfile", "repo_name": "marcioaug/mutant-context-jdolly", "src_encoding": "UTF-8", "text": "FROM python:3.6-alpine\n\nLABEL mainteiner=\"Marcio Augusto Guimarães <marcioaugustosg@gmail.com\"\n\nRUN apk add --update --no-cache \\\n git \\\n subversion \\\n openjdk7-jre \\\n perl \\\n patch \\\n bash \\\n perl-dev \\\n coreutils\n\n\nRUN apk add --no-cache --virtual .build-deps \\\n unzip \\\n wget \\\n gcc \\\n g++ \\\n make \\\n curl \\\n && curl -L http://xrl.us/cpanm > /bin/cpanm && chmod +x /bin/cpanm \\\n && cpanm DBI \\\n && mkdir /opt \\\n && cd /opt && git clone 
https://github.com/rjust/defects4j \\\n && cd defects4j && ./init.sh \\\n && echo 'export PATH=/opt/defects4j/framework/bin:$PATH' > /etc/profile.d/defects4j.sh \\\n && echo 'export PATH=/opt/defects4j/major/bin:$PATH' > /etc/profile.d/major.sh \\\n && apk del .build-deps\n\nWORKDIR /opt/src/\n\nENV PATH /opt/defects4j/framework/bin:/opt/defects4j/major/bin:$PATH\nENV JAVA_TOOL_OPTIONS -Dmajor.export.context=true " } ]
5
Pogosaurus/Restaurant-Management-System
https://github.com/Pogosaurus/Restaurant-Management-System
25b6c6d3e242d3155fe11cd010ddd578b192b0d3
c1076a4ea9fce8c9d4114a6379b68953198e2d57
696b7c384167789325852509b9362cd2d043ee31
refs/heads/master
2023-06-29T07:45:44.940073
2021-07-19T09:05:40
2021-07-19T09:05:40
387,400,222
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5236486196517944, "alphanum_fraction": 0.537162184715271, "avg_line_length": 23.66666603088379, "blob_id": "bc1328a22063843503cff8322818f4847dd588f0", "content_id": "e6a913d25beabcc849384257c206f64303aab00a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 296, "license_type": "no_license", "max_line_length": 39, "num_lines": 12, "path": "/restaurant_table.py", "repo_name": "Pogosaurus/Restaurant-Management-System", "src_encoding": "UTF-8", "text": "import sqlite3\ncon = sqlite3.connect('restaurant.db')\ncur = con.cursor()\n\ncur.execute('''CREATE TABLE restaurant(\n name text,\n email text,\n DOB text,\n service_rating char(1),\n food_rating char(1))''')\ncon.commit()\ncon.close()\n" }, { "alpha_fraction": 0.4596439301967621, "alphanum_fraction": 0.5120177865028381, "avg_line_length": 37.50857162475586, "blob_id": "d93efbac26f99ac40f3ebff877441594ed7e6fdc", "content_id": "4f7db9bb4c40adc8dc1776554f212181489fdd56", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6740, "license_type": "no_license", "max_line_length": 140, "num_lines": 175, "path": "/restaurant_management_system.py", "repo_name": "Pogosaurus/Restaurant-Management-System", "src_encoding": "UTF-8", "text": "from tkinter import *\nfrom tkinter import messagebox\nfrom tkinter import ttk\nfrom tkinter import simpledialog\nimport sqlite3\n\na = Tk()\nwid = a.winfo_screenwidth()\nheight = a.winfo_screenheight()\na.geometry('%dx%d' %(wid,height))\na.title(\"ABC Restaurant\")\nlabel = Label(a, text = \"Welcome To ABC Restaurant\", font = ('Metropolis',30,'bold')).place(x = 500, y = 20)\ncon = sqlite3.connect('restaurant.db')\ncur = con.cursor()\n\nclass restaurant:\n def __init__(self):\n global menu_items\n global menu_price\n b = Tk()\n b.title(\"ABC Restaurant\")\n wid_1 = b.winfo_screenwidth()\n height_1 = b.winfo_screenheight()\n b.geometry('%dx%d' % 
(wid_1,height_1))\n b.configure(bg = 'blue')\n\n MenuBar = Menu(b)\n SubMenu = Menu(MenuBar, tearoff = 0)\n MenuBar.add_command(label = 'Book Now', command = self.book)\n\n SubMenu1 = Menu(MenuBar, tearoff = 0)\n MenuBar.add_cascade(label = 'Menu', menu = SubMenu1)\n SubMenu1.add_command(label = 'See Menu Here', command = self.menu)\n\n b.config(menu = MenuBar)\n\n menu_items = {'1':'Pizza','2':'French Fries','3':'Burger'}\n menu_price = {'1':'100','2':'200','3':'300'}\n\n def submit(self):\n messagebox.showinfo(\"Submit\",\"Your Item \" + str(item1) + \" Has Been Ordered\")\n\n def generate_bill(self):\n h = quantity.get()\n if item1 == menu_items['1']:\n j = menu_price['1']\n elif item1 == menu_items['2']:\n j = menu_price['2']\n elif item1 == menu_items['3']:\n j = menu_price['3']\n k = int(h) * int(j)\n l = k + 5\n if item2 == 'YES':\n xy = l - 4\n else:\n xy = l\n messagebox.showinfo(\"Generate Bill\", \"Your Bill (\" + str(xy) + \" AED) Has Been Made\")\n self.feedback()\n \n def book(self):\n c = Tk()\n c.geometry(\"1920x1080\")\n label = Label(c,text = \"Book Here\", font = ('Metropolis',20,'')).place(x = 10, y = 15)\n label_2 = Label(c, text = 'Table No.', font = ('Metropolis',20,'')).place(x = 50, y = 50)\n label_3 = Label(c,text = 'Item', font = ('Metropolis',20,'')).place(x = 50, y = 100)\n label_4 = Label(c,text = 'Quantity', font = ('Metropolis',20,'')).place(x = 50, y = 150)\n label_5 = Label(c,text = 'Do you have a Corporate or a Government ID with you?', font = ('Metropolis',20,'')).place(x = 50, y = 200)\n\n \n def get_value(event):\n global item1 \n global item2 \n item1 = i.get()\n item2 = f.get()\n\n\n global table_number, quantity\n table_number = Entry(c)\n quantity = Entry(c)\n table_number.place(x = 200, y = 50)\n quantity.place(x = 200, y = 150)\n item = StringVar()\n item_1 = StringVar()\n i = ttk.Combobox(c, width = 20, textvariable = item)\n i['value'] = ('none', menu_items['1'],menu_items['2'],menu_items['3'])\n i.place(x = 200, y = 
100)\n i.current(0)\n i.bind(\"<<ComboboxSelected>>\",get_value)\n f = ttk.Combobox(c, width = 20, textvariable = item_1)\n f['value'] = ('none', 'YES','NO')\n f.place(x = 800, y = 205)\n f.current(0)\n f.bind(\"<<ComboboxSelected>>\",get_value)\n b1 = Button(c, text = \"Submit\", font = ('Metropolis',20,''), command = self.submit).place(x = 50, y = 300)\n b2 = Button(c,text = \"Generate Bill\", font = ('Metropolis',20,''), command = self.generate_bill).place(x = 200, y = 300)\n \n\n def menu(self):\n \n d = Tk()\n d.geometry(\"700x650\")\n\n label_ = Label(d,text = 'Menu', font = ('Metropolis',20,'')).place(x = 10, y = 10)\n label_7 = Label(d,text = 'Item', font = ('Metropolis',20,'')).place(x = 20, y = 90)\n label_8 = Label(d,text = 'Price', font = ('Metropolis',20,'')).place(x = 200, y = 90)\n \n label_1 = Label(d, text = menu_items['1'], font=('Metropolis',20,'')).place(x = 10, y = 120)\n label_2 = Label(d, text = menu_items['2'], font=('Metropolis',20,'')).place(x = 10, y = 180)\n label_3 = Label(d, text = menu_items['3'], font=('Metropolis',20,'')).place(x = 10, y = 240)\n\n label_4 = Label(d, text = menu_price['1'], font=('Metropolis',20,'')).place(x = 200, y = 120)\n label_5 = Label(d, text = menu_price['2'], font=('Metropolis',20,'')).place(x = 200, y = 180)\n label_6 = Label(d, text = menu_price['3'], font=('Metropolis',20,'')).place(x = 200, y = 240)\n\n def save_feedback(self):\n vb = name.get()\n vc = email.get()\n vd = DOB.get()\n sql = \"INSERT INTO restaurant VALUES(?,?,?,?,?)\"\n xyz = (vb,vc,vd,item4,item5)\n try:\n cur.execute(sql,xyz)\n con.commit()\n messagebox.showinfo(\"Feedback Form\",\"Feedback Saved\")\n except:\n con.roleback()\n messagebox.showinfo(\"Feedback Form\", \"Feedback not saved\")\n \n \n def feedback(self):\n v = Tk()\n v.geometry(\"1920x1080\")\n\n label = Label(v, text = \"Feedback Form\", font = ('Metropolis', 20,'')).place(x = 10, y = 5)\n l1 = Label(v, text = \"Name\", font = ('Metropolis',20,'')).place(x = 20, y = 50)\n 
l2 = Label(v, text = \"Email Id\", font = ('Metropolis', 20,'')).place(x = 20, y = 100)\n l3 = Label(v, text = \"DOB\", font = ('Metropolis',20,'')).place(x = 20, y = 150)\n l4 = Label(v,text = \"Service Rating\", font = ('Metropolis',20,'')).place(x = 20, y = 200)\n l5 = Label(v,text = \"Food Rating\", font = ('Metropolis',20,'')).place(x = 20, y = 250)\n global name, email, DOB\n \n name = Entry(v)\n email = Entry(v)\n DOB = Entry(v)\n name.place(x = 150, y = 50)\n email.place(x = 150, y = 100)\n DOB.place(x = 150, y = 150)\n\n def get_value1(event):\n global item4, item5\n item4 = u.get()\n item5 = xz.get()\n\n item3 = StringVar()\n item0 = StringVar()\n \n u = ttk.Combobox(v, width = 20, textvariable = item3)\n u['value'] = ('0','1','2','3','4','5')\n u.place(x = 300, y = 200)\n u.current(0)\n u.bind(\"<<ComboboxSelected>>\",get_value1)\n\n xz = ttk.Combobox(v, width = 20, textvariable = item0)\n xz['value'] = ('0','1','2','3','4','5')\n xz.place(x = 300, y = 250)\n xz.current(0)\n xz.bind(\"<<ComboboxSelected>>\",get_value1)\n\n button = Button(v, text = \"Submit\", font = ('Metropolis',20,''), command = self.save_feedback).place(x = 50, y = 400)\n \ndef begin():\n a.destroy()\n n = restaurant()\n\na.after(2000,begin)\n\n" } ]
2
ElementQi/ViDeNN-master.py
https://github.com/ElementQi/ViDeNN-master.py
05bb8d96ffd1db8494041e39e507bed18264aa43
869ad0f71f5abab0b5db09ce80e261fbad094bd2
ec085541dcde48e520381edb4346c046af6f7b96
refs/heads/master
2023-08-15T01:00:21.708719
2021-09-30T12:57:26
2021-09-30T12:57:26
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5637193918228149, "alphanum_fraction": 0.5848900079727173, "avg_line_length": 39.4789924621582, "blob_id": "4f8620ec7248911e67279360620f3487620fe46e", "content_id": "0cdf9138a6aa66681ec899d6e55b292c571c1783", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4818, "license_type": "permissive", "max_line_length": 110, "num_lines": 119, "path": "/Temp3-CNN/add_noise_temp3-CNN.py", "repo_name": "ElementQi/ViDeNN-master.py", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\"\"\"\n@author: clausmichele\n\"\"\"\nimport argparse\nimport numpy as np\nimport cv2\nfrom glob import glob\nimport os\nfrom tqdm import tqdm\n\nparser = argparse.ArgumentParser(description='')\nparser.add_argument('--download_videos', dest='download_videos', type=int, default=0,\n help='Set it to False if you have downloaded the videos')\nargs = parser.parse_args()\n\n\ndef gaussian_noise(sigma, image):\n gaussian = np.random.normal(0, sigma, image.shape)\n noisy_image = np.zeros(image.shape, np.float32)\n noisy_image = image + gaussian\n noisy_image = np.clip(noisy_image, 0, 255)\n noisy_image = noisy_image.astype(np.uint8)\n return noisy_image\n\n\ndef realistic_noise(Ag, Dg, image):\n CT1 = 1.25e-4\n CT2 = 1.11e-4\n Nsat = 7480\n image = image / 255.0\n M = np.sqrt(((Ag * Dg) / (Nsat * image) + (Dg ** 2) * ((Ag * CT1 + CT2) ** 2)))\n N = np.random.normal(0, 1, image.shape)\n noisy_image = image + N * M\n cv2.normalize(noisy_image, noisy_image, 0, 1.0, cv2.NORM_MINMAX, dtype=-1)\n return noisy_image\n\n\nif __name__ == \"__main__\":\n\n if not os.path.isdir(\"./data\"):\n os.mkdir(\"./data\")\n if not os.path.isdir(\"./data/train\"):\n os.mkdir(\"./data/train\")\n if not os.path.isdir(\"./data/train/original\"):\n os.mkdir(\"./data/train/original\")\n if not os.path.isdir(\"./data/train/noisy\"):\n os.mkdir(\"./data/train/noisy\")\n if not 
os.path.isdir(\"./data/train/denoised\"):\n os.mkdir(\"./data/train/denoised\")\n if not os.path.isdir(\"./data/test\"):\n os.mkdir(\"./data/test\")\n if not os.path.isdir(\"./data/test/original\"):\n os.mkdir(\"./data/test/original\")\n if not os.path.isdir(\"./data/test/noisy\"):\n os.mkdir(\"./data/test/noisy\")\n\n # Download Videos\n download_videos = args.download_videos\n if download_videos:\n videos = [\"akiyo_cif.y4m\", \"bowing_cif.y4m\", \"bridge_close_cif.y4m\",\n \"bridge_far_cif.y4m\", \"bus_cif.y4m\", \"city_cif.y4m\", \"coastguard_cif.y4m\",\n \"container_cif.y4m\", \"crew_cif.y4m\", \"deadline_cif.y4m\", \"flower_cif.y4m\",\n \"flower_garden_422_cif.y4m\", \"football_422_cif.y4m\", \"football_cif.y4m\",\n \"galleon_422_cif.y4m\"]\n\n root_link = \"https://media.xiph.org/video/derf/y4m/\"\n for video in tqdm(videos, desc=\"[*] Downloading videos...\"):\n os.system(\"wget -q --no-check-certificate \" + root_link + video)\n os.system(\"mv *.y4m ./data\")\n\n videos = glob(\"./data/*.y4m\")\n num_vids = len(videos)\n for i in tqdm(range(int(num_vids)), desc=\"[*] Extracting frames...\"):\n if not os.path.isdir(\"./data/train/original/\" + str(i)):\n os.mkdir(\"./data/train/original/\" + str(i))\n if not os.path.isdir(\"./data/train/noisy/\" + str(i)):\n os.mkdir(\"./data/train/noisy/\" + str(i))\n if not os.path.isdir(\"./data/train/denoised/\" + str(i)):\n os.mkdir(\"./data/train/denoised/\" + str(i))\n os.system(\"ffmpeg -loglevel quiet -i \" + videos[i] + \" ./data/train/original/\" + str(i) + \"/%05d.png\")\n frames = glob(\"./data/train/original/\" + str(i) + \"/*.png\")\n frames = sorted(frames)\n while (len(frames) > 300): # Max 300 frames each video, you can set it depending on your ram size\n os.remove(frames[-1])\n frames.pop()\n if len(frames) % 3 == 2:\n os.remove(frames[-1])\n frames.pop()\n if len(frames) % 3 == 1: # Making the frames number divisible by 3, necessary for training\n os.remove(frames[-1])\n frames.pop()\n imgs_path_train = 
sorted(glob(\"./data/train/original/*/*.png\"))\n num_of_samples = int(len(imgs_path_train) / 3)\n\n sigma_train = np.linspace(0, 50, num_of_samples + 1)\n np.random.shuffle(sigma_train)\n for i in tqdm(range(num_of_samples), desc=\"[*] Creating original-noisy set...\"):\n sigma = sigma_train[i]\n img_path = imgs_path_train[i * 3]\n folder = img_path.split('\\\\')[-2]\n\n img_file = os.path.basename(img_path)\n img_original = cv2.imread(img_path)\n img_noisy = gaussian_noise(sigma, img_original)\n cv2.imwrite(\"./data/train/noisy/\" + folder + \"/\" + img_file, img_noisy)\n\n img_path = imgs_path_train[i * 3 + 1]\n img_file = os.path.basename(img_path)\n img_original = cv2.imread(img_path)\n img_noisy = gaussian_noise(sigma, img_original)\n cv2.imwrite(\"./data/train/noisy/\" + folder + \"/\" + img_file, img_noisy)\n\n img_path = imgs_path_train[i * 3 + 2]\n img_file = os.path.basename(img_path)\n img_original = cv2.imread(img_path)\n img_noisy = gaussian_noise(sigma, img_original)\n cv2.imwrite(\"./data/train/noisy/\" + folder + \"/\" + img_file, img_noisy)\n\n" }, { "alpha_fraction": 0.4845019280910492, "alphanum_fraction": 0.5224454998970032, "avg_line_length": 46.01507568359375, "blob_id": "d90620a336d030bef8aacfb812594babb52a7d2f", "content_id": "c1fd24be16a1da6191c50e8d33f92b94d69f49e1", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9356, "license_type": "permissive", "max_line_length": 118, "num_lines": 199, "path": "/model_ViDeNN.py", "repo_name": "ElementQi/ViDeNN-master.py", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\"\"\"\n@author: clausmichele\n\"\"\"\n\nimport time\nimport tensorflow as tf\nimport cv2\nimport numpy as np\nfrom tqdm import tqdm\n\n\ndef SpatialCNN(input, is_training=False, output_channels=3, reuse=tf.compat.v1.AUTO_REUSE):\n with tf.compat.v1.variable_scope('block1', reuse=reuse):\n output = tf.compat.v1.layers.conv2d(input, 128, 3, padding='same', 
activation=tf.nn.relu)\n for layers in range(2, 20):\n with tf.compat.v1.variable_scope('block%d' % layers, reuse=reuse):\n output = tf.compat.v1.layers.conv2d(output, 64, 3, padding='same', name='conv%d' % layers, use_bias=False)\n output = tf.nn.relu(tf.compat.v1.layers.batch_normalization(output, training=is_training))\n with tf.compat.v1.variable_scope('block20', reuse=reuse):\n output = tf.compat.v1.layers.conv2d(output, output_channels, 3, padding='same', use_bias=False)\n return input - output\n\n\ndef Temp3CNN(input, is_training=False, output_channels=3, reuse=tf.compat.v1.AUTO_REUSE):\n input_middle = input[:, :, :, 3:6]\n with tf.compat.v1.variable_scope('temp-block1', reuse=reuse):\n output = tf.compat.v1.layers.conv2d(input, 128, 3, padding='same', activation=tf.nn.leaky_relu)\n for layers in range(2, 20):\n with tf.compat.v1.variable_scope('temp-block%d' % layers, reuse=reuse):\n output = tf.compat.v1.layers.conv2d(output, 64, 3, padding='same', name='conv%d' % layers, use_bias=False)\n output = tf.compat.v1.nn.leaky_relu(output)\n with tf.compat.v1.variable_scope('temp-block20', reuse=reuse):\n output = tf.compat.v1.layers.conv2d(output, output_channels, 3, padding='same', use_bias=False)\n return input_middle - output\n\n\nclass ViDeNN(object):\n def __init__(self, sess):\n self.sess = sess\n # build model\n self.Y_ = tf.compat.v1.placeholder(tf.float32, [None, None, None, 3], name='clean_image')\n self.X = tf.compat.v1.placeholder(tf.float32, [None, None, None, 3], name='noisy_image')\n self.Y = SpatialCNN(self.X)\n self.Y_frames = tf.compat.v1.placeholder(tf.float32, [None, None, None, 9], name='clean_frames')\n self.Xframes = tf.compat.v1.placeholder(tf.float32, [None, None, None, 9], name='noisy_frames')\n self.Yframes = Temp3CNN(self.Xframes)\n init = tf.compat.v1.global_variables_initializer()\n self.sess.run(init)\n print(\"[*] Initialize model successfully...\")\n\n def denoise(self, eval_files, eval_files_noisy, print_psnr, ckpt_dir, 
save_dir):\n # init variables\n tf.compat.v1.global_variables_initializer().run()\n assert len(eval_files) != 0, '[!] No testing data!'\n if ckpt_dir is None:\n full_path = tf.train.latest_checkpoint('./Temp3-CNN/ckpt')\n if (full_path is None):\n print('[!] No Temp3-CNN checkpoint!')\n quit()\n vars_to_restore_temp3CNN = {}\n for i in range(len(tf.compat.v1.global_variables())):\n if tf.compat.v1.global_variables()[i].name[0] != 'b':\n a = tf.compat.v1.global_variables()[i].name.split(':')[0]\n vars_to_restore_temp3CNN[a] = tf.compat.v1.global_variables()[i]\n saver_t = tf.compat.v1.train.Saver(var_list=vars_to_restore_temp3CNN)\n saver_t.restore(self.sess, full_path)\n\n full_path = tf.train.latest_checkpoint('./Spatial-CNN/ckpt_awgn')\n if (full_path is None):\n print('[!] No Spatial-CNN checkpoint!')\n quit()\n vars_to_restore_spatialCNN = {}\n for i in range(len(tf.compat.v1.global_variables())):\n if tf.compat.v1.global_variables()[i].name[0] != 't':\n a = tf.compat.v1.global_variables()[i].name.split(':')[0]\n vars_to_restore_spatialCNN[a] = tf.compat.v1.global_variables()[i]\n saver_s = tf.compat.v1.train.Saver(var_list=vars_to_restore_spatialCNN)\n saver_s.restore(self.sess, full_path)\n else:\n load_model_status, _ = self.load(ckpt_dir)\n print(\"[*] Model restore successfully!\")\n #\n psnr_sum = 0\n start = time.time()\n for idx in tqdm(range(len(eval_files) - 1)):\n if idx == 0:\n test = cv2.imread(eval_files[idx])\n test1 = cv2.imread(eval_files[idx + 1])\n test2 = cv2.imread(eval_files[idx + 2])\n noisy = cv2.imread(eval_files_noisy[idx])\n noisy1 = cv2.imread(eval_files_noisy[idx + 1])\n noisy2 = cv2.imread(eval_files_noisy[idx + 2])\n\n test = test.astype(np.float32) / 255.0\n test1 = test1.astype(np.float32) / 255.0\n test2 = test2.astype(np.float32) / 255.0\n noisy = noisy.astype(np.float32) / 255.0\n noisy1 = noisy1.astype(np.float32) / 255.0\n noisy2 = noisy2.astype(np.float32) / 255.0\n\n noisyin2 = np.zeros((1, test.shape[0], test.shape[1], 
9))\n current = np.zeros((test.shape[0], test.shape[1], 3))\n previous = np.zeros((test.shape[0], test.shape[1], 3))\n\n noisyin = np.zeros((3, test.shape[0], test.shape[1], 3))\n noisyin[0] = noisy\n noisyin[1] = noisy1\n noisyin[2] = noisy2\n out = self.sess.run([self.Y], feed_dict={self.X: noisyin})\n out = np.asarray(out)\n\n noisyin2[0, :, :, 0:3] = out[0, 0]\n noisyin2[0, :, :, 3:6] = out[0, 0]\n noisyin2[0, :, :, 6:] = out[0, 1]\n temp_clean_image = self.sess.run([self.Yframes], feed_dict={self.Xframes: noisyin2})\n temp_clean_image = np.asarray(temp_clean_image)\n cv2.imwrite(save_dir + '/%04d.png' % idx, temp_clean_image[0, 0] * 255)\n psnr = psnr_scaled(test, temp_clean_image[0, 0])\n psnr1 = psnr_scaled(test, out[0, 0])\n psnr_sum += psnr\n if print_psnr:\n print(\" frame %d denoised, PSNR: %.2f\" % (idx, psnr))\n else:\n print(\" frame %d denoised\" % (idx))\n\n noisyin2[0, :, :, 0:3] = out[0, 0]\n noisyin2[0, :, :, 3:6] = out[0, 1]\n noisyin2[0, :, :, 6:] = out[0, 2]\n current[:, :, :] = out[0, 2, :, :, :]\n previous[:, :, :] = out[0, 1, :, :, :]\n else:\n if idx < (len(eval_files) - 2):\n test3 = cv2.imread(eval_files[idx + 2])\n test3 = test3.astype(np.float32) / 255.0\n noisy3 = cv2.imread(eval_files_noisy[idx + 2])\n noisy3 = noisy3.astype(np.float32) / 255.0\n\n out2 = self.sess.run([self.Y], feed_dict={self.X: np.expand_dims(noisy3, 0)})\n out2 = np.asarray(out2)\n noisyin2[0, :, :, 0:3] = previous\n noisyin2[0, :, :, 3:6] = current\n noisyin2[0, :, :, 6:] = out2[0, 0]\n previous = current\n current = out2[0, 0]\n else:\n try:\n out2\n except NameError:\n out2 = np.zeros((out.shape))\n out2 = out\n out2[0, 0] = out[0, 2]\n noisyin2[0, :, :, 0:3] = current\n noisyin2[0, :, :, 3:6] = out2[0, 0]\n noisyin2[0, :, :, 6:] = out2[0, 0]\n temp_clean_image = self.sess.run([self.Yframes], feed_dict={self.Xframes: noisyin2})\n\n temp_clean_image = np.asarray(temp_clean_image)\n cv2.imwrite(save_dir + '/%04d.png' % (idx + 1), temp_clean_image[0, 0] * 
255)\n\n # calculate PSNR\n if idx == 0:\n psnr1 = psnr_scaled(test1, out[0, 1])\n psnr = psnr_scaled(test1, temp_clean_image[0, 0])\n else:\n psnr1 = psnr_scaled(test2, previous)\n psnr = psnr_scaled(test2, temp_clean_image[0, 0])\n try:\n test3\n except NameError:\n test3 = test2\n test2 = test3\n if print_psnr:\n print(\" frame %d denoised, PSNR: %.2f\" % (idx + 1, psnr))\n else:\n print(\" frame %d denoised\" % (idx + 1))\n psnr_sum += psnr\n avg_psnr = psnr_sum / len(eval_files)\n if print_psnr: print(\"--- Average PSNR %.2f ---\" % avg_psnr)\n print(\"--- Elapsed time: %.4fs\" % (time.time() - start))\n\n def load(self, checkpoint_dir):\n print(\"[*] Reading checkpoint...\")\n saver = tf.train.Saver()\n ckpt = tf.train.get_checkpoint_state(checkpoint_dir)\n if ckpt and ckpt.model_checkpoint_path:\n full_path = tf.train.latest_checkpoint(checkpoint_dir)\n global_step = int(full_path.split('/')[-1].split('-')[-1])\n saver.restore(self.sess, full_path)\n return True, global_step\n else:\n return False, 0\n\n\ndef psnr_scaled(im1, im2): # PSNR function for 0-1 values\n mse = ((im1 - im2) ** 2).mean()\n mse = mse * (255 ** 2)\n psnr = 10 * np.log10(255 ** 2 / mse)\n return psnr\n" } ]
2
agarwalsiddhant10/rl-practise
https://github.com/agarwalsiddhant10/rl-practise
6e6fe6af3070576f7940c00782cf96101bd9092a
e2d010ce140f2030c68289c33d3a5feadbfa2050
7e9b3e863c10bf90f976486c62d73fdaa2ca2c22
refs/heads/master
2020-05-27T23:42:11.300660
2019-05-27T10:29:22
2019-05-27T10:29:22
188,822,418
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.8333333134651184, "alphanum_fraction": 0.8333333134651184, "avg_line_length": 32, "blob_id": "91e4e8456857960e9bc2b36569766c18ec62c4bf", "content_id": "21e1b12e51589fe55c6e88f98883a5500b32cdeb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 66, "license_type": "no_license", "max_line_length": 51, "num_lines": 2, "path": "/README.md", "repo_name": "agarwalsiddhant10/rl-practise", "src_encoding": "UTF-8", "text": "# rl-practise\nSome common RL algorithms implemented on tensorflow\n" }, { "alpha_fraction": 0.6118271350860596, "alphanum_fraction": 0.6247156858444214, "avg_line_length": 29.674419403076172, "blob_id": "ad03c85d17672705d41358c301ca0968d1a1cfbc", "content_id": "e7c1cf4c0d7e1b91f5515300224c977461dd279a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1319, "license_type": "no_license", "max_line_length": 109, "num_lines": 43, "path": "/dqn/agent.py", "repo_name": "agarwalsiddhant10/rl-practise", "src_encoding": "UTF-8", "text": "import tensorflow as tf \nimport numpy as np \n\nclass Model:\n def __init__(self, scope, num_actions):\n self.scope = scope\n self.num_actions = num_actions\n\n def forward(self, state):\n X = tf.contrib.layers.fullyconnected(state, 32, activation_fn = relu, scope = self.scope)\n out = tf.contrib.layers.fullyconnected(X, self.num_actions, activation_fn = None, scope = self.scope)\n return out\n\ndef copy_var(sess, current, target):\n '''\n Copies the parameters of the current network to the target network\n '''\n col1 = tf.get_collection(tf.GraphKeys.VARIABLES, scope = current)\n col2 = tf.get_collection(tf.GraphKeys.VARIABLES, scope = target)\n\n col_curr = {}\n for var in col1:\n var_name = var.name.split('/')[-1]\n col_curr[var_name] = var\n\n for var in col2:\n var_name = var.name.split('/')[-1]\n sess.run(tf.assign(col_curr[var_name], var))\n\ndef best_action(env, q_state, steps, 
eps_begin = 1.0, eps_end = 0.25, num_steps = 100):\n eps = (eps_begin - eps_end) * steps/num_steps\n p = random.random()\n if p < eps:\n return env.action_space.sample()\n else:\n return tf.max(q_state, axis = 1)\ndef compute_loss(batch, current, target, gamma):\n pass\n return loss_op\n\ndef eval(batch):\n pass\n return eval_op\n" } ]
2
jay-lark/aws-ddns
https://github.com/jay-lark/aws-ddns
59a46cfe01568a50aedf8232d3ebe92027a452bd
ce7fd3dbc31fe453d9392d9cfc68c650e233b6cd
4515f0104ba81a0187056e8fff674a5fc7fdc241
refs/heads/master
2020-06-23T04:28:20.900972
2019-07-24T00:29:24
2019-07-24T00:29:24
198,512,612
0
0
null
2019-07-23T21:45:01
2019-07-23T21:49:56
2019-07-23T21:53:28
Python
[ { "alpha_fraction": 0.4797047972679138, "alphanum_fraction": 0.4944649338722229, "avg_line_length": 22.565217971801758, "blob_id": "8d5ac5f348595a900b933ba142ae5a0275eeecab", "content_id": "25b26e30f27d489dbaff6d44bff104af46080111", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1084, "license_type": "permissive", "max_line_length": 70, "num_lines": 46, "path": "/ip.py", "repo_name": "jay-lark/aws-ddns", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\nimport urllib.request\nimport boto3\n\nzone_id = \"XXXXXXX\"\nzone_record = \"XXXXX\"\npublic_url = \"XXXXXX\"\n\n\nexternal_ip = urllib.request.urlopen(public_url).read().decode('utf8')\n\n#print(external_ip)\n\nclient = boto3.client('route53')\n\nr = client.list_resource_record_sets(HostedZoneId=zone_id)\nfor i in r['ResourceRecordSets']:\n records = i['ResourceRecords'][0]\n if (i['Name']) == zone_record:\n current_r53 = (records['Value'])\n\n\n#print(current_r53)\n\nif external_ip != current_r53:\n response = client.change_resource_record_sets(\n HostedZoneId=zone_id,\n ChangeBatch={\n \"Comment\": \"Automatic DNS update\",\n \"Changes\": [\n {\n \"Action\": \"UPSERT\",\n \"ResourceRecordSet\": {\n \"Name\": zone_record,\n \"Type\": \"A\",\n \"TTL\": 300,\n \"ResourceRecords\": [\n {\n \"Value\": external_ip\n },\n ],\n }\n },\n ]\n }\n)\n" }, { "alpha_fraction": 0.7353308200836182, "alphanum_fraction": 0.7403246164321899, "avg_line_length": 36.85714340209961, "blob_id": "2507e3d9673fb3daa719b08a4bdccee79016d8af", "content_id": "066409ab771b41cbe71c2a0da674d57dab0b3565", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 801, "license_type": "permissive", "max_line_length": 148, "num_lines": 21, "path": "/README.md", "repo_name": "jay-lark/aws-ddns", "src_encoding": "UTF-8", "text": "# aws-ddns\nPersonal DDNS system using AWS Route 53. 
\n\n## Requirements\n- Python 3\n- Boto 3\n\n## Usage\n\n### ip.php\nPlace this PHP file somewhere on the internet that the system with a dynamic IP can reach.\n\nThis PHP file simply grabs the IP of any visitor and displays it on the page. \n\n### ip.py\nFill in the following variables at the top of the file:\n- zone_id - This is referred to as the Hosted Zone ID if you are looking at the AWS Management Console.\n- zone_record - The DNS record you want to monitor and change. This variable must end in a period. (www.example.com., mail.example.com.)\n- public_url - The direct link to the ip.php file.\n\nOnce you've entered your details just save the file somewhere and run it manually or have it run in a regular basis by your favorite task scheduler.\n\n\n\n\n\n\n" } ]
2
maggieyaochen/yao
https://github.com/maggieyaochen/yao
cabc6cf766a3f38719abeaac7794dc317986536c
82585ea34e84c86d8612cf0f520a23f87b8ba02f
332c0fa46ba4717d094fe45a66abcbce7f2cfd27
refs/heads/master
2023-03-30T18:15:25.693297
2021-03-02T23:27:14
2021-03-02T23:27:14
343,938,534
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.44117647409439087, "alphanum_fraction": 0.779411792755127, "avg_line_length": 22, "blob_id": "a90dbca192dd6d52728e239109f84fc5503fa585", "content_id": "40d65f951ea071e8b4382e412c928507f16bbf29", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 68, "license_type": "no_license", "max_line_length": 31, "num_lines": 3, "path": "/Eirtrade.py", "repo_name": "maggieyaochen/yao", "src_encoding": "UTF-8", "text": "print('how are you?')\nprint (11111111111111111111111)\nadd a new line" } ]
1
JanDziuba/SIK
https://github.com/JanDziuba/SIK
c747dd13af48e0d376085a5d8fd40bef4552a1b6
ef56616f2b838064a7512cfa6207dc5403fb182b
fec5b2ac6d5b4ac903e97600d62189dd7031324d
refs/heads/master
2022-12-11T01:11:28.403248
2020-09-13T20:13:22
2020-09-13T20:13:22
295,229,266
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6499999761581421, "alphanum_fraction": 0.6555555462837219, "avg_line_length": 12.769230842590332, "blob_id": "417bb95765127c79cef91bcde58f5c4c0e1d4965", "content_id": "4d675daabcb84e20ac69168db9a1269fe7adbd4f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Makefile", "length_bytes": 180, "license_type": "no_license", "max_line_length": 30, "num_lines": 13, "path": "/zadanie1/Makefile", "repo_name": "JanDziuba/SIK", "src_encoding": "UTF-8", "text": "CC\t= gcc\nCFLAGS\t= -Wall -O2\nLFLAGS\t= -Wall\nTARGETS = testhttp_raw\n\nall: $(TARGETS) \n\ntesthttp_raw.o: testhttp_raw.c\n\ntesthttp_raw: testhttp_raw.o\n\nclean:\n\trm -f *.o *~ $(TARGETS) \n" }, { "alpha_fraction": 0.5153294205665588, "alphanum_fraction": 0.5273972749710083, "avg_line_length": 23.926828384399414, "blob_id": "ab8a57f81880d239baa3c7a4db877ff7b55f6908", "content_id": "2bd5f490bc6729d937dbd85f7945772565593e70", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3066, "license_type": "no_license", "max_line_length": 78, "num_lines": 123, "path": "/zadanie1/testhttp", "repo_name": "JanDziuba/SIK", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3.6\n\nimport sys\nimport os\nimport subprocess\nfrom pathlib import Path\nimport signal\nimport atexit\n\n\ndef exit_handler():\n if os.path.isfile('stunnel.pid'):\n stunnel_pid_file = open(\"stunnel.pid\", \"r\")\n pid = int(stunnel_pid_file.read())\n os.kill(pid, signal.SIGKILL)\n subprocess.run([\"rm\", \"stunnel.pid\"])\n\n if os.path.isfile('stunnel.conf'):\n subprocess.run([\"rm\", \"stunnel.conf\"])\n\n\natexit.register(exit_handler)\n\n\ndef check_if_https(url):\n if url.find(\"https\") == 0:\n return True\n else:\n return False\n\n\ndef check_if_has_port(url):\n if check_if_https(url):\n if url[len(\"https://\"):].find(\":\") >= 0:\n return True\n else:\n return False\n else:\n if url[len(\"http://\"):].find(\":\") >= 0:\n return 
True\n else:\n return False\n\n\ndef get_port(url, has_port, is_https):\n if has_port:\n if is_https:\n port_start = url[len(\"https://\"):].find(\":\") + 1 + len(\"https://\")\n else:\n port_start = url[len(\"http://\"):].find(\":\") + 1 + len(\"http://\")\n\n port_end = url[port_start:].find(\"/\") + port_start\n return url[port_start:port_end]\n else:\n if is_https:\n return \"443\"\n else:\n return \"80\"\n\n\ndef get_address(url, has_port, is_https):\n if is_https:\n address_start = len(\"https://\")\n else:\n address_start = len(\"http://\")\n\n if has_port:\n address_end = url[address_start:].find(\":\") + address_start\n else:\n address_end = url[address_start:].find(\"/\") + address_start\n\n return url[address_start:address_end]\n\n\ndef get_parent_dir(path):\n path = Path(path)\n return str(path.parent)\n\n\ndef make_https_request(address, port, cookies_path, url):\n parent_dir = get_parent_dir(os.path.abspath(__file__))\n\n stunnel_conf_file = open(\"stunnel.conf\", \"w+\")\n stunnel_conf_file.write(\"pid = \" + parent_dir + \"/stunnel.pid\\n\"\n + \"[service]\\n\"\n + \"client = yes\\n\"\n + \"accept = 127.0.0.1:3333\\n\"\n + \"connect = \" + address + \":\" + port + \"\\n\")\n\n stunnel_conf_file.close()\n\n subprocess.run([\"stunnel\", parent_dir + \"/stunnel.conf\"])\n\n subprocess.run([\"./testhttp_raw\", \"127.0.0.1\" + \":\" + \"3333\",\n cookies_path, url])\n\n\ndef main():\n if len(sys.argv) != 3:\n print(\"Usage: \", sys.argv[0],\n \"<plik ciasteczek> <testowany adres http>\\n\",\n file=sys.stderr)\n exit(1)\n\n cookies_path = sys.argv[1]\n url = sys.argv[2]\n\n has_port = check_if_has_port(url)\n is_https = check_if_https(url)\n\n port = get_port(url, has_port, is_https)\n address = get_address(url, has_port, is_https)\n\n if is_https:\n make_https_request(address, port, cookies_path, url)\n\n else:\n subprocess.run([\"./testhttp_raw\", address + \":\" + port, cookies_path,\n url])\n\n\nif __name__ == \"__main__\":\n main()\n" }, { 
"alpha_fraction": 0.5257841348648071, "alphanum_fraction": 0.5330173969268799, "avg_line_length": 26.939023971557617, "blob_id": "3bbe15e11945df80f3f401169d201ddf385ba75b", "content_id": "9fb3bea688d249ca55e0281b4baba9c63fa14c6a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 16041, "license_type": "no_license", "max_line_length": 107, "num_lines": 574, "path": "/zadanie1/testhttp_raw.c", "repo_name": "JanDziuba/SIK", "src_encoding": "UTF-8", "text": "#include <stdio.h>\n#include <string.h>\n#include <stdlib.h>\n#include <unistd.h>\n#include <fcntl.h>\n#include <limits.h>\n#include <stdbool.h>\n\n#include <sys/socket.h>\n#include <netinet/in.h>\n#include <netdb.h>\n\n#define BUFFER_SIZE 1024\n\nenum transfer_encoding {chunked, identity};\n\nint socket_connect(char *address, char *port) {\n\n int ret_value;\n int sock;\n struct addrinfo addr_hints, *addr_result;\n\n sock = socket(PF_INET, SOCK_STREAM, IPPROTO_TCP);\n if (sock < 0) {\n fprintf(stderr, \"ERROR: creating socket at %s (%d)\\n\", __FILE__, __LINE__);\n exit(1);\n }\n\n memset(&addr_hints, 0, sizeof(struct addrinfo));\n addr_hints.ai_flags = 0;\n addr_hints.ai_family = AF_INET;\n addr_hints.ai_socktype = SOCK_STREAM;\n addr_hints.ai_protocol = IPPROTO_TCP;\n\n ret_value = getaddrinfo(address, port, &addr_hints, &addr_result);\n if (ret_value != 0) {\n fprintf(stderr, \"ERROR: getaddrinfo at %s (%d)\\n\", __FILE__, __LINE__);\n exit(1);\n }\n\n if (connect(sock, addr_result->ai_addr, addr_result->ai_addrlen) != 0) {\n fprintf(stderr, \"ERROR: connect at %s (%d)\\n\", __FILE__, __LINE__);\n exit(1);\n }\n freeaddrinfo(addr_result);\n\n return sock;\n}\n\nvoid separate_address_port(const char *address_port, char **address_p, char **port_p) {\n\n char *address;\n char *port;\n int index = 0;\n int address_size;\n int port_size;\n\n while (address_port[index] != ':' && address_port[index] != '\\0') {\n index++;\n }\n\n if (address_port[index] == '\\0') {\n 
fprintf(stderr, \"ERROR: argv[1] nie jest postaci \"\n \"<adres połączenia>:<port> %s (%d)\\n\", __FILE__, __LINE__);\n exit(1);\n }\n\n address_size = index + 1;\n address = (char *) malloc(sizeof(char) * address_size);\n if (address == NULL) {\n fprintf(stderr, \"ERROR: failed malloc at %s (%d)\\n\", __FILE__, __LINE__);\n exit(1);\n }\n\n while (address_port[index] != '\\0') {\n index++;\n }\n port_size = index - address_size + 1;\n port = (char *) malloc(sizeof(char) * port_size);\n if (port == NULL) {\n fprintf(stderr, \"ERROR: failed malloc at %s (%d)\\n\", __FILE__, __LINE__);\n exit(1);\n }\n index = 0;\n\n while (address_port[index] != ':') {\n address[index] = address_port[index];\n index++;\n }\n address[index] = '\\0';\n index++;\n\n int port_index = 0;\n\n while (address_port[index] != '\\0') {\n port[port_index] = address_port[index];\n index++;\n port_index++;\n }\n port[port_index] = '\\0';\n\n *address_p = address;\n *port_p = port;\n}\n\nchar *build_cookies(const char *cookies_path) {\n\n long buffer_size;\n long file_size;\n FILE *file = fopen(cookies_path, \"r\");\n if (file == NULL) {\n fprintf(stderr, \"ERROR: file open %s at %s (%d)\\n\", cookies_path, __FILE__, __LINE__);\n exit(1);\n }\n fseek(file, 0, SEEK_END);\n file_size = ftell(file);\n fclose(file);\n\n buffer_size = (file_size + 100) * 2;\n char *cookies_string = malloc(buffer_size);\n if (cookies_string == NULL) {\n fprintf(stderr, \"ERROR: failed malloc at %s (%d)\\n\", __FILE__, __LINE__);\n exit(1);\n }\n\n file = fopen(cookies_path, \"r\");\n if (file == NULL) {\n fprintf(stderr, \"ERROR: file open %s at %s (%d)\\n\", cookies_path, __FILE__, __LINE__);\n exit(1);\n }\n\n int temp_char;\n unsigned int temp_char_idx;\n\n strcpy(cookies_string, \"Cookie: \");\n temp_char_idx = strlen(\"Cookie: \");\n\n while (!feof(file)) {\n temp_char = getc(file);\n\n if (temp_char == EOF) {\n cookies_string[temp_char_idx] = '\\0';\n break;\n } else if (temp_char == '\\n') {\n\n temp_char = 
getc(file);\n if (temp_char == EOF) {\n cookies_string[temp_char_idx] = '\\0';\n break;\n }\n ungetc(temp_char, file);\n cookies_string[temp_char_idx] = ';';\n temp_char_idx++;\n cookies_string[temp_char_idx] = ' ';\n temp_char_idx++;\n continue;\n } else {\n cookies_string[temp_char_idx] = (char) temp_char;\n temp_char_idx++;\n }\n }\n fclose(file);\n\n return cookies_string;\n}\n\nvoid separate_request_URL(const char *request_URL, char **request_path_p, char **host_p) {\n\n char *request_path;\n char *host;\n\n char *host_start = strchr(request_URL, '/') + 2;\n char *host_end = strchr(host_start, '/');\n\n long host_length = host_end - host_start;\n host = malloc(sizeof(char) * (host_length + 1));\n if (host == NULL) {\n fprintf(stderr, \"ERROR: failed malloc at %s (%d)\\n\", __FILE__, __LINE__);\n exit(1);\n }\n memcpy(host, host_start, host_length);\n host[host_length] = '\\0';\n\n char *request_path_start = host_end;\n char *request_path_end = strchr(request_path_start, '\\0');\n\n long request_path_length = request_path_end - request_path_start;\n request_path = malloc(sizeof(char) * (request_path_length + 1));\n if (request_path == NULL) {\n fprintf(stderr, \"ERROR: failed malloc at %s (%d)\\n\", __FILE__, __LINE__);\n exit(1);\n }\n strcpy(request_path, request_path_start);\n\n *request_path_p = request_path;\n *host_p = host;\n}\n\nchar *build_request(const char *cookies_path, const char *request_URL) {\n char *cookies_string = build_cookies(cookies_path);\n char *request_path;\n char *host;\n\n separate_request_URL(request_URL, &request_path, &host);\n\n unsigned long buffer_size = sizeof(char) * (strlen(\"GET HTTP/1.1\\r\\n\") +\n strlen(request_path) +\n strlen(\"Host: \\r\\n\") +\n strlen(host) +\n strlen(\"\\r\\n\") +\n strlen(cookies_string) +\n strlen(\"Connection: close\\r\\n\\r\\n\") + 1);\n char *request = malloc(buffer_size);\n if (request == NULL) {\n fprintf(stderr, \"ERROR: failed malloc at %s (%d)\\n\", __FILE__, __LINE__);\n exit(1);\n 
}\n\n int ret_val;\n\n ret_val = snprintf(request, buffer_size, \"GET %s HTTP/1.1\\r\\n\"\n \"Host: %s\\r\\n\"\n \"%s\\r\\n\"\n \"Connection: close\\r\\n\\r\\n\",\n request_path, host, cookies_string);\n\n if (ret_val < 0 || ret_val > buffer_size) {\n printf(\"%d\", ret_val);\n fprintf(stderr, \"ERROR: at %s (%d)\\n increase buffer_size.\\n\", __FILE__, __LINE__);\n exit(1);\n }\n\n free(cookies_string);\n free(request_path);\n free(host);\n\n return request;\n}\n\nchar *get_response(const int socket) {\n\n int response_size = BUFFER_SIZE;\n char *response = malloc(response_size);\n if (response == NULL) {\n fprintf(stderr, \"ERROR: failed malloc at %s (%d)\\n\", __FILE__, __LINE__);\n exit(1);\n }\n int response_index = 0;\n int bytes_received;\n\n while ((bytes_received = read(socket, &response[response_index], BUFFER_SIZE)) > 0) {\n response_index += bytes_received;\n if (response_size <= response_index + BUFFER_SIZE) {\n response_size *= 2;\n response = realloc(response, response_size);\n if (response == NULL) {\n fprintf(stderr, \"ERROR: failed malloc at %s (%d)\\n\", __FILE__, __LINE__);\n exit(1);\n }\n }\n }\n\n if(bytes_received < 0)\n {\n fprintf(stderr, \"ERROR: failed read at %s (%d)\\n\", __FILE__, __LINE__);\n exit(1);\n }\n\n response[response_index] = '\\0';\n\n return response;\n}\n\nchar *get_header_line(const char **cur_line_p, bool *is_last_line_p) {\n\n const char *cur_line = *cur_line_p;\n bool is_last_line = *is_last_line_p;\n\n char *end_of_line = strchr(cur_line, '\\r');\n if (end_of_line == NULL) {\n fprintf(stderr, \"ERROR: no header line end at %s (%d)\\n\", __FILE__, __LINE__);\n exit(1);\n }\n long cur_lineLen = end_of_line - cur_line;\n char *header_line = (char *) malloc(sizeof(char) * (cur_lineLen + 1));\n if (header_line == NULL) {\n fprintf(stderr, \"ERROR: failed malloc at %s (%d)\\n\", __FILE__, __LINE__);\n exit(1);\n }\n\n memcpy(header_line, cur_line, cur_lineLen);\n header_line[cur_lineLen] = '\\0';\n\n cur_line = 
&end_of_line[2];\n\n if(cur_line[0] == '\\r') {\n is_last_line = true;\n }\n\n *cur_line_p = cur_line;\n *is_last_line_p = is_last_line;\n\n return header_line;\n}\n\nvoid write_cookie(const char *report_line) {\n\n const char *cookie_key_start;\n const char *cookie_value_end;\n\n cookie_value_end = strchr(report_line, ';');\n if (cookie_value_end == NULL) {\n cookie_value_end = strchr(report_line, '\\0');\n if (cookie_value_end == NULL) {\n fprintf(stderr, \"ERROR: incorrect cookie at %s (%d)\\n\", __FILE__, __LINE__);\n exit(1);\n }\n }\n\n cookie_key_start = report_line + strlen(\"Set-Cookie: \");\n\n long cookie_length = cookie_value_end - cookie_key_start;\n char *cookie = (char *) malloc(sizeof(char) * (cookie_length + 1));\n if (cookie == NULL) {\n fprintf(stderr, \"ERROR: failed malloc at %s (%d)\\n\", __FILE__, __LINE__);\n exit(1);\n }\n\n memcpy(cookie, cookie_key_start, cookie_length);\n cookie[cookie_length] = '\\0';\n\n printf(\"%s\\n\", cookie);\n\n free(cookie);\n}\n\nenum transfer_encoding get_transfer_encoding(const char *response) {\n\n const char *cur_line = response;\n bool is_last_header_line = false;\n\n while(!is_last_header_line){\n\n char *header_line = get_header_line(&cur_line, &is_last_header_line);\n\n if (strstr(header_line, \"Transfer-Encoding: chunked\") == header_line) {\n\n free(header_line);\n return chunked;\n\n } else if (strstr(header_line, \"Transfer-Encoding: identity\") == header_line) {\n\n free(header_line);\n return identity;\n\n } else if (strstr(header_line, \"Transfer-Encoding: \") == header_line) {\n\n free(header_line);\n fprintf(stderr, \"ERROR: unsupported Transfer-Encoding at %s (%d)\\n\", __FILE__, __LINE__);\n exit(1);\n\n }\n free(header_line);\n }\n return identity;\n}\n\nvoid write_cookies(const char *response) {\n\n const char *cur_line = response;\n bool is_last_header_line = false;\n\n while(!is_last_header_line){\n\n char *header_line = get_header_line(&cur_line, &is_last_header_line);\n\n if 
(strstr(header_line, \"Set-Cookie: \") == header_line) {\n\n write_cookie(header_line);\n }\n free(header_line);\n }\n}\n\nchar *get_response_line(const char **curLine_p, bool *is_last_line_p) {\n\n const char *curLine = *curLine_p;\n bool is_last_line = *is_last_line_p;\n\n char *end_of_line = strchr(curLine, '\\r');\n if (end_of_line == NULL) {\n end_of_line = strchr(curLine, '\\0');\n if (end_of_line == NULL) {\n fprintf(stderr, \"ERROR: no string_end at %s (%d)\\n\", __FILE__, __LINE__);\n exit(1);\n }\n is_last_line = true;\n }\n long curLineLen = end_of_line - curLine;\n char *report_line = (char *) malloc(sizeof(char) * (curLineLen + 1));\n if (report_line == NULL) {\n fprintf(stderr, \"ERROR: failed malloc at %s (%d)\\n\", __FILE__, __LINE__);\n exit(1);\n }\n\n memcpy(report_line, curLine, curLineLen);\n report_line[curLineLen] = '\\0';\n\n curLine = &end_of_line[2];\n\n *curLine_p = curLine;\n *is_last_line_p = is_last_line;\n\n return report_line;\n}\n\nconst char *get_message_body_start(const char *response) {\n\n const char *cur_line = response;\n bool is_last_header_line = false;\n\n while(!is_last_header_line){\n\n free(get_header_line(&cur_line, &is_last_header_line));\n }\n free(get_response_line(&cur_line, &is_last_header_line));\n\n return cur_line;\n}\n\nint hexadecimal_to_decimal(char* hex_val)\n{\n int len = (int)strlen(hex_val);\n int base = 1;\n int dec_val = 0;\n\n // Extracting characters as digits from last character\n for (int i = len - 1; i>=0; i--) {\n\n if (hex_val[i] >= '0' && hex_val[i] <= '9') {\n dec_val += (hex_val[i] - '0') * base;\n base = base * 16;\n\n } else if (hex_val[i] >= 'A' && hex_val[i] <= 'F') {\n dec_val += (hex_val[i] - 'A' + 10) * base;\n base = base * 16;\n\n } else if (hex_val[i] >= 'a' && hex_val[i] <= 'f') {\n dec_val += (hex_val[i] - 'a' + 10) * base;\n base = base * 16;\n }\n }\n\n return dec_val;\n}\n\nvoid write_chunked_response_length(const char *message_body_start) {\n\n int response_length = 0;\n const 
char *cur_line = message_body_start;\n bool is_last_line = false;\n char *response_line;\n int chunk_size = INT_MAX;\n\n while (true) {\n response_line = get_response_line(&cur_line, &is_last_line);\n chunk_size = hexadecimal_to_decimal(response_line);\n response_length += chunk_size;\n free(response_line);\n\n if (chunk_size == 0) {\n printf(\"Dlugosc zasobu: %d\\n\", response_length);\n return;\n } else if (is_last_line) {\n fprintf(stderr, \"ERROR: unexpected end of response at %s (%d)\\n\", __FILE__, __LINE__);\n exit(1);\n }\n\n //non-trailing CRFL are counted in chunk_size\n while (chunk_size > -(int)strlen(\"\\r\\n\") && !is_last_line) {\n response_line = get_response_line(&cur_line, &is_last_line);\n chunk_size -= (int)(strlen(response_line) + strlen(\"\\r\\n\"));\n free(response_line);\n }\n if (is_last_line) {\n fprintf(stderr, \"ERROR: unexpected end of response at %s (%d)\\n\", __FILE__, __LINE__);\n exit(1);\n }\n if (chunk_size != -(int)strlen(\"\\r\\n\")) {\n fprintf(stderr, \"ERROR: incorrect chunk size %d at %s (%d)\\n\", chunk_size, __FILE__, __LINE__);\n exit(1);\n }\n }\n\n}\n\nvoid write_response_length(const char *response) {\n\n enum transfer_encoding encoding = get_transfer_encoding(response);\n\n const char *message_body_start = get_message_body_start(response);\n\n if (encoding == identity) {\n\n printf(\"Dlugosc zasobu: %lu\\n\", strlen(message_body_start));\n return;\n\n } else if (encoding == chunked) {\n\n write_chunked_response_length(message_body_start);\n return;\n\n } else {\n fprintf(stderr, \"ERROR: unexpected enum at %s (%d)\\n\", __FILE__, __LINE__);\n exit(1);\n }\n}\n\nvoid write_report(const char *response) {\n\n const char *first_line = response;\n bool is_last_line = false;\n\n char *status_line = get_header_line(&first_line, &is_last_line);\n if (strstr(status_line, \"200 OK\") == NULL) {\n printf(\"%s\\n\", status_line);\n } else {\n write_cookies(response);\n write_response_length(response);\n }\n\n 
free(status_line);\n}\n\nint main(int argc, char *argv[]) {\n\n if (argc != 4) {\n fprintf(stderr, \"ERROR: Usage: %s <adres połączenia>:<port> \"\n \"<plik ciasteczek> <testowany adres http>\\n\", argv[0]);\n exit(1);\n }\n\n int socket;\n char *address;\n char *port;\n\n separate_address_port(argv[1], &address, &port);\n\n socket = socket_connect(address, port);\n\n char *request = build_request(argv[2], argv[3]);\n\n if (write(socket, request, strlen(request) + 1) < 0) {\n fprintf(stderr, \"ERROR: writing on stream socket at %s (%d)\\n\", __FILE__, __LINE__);\n exit(1);\n }\n\n char *response = get_response(socket);\n\n write_report(response);\n\n shutdown(socket, SHUT_RDWR);\n if (close(socket) < 0) {\n fprintf(stderr, \"ERROR: closing stream socket at %s (%d)\\n\", __FILE__, __LINE__);\n exit(1);\n }\n\n free(address);\n free(port);\n free(request);\n free(response);\n\n return 0;\n}\n" }, { "alpha_fraction": 0.6116071343421936, "alphanum_fraction": 0.6160714030265808, "avg_line_length": 14.928571701049805, "blob_id": "dce25c96613bcdedb55e67e7a30d083dad56a9dd", "content_id": "855699243bc01d494b9883cacf5265ca56a0baf3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Makefile", "length_bytes": 224, "license_type": "no_license", "max_line_length": 35, "num_lines": 14, "path": "/zadanie2/makefile", "repo_name": "JanDziuba/SIK", "src_encoding": "UTF-8", "text": "CC\t = g++\nCXXFLAGS = -Wall -O2 -Wextra\nTARGETS = radio-proxy\n\nall: $(TARGETS) \n\nradio-proxy.o: radio-proxy.cpp\n\nradio-proxy: radio-proxy.o\n\t$(CC) $(CFLAGS) $^ -lpthread -o $@\n\n.PHONY: clean\nclean:\n\trm -f *.o *~ $(TARGETS) \n" }, { "alpha_fraction": 0.5415395498275757, "alphanum_fraction": 0.548513650894165, "avg_line_length": 27.04645538330078, "blob_id": "6544e6e9aeec664593bce52721014b20f76af4b7", "content_id": "4d550218c22267642af91a70368fb7df4ab0d457", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 
22942, "license_type": "no_license", "max_line_length": 99, "num_lines": 818, "path": "/zadanie2/radio-proxy.cpp", "repo_name": "JanDziuba/SIK", "src_encoding": "UTF-8", "text": "#include <cstdio>\n#include <string>\n#include <cstring>\n#include <cstdlib>\n#include <unistd.h>\n#include <iostream>\n#include <csignal>\n#include <algorithm>\n#include <chrono>\n#include <mutex>\n#include <unordered_map>\n#include <thread>\n#include <sys/socket.h>\n#include <netinet/in.h>\n#include <netdb.h>\n#include <arpa/inet.h>\n#include <atomic>\n\n#define MAX_MSG_LENGTH 65535\n#define BUFFER_SIZE 8192\n#define DISCOVER 1\n#define IAM 2\n#define KEEPALIVE 3\n#define AUDIO 4\n#define METADATA 6\n#define CLIENT_HEADER_LENGTH 4\n\nusing namespace std;\n\nstruct Parameters\n{\n string host;\n string resource;\n string streamPort;\n string clientPort;\n string multicastAddress;\n bool metaData = false;\n unsigned long streamTimeout = 5;\n unsigned long clientTimeout = 5;\n};\n\nstruct Client\n{\n sockaddr_in addrInfo;\n chrono::time_point<chrono::system_clock> lastKeepAliveTime;\n};\n\nmutex dataMutex;\n\n// mutex that locks next access to critical section\nmutex nextMutex;\n\n// first - client address\nunordered_map<in_addr_t, Client> clients;\n\n// flag that tells whether radio stream should end\nvolatile atomic<bool> stream(true);\n\nstatic void catchInt(int sig)\n{\n if (sig == SIGINT)\n {\n stream = false;\n }\n else\n {\n cerr << \"ERROR: Unknown signal received at line \" << __LINE__ << \"\\n\";\n exit(EXIT_FAILURE);\n }\n}\n\nvoid addSigaction()\n{\n struct sigaction action{};\n sigset_t block_mask;\n\n sigemptyset(&block_mask);\n action.sa_handler = catchInt;\n action.sa_mask = block_mask;\n action.sa_flags = SA_RESTART;\n\n if (sigaction(SIGINT, &action, nullptr) == -1)\n {\n cerr << \"ERROR: sigaction at line \" << __LINE__ << \"\\n\";\n exit(EXIT_FAILURE);\n }\n}\n\nParameters getParameters(int argc, const char *const argv[])\n{\n if (argc % 2 == 0)\n {\n cerr << \"ERROR: 
wrong arguments at line \" << __LINE__ << \"\\n\";\n exit(EXIT_FAILURE);\n }\n\n bool isHost = false;\n bool isResource = false;\n bool isStreamPort = false;\n bool isClientPort = false;\n bool isMulticastAddress = false;\n bool isClientTimeout = false;\n\n Parameters parameters;\n\n int index = 1;\n\n while (index < argc - 1)\n {\n if (strcmp(argv[index], \"-h\") == 0)\n {\n isHost = true;\n index++;\n parameters.host.assign(argv[index]);\n }\n else if (strcmp(argv[index], \"-r\") == 0)\n {\n isResource = true;\n index++;\n parameters.resource.assign(argv[index]);\n }\n else if (strcmp(argv[index], \"-p\") == 0)\n {\n isStreamPort = true;\n index++;\n parameters.streamPort.assign(argv[index]);\n }\n else if (strcmp(argv[index], \"-m\") == 0)\n {\n index++;\n if (strcmp(argv[index], \"yes\") == 0)\n {\n parameters.metaData = true;\n }\n else if (strcmp(argv[index], \"no\") == 0)\n {\n parameters.metaData = false;\n }\n else\n {\n cerr << \"ERROR: wrong arguments at line \" << __LINE__ << \"\\n\";\n exit(EXIT_FAILURE);\n }\n }\n else if (strcmp(argv[index], \"-t\") == 0)\n {\n index++;\n string streamTimeoutStr(argv[index]);\n parameters.streamTimeout = stoul(streamTimeoutStr, nullptr, 10);\n if (parameters.streamTimeout == 0)\n {\n cerr << \"ERROR: wrong arguments at line \" << __LINE__ << \"\\n\";\n exit(EXIT_FAILURE);\n }\n }\n else if (strcmp(argv[index], \"-P\") == 0)\n {\n index++;\n isClientPort = true;\n parameters.clientPort.assign(argv[index]);\n }\n else if (strcmp(argv[index], \"-B\") == 0)\n {\n index++;\n isMulticastAddress = true;\n parameters.multicastAddress.assign(argv[index]);\n }\n else if (strcmp(argv[index], \"-T\") == 0)\n {\n index++;\n isClientTimeout = true;\n string clientTimeoutStr(argv[index]);\n parameters.clientTimeout = stoul(clientTimeoutStr, nullptr, 10);\n if (parameters.clientTimeout == 0)\n {\n cerr << \"ERROR: wrong arguments at line \" << __LINE__ << \"\\n\";\n exit(EXIT_FAILURE);\n }\n }\n else\n {\n cerr << \"ERROR: wrong 
arguments at line \" << __LINE__ << \"\\n\";\n exit(EXIT_FAILURE);\n }\n index++;\n }\n\n if (!isHost || !isResource || !isStreamPort ||\n (!isClientPort && (isMulticastAddress || isClientTimeout)))\n {\n cerr << \"ERROR: wrong arguments at line \" << __LINE__ << \"\\n\";\n exit(EXIT_FAILURE);\n }\n\n return parameters;\n}\n\n// Creates socket that connects to radio.\nint socketConnect(const Parameters &parameters)\n{\n int retValue;\n int sock;\n struct addrinfo addrHints{}, *addrResult;\n\n addrHints.ai_flags = 0;\n addrHints.ai_family = AF_INET;\n addrHints.ai_socktype = SOCK_STREAM;\n addrHints.ai_protocol = IPPROTO_TCP;\n\n retValue = getaddrinfo(parameters.host.c_str(), parameters.streamPort.c_str(), &addrHints,\n &addrResult);\n if (retValue != 0)\n {\n cerr << \"ERROR: getaddrinfo at line \" << __LINE__ << \"\\n\";\n exit(EXIT_FAILURE);\n }\n\n sock = socket(addrResult->ai_family, addrResult->ai_socktype, addrResult->ai_protocol);\n if (sock < 0)\n {\n cerr << \"ERROR: creating socket at line \" << __LINE__ << \"\\n\";\n exit(EXIT_FAILURE);\n }\n\n struct timeval tv{};\n tv.tv_sec = parameters.streamTimeout;\n tv.tv_usec = 0;\n if (setsockopt(sock, SOL_SOCKET, SO_RCVTIMEO, (void *) &tv, sizeof(tv)) != 0)\n {\n cerr << \"ERROR: setsockopt at line \" << __LINE__ << \"\\n\";\n exit(EXIT_FAILURE);\n }\n\n if (connect(sock, addrResult->ai_addr, addrResult->ai_addrlen) != 0)\n {\n cerr << \"ERROR: connect at line \" << __LINE__ << \"\\n\";\n exit(EXIT_FAILURE);\n }\n freeaddrinfo(addrResult);\n\n return sock;\n}\n\n// Returns http request. 
Memory needs to be freed.\nchar *buildRequest(const Parameters &parameters)\n{\n string metaDataHeader;\n\n if (parameters.metaData)\n {\n metaDataHeader = \"Icy-MetaData:1\\r\\n\";\n }\n else\n {\n metaDataHeader = \"\";\n }\n\n unsigned long bufferSize = (strlen(\"GET HTTP/1.0\\r\\n\")\n + parameters.resource.length()\n + strlen(\"Host: \\r\\n\")\n + parameters.host.length()\n + metaDataHeader.length()\n + strlen(\"Connection: close\\r\\n\")\n + strlen(\"\\r\\n\") + 1);\n\n char *request = new char[bufferSize];\n\n int retVal;\n\n retVal = snprintf(request, bufferSize, \"GET %s HTTP/1.0\\r\\n\"\n \"Host: %s\\r\\n\"\n \"%s\"\n \"Connection: close\\r\\n\\r\\n\",\n parameters.resource.c_str(), parameters.host.c_str(),\n metaDataHeader.c_str());\n\n if (retVal < 0 || (unsigned) retVal > bufferSize)\n {\n printf(\"%d\", retVal);\n cerr << \"ERROR: at line \" << __LINE__ << \"increase bufferSize\\n\";\n exit(EXIT_FAILURE);\n }\n\n return request;\n}\n\n// Reads exactly count number of bytes from fd. 
If it can't be done exits the program.\nvoid safeRead(int fd, void *buffer, size_t count)\n{\n int bytesReceived;\n size_t index = 0;\n char *buf = (char *) buffer;\n\n while (index < count)\n {\n bytesReceived = read(fd, &(buf[index]), count - index);\n\n if (bytesReceived == 0)\n {\n cerr << \"ERROR: end of stream at line \" << __LINE__ << \"\\n\";\n exit(EXIT_FAILURE);\n }\n else if (bytesReceived < 0)\n {\n if (errno == EINTR)\n {\n continue;\n }\n else\n {\n cerr << \"ERROR: failed read at line \" << __LINE__ << \"\\n\";\n exit(EXIT_FAILURE);\n }\n }\n index += bytesReceived;\n }\n}\n\n// Reads http response header from socket and returns it\nstring getHeader(int socket)\n{\n char *buffer = new char();\n string header;\n\n while (header.length() < strlen(\"\\r\\n\\r\\n\")\n || header.compare(header.length() - strlen(\"\\r\\n\\r\\n\"),\n strlen(\"\\r\\n\\r\\n\"), \"\\r\\n\\r\\n\") != 0)\n {\n safeRead(socket, buffer, 1);\n header.push_back(*buffer);\n }\n\n delete buffer;\n return header;\n}\n\n// Checks if http response status is correct.\n// If not ends the program.\nvoid checkStatus(const string &header)\n{\n size_t statusStart = 0;\n size_t statusEnd;\n string status;\n\n statusEnd = header.find(\"\\r\\n\", statusStart);\n if (statusEnd == std::string::npos)\n {\n cerr << \"ERROR: wrong header at line \" << __LINE__ << \"\\n\";\n exit(EXIT_FAILURE);\n }\n statusEnd--;\n status = header.substr(statusStart, statusEnd - statusStart + 1);\n\n if (status != \"ICY 200 OK\" && status != \"HTTP/1.0 200 OK\" && status != \"HTTP/1.1 200 OK\")\n {\n cerr << \"incorrect status: \" << status << \" at line \" << __LINE__ << \"\\n\";\n exit(EXIT_FAILURE);\n }\n}\n\n// Sets metaDataInterval to \"icy-metaint:\" value, or to 0 if no \"icy-metaint:\".\n// Sets radioName to \"icy-name:\" value, or to \"\" if no \"icy-name:\".\nvoid getHeaderData(const string &header, unsigned long &metaDataInterval, string &radioName)\n{\n size_t lineStart = 0;\n size_t lineEnd;\n string 
line;\n string lowerCaseLine;\n\n metaDataInterval = 0;\n radioName = \"\";\n\n while (lineStart < header.length())\n {\n lineEnd = header.find(\"\\r\\n\", lineStart);\n if (lineEnd == std::string::npos)\n {\n cerr << \"ERROR: wrong header at line \" << __LINE__ << \"\\n\";\n exit(EXIT_FAILURE);\n }\n lineEnd += strlen(\"\\r\\n\") - 1;\n line = header.substr(lineStart, lineEnd - lineStart + 1);\n\n lowerCaseLine = line;\n transform(lowerCaseLine.begin(), lowerCaseLine.end(), lowerCaseLine.begin(), ::tolower);\n\n if (lowerCaseLine.find(\"icy-metaint:\") == 0)\n {\n size_t metaIntStart = strlen(\"icy-metaint:\");\n size_t metaIntEnd = lowerCaseLine.find(\"\\r\\n\");\n if (metaIntEnd == std::string::npos)\n {\n cerr << \"ERROR: wrong header at line \" << __LINE__ << \"\\n\";\n exit(EXIT_FAILURE);\n }\n metaIntEnd--;\n\n string metaIntStr = line.substr(metaIntStart, metaIntEnd - metaIntStart + 1);\n metaDataInterval = stoul(metaIntStr, nullptr, 10);\n }\n\n if (lowerCaseLine.find(\"icy-name:\") == 0)\n {\n size_t radioNameStart = strlen(\"icy-name:\");\n size_t radioNameEnd = lowerCaseLine.find(\"\\r\\n\");\n if (radioNameEnd == std::string::npos)\n {\n cerr << \"ERROR: wrong header at line \" << __LINE__ << \"\\n\";\n exit(EXIT_FAILURE);\n }\n radioNameEnd--;\n\n radioName = line.substr(radioNameStart, radioNameEnd - radioNameStart + 1);\n }\n\n lineStart = lineEnd + 1;\n }\n}\n\n// Creates socket for communication with clients.\nint clientSocketConnect(const Parameters &parameters)\n{\n in_port_t localPort;\n int sock;\n struct sockaddr_in localAddress{};\n struct ip_mreq ipMreq{};\n\n localPort = (in_port_t) stoul(parameters.clientPort, nullptr, 10);\n\n // initialise socket\n sock = socket(AF_INET, SOCK_DGRAM, 0);\n if (sock < 0)\n {\n cerr << \"ERROR: socket at line \" << __LINE__ << \"\\n\";\n exit(EXIT_FAILURE);\n }\n\n struct timeval tv{};\n tv.tv_sec = 1;\n tv.tv_usec = 0;\n if (setsockopt(sock, SOL_SOCKET, SO_RCVTIMEO, (void *) &tv, sizeof(tv)) != 0)\n {\n 
cerr << \"ERROR: setsockopt at line \" << __LINE__ << \"\\n\";\n exit(EXIT_FAILURE);\n }\n\n // connect to multicast group\n if (!parameters.multicastAddress.empty())\n {\n ipMreq.imr_interface.s_addr = htonl(INADDR_ANY);\n if (inet_aton(parameters.multicastAddress.c_str(), &ipMreq.imr_multiaddr) == 0)\n {\n cerr << \"ERROR: inet_aton - invalid multicast address at line \" << __LINE__ << \"\\n\";\n exit(EXIT_FAILURE);\n }\n if (setsockopt(sock, IPPROTO_IP, IP_ADD_MEMBERSHIP, (void *) &ipMreq, sizeof ipMreq)\n < 0)\n {\n cerr << \"ERROR: setsockopt at line \" << __LINE__ << \"\\n\";\n exit(EXIT_FAILURE);\n }\n }\n\n // set address and local port\n localAddress.sin_family = AF_INET;\n localAddress.sin_addr.s_addr = htonl(INADDR_ANY);\n localAddress.sin_port = htons(localPort);\n if (bind(sock, (struct sockaddr *) &localAddress, sizeof localAddress) < 0)\n {\n cerr << \"ERROR: bind at line \" << __LINE__ << \"\\n\";\n exit(EXIT_FAILURE);\n }\n\n return sock;\n}\n\n// Returns true if message was sent, false if it failed to send it.\nbool sendMsgToClient(int clientSocket, const char *data, uint16_t dataLength, uint16_t msgType,\n sockaddr_in addrInfo)\n{\n uint8_t msgTypeMoreSignificantByte = msgType >> 8;\n uint8_t msgTypeLessSignificantByte = msgType & 0xFF;\n uint8_t dataLengthMoreSignificantByte = dataLength >> 8;\n uint8_t dataLengthLessSignificantByte = dataLength & 0xFF;\n size_t messageLength = dataLength + CLIENT_HEADER_LENGTH;\n auto *message = new char[messageLength];\n int bytesSent;\n\n // fill 4 first bytes of message with msgType and dataLength Big Endian order\n message[0] = msgTypeMoreSignificantByte;\n message[1] = msgTypeLessSignificantByte;\n message[2] = dataLengthMoreSignificantByte;\n message[3] = dataLengthLessSignificantByte;\n memcpy(&(message[5]), data, dataLength);\n\n bytesSent = sendto(clientSocket, message, messageLength, 0,\n (struct sockaddr *) &(addrInfo), (socklen_t) sizeof(sockaddr_in));\n delete[] message;\n\n if (bytesSent >= 0 && 
(size_t) bytesSent == messageLength)\n {\n return true;\n }\n else\n {\n return false;\n }\n}\n\n// Sends message to all clients.\nvoid sendToClients(int clientSocket, const char *data, uint16_t dataLength, uint16_t msgType,\n unsigned long clientTimeout)\n{\n long elapsedTime;\n\n nextMutex.lock();\n dataMutex.lock();\n nextMutex.unlock();\n\n for (auto client : clients)\n {\n elapsedTime = chrono::duration_cast<chrono::seconds>\n (chrono::system_clock::now() - client.second.lastKeepAliveTime).count();\n\n if (elapsedTime > 0 && (unsigned long) elapsedTime > clientTimeout)\n {\n clients.erase(client.first);\n continue;\n }\n\n if (!sendMsgToClient(clientSocket, data, dataLength,\n msgType, client.second.addrInfo))\n {\n clients.erase(client.first);\n }\n }\n\n dataMutex.unlock();\n}\n\n// Streams radio without metadata.\nvoid sendOutputWithoutMetadata(int socket, int clientSocket, bool withClients,\n unsigned long clientTimeout)\n{\n char buffer[BUFFER_SIZE];\n\n while (stream)\n {\n safeRead(socket, buffer, BUFFER_SIZE);\n\n if (withClients)\n {\n sendToClients(clientSocket, buffer, BUFFER_SIZE, AUDIO, clientTimeout);\n }\n else\n {\n string output(buffer, BUFFER_SIZE);\n cout << output;\n }\n }\n}\n\nvoid sendMetadata(int socket, int clientSocket, bool withClients, unsigned long clientTimeout)\n{\n char buffer[MAX_MSG_LENGTH];\n uint8_t lengthBuffer[1];\n uint16_t metaDataLength;\n\n safeRead(socket, lengthBuffer, 1);\n metaDataLength = lengthBuffer[0] * 16;\n if (metaDataLength == 0)\n {\n return;\n }\n\n safeRead(socket, buffer, metaDataLength);\n\n if (withClients)\n {\n sendToClients(clientSocket, buffer, metaDataLength, METADATA, clientTimeout);\n }\n else\n {\n string output(buffer, metaDataLength);\n cerr << output;\n }\n\n}\n\n// Streams radio with metadata.\nvoid sendOutputWithMetadata(int socket, unsigned long metaDataInterval, int clientSocket,\n bool withClients, unsigned long clientTimeout)\n{\n char buffer[BUFFER_SIZE];\n unsigned long index;\n\n 
while (stream)\n {\n index = 0;\n while (stream && metaDataInterval - index > BUFFER_SIZE)\n {\n safeRead(socket, buffer, BUFFER_SIZE);\n if (withClients)\n {\n sendToClients(clientSocket, buffer, BUFFER_SIZE, AUDIO, clientTimeout);\n }\n else\n {\n string output(buffer, BUFFER_SIZE);\n cout << output;\n }\n index += BUFFER_SIZE;\n }\n\n if (stream)\n {\n safeRead(socket, buffer, metaDataInterval - index);\n if (withClients)\n {\n sendToClients(clientSocket, buffer, metaDataInterval - index, AUDIO,\n clientTimeout);\n }\n else\n {\n string output(buffer, metaDataInterval - index);\n cout << output;\n }\n sendMetadata(socket, clientSocket, withClients, clientTimeout);\n }\n }\n}\n\nvoid sendOutput(int socket, unsigned long metaDataInterval, int clientSocket, bool withClients,\n unsigned long clientTimeout)\n{\n if (metaDataInterval == 0)\n {\n sendOutputWithoutMetadata(socket, clientSocket, withClients, clientTimeout);\n }\n else\n {\n sendOutputWithMetadata(socket, metaDataInterval, clientSocket, withClients, clientTimeout);\n }\n}\n\n// Receives messages from clients and responds accordingly.\nvoid talkWithClients(int clientSocket, const string &radioName)\n{\n unsigned char buffer[CLIENT_HEADER_LENGTH];\n sockaddr_in srcAddr{};\n auto addrLen = (socklen_t) sizeof(srcAddr);\n uint16_t dataLength;\n uint16_t msgType;\n uint8_t msgTypeMoreSignificantByte;\n uint8_t msgTypeLessSignificantByte;\n uint8_t dataLengthMoreSignificantByte;\n uint8_t dataLengthLessSignificantByte;\n\n while (stream)\n {\n if (recvfrom(clientSocket, buffer, CLIENT_HEADER_LENGTH, MSG_WAITALL,\n (struct sockaddr *) &srcAddr, &addrLen) != CLIENT_HEADER_LENGTH)\n {\n continue;\n }\n\n // read 4 first bytes of message as msgType and dataLength Big Endian order\n msgTypeMoreSignificantByte = buffer[0];\n msgTypeLessSignificantByte = buffer[1];\n dataLengthMoreSignificantByte = buffer[2];\n dataLengthLessSignificantByte = buffer[3];\n\n msgType = ((uint16_t) msgTypeMoreSignificantByte) << 8\n | 
msgTypeLessSignificantByte;\n dataLength = ((uint16_t) dataLengthMoreSignificantByte) << 8\n | dataLengthLessSignificantByte;\n\n //all supported client messages have dataLength 0\n if (dataLength != 0)\n {\n continue;\n }\n\n nextMutex.lock();\n dataMutex.lock();\n nextMutex.unlock();\n\n if (msgType == DISCOVER)\n {\n // check if client is already manned\n if (clients.find(srcAddr.sin_addr.s_addr) == clients.end())\n {\n clients.insert({srcAddr.sin_addr.s_addr,\n {srcAddr, chrono::system_clock::now()}});\n\n if (!sendMsgToClient(clientSocket, radioName.c_str(), radioName.length(), IAM,\n srcAddr))\n {\n clients.erase(srcAddr.sin_addr.s_addr);\n }\n }\n else\n {\n auto client = clients.find(srcAddr.sin_addr.s_addr);\n client->second.lastKeepAliveTime = chrono::system_clock::now();\n }\n }\n else if (msgType == KEEPALIVE)\n {\n if (clients.find(srcAddr.sin_addr.s_addr) == clients.end())\n {\n cerr << \"ERROR: msg from unknown client at line \" << __LINE__ << \"\\n\";\n exit(EXIT_FAILURE);\n }\n else\n {\n auto client = clients.find(srcAddr.sin_addr.s_addr);\n client->second.lastKeepAliveTime = chrono::system_clock::now();\n }\n }\n dataMutex.unlock();\n }\n}\n\n// If socket is open closes it and sets its value to -1. 
If it can't be done exits the program\nvoid safeClose(int &socket)\n{\n if (socket >= 0)\n {\n if (close(socket) < 0)\n {\n if (errno == EINTR)\n {\n if (close(socket) < 0)\n {\n cerr << \"ERROR: closing socket at line \" << __LINE__ << \"\\n\";\n exit(EXIT_FAILURE);\n }\n }\n else\n {\n cerr << \"ERROR: closing socket at line \" << __LINE__ << \"\\n\";\n exit(EXIT_FAILURE);\n }\n }\n socket = -1;\n }\n}\n\nvoid streamRadio(int socket, const Parameters &parameters)\n{\n string header = getHeader(socket);\n checkStatus(header);\n\n unsigned long metaDataInterval;\n string radioName;\n\n getHeaderData(header, metaDataInterval, radioName);\n\n if (metaDataInterval != 0 && !parameters.metaData)\n {\n cerr << \"ERROR: response with metadata when request with no metadata at line \" << __LINE__\n << \"\\n\";\n exit(EXIT_FAILURE);\n }\n\n int clientSocket = -1;\n bool withClients;\n\n if (parameters.clientPort.empty())\n {\n withClients = false;\n }\n else\n {\n withClients = true;\n }\n\n if (withClients)\n {\n clientSocket = clientSocketConnect(parameters);\n thread talkThread(talkWithClients, clientSocket, radioName);\n thread sendThread(sendOutput, socket, metaDataInterval, clientSocket,\n withClients, parameters.clientTimeout);\n talkThread.join();\n sendThread.join();\n }\n else\n {\n sendOutput(socket, metaDataInterval, clientSocket, withClients, parameters.clientTimeout);\n }\n\n safeClose(clientSocket);\n}\n\nint main(int argc, char *argv[])\n{\n addSigaction();\n\n Parameters parameters = getParameters(argc, argv);\n int socket;\n\n socket = socketConnect(parameters);\n\n char *request = buildRequest(parameters);\n\n if (write(socket, request, strlen(request) + 1) < 0)\n {\n cerr << \"ERROR: writing on stream socket at line \" << __LINE__ << \"\\n\";\n exit(EXIT_FAILURE);\n }\n\n streamRadio(socket, parameters);\n safeClose(socket);\n\n delete[] request;\n return 0;\n}\n" } ]
5