Dataset schema (per-column dtype and value statistics):

| column | dtype | values |
| --- | --- | --- |
| repo_name | stringlengths | 5 – 114 |
| repo_url | stringlengths | 24 – 133 |
| snapshot_id | stringlengths | 40 – 40 |
| revision_id | stringlengths | 40 – 40 |
| directory_id | stringlengths | 40 – 40 |
| branch_name | stringclasses | 209 values |
| visit_date | timestamp[ns] | |
| revision_date | timestamp[ns] | |
| committer_date | timestamp[ns] | |
| github_id | int64 | 9.83k – 683M, ⌀ |
| star_events_count | int64 | 0 – 22.6k |
| fork_events_count | int64 | 0 – 4.15k |
| gha_license_id | stringclasses | 17 values |
| gha_created_at | timestamp[ns] | |
| gha_updated_at | timestamp[ns] | |
| gha_pushed_at | timestamp[ns] | |
| gha_language | stringclasses | 115 values |
| files | listlengths | 1 – 13.2k |
| num_files | int64 | 1 – 13.2k |
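Each row is one repository snapshot, with the per-file records nested under `files`. As a minimal sketch of how a split with this schema could be loaded and inspected, assuming the dataset is published on the Hugging Face Hub — the identifier `user/repo-snapshots` below is a hypothetical placeholder, not the actual dataset path:

```python
# Minimal sketch: load a dataset with the schema above and inspect one row.
# "user/repo-snapshots" is a hypothetical placeholder identifier.
from datasets import load_dataset

ds = load_dataset("user/repo-snapshots", split="train")

row = ds[0]
print(row["repo_name"], row["branch_name"], row["num_files"])

# `files` is a list of per-file records; `path`, `language`, and
# `length_bytes` are keys that appear in the sample rows below.
for f in row["files"]:
    print(f["path"], f["language"], f["length_bytes"])
```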
repo_name: 95hongju/kiki_chatbot
repo_url: https://github.com/95hongju/kiki_chatbot
snapshot_id: abbd0138c7c97be94298a3f391425f52d4f88dc2
revision_id: f862fba023570de36b522d306bcfc85eb93bdff2
directory_id: 040650454e183e42a93202af926d34a216130d98
branch_name: refs/heads/master
visit_date: 2020-06-19T02:27:26.132640
revision_date: 2019-10-14T06:58:30
committer_date: 2019-10-14T06:58:30
github_id: 196,531,899
star_events_count: 0
fork_events_count: 1
gha_license_id: null
gha_created_at: null
gha_updated_at: null
gha_pushed_at: null
gha_language: null
files:
[ { "alpha_fraction": 0.569698691368103, "alphanum_fraction": 0.5767389535903931, "avg_line_length": 27.5220890045166, "blob_id": "6dea1990b514b260f75bd836127c2d804dd62cc4", "content_id": "1bbdd94616ae05d843c44b068bae02f272e4e4ab", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7448, "license_type": "no_license", "max_line_length": 101, "num_lines": 249, "path": "/bugsmusic_chat.py", "repo_name": "95hongju/kiki_chatbot", "src_encoding": "UTF-8", "text": "import re\nimport json\nimport random\nimport time\nimport urllib.request\nfrom bs4 import BeautifulSoup\nfrom flask import Flask, request\nfrom slack import WebClient\nfrom slack.web.classes import extract_json\nfrom slack.web.classes.blocks import *\nfrom slack.web.classes.elements import *\nfrom slack.web.classes.interactions import MessageInteractiveEvent\nfrom slackeventsapi import SlackEventAdapter\n\n\nSLACK_TOKEN = \"\"\nSLACK_SIGNING_SECRET = \"\"\n\n\napp = Flask(__name__)\n# /listening ์œผ๋กœ ์Šฌ๋ž™ ์ด๋ฒคํŠธ๋ฅผ ๋ฐ›์Šต๋‹ˆ๋‹ค.\nslack_events_adaptor = SlackEventAdapter(SLACK_SIGNING_SECRET, \"/listening\", app)\nslack_web_client = WebClient(token=SLACK_TOKEN)\n\ndef show_menu_list():\n button_actions = ActionsBlock(\n elements=[\n ButtonElement(\n text=\"์‹ค์‹œ๊ฐ„์ฐจํŠธ\",style=\"primary\",\n action_id=\"chart_current\",value = \"chart_current\"\n ),\n ButtonElement(\n text=\"์žฅ๋ฅด๋ณ„์ฐจํŠธ\", style=\"danger\",\n action_id=\"chart_genre\",value = \"chart_genre\"\n ),\n ButtonElement(\n text=\"์˜ค๋Š˜์˜ ๋…ธ๋ž˜์ถ”์ฒœ\",\n action_id=\"chart_album\",value = \"chart_album\"\n ),\n\n ]\n )\n return [button_actions]\n\ndef today_musics():\n\n url = \"https://music.bugs.co.kr/connect/chart/track/day/connectall\"\n source_code = urllib.request.urlopen(url).read()\n soup = BeautifulSoup(source_code, \"html.parser\")\n\n recos = soup.find('table', class_='list trackList').find('tbody')\n p_titles = recos.find_all('p', class_='title')\n p_artists = recos.find_all('p', class_='artist')\n p_imgs = recos.find_all('a', class_='thumbnail')\n\n titles = [title.find('a').get_text() for title in p_titles]\n artists = [artist.find('a').get_text() for artist in p_artists]\n imgs = [img.find('img')['src'] for img in p_imgs]\n\n random_list = {}\n\n for idx,(title, artist, img) in enumerate(zip(titles, artists, imgs)):\n\n random_list[idx] = [title, artist, img]\n\n random_recommand= [random.randint(0,len(random_list)) for r in range(3)]\n print(random_recommand)\n\n message_list = []\n attachments_list = []\n\n for s in range(len(random_recommand)):\n tmp = random_list[random_recommand[s]]\n print(tmp)\n tmp_txt = '{} / {}'.format(tmp[0], tmp[1])\n\n\n attachments = [{\"text\": tmp_txt,\n \"thumb_url\": tmp[2]}]\n message_list.append('')\n attachments_list.append(attachments)\n\n\n return message_list, attachments_list\n\n\ndef genre_crawl(sel):\n\n genre = [\"ballad\",\"rnh\",\"rns\",\"elec\",\"rock\"]\n\n url = \"https://music.bugs.co.kr/genre/chart/kpop/\" + genre[sel-1] + \"/total/day\"\n source_code = urllib.request.urlopen(url).read()\n soup = BeautifulSoup(source_code, \"html.parser\")\n\n #upper message\n message = {}\n views = soup.find(\"table\", class_=\"list trackList byChart\").find('tbody')\n titles_p = views.find_all('p', class_='title')\n artists_p = views.find_all('p', class_= 'artist')\n imgs_p = views.find_all('a', class_=\"thumbnail\")\n\n titles = [tit.find(\"a\").get_text() for tit in titles_p]\n artists = [tit.find(\"a\").get_text() for tit in artists_p]\n imgs = 
[tit.find(\"img\")['src'] for tit in imgs_p]\n\n i = 0\n for title, artist in zip(titles, artists):\n message[i] = [title, artist]\n i += 1\n\n rtn_msg = []\n rtn_att = []\n\n for num in range(0,10):\n txt = \"{}์œ„ : {} / {}\".format(num + 1, message[num][0], message[num][1])\n attachments = [{\"text\": txt, \"thumb_url\": imgs[num]}]\n\n rtn_msg.append('')\n rtn_att.append(attachments)\n\n return rtn_msg, rtn_att\n\n\n\n\ndef _crawl_music_chart():\n\n url = \"https://music.bugs.co.kr/chart\"\n source_code = urllib.request.urlopen(url).read()\n soup = BeautifulSoup(source_code, \"html.parser\")\n\n\n message_list = []\n attachments_list = []\n\n total_table = soup.find('table',class_='list trackList byChart').find('tbody').find_all('tr')\n\n for idx,row in enumerate(total_table[:10]):\n thumbs = row.find('img')['src']\n title = row.find('p',class_='title').find('a')['title']\n artist = row.find('p',class_='artist').find('a')['title']\n\n txt = '{}์œ„: {} / {}'.format(idx+1,title,artist)\n\n attachments = [{\"text\": txt,\n \"thumb_url\": thumbs}]\n message_list.append('')\n attachments_list.append(attachments)\n\n\n return message_list, attachments_list\n\n\n# ์ฑ—๋ด‡์ด ๋ฉ˜์…˜์„ ๋ฐ›์•˜์„ ๊ฒฝ์šฐ\n@slack_events_adaptor.on(\"app_mention\")\ndef app_mentioned(event_data):\n channel = event_data[\"event\"][\"channel\"]\n text = event_data[\"event\"][\"text\"]\n\n if 'music' in text:\n message = show_menu_list()\n slack_web_client.chat_postMessage(\n channel=channel,\n blocks=extract_json(message)\n )\n\n\n elif text[-1].isdigit():\n\n tmp = [1,2,3,4,5]\n if int(text[-1]) in tmp:\n # ์žˆ๋Š”๊ฒฝ์šฐ\n sel = int(text[-1])\n message_list,attachments_list = genre_crawl(sel)\n\n for i in range(len(message_list)):\n slack_web_client.chat_postMessage(\n channel=channel,\n text = message_list[i],\n attachments = attachments_list[i]\n )\n time.sleep(1)\n\n else:\n #์—†๋Š”๊ฒฝ์šฐ ๋ฉ”์„ธ์ง€ ์ถœ๋ ฅ\n slack_web_client.chat_postMessage(\n channel=channel,\n text = \"`@<๋ด‡์ด๋ฆ„> music` ๊ณผ ๊ฐ™์ด ๋ฉ˜์…˜ํ•ด์ฃผ์„ธ์š”.\"\n )\n\n else:\n slack_web_client.chat_postMessage(\n channel=channel,\n text = \"`@<๋ด‡์ด๋ฆ„> music` ๊ณผ ๊ฐ™์ด ๋ฉ˜์…˜ํ•ด์ฃผ์„ธ์š”.\"\n )\n\n return \"OK\", 200\n\n\n\n#button click\n@app.route(\"/click\", methods=[\"GET\", \"POST\"])\ndef on_button_click():\n # ๋ฒ„ํŠผ ํด๋ฆญ์€ SlackEventsApi์—์„œ ์ฒ˜๋ฆฌํ•ด์ฃผ์ง€ ์•Š์œผ๋ฏ€๋กœ ์ง์ ‘ ์ฒ˜๋ฆฌํ•ฉ๋‹ˆ๋‹ค\n payload = request.values[\"payload\"]\n click_event = MessageInteractiveEvent(json.loads(payload))\n ordertxt = str(click_event.value)\n\n\n if 'chart_current' in ordertxt:\n message_list,attachments_list = _crawl_music_chart()\n\n for i in range(len(message_list)):\n slack_web_client.chat_postMessage(\n channel=click_event.channel.id,\n text = message_list[i],\n attachments = attachments_list[i]\n )\n time.sleep(1)\n#-------------์ž‘์—…์ค‘\n #when yser clicked ์žฅ๋ฅด๋ณ„์ฐจํŠธ\n elif 'chart_genre' in ordertxt:\n\n slack_web_client.chat_postMessage(\n channel=click_event.channel.id,\n text = \"`@<๋ด‡์ด๋ฆ„> [(๋ฐœ๋ผ๋“œ/๋Œ„์Šค/ํŒ)์€ 1,(๋žฉ/ํž™ํ•ฉ)์€ 2,(์•Œ์•ค๋น„/์†Œ์šธ)์€ 3,(์ผ๋ ‰ํŠธ๋กœ๋‹‰)์€ 4,(๋ฝ/๋ฉ”ํƒˆ)์€ 5]` ๊ณผ ๊ฐ™์ด ๋ฉ˜์…˜ํ•ด์ฃผ์„ธ์š”.\"\n )\n\n #when user clicked ์˜ค๋Š˜์˜ ๋…ธ๋ž˜\n elif 'chart_album' in ordertxt:\n message_list,attachments_list = today_musics()\n for i in range(len(message_list)):\n slack_web_client.chat_postMessage(\n channel=click_event.channel.id,\n text = message_list[i],\n attachments = attachments_list[i]\n )\n time.sleep(1)\n\n return \"OK\", 200\n\n# / ๋กœ ์ ‘์†ํ•˜๋ฉด ์„œ๋ฒ„๊ฐ€ ์ค€๋น„๋˜์—ˆ๋‹ค๊ณ  ์•Œ๋ 
ค์ค๋‹ˆ๋‹ค.\n@app.route(\"/\", methods=[\"GET\"])\ndef index():\n return \"<h1>Server is ready.</h1>\"\n\n\nif __name__ == '__main__':\n app.run('0.0.0.0', port=8080)\n" }, { "alpha_fraction": 0.7432432174682617, "alphanum_fraction": 0.7432432174682617, "avg_line_length": 17.5, "blob_id": "3d56c1e12b7c6e987e99187ea84575a0c83a025c", "content_id": "1939f84c83168a96b8162768cf6c9a6b22f7659b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 74, "license_type": "no_license", "max_line_length": 38, "num_lines": 4, "path": "/README.md", "repo_name": "95hongju/kiki_chatbot", "src_encoding": "UTF-8", "text": "# kiki_chatbot\nKIKI music chatbot\n\n![first_mention](./img/screenshot.PNG)\n" } ]
num_files: 2
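The `files` entries carry the full source under `text`, together with per-file statistics such as `alpha_fraction` and `max_line_length`. A short sketch of pulling out just the Python sources from a row shaped like the record above (`row` is assumed to be one dataset row as a dict, e.g. `ds[0]` from the earlier sketch):

```python
# Sketch: select the Python files of one row and print a preview of each.
# The keys used here (language, path, length_bytes, num_lines, text) all
# appear in the sample records shown in this dump.
python_files = [f for f in row["files"] if f["language"] == "Python"]

for f in python_files:
    print(f"# {f['path']} ({f['length_bytes']} bytes, {f['num_lines']} lines)")
    print(f["text"][:200])  # first 200 characters of the source
```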

repo_name: annwhoorma/DS-lab-6
repo_url: https://github.com/annwhoorma/DS-lab-6
snapshot_id: be40951d1362cbef713e7a8e5d76bd90f41e876d
revision_id: 46e75b7c83f38558f7083bc5d11d345ad03cfe7a
directory_id: f09e23f9b3f95599bc23eea115203d589237d3b9
branch_name: refs/heads/master
visit_date: 2022-12-21T12:41:27.848580
revision_date: 2020-09-19T20:12:53
committer_date: 2020-09-19T20:12:53
github_id: 296,846,188
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_created_at: null
gha_updated_at: null
gha_pushed_at: null
gha_language: null
files:
[ { "alpha_fraction": 0.6076518893241882, "alphanum_fraction": 0.6159039735794067, "avg_line_length": 30.746030807495117, "blob_id": "6c39fc0e895287ab0d4a45b5b82095bfddf9900b", "content_id": "397309663ae893781845242b59a70f6ec6b9a60a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3999, "license_type": "no_license", "max_line_length": 90, "num_lines": 126, "path": "/server.py", "repo_name": "annwhoorma/DS-lab-6", "src_encoding": "UTF-8", "text": "\"\"\"\nauthor: Anna Boronina\nemail: a.boronina@innopolis.university\ntelegram: @whoorma\ngroup: DS-01\nTO RUN: python3 server.py\n\"\"\"\n\nimport socket\nimport os\nimport re\nimport threading\n\n# host and port information\nHOST = '127.0.0.1'\nPORT = 8080\n# buffer size for the incoming files\nBUFFER_SIZE = 4096\n# create server TCP socket with IPv4 and bind it to host ip and port number\nserver_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\nserver_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\nserver_socket.bind((HOST, PORT))\n# start listerning for incoming connections\nserver_socket.listen(10)\n# an array with all active clients\nall_cli_socks = []\n# append server itself\nall_cli_socks.append(server_socket)\n# separator that will be used to receive file name and file size together\nSEPARATOR = '<SEP>'\n\n\n# class responsible for handling name collisions\nclass NamesMap:\n def __init__(self):\n # dictionary with existing names and number of their occurences\n self.names_map = {}\n # regex to make sure that file(1) is treated as file\n self.catch_number = re.compile(r'\\(.*?\\)')\n # initial call to check existing files\n self.init_names_map()\n \n \n def init_names_map(self):\n # build initial dictionary for existing files\n for file_name in os.listdir(os.getcwd()):\n number = self.catch_number.search(file_name)\n original_file_name = re.sub(r'\\(.*?\\)', '', file_name)\n occurences = 0\n if number is not None:\n number = number.group()\n occurences = int(number[1:len(number)-1]) \n else:\n occurences = 1\n \n self.add_file_to_names_map(original_file_name, occurences)\n\n \n def add_file_to_names_map(self, file_name, number):\n # add a new file to the dictionary\n if number:\n self.names_map[file_name] = max(number, (self.names_map.get(file_name) or 0))\n return\n\n self.names_map[file_name] = (self.names_map.get(file_name) or 0) + 1\n\n new_file_name = file_name\n if self.names_map.get(file_name) > 1:\n new_file_name = '(' + str(self.names_map.get(file_name) - 1) + ')' + file_name\n \n return new_file_name\n\n\n# class responsible for creating blocking sockets for each new client\nclass Dobby:\n def __init__(self, sock):\n # create a thread\n threading.Thread.__init__(self)\n self.sock = sock\n # append this client's socket to the list of active sockets\n all_cli_socks.append(self.sock)\n\n def receive(self):\n # receive file name and file size\n data = self.sock.recv(BUFFER_SIZE).decode()\n orig_file_name, file_size = data.split(SEPARATOR)\n file_size = int(file_size)\n \n # make sure file name doesn't case name collision\n file_name = names_map.add_file_to_names_map(orig_file_name, None)\n \n # open the new file\n new_file = open(file_name, 'wb')\n\n total = 0\n # receive file by parts until the file is received completely\n while total != file_size:\n bytes_read = self.sock.recv(BUFFER_SIZE)\n\n if total == file_size:\n # nothing is received\n # file transmitting is done\n break\n # write to the file the bytes we just received\n 
new_file.write(bytes_read)\n\n total += len(bytes_read)\n \n new_file.close()\n print(f'I received {orig_file_name} and saved it as {file_name} :)')\n\n def be_free(self):\n # call this function after receiving is over and the socket is not needed anymore\n all_cli_socks.remove(self.sock)\n\n\n# create dictionary of names\nnames_map = NamesMap()\nwhile 1:\n # accept a new client and create a thread\n cli_sock, cli_addr = server_socket.accept()\n newthread = Dobby(cli_sock)\n # receive the client's file\n newthread.receive()\n # let the client go\n newthread.be_free()" }, { "alpha_fraction": 0.6464706063270569, "alphanum_fraction": 0.6582353115081787, "avg_line_length": 26.852458953857422, "blob_id": "263d9b6168d6d5ae075cb4b4e485ccbf79865f57", "content_id": "43ead2273fe24704a07578ff839b56c826f7ccf6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1700, "license_type": "no_license", "max_line_length": 112, "num_lines": 61, "path": "/client.py", "repo_name": "annwhoorma/DS-lab-6", "src_encoding": "UTF-8", "text": "\"\"\"\nauthor: Anna Boronina\nemail: a.boronina@innopolis.university\ntelegram: @whoorma\ngroup: DS-01\nTO RUN: python client.py <full/path/to/file> <public_ip> 8080\n\"\"\"\n\nimport socket\nimport sys\nimport os\nimport time\nimport tqdm\n\n# open the file in a binary reading mode that must be specified after the script name\nfile = open(sys.argv[1], 'rb')\n# get file's name without its path\nfile_name = os.path.basename(file.name)\n# get file's size\nfile_size = os.path.getsize(sys.argv[1])\n\n# get host and port number\nhost = sys.argv[2]\nport = int(sys.argv[3])\n# initialize a TCP socket with IPv4\nsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n# connect to the server\nsock.connect((host, port))\n\n# specify buffer size for sending the file\nBUFFER_SIZE = 4096\n# specify the separator. must be the same as for the server\nSEPARATOR = '<SEP>'\n\n\ntry:\n # send file info - name and size\n sock.send(f\"{file_name}{SEPARATOR}{file_size}\".encode('utf-8'))\n progress = tqdm.tqdm(range(file_size), f\"Sending {file_name}\", unit=\"B\", unit_scale=True, unit_divisor=1024)\n reading_finished = False\n # start sending the file itself and show the progress bar\n for _ in progress:\n while True and not reading_finished:\n # read BUFFER_SIZE bytes from the file \n bytes_read = file.read(BUFFER_SIZE)\n if not bytes_read:\n # file has been transmitted\n reading_finished = True\n \n # use sendall to assure that all the bytes will be transmitted\n sock.sendall(bytes_read)\n\n # update the progress bar\n progress.update(len(bytes_read))\n\n # close the file\n file.close()\n\nfinally:\n # close the socket\n sock.close()\n\n" } ]
num_files: 2

repo_name: SAR2652/gpu-mangrove
repo_url: https://github.com/SAR2652/gpu-mangrove
snapshot_id: 22b646b215e1e1f98743a79f2995d02960552ad2
revision_id: 2485dd51f296a8215519c91d2a1fd1a5ea1a0642
directory_id: 615b0070da1f2fdbc4fbc562de31dfa8bce0cc80
branch_name: refs/heads/master
visit_date: 2023-02-21T15:36:49.646512
revision_date: 2021-01-25T12:05:22
committer_date: 2021-01-25T12:05:22
github_id: null
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_created_at: null
gha_updated_at: null
gha_pushed_at: null
gha_language: null
files:
[ { "alpha_fraction": 0.5408025979995728, "alphanum_fraction": 0.5586056113243103, "avg_line_length": 37.01323318481445, "blob_id": "024e333cd2084e3823d0556756229c0dd21cae40", "content_id": "1d2717891576456269be8f36f4a1bf604a4f1ffe", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 20109, "license_type": "no_license", "max_line_length": 156, "num_lines": 529, "path": "/gpumangrove/utils.py", "repo_name": "SAR2652/gpu-mangrove", "src_encoding": "UTF-8", "text": "import os\nimport numpy as np\nimport pandas as pd\nimport sqlite3\nimport pickle\nfrom tqdm import tqdm\nimport re\n\nfrom sklearn.metrics import make_scorer\nfrom sklearn.model_selection import cross_val_score, GridSearchCV, PredefinedSplit, LeaveOneOut\n\n# TODO this function needs refactoring!\ndef convert(args):\n CUDAFluxMetricDB = args.i\n FeatureTableDB = args.o\n\n # Get all Column and Row Labels\n with sqlite3.Connection(CUDAFluxMetricDB) as conn:\n cur = conn.cursor()\n\n cur.execute('select distinct metric from fluxmetrics')\n metrics = cur.fetchall()\n cur.execute('select distinct bench,app,dataset,lseq from fluxmetrics')\n kernels = cur.fetchall()\n\n # Consistent Mapping/Order of rows and columns\n row = {}\n count = 0\n for kernel in kernels:\n row[kernel] = count\n count += 1\n col = {}\n count = 0\n for metric in metrics:\n col[metric[0]] = count\n count += 1\n with sqlite3.Connection(CUDAFluxMetricDB) as conn:\n data = np.zeros((len(kernels),len(metrics)))\n\n cur = conn.cursor()\n cur.execute('select * from fluxmetrics')\n print(\"Processing Items..\")\n for item in tqdm(cur.fetchall()):\n data[row[item[0:4]], col[item[5]]] = item[-1]\n \n # Build Dataframe for Labeled Storage\n df_f = pd.DataFrame(data, \n index=pd.Index(kernels, names=['bench', 'app', 'dataset', 'lseq']), \n columns=[item[0] for item in metrics])\n # Check for Errors\n if False:\n with sqlite3.Connection(CUDAFluxMetricDB) as conn:\n cur = conn.cursor()\n cur.execute(\"select * from fluxmetrics where bench=? and app=? and dataset=? 
and lseq=?\", kernels[0])\n res = cur.fetchall()\n print(data[0,:])\n print([x[-1] for x in res])\n # Define how instructions are grouped\n inst_groups = {# Integer Inst\n r'(add|sub|mul|mad|mul24|mad24|sad|div|rem|abs|neg|min|max|popc|clz|bfind|fns|brev|bfe|bfi|dp4a|dp2a)[\\w.]*?.[us](\\d+)$' : 'int',\n # ToDo: extended precission int inst (not found yet)\n # Floating Inst\n r'(testp|copysign|add|sub|mul|fma|mad|div|abs|neg|min|max|rcp)[\\w.]*?.f(\\d+)$' : 'float',\n r'(sqrt|rsqrt|sin|cos|lg2|ex2)[\\w.]*?.f(\\d+)$' : 'float.special',\n # Comparing and Selection inst\n r'(set|setp|selp|slct)[\\w.]*?.\\w(\\d+)$' : 'comp_sel',\n # Logic Inst\n r'(and|or|xor|not|cnot|lop3|shf|shl|shr)[\\w.]*?.\\w?(\\d+|pred)$' : 'logic',\n # Data Movement Inst\n r'(ld.global)(.v[42])?.\\w(\\d+)$' : 'ld.global',\n r'(ld)(.v[42])?.\\w(\\d+)$' : 'ld.global',\n r'(ld.shared)(.v[42])?.\\w(\\d+)$' : 'ld.shared',\n r'(ld.volatile.shared)(.v[42])?.\\w(\\d+)$' : 'ld.shared',\n r'(ld.const)(.v[42])?.\\w(\\d+)$' : 'ld.const',\n r'(ld.local)(.v[42])?.\\w(\\d+)$' : 'ld.local',\n r'(ld.param)(.v[42])?.\\w(\\d+)$' : 'ld.param',\n r'(st.global)(.v[42])?.\\w(\\d+)$' : 'st.global',\n r'(st)(.v[42])?.\\w(\\d+)$' : 'st.global',\n r'(st.shared)(.v[42])?.\\w(\\d+)$' : 'st.shared',\n r'(st.volatile.shared)(.v[42])?.\\w(\\d+)$' : 'st.shared',\n r'(st.const)(.v[42])?.\\w(\\d+)$' : 'st.const',\n r'(st.local)(.v[42])?.\\w(\\d+)$' : 'st.local',\n r'(st.param)(.v[42])?.\\w(\\d+)$' : 'st.param',\n r'(mov)[\\w.]*?.\\w?(\\d+|pred)$' : 'mov',\n # Data Conversion\n r'(cvta|cvt)[\\w.]*?.\\w(\\d+)$' : 'cvt',\n # Control Flow\n r'(bra|call|ret|exit)[\\w.]*?$' : 'control',\n # Atomic Inst\n r'(atom.global)[\\w.]*?(\\d+)$' : 'atom.global',\n r'(atom.shared)[\\w.]*?(\\d+)$' : 'atom.shared',\n # Sync\n r'bar.sync' : 'bar.sync'\n # End\n }\n\n # Helper function to actually group instructions\n def meltInstructions(insts, inst_groups = inst_groups):\n inst_map = { }\n\n for inst in insts:\n if inst in { 'gX', 'gY', 'gZ', 'bX', 'bY', 'bZ', 'shm', 'time'}:\n inst_map[inst] = [inst]\n continue\n m = None\n for ex in inst_groups:\n m = re.match(ex, inst)\n if( m is not None):\n num_groups = len(m.groups())\n #print(inst, m.groups())\n if (num_groups >= 3):\n if m.group(num_groups-1) is None:\n key = inst_groups[ex]+'.'+m.group(num_groups)\n else:\n key = inst_groups[ex]+'.'+str(int(m.group(num_groups-1)[2:])*int(m.group(num_groups)))\n elif (num_groups >= 2):\n if m.group(num_groups) == 'pred':\n key = inst_groups[ex]+'.'+'32'\n else:\n key = inst_groups[ex]+'.'+m.group(num_groups)\n else:\n key = inst_groups[ex]\n #print(inst, inst_groups[ex], key, m.groups())\n if key in inst_map:\n inst_map[key].append(inst)\n else:\n inst_map[key] = [inst]\n break\n if (m == None):\n if 'misc' in inst_map:\n inst_map['misc'].append(inst)\n else:\n inst_map['misc'] = [inst]\n print(inst, '\\033[1;31mNo Match!\\033[0m')\n return inst_map\n\n # Produce map for summing up columns\n inst_map = meltInstructions(df_f.columns.values)\n # Create grouped dataframe\n dfg = pd.DataFrame(index=df_f.index, columns=[key for key in inst_map])\n for key in tqdm(inst_map):\n dfg[key] = (df_f[inst_map[key]].sum(axis=1))\n # Analytical Metrics\n\n feature_map = {}\n\n for col in [ 'gX', 'gY', 'gZ', 'bX', 'bY', 'bZ', 'shm']:\n feature_map[col] = dfg[col].values\n\n feature_map['CTAs'] = dfg['gX'].values * dfg['gY'].values * dfg['gZ'].values\n feature_map['BlockThreads'] = dfg['bX'].values * dfg['bY'].values * dfg['bZ'].values\n feature_map['TotalThreads'] = feature_map['CTAs'] * 
feature_map['BlockThreads']\n\n def compute_data_vol(df, prefix):\n cols = set()\n for col in df.columns.values:\n if col.startswith(prefix):\n cols.add(col)\n res = None\n for col in cols:\n if res is None:\n res = df[col] * int(col.split('.')[-1])/8\n else: \n res += df[col] * int(col.split('.')[-1])/8\n return res\n\n feature_map['V_ldGlobal'] = compute_data_vol(dfg, 'ld.global.')\n feature_map['V_stGlobal'] = compute_data_vol(dfg, 'st.global.')\n feature_map['V_ldShm'] = compute_data_vol(dfg, 'ld.shared.')\n feature_map['V_stShm'] = compute_data_vol(dfg, 'st.shared.')\n feature_map['V_ldParam'] = compute_data_vol(dfg, 'ld.param.')\n feature_map['V_stParam'] = compute_data_vol(dfg, 'st.param.')\n feature_map['V_ldLocal'] = compute_data_vol(dfg, 'ld.local.')\n feature_map['V_stLocal'] = compute_data_vol(dfg, 'st.local.')\n feature_map['V_ldConst'] = compute_data_vol(dfg, 'ld.const.')\n\n cols_inst = ['float.32', 'logic.64', 'comp_sel.32', 'int.32', 'cvt.32',\n 'comp_sel.64', 'mov.32', 'logic.32', 'control',\n 'int.64', 'cvt.64', 'ld.global.32', 'ld.param.64',\n 'st.global.32', 'ld.shared.32', 'mov.64',\n 'st.shared.32', 'ld.param.32', 'bar.sync', 'float.64',\n 'ld.const.64', 'ld.const.32', 'misc', 'ld.local.32', 'st.local.32',\n 'st.global.64', 'atom.shared.32', 'ld.shared.64', 'st.shared.64',\n 'comp_sel.16', 'st.global.8', 'ld.global.8', 'mov.16',\n 'ld.global.64', 'int.16', 'ld.local.8', 'st.local.8',\n 'st.param.64', 'st.param.32', 'atom.global.32', 'logic.16',\n 'ld.global.128', 'ld.local.64', 'st.local.64', 'cvt.16',\n 'st.global.128', 'ld.shared.8', 'ld.param.8','float.special.32','float.special.64']\n\n # Hack to avoid error on missing columns\n for col in cols_inst:\n if col not in dfg.columns:\n dfg[col] = 0\n feature_map['total_inst'] = dfg[cols_inst].values.sum(axis=1)\n\n for col in cols_inst:\n feature_map[col] = dfg[col].values\n\n features = pd.DataFrame(feature_map)\n # Add Column with Kernel Names\n df_name = None\n with sqlite3.Connection(CUDAFluxMetricDB) as conn:\n # Build Pandas Dataframe from SQL Query\n df_name = pd.read_sql_query(\n 'select distinct bench,app,dataset,lseq,name from fluxmetrics', \n conn, index_col=['bench','app','dataset','lseq'])\n if features.shape[0] == df_name.shape[0]:\n features = features.join(df_name)\n else:\n print(\"Shape mismatch!\")\n # Dump Dataframe into SQL Table\n with sqlite3.connect(FeatureTableDB) as conn:\n features.to_sql('fluxfeatures',conn)\n conn.commit()\n\n\ndef join_features_measurements(feature_db=None, measurement_db=None, grouping='lconf', aggregator='median'):\n groups = {'lseq': ['bench', 'app', 'dataset', 'lseq'],\n 'lconf': ['bench','app','dataset','name',\n 'gX','gY','gZ','bX','bY','bZ','shm',\n 'control','int.32','int.64','total_inst']}\n\n with sqlite3.Connection(feature_db) as conn:\n df_features = pd.read_sql_query(\"select * from fluxfeatures\", conn)\n\n if ('time' in str.lower(measurement_db)):\n with sqlite3.Connection(measurement_db) as conn:\n df_time = pd.read_sql_query(\"select * from kerneltime\", conn)\n df_grouped = df_time.merge(df_features, on=['bench','app','dataset','lseq'], how='inner').groupby(groups[grouping])\n # df_std = df_grouped.std().reset_index()\n\n\n elif ('power' in str.lower(measurement_db)):\n with sqlite3.Connection(measurement_db) as conn:\n df_power = pd.read_sql_query(\"select * from kernelpower\", conn)\n df_grouped = df_power.merge(df_features, on=['bench','app','dataset','lseq'], how='inner').groupby(groups[grouping])\n\n if aggregator == 'median':\n df = 
df_grouped.median().reset_index()\n if aggregator == 'mean':\n df = df_grouped.mean().reset_index()\n\n #if ('time' in str.lower(measurement_db)):\n # with pd.option_context('display.max_rows', None, 'display.max_columns', None, 'display.max_colwidth', -1):\n # print(df[df_std['time'] > df['time']][['bench','app','dataset','name']])\n\n # Drop lseq\n df.drop(columns=['lseq'], inplace=True)\n\n return df\n\n\ndef process_features(feature_df, db_path=None):\n df = feature_df\n\n cols = {}\n\n # 'Index' Columns\n cols['bench'] = df['bench'].values\n cols['app'] = df['app'].values\n cols['dataset'] = df['dataset'].values\n cols['name'] = df['name'].values\n\n cols['threads_per_CTA'] = df['bX'] * df['bY'] * df['bZ']\n cols['CTAs'] = df['gX'] * df['gY'] * df['gZ']\n\n cols['total_instructions'] = df['total_inst']\n # Operation groups\n cols['special_operations'] = df['float.special.32'].values + df['float.special.64'].values\n cols['logic_operations'] = df['logic.32'].values + df['logic.16'].values + df['logic.64'].values\n cols['control_operations'] = df['control'].values\n cols['arithmetic_operations'] = (df['float.32'].values + df['float.64'].values\n + df['int.32'].values + df['int.16'].values + df['int.64'].values\n + df['mov.32'].values + df['mov.64'].values + df['mov.16'].values + df[\n 'comp_sel.32'].values\n + df['comp_sel.64'].values + df['comp_sel.16'].values\n + df['cvt.16'].values + df['cvt.32'].values + df['cvt.64'].values)\n cols['sync_operations'] = df['bar.sync'].values\n\n # Memory data volume (bytes)\n cols['global_memory_volume'] = df['V_stGlobal'].values + df['V_ldGlobal'].values + df['atom.global.32'].values * 4\n # cols['contant_memory_volume'] = df['V_ldConst'].values\n # cols['local_memory_volume'] = df['V_ldLocal'].values + df['V_stLocal'].values\n if 'V_stParam' not in df.columns:\n df['V_stParam'] = 0\n cols['param_memory_volume'] = df['V_ldParam'].values + df['V_stParam'].values\n cols['shared_memory_volume'] = df['V_ldShm'].values + df['V_stShm'].values\n cols['arithmetic_intensity'] = cols['arithmetic_operations'] / (\n cols['global_memory_volume'] + cols['param_memory_volume'])\n\n if 'time' in df.columns:\n cols['time'] = df['time']\n elif 'power' in df.columns:\n cols['power'] = df['power']\n\n\n res = pd.DataFrame(cols)\n\n if db_path is not None:\n with sqlite3.connect(db_path) as conn:\n res.to_sql('samples', conn)\n\n return res\n\n\ndef filter_samples(samples, max_samples_per_kernel=100, random_state=42*31415, pickle_path=None, verbose=0):\n if samples is not pd.DataFrame:\n # Read DB\n with sqlite3.connect(samples) as conn:\n dataset = pd.read_sql_query(con=conn, sql=\"select * from samples\",\n index_col=['index']) # , 'bench', 'app', 'dataset'])\n else:\n dataset = samples\n\n # Remove Invalid Data\n dataset.dropna(inplace=True)\n\n # For Kernels with many samples reduce to max_samples_per_kernel\n dataset_small = dataset\n if 'time' in str.lower(pickle_path):\n kernelcount = dataset.groupby(['bench', 'app', 'dataset', 'name'])['time'].count()\n elif 'power' in str.lower(pickle_path):\n kernelcount = dataset.groupby(['bench', 'app', 'dataset', 'name'])['power'].count()\n \n for i, count in kernelcount[kernelcount > max_samples_per_kernel].iteritems():\n if verbose > 0:\n print(i, count)\n sample_set = dataset[(dataset['bench'] == i[0])\n & (dataset['app'] == i[1])\n & (dataset['dataset'] == i[2])\n & (dataset['name'] == i[3])]\n dataset_small.drop(sample_set.index, inplace=True)\n dataset_small = 
dataset_small.append(sample_set.sample(max_samples_per_kernel, random_state=random_state))\n\n\n if 'time' in str.lower(pickle_path):\n # Drop Samples with time <= 0.0\n dataset_small.drop(dataset_small[dataset_small['time'].le(0.0)].index, inplace=True)\n elif 'power' in str.lower(pickle_path):\n # Drop Samples with power <= 0.0\n dataset_small.drop(dataset_small[dataset_small['power'].le(0.0)].index, inplace=True)\n\n\n if pickle_path is not None:\n pickle.dump(dataset_small,open(pickle_path, \"wb\"))\n\n return dataset_small\n\n\ndef get_xy(sample_df):\n assert ((sample_df.columns.values[-1] == 'time') | (sample_df.columns.values[-1] == 'power')),\\\n \"Last column of DataFrame must be time or power\"\n assert ((sample_df.columns.values[:4]) == ['bench', 'app', 'dataset', 'name']).all(),\\\n \"The first four columns must be index columns (bench,app,dataset,name\"\n\n return sample_df.iloc[:, 4:-1], sample_df.iloc[:, -1]\n\n\ndef nested_cv(X, y, estimator, scorer, param_grid, num_trials=10, n_splits=3, n_high=5, random_state=42*31415):\n\n groups = group_samples_by_threshold(y, [1e3, 1e5])\n\n # Data Storage for CV Scores\n cv_scores = []\n\n # Arrays to store scores\n nested_scores = np.full(num_trials, -np.Inf)\n # Best regression model (return value)\n rg_best = None\n\n for i in tqdm(range(num_trials)):\n seed = i * random_state\n\n inner_cv = PredefinedSplit(split_keep_n_high_grouped(y, groups, folds=n_splits, n_high=n_high, random_state=seed))\n outer_cv = PredefinedSplit(split_keep_n_high_grouped(y, groups, folds=n_splits, n_high=n_high, random_state =seed))\n\n # Non_nested parameter search and scoring\n rg = GridSearchCV(estimator=estimator, param_grid=param_grid,\n iid=False, cv=inner_cv, scoring=scorer, return_train_score=True)\n rg.fit(X, y)\n\n # Nested CV with parameter optimization\n nested_score = cross_val_score(rg.best_estimator_, X=X, y=y, cv=outer_cv, scoring=scorer)\n\n nested_scores[i] = nested_score.mean()\n if nested_scores.max() == nested_scores[i]:\n rg_best = rg.best_estimator_\n\n cv_scores.append({'gs_scores':pd.DataFrame(rg.cv_results_).sort_values('mean_test_score')[['params', 'mean_test_score']], 'ns_scores':nested_score})\n\n return rg_best, cv_scores\n\ndef simple_cv(X, y, estimator, scorer, num_trials=10, n_splits=3, n_high=5, random_state=42*31415):\n\n # Data Storage for CV Scores\n cv_scores = []\n\n for i in tqdm(range(num_trials)):\n seed = i * random_state\n\n splitter = PredefinedSplit(split_keep_n_high(y, folds=n_splits, n_high=n_high, random_state=seed))\n # Nested CV with parameter optimization\n cv_scores.append(cross_val_score(estimator, X=X, y=y, cv=splitter, scoring=scorer))\n\n return cv_scores\n\ndef ablation(dataset, estimatior, output_path=None, thresholds=np.power(10, [np.arange(2, 10)])[0], verbose=0):\n scores = []\n\n for threshold in thresholds:\n if 'time' in dataset.columns:\n X, y = get_xy(dataset[dataset['time'] < threshold])\n elif 'power' in dataset.columns:\n X, y = get_xy(dataset[dataset['power'] < threshold])\n if verbose > 0:\n print(\"Using samples lower than \", threshold)\n print(\"X shape: \", X.shape)\n print(\"y shape: \", y.shape)\n\n scores.append(simple_cv(X, y, estimatior))\n\n if output_path is not None:\n pickle.dump(scores, open(os.path.join(output_path), \"wb\"))\n\n return scores\n\n\ndef group_samples_by_threshold(vec, thresholds):\n thresholds.sort(reverse=True)\n res = np.zeros(vec.shape)\n i = 0\n for t in thresholds:\n res += (vec >= t)\n i += 1\n return res\n\n\ndef split_keep_n_high(targets, 
folds=3, n_high=5, random_state=42*31415):\n targets = np.array(targets)\n np.random.seed(random_state)\n\n idx = set(range(len(targets)))\n idxHigh = targets.argsort()[-n_high:]\n\n split = np.zeros(targets.shape)\n\n for i in idxHigh:\n split[i] = -1\n idx.remove(i)\n\n idx = [i for i in idx]\n np.random.shuffle(idx)\n fold = 0\n for i in idx:\n split[i] = fold\n fold += 1\n fold = fold % folds\n\n return split\n# myY = [ 3. , 0.5, 1. , 5. , 6. , 7. , 10. , 10.1]\n# print(splitHigh(myY, folds=3, n_high=2))\n\n\ndef split_keep_n_high_grouped(targets, groups, folds=3, n_high=5, random_state=42*31415):\n targets = np.array(targets)\n groups = np.array(groups)\n np.random.seed(random_state)\n\n idx = set(range(len(targets)))\n idxHigh = targets.argsort()[-n_high:]\n\n split = np.zeros(targets.shape)\n\n for i in idxHigh:\n split[i] = -1\n idx.remove(i)\n\n n_groups = int(groups.max()) + 1\n groupMap = {}\n for g in range(0, n_groups):\n groupMap[g] = []\n\n i = 0\n for g in groups:\n if i in idx:\n groupMap[g].append(i)\n i += 1\n\n for g in range(0, n_groups):\n idx = [i for i in groupMap[g]]\n np.random.shuffle(idx)\n fold = 0\n for i in idx:\n split[i] = fold\n fold += 1\n fold = fold % folds\n\n return split\n\n\ndef mape(y_true, y_pred):\n return 100 * np.mean(np.abs(y_pred - y_true)/y_true)\n\n\nneg_mape = make_scorer(mape, greater_is_better=False)\n\n\ndef must_not_exist(path):\n if path is not None and os.path.exists(path):\n raise Exception('Path \"' + path + '\" already exists!')\n return\n\n\ndef loo(X, y, model, output_path=None):\n predictions = np.zeros(y.shape)\n\n splitter = LeaveOneOut()\n for train_index, test_index in tqdm(splitter.split(X, y)):\n model.fit(X.iloc[train_index], y.iloc[train_index])\n predictions[test_index] = model.predict(X.iloc[test_index])\n\n if output_path is not None:\n pickle.dump(predictions, open(os.path.join(output_path), \"wb\"))\n\n return predictions\n\n\n#myY = [ 3. , 0.5, 1. , 5. , 6. , 7. , 10. , 10.1]\n#print(splitHighGrouped(myY, [1,0,0,1,1,2,2,2],folds=3, n_high=2))\n" }, { "alpha_fraction": 0.625, "alphanum_fraction": 0.6854838728904724, "avg_line_length": 21.545454025268555, "blob_id": "fb272c992484d6a7a0cfbded55750338e2b4e5d2", "content_id": "3ca0b36ec75a1e8ce24f3f43d29cc24f10c24d73", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 248, "license_type": "no_license", "max_line_length": 83, "num_lines": 11, "path": "/download_dbs.sh", "repo_name": "SAR2652/gpu-mangrove", "src_encoding": "UTF-8", "text": "#!/usr/bin/env bash\n\nif [ ! -f data.tar.gz ]; then\n echo downloading data.tar.gz\n wget -O data.tar.gz https://heibox.uni-heidelberg.de/f/938997f66aef46fc9188/?dl=1\nfi\n\nif [ ! -d data ]; then\n echo extracting data.tar.gz\n tar xzf data.tar.gz\nfi\n" }, { "alpha_fraction": 0.7124078273773193, "alphanum_fraction": 0.7310035228729248, "avg_line_length": 41.72602844238281, "blob_id": "aef447e37769560d00e081b838d2a8e969b08f95", "content_id": "a36c9b8322d10343cceab993d598d112c6e8499a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 3119, "license_type": "no_license", "max_line_length": 239, "num_lines": 73, "path": "/README.md", "repo_name": "SAR2652/gpu-mangrove", "src_encoding": "UTF-8", "text": "# GPU Mangrove\nGPU Mangrove is a predictive model for execution time and power consumption of CUDA kernels. 
Details can be found in the publication \"A Simple Model for Portable and Fast Prediction of Execution Time and Power Consumption of GPU Kernels\".\n\nDue to size restrictions databases are not included in this repository. Execute `download_dbs.sh` to initially download and extract them.\n\n## Requirements\n\nWe provide training data in this repository. To gather your own training data you need to use CUDA Flux.\n\nhttps://github.com/UniHD-CEG/cuda-flux/\n\nIf you only want to train the model with our datasets python3 with tho following packages is recommended:\n\n* numpy >= 1.18.1\n* pandas >= 1.0.3\n* pickle >= 4.0\n* tqdm >= 4.45.0\n* sklearn (scikit-learn) >= 0.22.1\n\ngit lfs is required to checkout the databases (https://git-lfs.github.com/)\n\n## Usage\n\nTo simply build the models simply use the default target of the makefile. \nThere is also a target for the leave-one-out training - `make loo`. \nFYI: The training can take up to multiple days.\n\nOf course gpu mangrove can be used directly:\n\n```\nusage: mangrove.py [-h]\n {process,filter,cv,llo,ablation,timemodel,paramstats} ...\n\nGPU Mangrove: preprocess and filter datesets, train GPU Mangrove model with\ncross-validation or leave-one-out, prediction with trained model.\n\npositional arguments:\n {process,filter,cv,llo,ablation,timemodel,paramstats}\n process Join features and measurements, apply feature\n engineering\n filter Limit the amunt of samples which are being user per\n bench,app,dataset,kernel tuple\n cv train model using cross-validation\n llo train model using leave-one-out\n timemodel measure prediction latency\n\noptional arguments:\n -h, --help show this help message and exit\n```\n\n**Examples:**\n\n```\n./mangrove.py process --fdb data/FeatureTable-0.3.db --mdb data/KernelTime-K20.db -o data/time_samples_K20_median.db\n./mangrove.py filter -i data/time_samples_K20_median.db -t 100 -o data/time_samplesf_K20_median_100.pkl\n./mangrove.py cv -i data/time_samplesf_K20_median_100.pkl -o data/time_model_K20_median_100.pkl -r data/time_cv-res_K20_median_100.pkl -t 30 -s 3 -k 5\n```\n\n## Jupyter Notebooks\n\n* The visualization notebook is used to create plots using our sample data and the models. It can be used to examine the data and the model in depth.\n* The FeatureProcessing notebook is used to create the FeatureTable database. It is not needed to execute it as this database is already there.\n\n## Databases\n\nThe data folder contain databases which can be examined with sqlite3:\n\n* CUDAFlux-0.3-Metrics.db - raw CUDA Flux metrics\n* FeatureTable-0.3.db - processed CUDA FLux metrics\n* KernelTime-$GPU.db - raw time measurements\n* KernelPower-$GPU.db - raw power measurements\n\nEntrys in FeatureTable and KernelTime/KernelPower are usually joined by the tuple (bench,app,dataset,lseq) where lseq stands for launch sequence and is the sequential order of the kernels launched in the application.\n" }, { "alpha_fraction": 0.6223648190498352, "alphanum_fraction": 0.6526122689247131, "avg_line_length": 38.914634704589844, "blob_id": "9ab4e16903863bf0f30239e37b4d9ae9678000a4", "content_id": "16e6d0aec88abd4eb88d8310d1aea4bb1e943d74", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Makefile", "length_bytes": 3273, "license_type": "no_license", "max_line_length": 127, "num_lines": 82, "path": "/Makefile", "repo_name": "SAR2652/gpu-mangrove", "src_encoding": "UTF-8", "text": "# Makefile for training the models for time\n#\n# Set this command if you want to use a scheduler (like e.g. 
slurm) to \n# distribute the task of data processing and training\n# CMDP=srun -o slurm-%j.out\n#\n# GPUs\nGPUS=K20 TitanXp P100 V100 GTX1650\n# Threshold of how many samples are being used for a bench,app,dataset tuple\nTHRESHOLD=100\n# Aggregation method for combining multiple measurements into one sample\n# choose mean or median\nAGG=median\n# Folder where data is stored\nDDIR=data\n\n.PHONY: \nall: time_models power_models\n\n.PHONY: \ntime_models: $(foreach GPU,$(GPUS),$(DDIR)/time_model_$(GPU)_$(AGG)_$(THRESHOLD).pkl) \n.PHONY: \npower_models: $(foreach GPU,$(GPUS),$(DDIR)/power_model_$(GPU)_$(AGG)_$(THRESHOLD).pkl) \n\n.PHONY:\nloo: time_loo power_loo\n.PHONY:\ntime_loo: $(foreach GPU,$(GPUS),$(DDIR)/time_loo_$(GPU)_$(AGG)_$(THRESHOLD).pkl)\n.PHONY:\npower_loo: $(foreach GPU,$(GPUS),$(DDIR)/power_loo_$(GPU)_$(AGG)_$(THRESHOLD).pkl)\n\n# Time Model Targets\ndefine create_samples_t\n$(DDIR)/time_samples_$1_$2.db: $(DDIR)/FeatureTable-0.3.db $(DDIR)/KernelTime-$1.db\n\t$(CMDP) python mangrove.py process --fdb $(DDIR)/FeatureTable-0.3.db --mdb $(DDIR)/KernelTime-$1.db -o $$@ -g lconf -a median\nendef\n\ndefine create_samplesf_t\n$(DDIR)/time_samplesf_$1_$2_$3.pkl: $(DDIR)/time_samples_$1_$2.db\n\t$(CMDP) python mangrove.py filter -i $$^ -o $$@ -t $3\nendef\n\ndefine create_models_t\n$(DDIR)/time_model_$1_$2_$3.pkl: $(DDIR)/time_samplesf_$1_$2_$3.pkl\n\t$(CMDP) python mangrove.py cv -i $$^ -o $$@ -r $(DDIR)/time_cv-res_$1_$2_$3.pkl -t 30 -s 3 -k 5\nendef\n\ndefine create_loo_t\n$(DDIR)/time_loo_$1_$2_$3.pkl: $(DDIR)/time_model_$1_$2_$3.pkl $(DDIR)/time_samplesf_$1_$2_$3.pkl\n\t$(CMDP) python mangrove.py loo -i $(DDIR)/time_samplesf_$1_$2_$3.pkl -m $(DDIR)/time_model_$1_$2_$3.pkl -o $$@\nendef\n\n$(foreach GPU,$(GPUS),$(eval $(call create_samples_t,$(GPU),$(AGG))))\n$(foreach GPU,$(GPUS),$(eval $(call create_samplesf_t,$(GPU),$(AGG),$(THRESHOLD))))\n$(foreach GPU,$(GPUS),$(eval $(call create_models_t,$(GPU),$(AGG),$(THRESHOLD))))\n$(foreach GPU,$(GPUS),$(eval $(call create_loo_t,$(GPU),$(AGG),$(THRESHOLD))))\n\n# Power Model Targets\ndefine create_samples_p\n$(DDIR)/power_samples_$1_$2.db: $(DDIR)/FeatureTable-0.3.db $(DDIR)/KernelPower-$1.db\n\t$(CMDP) python mangrove.py process --fdb $(DDIR)/FeatureTable-0.3.db --mdb $(DDIR)/KernelPower-$1.db -o $$@ -g lconf -a median\nendef\n\ndefine create_samplesf_p\n$(DDIR)/power_samplesf_$1_$2_$3.pkl: $(DDIR)/power_samples_$1_$2.db\n\t$(CMDP) python mangrove.py filter -i $$^ -o $$@ -t $3\nendef\n\ndefine create_models_p\n$(DDIR)/power_model_$1_$2_$3.pkl: $(DDIR)/power_samplesf_$1_$2_$3.pkl\n\t$(CMDP) python mangrove.py cv -i $$^ -o $$@ -r $(DDIR)/power_cv-res_$1_$2_$3.pkl -t 30 -s 3 -k 5\nendef\n\ndefine create_loo_p\n$(DDIR)/power_loo_$1_$2_$3.pkl: $(DDIR)/power_model_$1_$2_$3.pkl $(DDIR)/power_samplesf_$1_$2_$3.pkl\n\t$(CMDP) python mangrove.py loo -i $(DDIR)/power_samplesf_$1_$2_$3.pkl -m $(DDIR)/power_model_$1_$2_$3.pkl -o $$@\nendef\n\n$(foreach GPU,$(GPUS),$(eval $(call create_samples_p,$(GPU),$(AGG))))\n$(foreach GPU,$(GPUS),$(eval $(call create_samplesf_p,$(GPU),$(AGG),$(THRESHOLD))))\n$(foreach GPU,$(GPUS),$(eval $(call create_models_p,$(GPU),$(AGG),$(THRESHOLD))))\n$(foreach GPU,$(GPUS),$(eval $(call create_loo_p,$(GPU),$(AGG),$(THRESHOLD))))\n" }, { "alpha_fraction": 0.629194438457489, "alphanum_fraction": 0.6351687908172607, "avg_line_length": 36.19259262084961, "blob_id": "cfee8de701bf68e30f2eadb40a4f0ac511b0f81f", "content_id": "fb33332653099dc3a12c2bd6aa04de189a7d1cbe", "detected_licenses": [], "is_generated": false, "is_vendor": 
false, "language": "Python", "length_bytes": 10043, "license_type": "no_license", "max_line_length": 159, "num_lines": 270, "path": "/mangrove.py", "repo_name": "SAR2652/gpu-mangrove", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n\nimport numpy as np\nimport pandas as pd\nimport pickle\nfrom argparse import ArgumentParser\nimport random\nimport timeit\nimport sqlite3\n\nfrom sklearn.compose import TransformedTargetRegressor\nfrom sklearn.ensemble import ExtraTreesRegressor\n\nimport gpumangrove.utils as gmu\n\n\ndef process(args):\n gmu.must_not_exist(args.o)\n print(\"Processing DBs\")\n df = gmu.join_features_measurements(args.fdb,args.mdb, grouping=args.g, aggregator=args.a)\n gmu.process_features(feature_df=df, db_path=args.o)\n\n\ndef filter(args):\n gmu.must_not_exist(args.o)\n print(\"Filtering samples...\")\n if args.t is None:\n threshold = 100\n else:\n threshold = int(args.t)\n\n gmu.filter_samples(args.i, threshold, pickle_path=args.o)\n\n\ndef cv(args):\n gmu.must_not_exist(args.o)\n gmu.must_not_exist(args.r)\n\n if 'time' in str.lower(args.r):\n print(\"Performing cross-validation for time...\")\n estimator = TransformedTargetRegressor(regressor=ExtraTreesRegressor(n_jobs=24),\n func=np.log,\n inverse_func=np.exp)\n elif 'power' in str.lower(args.r):\n print(\"Performing cross-validation for power...\")\n estimator = ExtraTreesRegressor(n_jobs=24)\n\n scorer = gmu.neg_mape\n # TODO load yml param grid file\n\n if 'time' in str.lower(args.r):\n param_grid = {\n 'regressor__bootstrap': [False],\n 'regressor__max_features': [None, 'log2', 'sqrt'],\n 'regressor__criterion': ['mse', 'mae'],\n 'regressor__n_estimators': [128, 256, 512, 1024]\n }\n elif 'power' in str.lower(args.r):\n param_grid = {\n 'bootstrap': [False],\n 'max_features': [None, 'log2', 'sqrt'],\n 'criterion': ['mse', 'mae'],\n 'n_estimators': [128, 256, 512, 1024]\n }\n\n\n dataset = pickle.load(open(args.i, \"rb\"))\n X, y = gmu.get_xy(dataset)\n model, cv_scores = gmu.nested_cv(X, y, estimator, scorer, param_grid,\n num_trials=int(args.t),\n n_splits=int(args.s),\n n_high=int(args.k))\n\n # for item in cv_scores:\n # print(pd.DataFrame(item[\"gs_scores\"]))\n\n if args.o is not None:\n pickle.dump(model, open(args.o, \"wb\"))\n if args.r is not None:\n pickle.dump(cv_scores, open(args.r,\"wb\"))\n\n\n\ndef loo(args):\n gmu.must_not_exist(args.o)\n print(\"Performing leave one out prediction\")\n\n dataset = pickle.load(open(args.i, \"rb\"))\n X, y = gmu.get_xy(dataset)\n model = pickle.load((open(args.m, \"rb\")))\n gmu.loo(X, y, model, args.o)\n\n\ndef ablation(args):\n gmu.must_not_exist(args.o)\n\n dataset = pickle.load(open(args.i, \"rb\"))\n model = pickle.load(open(args.m, \"rb\"))\n gmu.ablation(dataset, model, args.o)\n\n\ndef timemodel(args):\n max_depth = list()\n max_leaf_nodes = list()\n model = pickle.load(open(args.m, \"rb\"))\n\n if 'time' in str.lower(args.m):\n model.regressor.n_jobs=1\n model.check_inverse = False\n for tree in model.regressor_.estimators_:\n max_depth.append(tree.tree_.max_depth)\n max_leaf_nodes.append(tree.tree_.node_count)\n elif 'power' in str.lower(args.m):\n model.n_jobs=1\n for tree in model.estimators_:\n max_depth.append(tree.tree_.max_depth)\n max_leaf_nodes.append(tree.tree_.node_count)\n print(model)\n print(\"Average maximum depth: %0.1f\" % (sum(max_depth) / len(max_depth)))\n print(\"Average count of nodes: %0.1f\" % (sum(max_leaf_nodes) / len(max_leaf_nodes)))\n\n\n dataset = pickle.load(open(args.i, \"rb\"))\n X, y = gmu.get_xy(dataset)\n\n t = 
np.zeros(args.n)\n for i in range(args.n):\n features = X.sample(random_state=random.randint(0,1e9))\n t[i] = timeit.timeit('model.predict(features)', globals=locals(), number=10) / 10\n\n print('Min, Max, Average:')\n print(t.min(), t.max(), t.mean())\n\n\ndef paramstats(args):\n gmu.must_not_exist(args.o)\n\n df_array = pickle.load(open(args.i,\"rb\"))\n\n param_df_list = []\n\n i = 0\n for entry in df_array:\n df = entry['gs_scores']\n param_df_list.append( pd.DataFrame({'i': np.ones(len(df))*i, 'params': df['params'].astype(str), 'score': df['mean_test_score']}))\n i += 1\n\n param_df = pd.concat(param_df_list)\n with pd.option_context('display.max_rows', None, 'display.max_columns', None, 'display.max_colwidth', -1):\n print(\"Best Parameters for each Iteration\")\n print(param_df.sort_values('score',ascending=False).groupby('i',as_index=False).first().sort_values('score',ascending=False)[['params','score']])\n print(\"Top Parameter Combinations:\")\n print(param_df.sort_values('score',ascending=False).groupby('i',as_index=False).first().groupby('params')['score'].mean().sort_values(ascending=False))\n\n if args.o is not None:\n pickle.dump(param_df, open(args.o, \"wb\"))\n\n\ndef convert(args):\n gmu.convert(args)\n\ndef predict(args):\n samples_db_path = args.i\n model_path = args.m\n\n conn = sqlite3.Connection(samples_db_path)\n samples = pd.read_sql_query(\n 'select * from samples',\n conn, index_col=['bench','app','dataset','name'])\n\n y_true = samples['time']\n samples = samples.drop(columns=['index','time'])\n\n print(samples.columns)\n\n X = samples.values\n model = pickle.load(open(model_path, \"rb\"))\n y_pred = model.predict(X)\n results = pd.DataFrame()\n results['y_true'] = y_true\n results['y_pred'] = y_pred\n results['pred/true'] = y_pred/y_true\n # Make sure everything is displayed:\n pd.set_option('display.max_rows', None)\n pd.set_option('display.max_columns', None)\n pd.set_option('display.width', None)\n \n print('Results:')\n print(results)\n \n if args.o is not None:\n pickle.dump(results, open(args.o,\"wb\"))\n\ndef main():\n # TODO global random state\n # TODO verbose level argument\n p = ArgumentParser(description=\n \"\"\"GPU Mangrove:\n preprocess and filter datesets, train GPU Mangrove model with cross-validation or leave-one-out, prediction with trained model.\"\"\")\n subparsers = p.add_subparsers()\n\n p_convert = subparsers.add_parser('convert', help='Convert raw cuda flux measurements into summarized database')\n p_convert.add_argument('-i', metavar='<input db>', required=True)\n p_convert.add_argument('-o', metavar='<output db>', required=True)\n p_convert.set_defaults(func=convert)\n\n p_process = subparsers.add_parser('process', help='Join features and measurements, apply feature engineering')\n p_process.add_argument('--fdb', metavar='<feature db>', required=True)\n p_process.add_argument('--mdb', metavar='<measurement db>', required=True)\n p_process.add_argument('-o', metavar='<output db>', required=True)\n p_process.add_argument('-g', choices=['lconf','lseq'], default='lconf')\n p_process.add_argument('-a', choices=['median','mean'], default='median')\n p_process.set_defaults(func=process)\n\n p_filter = subparsers.add_parser('filter', help='Limit the amunt of samples which are being user per bench,app,dataset,kernel tuple')\n p_filter.add_argument('-o', metavar='<pickle-output>', required=True)\n p_filter.add_argument('-i', metavar='<sample db>', required=True, help='Input for filtering')\n p_filter.add_argument('-t', metavar='<filter 
threshold>', help='Max. samples per bench,app,dataset,kernel tuple')\n p_filter.set_defaults(func=filter)\n\n p_cv = subparsers.add_parser('cv', help='train model using cross-validation')\n p_cv.add_argument('-i', metavar='<samples.pkl>', required=True)\n p_cv.add_argument('-o', metavar='<model-output.pkl>')\n p_cv.add_argument('-r', metavar='<result-output.pkl', help='file to store cross-validation scores in')\n p_cv.add_argument('-t', metavar='<num cross validation trials>', default=5)\n p_cv.add_argument('-s', metavar='<num CV splits>', default=3)\n p_cv.add_argument('-k', metavar='<num top \"k\" samples', default=5,\n help='Number of top \"k\" samples to keep in each split')\n p_cv.add_argument('-p', metavar='<param_grid.yml', help='use definition of param grid in yml file')\n p_cv.set_defaults(func=cv)\n\n p_loo = subparsers.add_parser('loo', help='train model using leave-one-out')\n p_loo.add_argument('-m', metavar='<model.pkl>', required=True)\n p_loo.add_argument('-i', metavar='<samples.pkl>', required=True)\n p_loo.add_argument('-o', metavar='<loo-predictions.pkl>')\n p_loo.set_defaults(func=loo)\n\n p_ablation = subparsers.add_parser('ablation', help='TODO')\n p_ablation.add_argument('-m', metavar='<model.pkl>', required=True)\n p_ablation.add_argument('-i', metavar='<samples.pkl>', required=True)\n p_ablation.add_argument('-o', metavar='<ablation-scores.pkl>')\n p_ablation.set_defaults(func=ablation)\n\n p_timemodel = subparsers.add_parser('timemodel', help='measure prediction latency')\n p_timemodel.add_argument('-m', metavar='<model.pkl>', required=True)\n p_timemodel.add_argument('-i', metavar='<samples.pkl>', required=True)\n p_timemodel.add_argument('-n', metavar='<num repeats>', default=100, type=int)\n p_timemodel.set_defaults(func=timemodel)\n\n p_paramstats = subparsers.add_parser('paramstats', help='TODO')\n p_paramstats.add_argument('-i', metavar='<cv-results.pkl>', required=True)\n p_paramstats.add_argument('-o', metavar='<ablation-scores.pkl>')\n p_paramstats.set_defaults(func=paramstats)\n\n p_predict = subparsers.add_parser('predict', help='Predict samples from a sample database with a given pre-trained model')\n p_predict.add_argument('-i', metavar='<samples.db>', required=True)\n p_predict.add_argument('-m', metavar='<model.pkl>', required=True)\n p_predict.add_argument('-o', metavar='<results.pkl>')\n p_predict.set_defaults(func=predict)\n\n args = p.parse_args()\n try:\n args.func(args)\n except AttributeError:\n p.print_help()\n\n\n\nif __name__ == '__main__':\n main()\n\n" } ]
num_files: 5

repo_name: Hrishikesh-coder/Tkinter
repo_url: https://github.com/Hrishikesh-coder/Tkinter
snapshot_id: cc097ce8614963c9d3d47601c50295e4e7c8efb3
revision_id: 690a339188d3e2db903ce16e90c990ed3b3ab732
directory_id: cd7f95bc7f5eedffc5876545d236b68a693e88fe
branch_name: refs/heads/master
visit_date: 2022-07-10T02:34:25.714301
revision_date: 2020-05-12T07:58:24
committer_date: 2020-05-12T07:58:24
github_id: 263,270,743
star_events_count: 3
fork_events_count: 0
gha_license_id: null
gha_created_at: null
gha_updated_at: null
gha_pushed_at: null
gha_language: null
files:
[ { "alpha_fraction": 0.6115093231201172, "alphanum_fraction": 0.6496006846427917, "avg_line_length": 29.329193115234375, "blob_id": "6ef965a87428e5c11f0ed66479875484e2f9ac8e", "content_id": "52330df89894b60dbb4e36a245859efecf9295f6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4883, "license_type": "no_license", "max_line_length": 102, "num_lines": 161, "path": "/Tk5Calculator.py", "repo_name": "Hrishikesh-coder/Tkinter", "src_encoding": "UTF-8", "text": "from tkinter import *\nfrom math import *\n\nroot = Tk()\nroot.title(\"Bhanja Calculator\")\nroot.configure(background=\"blue\")\ne = Entry(root, width=35, borderwidth=5)\ne.grid(row=0, column=0, columnspan=3, padx=10, pady=10)\n\n\ndef button_click(number):\n current = e.get()\n e.delete(0, END)\n e.insert(0, str(current) + str(number))\n\n\ndef button_clear():\n e.delete(0, END)\n\n\ndef button_add():\n first_number = e.get()\n global math\n math = \"addition\"\n global f_num\n f_num = int(first_number)\n e.delete(0, END)\n\n\ndef button_subtract():\n first_number = e.get()\n global math\n math = \"subtraction\"\n global f_num\n f_num = int(first_number)\n e.delete(0, END)\n\n\ndef button_multiply():\n first_number = e.get()\n global math\n math = \"multiplication\"\n global f_num\n f_num = float(first_number)\n e.delete(0, END)\n\n\ndef button_sqrt():\n first_number = e.get()\n global math\n math = \"sqrt\"\n global f_num\n f_num = int(first_number)\n e.delete(0, END)\n\n\ndef button_divide():\n first_number = e.get()\n global math\n math = \"division\"\n global f_num\n f_num = int(first_number)\n e.delete(0, END)\n\n\ndef button_sin():\n first_number = e.get()\n global math\n math = \"square\"\n global f_num\n f_num = int(first_number)\n e.delete(0, END)\n\n\ndef button_mod():\n first_number = e.get()\n global math\n math = \"MOD\"\n global f_num\n f_num = int(first_number)\n e.delete(0, END)\n\n\ndef button_intdivide():\n first_number = e.get()\n global math\n math = \"intdivision\"\n global f_num\n f_num = int(first_number)\n e.delete(0, END)\n\n\ndef button_equal():\n second_number = e.get()\n e.delete(0, END)\n\n if (math == \"addition\"):\n e.insert(0, f_num + float(second_number))\n if (math == \"subtraction\"):\n e.insert(0, f_num - float(second_number))\n if (math == \"multiplication\"):\n e.insert(0, f_num * float(second_number))\n if (math == \"division\"):\n e.insert(0, f_num / float(second_number))\n if (math == \"MOD\"):\n e.insert(0, f_num % float(second_number))\n if (math == \"intdivision\"):\n e.insert(0, f_num // float(second_number))\n if (math == \"square\"):\n e.insert(0, f_num * f_num)\n\n\n# define the buttons\n\nbutton_1 = Button(root, text=\"1\", bg=\"light green\", padx=40, pady=20, command=lambda: button_click(1))\nbutton_2 = Button(root, text=\"2\", bg=\"light green\", padx=40, pady=20, command=lambda: button_click(2))\nbutton_3 = Button(root, text=\"3\", padx=40, bg=\"light green\", pady=20, command=lambda: button_click(3))\nbutton_4 = Button(root, text=\"4\", padx=40, bg=\"light green\", pady=20, command=lambda: button_click(4))\nbutton_5 = Button(root, text=\"5\", padx=40, pady=20, bg=\"light green\", command=lambda: button_click(5))\nbutton_6 = Button(root, text=\"6\", padx=40, pady=20, bg=\"light green\", command=lambda: button_click(6))\nbutton_7 = Button(root, text=\"7\", padx=40, pady=20, bg=\"light green\", command=lambda: button_click(7))\nbutton_8 = Button(root, text=\"8\", padx=40, pady=20, bg=\"light green\", command=lambda: 
button_click(8))\nbutton_9 = Button(root, text=\"9\", padx=40, pady=20, bg=\"light green\", command=lambda: button_click(9))\nbutton_0 = Button(root, text=\"0\", padx=40, pady=20, bg=\"light green\", command=lambda: button_click(0))\nbutton_add = Button(root, text=\"+\", padx=39, pady=20, bg=\"light green\", command=button_add)\nbutton_subtract = Button(root, text=\"-\", padx=41, pady=20, bg=\"light green\", command=button_subtract)\nbutton_multiply = Button(root, text=\"*\", padx=45, pady=20, bg=\"light green\", command=button_multiply)\nbutton_divide = Button(root, text=\"/\", padx=58, pady=20, bg=\"light green\", command=button_divide)\nbutton_equal = Button(root, text=\"=\", padx=105, pady=20, bg=\"light green\", command=button_equal)\nbutton_clear = Button(root, text=\"Clear\", padx=95, pady=20, bg=\"light green\", command=button_clear)\nbutton_mod = Button(root, text=\"%\", padx=42, pady=20, bg=\"light green\", command=button_mod)\nbutton_int = Button(root, text=\"int(/)\", padx=45, pady=20, bg=\"light green\", command=button_intdivide)\nbutton_exp = Button(root, text=\"Square\", padx=42, pady=20, bg=\"light green\", command=button_sin)\n# Put the butttons on the screen\n\nbutton_1.grid(row=1, column=0)\nbutton_2.grid(row=1, column=1)\nbutton_3.grid(row=1, column=2)\n\nbutton_4.grid(row=2, column=0)\nbutton_5.grid(row=2, column=1)\nbutton_6.grid(row=2, column=2)\n\nbutton_7.grid(row=3, column=0)\nbutton_8.grid(row=3, column=1)\nbutton_9.grid(row=3, column=2)\n\nbutton_0.grid(row=4, column=0)\n\nbutton_add.grid(row=5, column=0)\nbutton_subtract.grid(row=6, column=0)\nbutton_multiply.grid(row=6, column=1)\nbutton_divide.grid(row=6, column=2)\nbutton_mod.grid(row=7, column=0)\nbutton_int.grid(row=7, column=1)\nbutton_clear.grid(row=4, column=1, columnspan=2)\nbutton_equal.grid(row=5, column=1, columnspan=2)\n\nbutton_exp.grid(row=7, column=2)\n\nroot.mainloop()\n" }, { "alpha_fraction": 0.6704545617103577, "alphanum_fraction": 0.6931818127632141, "avg_line_length": 28.399999618530273, "blob_id": "ad9d5eb72baf08b6074c865acf264241f1dc0224", "content_id": "03abb8a459a278c0e8dba39c9ca40593882acad0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 440, "license_type": "no_license", "max_line_length": 84, "num_lines": 15, "path": "/Tk2.py", "repo_name": "Hrishikesh-coder/Tkinter", "src_encoding": "UTF-8", "text": "from tkinter import *\n\n#everything in tkinter is a widget\n\nroot = Tk()\n\n#Creating a LabelWidget\nmyLabel = Label(root , text=\"Hello World!!\").grid(row = 0 , column = 0) #LabelWidget\nmyLabel2 = Label(root , text=\"Hello!!\").grid(row = 1 , column = 1)\n#Shoving it onto screen\n#Other option\n#myLabel.grid(row = 0 , column = 0)\n#myLabel2.grid(row = 1 , column = 1)\nroot.mainloop()\n#When the window is closed, the mainloop ends, and the prog. ends" }, { "alpha_fraction": 0.6982671022415161, "alphanum_fraction": 0.7247706651687622, "avg_line_length": 26.25, "blob_id": "e844336fce395cade09c29c59428c2f68b3d77bc", "content_id": "68cda8768121f49dcdfc6b759aad36f47b91f1ed", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 981, "license_type": "no_license", "max_line_length": 78, "num_lines": 36, "path": "/Tk6.py", "repo_name": "Hrishikesh-coder/Tkinter", "src_encoding": "UTF-8", "text": "from tkinter import *\nfrom PIL import ImageTk,Image\nroot = Tk()\n\nroot.title(\"Hello , H.B. 
here!!\")\n\n#root.iconbitmap(\"\")#Pass the location of the file(.ico file)\n\nimg1 = ImageTk.PhotoImage(Image.open(\"images/Ahmedabad Satyagraha.jpg\"))\nimg2 = ImageTk.PhotoImage(Image.open(\"images/download.jfif\"))\nimg3 = ImageTk.PhotoImage(Image.open(\"images/khilafat.jpg\"))\nimg4 = ImageTk.PhotoImage(Image.open(\"images/khilaft_movement.jpg\"))\nimg5 = ImageTk.PhotoImage(Image.open(\"images/ghandi-by-rajuarya[221559].jpg\"))\n\nimage_list = [img1,img2,img3,img4,img5]\n\n\n\nmy_label = Label(image=img1)\nmy_label.grid(row=0,column=0,columnspan=3)\n\ndef forward():\n    return\n\ndef back():\n    return\n\nbutton_back = Button(root,text=\"<<\",command=lambda : back())\nbutton_exit = Button(root,text=\"EXIT PROGRAM\",command=root.destroy)\nbutton_forward = Button(root,text=\">>\",command=lambda:forward())\n\nbutton_back.grid(row=1,column=0)\nbutton_exit.grid(row=1,column=1)\nbutton_forward.grid(row=1,column=2)\n\nroot.mainloop()\n" }, { "alpha_fraction": 0.5027753710746765, "alphanum_fraction": 0.5357873439788818, "avg_line_length": 37.897727966308594, "blob_id": "246996f7690ddd3961b0ce50d6802d9a60b79976", "content_id": "1cf78520dd7e4e9edfadb83aeca1913cd7b9d9c5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3423, "license_type": "no_license", "max_line_length": 124, "num_lines": 88, "path": "/Calculator.py", "repo_name": "Hrishikesh-coder/Tkinter", "src_encoding": "UTF-8", "text": "from tkinter import *\n\nexpression = \" \"\n\ndef press(num):\n    global expression\n    expression += str(num)\n    equation.set(expression)\n\ndef equalpress():\n\n    try:\n        global expression\n        total = str(eval(expression))\n        equation.set(total)\n        expression = \" \"\n\n    except:\n\n        equation.set(' error ')\n        expression = \" \"\n\ndef clear():\n    global expression\n    expression = \" \"\n    equation.set(\" \")\n\nif __name__ == \"__main__\":\n    gui = Tk()\n    gui.configure(background = \"light green\")\n    gui.title(\"Bhanja Calculator\")\n    gui.geometry('265x125')\n    equation = StringVar()\n    expression_field = Entry(gui, textvariable = equation)\n    expression_field.grid(columnspan = 4 , ipadx = 70)\n\n    equation.set('enter your expression')\n\n\n    button1 = Button(gui, text = \"1\" , fg= \"black\" , bg = \"red\" , command = lambda : press(1), height = 1 , width = 7)\n    button1.grid(row = 2 , column = 0)\n\n    button2 = Button(gui, text = \"2\" , fg= \"black\" , bg = \"red\", command = lambda : press(2), height = 1 , width = 7)\n    button2.grid(row = 2 , column = 1)\n\n    button3 = Button(gui, text = \"3\" , fg= \"black\" , bg = \"red\", command = lambda : press(3), height = 1 , width = 7)\n    button3.grid(row = 2 , column = 2)\n\n    button4 = Button(gui, text = \"4\" , fg= \"black\" , bg = \"red\", command = lambda : press(4), height = 1 , width = 7)\n    button4.grid(row = 3 , column = 0)\n\n    button5 = Button(gui, text = \"5\" , fg= \"black\" , bg = \"red\", command = lambda : press(5), height = 1 , width = 7)\n    button5.grid(row = 3 , column = 1)\n\n    button6 = Button(gui, text = \"6\" , fg= \"black\" , bg = \"red\", command = lambda : press(6), height = 1 , width = 7)\n    button6.grid(row = 3 , column = 2)\n\n    button7 = Button(gui, text = \"7\" , fg= \"black\" , bg = \"red\", command = lambda : press(7), height = 1 , width = 7)\n    button7.grid(row = 4 , column = 0)\n\n    button8 = Button(gui, text = \"8\" , fg= \"black\" , bg = \"red\", command = lambda : press(8), height = 1 , width = 7)\n    button8.grid(row = 4 , column = 1)\n\n    button9 = Button(gui, text = \"9\" , fg= \"black\" , bg = 
\"red\", command = lambda : press(9), height = 1 , width = 7)\n button9.grid(row =4 , column = 2)\n\n button0 = Button(gui, text = \"0\" , fg= \"black\" , bg = \"red\", command = lambda : press(0), height = 1 , width = 7)\n button0.grid(row = 5 , column = 0)\n\n plus = Button(gui , text = ' + ' , fg = 'black' , bg = 'red', command = lambda: press(\"+\"),height = 1 , width = 7 )\n plus.grid(row=2 , column = 3)\n\n minus = Button(gui , text = \" - \", fg = 'black' , bg = 'red', command = lambda: press(\"-\"),height = 1 , width = 7 )\n minus.grid(row=3 , column = 3)\n\n multiply = Button(gui , text = \" * \", fg = 'black' , bg = 'red', command = lambda: press(\"*\"),height = 1 , width = 7 )\n multiply.grid(row=4 , column = 3)\n\n divide = Button(gui , text = \" / \", fg = 'black' , bg = 'red', command = lambda: press(\"/\"),height = 1 , width = 7 )\n divide.grid(row=5 , column = 3)\n\n equal = Button(gui , text = \" = \", fg = 'black' , bg = 'red', command = equalpress , height = 1 , width = 7 )\n equal.grid(row=5 , column = 2)\n\n clear = Button(gui , text = \" clear \", fg = 'black' , bg = 'red', command = clear ,height = 1 , width = 7 )\n clear.grid(row=5 , column = '1')\n\n gui.mainloop()\n" }, { "alpha_fraction": 0.6785714030265808, "alphanum_fraction": 0.6895604133605957, "avg_line_length": 29.41666603088379, "blob_id": "3e2a94a0085bf52506ca8b6171133b45c63e4e12", "content_id": "d3ad3d1d3180661559b182ee05dbd3fd83b55e5a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 364, "license_type": "no_license", "max_line_length": 154, "num_lines": 12, "path": "/Tk3.py", "repo_name": "Hrishikesh-coder/Tkinter", "src_encoding": "UTF-8", "text": "from tkinter import *\nroot = Tk()\n\ndef myClick():\n myLabel = Label(root,text=\"LOOK !!\")\n myLabel.pack()\n\nmyButton = Button(root , text = \"Click Me!!\",padx = 50,pady=10,command=myClick,fg=\"red\",bg=\"light green\")# state = DISABLED)#don't put brackets in command\nmyButton.pack()\n\nroot.mainloop()\n#When the window is closed, the mainloop ends, and the prog. ends" }, { "alpha_fraction": 0.739130437374115, "alphanum_fraction": 0.739130437374115, "avg_line_length": 18.785715103149414, "blob_id": "0eca34bddafe31e64991fa0fbe778c2fad92df3a", "content_id": "8b2e252f2e76ff3388d23282faa7f346cebf14d6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 276, "license_type": "no_license", "max_line_length": 65, "num_lines": 14, "path": "/TkTUT.py", "repo_name": "Hrishikesh-coder/Tkinter", "src_encoding": "UTF-8", "text": "from tkinter import *\n\n#everything in tkinter is a widget\n\nroot = Tk()\n\n#Creating a LabelWidget\nmyLabel = Label(root , text=\"Hello World!!\") #LabelWidget\n\n#Shoving it onto screen\nmyLabel.pack()\n\nroot.mainloop()\n#When the window is closed, the mainloop ends, and the prog. 
ends" }, { "alpha_fraction": 0.665354311466217, "alphanum_fraction": 0.6811023354530334, "avg_line_length": 28.941177368164062, "blob_id": "5df6acf6e8219bd7c39e46ef5c738bc76fd86d59", "content_id": "2da7fc1336004e6ecb7959540a53a6f0a9e48f33", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 508, "license_type": "no_license", "max_line_length": 159, "num_lines": 17, "path": "/Tk4.py", "repo_name": "Hrishikesh-coder/Tkinter", "src_encoding": "UTF-8", "text": "from tkinter import *\nroot = Tk()\n\ne = Entry(root,width=50,bg=\"yellow\",borderwidth=5) #EntryWidget\ne.pack()\n\ne.insert(0,\"Enter your name\")\ndef myClick():\n hello = \"Hello \"+e.get() + \" !!\"\n myLabel = Label(root,text=hello)\n myLabel.pack()\n\nmyButton = Button(root , text = \"Enter Your Name\",padx = 50,pady=10,command=myClick,fg=\"red\",bg=\"light green\")# state = DISABLED)#don't put brackets in command\nmyButton.pack()\n\nroot.mainloop()\n#When the window is closed, the mainloop ends, and the prog. ends" }, { "alpha_fraction": 0.6769230961799622, "alphanum_fraction": 0.6769230961799622, "avg_line_length": 8.428571701049805, "blob_id": "5491a5e5f6d23243b0f10892564c684ae210dd77", "content_id": "ed5257a75e396796343ad4801b7b17428750ce51", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 65, "license_type": "no_license", "max_line_length": 21, "num_lines": 7, "path": "/New.py", "repo_name": "Hrishikesh-coder/Tkinter", "src_encoding": "UTF-8", "text": "from tkinter import *\n\nroot = Tk() #root\n\n#logic\n\nroot.mainloop()" }, { "alpha_fraction": 0.6112082600593567, "alphanum_fraction": 0.6447664499282837, "avg_line_length": 42.04659652709961, "blob_id": "ea0fd7204a9992402956fd7318b87e923becddee", "content_id": "51ec5a7b2cfb2eca0d099b85dd8330622fdd16d8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 12009, "license_type": "no_license", "max_line_length": 172, "num_lines": 279, "path": "/Login.py", "repo_name": "Hrishikesh-coder/Tkinter", "src_encoding": "UTF-8", "text": "from tkinter import *\nfrom PIL import ImageTk, Image\nfrom tkinter.font import Font\n\nuser = ''\n\n\ndef login(login_backend, register_backend):\n x = Tk()\n x.state('zoomed')\n # x.geometry(f\"570x{x.winfo_screenheight()}\")\n x.configure(background=\"light green\")\n x.title(\"Login Form\")\n fontFamilyy = StringVar(value=\"Verdana\")\n fontSizze = IntVar(value=12)\n fontSizze2 = IntVar(value=8)\n appfont = Font(family=fontFamilyy.get(), size=fontSizze.get(), weight='normal')\n\n er = StringVar(value=\"Verdana\")\n eri = IntVar(value=8)\n\n fontconfi = Font(family=er.get(), size=eri.get(), weight='normal')\n mylabel = Label(x, text=\"LOGIN\", font=appfont, bg=\"light green\", fg=\"dark green\", padx=10, pady=50)\n mylabel.place(relx=0.5, rely=0.1, anchor=CENTER)\n\n abel = Label(x, text=\"Enter your username\", font=fontconfi, bg=\"light green\", fg=\"slate blue\", padx=10, pady=10)\n abel.place(relx=0.5, rely=0.2, anchor=CENTER)\n\n q = Entry(x, width=90, bg=\"light blue\", justify=\"center\")\n q.place(relx=0.5, rely=0.3, anchor=CENTER)\n\n abel = Label(x, text=\"Enter your password\", font=fontconfi, bg=\"light green\", fg=\"slate blue\", padx=10, pady=10)\n abel.place(relx=0.5, rely=0.4, anchor=CENTER)\n\n s = Entry(x, show=\"*\", width=90, bg=\"light blue\", justify=\"center\")\n\n s.place(relx=0.5, rely=0.5, anchor=CENTER)\n\n ut = Checkbutton(x, text=\"Remember Me!!\", 
bg=\"light green\")\n ut.place(relx=0.5, rely=0.6, anchor=CENTER)\n\n bi = Button(x, text=\"LOGIN\", fg=\"white\", bg=\"black\", width=12, command=lambda: [login_backend(q.get(), s.get(), x)])\n bi.place(relx=0.5, rely=0.7, anchor=CENTER)\n\n fontFamily12 = StringVar(value=\"Arial\")\n fontSize15 = IntVar(value=7)\n\n tFont = Font(family=fontFamily12.get(), size=fontSize15.get(), weight='normal')\n\n ylabel = Button(x, text=\"Not yet registered??\", font=tFont, bg=\"forest green\",\n command=lambda: [x.destroy(), register(register_backend, login_backend)])\n\n ylabel.place(relx=0.5, rely=0.8, anchor=CENTER)\n\n Button(x, text=\"EXIT\", fg=\"red\", command=x.destroy).place(relx=0.1, rely=0.1)\n\n x.mainloop()\n\n\ndef register(register_backend, login_backend):\n global root\n root = Tk()\n root.attributes(\"-fullscreen\", True)\n\n root.configure(background=\"light green\")\n root.title(\"Sign-up Form\")\n fontFamily = StringVar(value=\"Verdana\")\n fontSize = IntVar(value=12)\n\n fontcon = Font(family=fontFamily.get(), size=fontSize.get(), weight='normal')\n\n qw = StringVar(value=\"Verdana\")\n qi = IntVar(value=8)\n\n fontconf = Font(family=qw.get(), size=qi.get(), weight='normal')\n\n myLabel = Label(root, text=\"SIGN-UP TODAY!!\", font=fontcon, bg=\"light green\", fg=\"dark green\", padx=20, pady=50)\n myLabel.place(relx=0.5, rely=0.1, anchor=CENTER)\n\n abel = Label(root, text=\"Enter your Username\", font=fontconf, bg=\"light green\", fg=\"slate blue\", padx=10, pady=10)\n abel.place(relx=0.5, rely=0.2, anchor=CENTER)\n\n e = Entry(root, width=90, bg=\"light blue\", justify=\"center\")\n\n e.place(relx=0.5, rely=0.25, anchor=CENTER)\n\n abel = Label(root, text=\"Enter your email-id:\", font=fontconf, bg=\"light green\", fg=\"slate blue\", padx=10, pady=10)\n abel.place(relx=0.5, rely=0.3, anchor=CENTER)\n\n f = Entry(root, width=90, bg=\"light blue\", justify=\"center\")\n\n f.place(relx=0.5, rely=0.35, anchor=CENTER)\n\n abel = Label(root, text=\"Enter your Password\", font=fontconf, bg=\"light green\", fg=\"slate blue\", padx=10, pady=10)\n abel.place(relx=0.5, rely=0.4, anchor=CENTER)\n\n h = Entry(root, width=90, show=\"*\", bg=\"light blue\", justify=\"center\")\n\n h.place(relx=0.5, rely=0.45, anchor=CENTER)\n\n abel = Label(root, text=\"Re-enter your Password\", font=fontconf, bg=\"light green\", fg=\"slate blue\", padx=10,\n pady=10)\n abel.place(relx=0.5, rely=0.5, anchor=CENTER)\n\n g = Entry(root, show=\"*\", width=90, bg=\"light blue\", justify=\"center\")\n\n g.place(relx=0.5, rely=0.55, anchor=CENTER)\n\n b = Button(root, text=\"SUBMIT\", fg=\"white\", width=12, bg=\"black\",\n command=lambda: [register_backend(e.get(), f.get(), h.get(), g.get(), root)])\n b.place(relx=0.5, rely=0.6, anchor=CENTER)\n\n fontFamily2 = StringVar(value=\"Arial\")\n fontSize5 = IntVar(value=7)\n WFont = Font(family=fontFamily2.get(), size=fontSize5.get(), weight='normal')\n\n yLabel = Button(root, text=\"Already registered??\", font=WFont, bg=\"forest green\",\n command=lambda: [root.destroy(), login(login_backend, register_backend)], justify=\"center\")\n yLabel.place(relx=0.5, rely=0.7, anchor=CENTER)\n\n root.mainloop()\n\n\ndef click(num=1):\n chat_page = Tk()\n chat_page.state('zoomed')\n chat_page.configure(background=\"light blue\")\n chat_page.title(\"Contact \" + str(num) + \" Chat Page\")\n Button(chat_page, text=\" <- Back \", font=(\"Courier\", 8, \"normal\"), padx=20, bg=\"white\", fg=\"red\",\n command=lambda: [chat_page.destroy(), default(user)]).grid(row=0, column=0)\n 
contact_details = Frame(chat_page, bg=\"light blue\", pady=20, padx=50)\n contact_details.grid(row=0, column=1)\n profileimg = Image.open('images/pr.jpg') # the location of the image would change according to every user\n profileimage = profileimg.resize((30, 30), Image.ANTIALIAS)\n openprofileimage = ImageTk.PhotoImage(profileimage)\n buttonforprofileimage = Button(contact_details, image=openprofileimage)\n buttonforprofileimage.grid(row=0, column=1)\n buttonforprofileimage.image = openprofileimage\n Button(contact_details, text=\"Contact \" + str(num), font=(\"Courier\", 12, \"bold\"), bg=\"light green\", fg=\"red\").grid(\n row=0, column=2)\n messagebox = Frame(chat_page, bg=\"pink\")\n messagebox.grid(row=1, column=0)\n Label(messagebox, text=\"Hello!!\", bg=\"pink\", fg=\"black\").grid(row=1, column=0)\n Label(messagebox, text=\"14.08\", bg=\"pink\", font=(\"Arial\", 6, 'roman'), padx=5).grid(row=1, column=1)\n Label(chat_page,\n text=\" \",\n bg=\"light blue\", fg=\"black\").grid(row=2, column=0)\n messagebox2 = Frame(chat_page, bg=\"snow\")\n messagebox2.grid(row=2, column=3)\n Label(messagebox2, text=\"Who are you?\", bg=\"snow\", fg=\"black\").grid(row=2, column=4)\n Label(messagebox2, text=\"14.08\", bg=\"snow\", font=(\"Arial\", 6, 'roman')).grid(row=2, column=5)\n messagebox3 = Frame(chat_page, bg=\"pink\")\n messagebox3.grid(row=3, column=0)\n Label(messagebox3, text=\"I am contact \" + str(num), bg=\"pink\", fg=\"black\").grid(row=3, column=0)\n Label(messagebox3, text=\"14.08\", bg=\"pink\", font=(\"Arial\", 6, 'roman')).grid(row=3, column=1)\n chatbox = Frame(chat_page, pady=300, bg=\"light blue\")\n chatbox.grid(row=5, column=1)\n\n entryforchatbox = Entry(chatbox, borderwidth=5, bg=\"yellow\", justify=\"center\")\n entryforchatbox.grid(row=5, column=1)\n\n send = Image.open('images/send.png')\n imageforsend = send.resize((20, 20), Image.ANTIALIAS)\n openimageforsend = ImageTk.PhotoImage(imageforsend)\n buttonforsend = Button(chatbox, image=openimageforsend)\n buttonforsend.grid(row=5, column=3)\n buttonforsend.image = openimageforsend\n\n chat_page.mainloop()\n\n\ndef default(username):\n global user\n user = username\n root = Tk()\n root.attributes('-fullscreen',True)\n root.title(\"Chat Page\")\n root.configure(background=\"light green\")\n\n userdetails = Frame(root, bg=\"light green\")\n userdetails.place(relx=0.5,rely=0.025,anchor=N)\n\n fontfamily = StringVar(value=\"Kalpurush\")\n font_size = IntVar(value=14)\n app_font = Font(family=fontfamily.get(), size=font_size.get(), weight='bold')\n\n username = Label(userdetails, font=app_font, text=f\"{username}\", bg=\"light green\", fg=\"red\", padx=10)\n username.place(relx=0.5,rely=0.1,anchor=CENTER)\n\n userimg = Image.open('images/default_profile_image.png')\n userimage = userimg.resize((100, 100), Image.ANTIALIAS)\n openuserimage = ImageTk.PhotoImage(userimage)\n\n buttonforuserimage = Button(userdetails, image=openuserimage, bg=\"light green\")\n buttonforuserimage.place(relx=0.4,rely=0.1,anchor=CENTER)\n buttonforuserimage.image = openuserimage\n\n\n\n frame = Frame(root, bg=\"light green\")\n frame.place(relx=0.5,rely=0.2,width=20,anchor=CENTER)\n\n searchimage = Image.open('images/search.png')\n searchimageopen = searchimage.resize((20, 20), Image.ANTIALIAS)\n opensearchimage = ImageTk.PhotoImage(searchimageopen)\n\n searchbar = Entry(frame, font=app_font, bg=\"pink\", fg=\"blue\", justify=\"center\", borderwidth=0)\n\n searchbar.place(relx=0.5,rely=0.2,anchor=CENTER)\n\n button1 = Button(frame, 
image=opensearchimage, bg='white', command=click)\n button1.place(relx=0.6,rely=0.2,anchor=CENTER)\n button1.image = opensearchimage\n\n # fontstyle = StringVar(value=\"Courier\")\n # font_size = IntVar(value=20)\n #\n # contactfont = Font(family=fontstyle.get(), size=font_size.get(), weight='normal')\n # scrollbar = Scrollbar(root, orient=\"vertical\")\n\n canvas = Canvas(root, bg=\"light green\")\n scroll_y = Scrollbar(root, orient=\"vertical\", command=canvas.yview)\n\n frame = Frame(canvas, bg=\"light green\")\n\n img = Image.open('images/pr.jpg') # the location of the image would change according to every user\n image = img.resize((30, 30), Image.ANTIALIAS)\n openimg = ImageTk.PhotoImage(image)\n\n Button(frame, bg=\"yellow\", command=lambda: [root.destroy(), click()], text=\"Contact \" + str(1), width=48,\n anchor=\"center\", justify=\"center\").place(relx=0.5,rely=0.3,anchor=CENTER)\n\n Button(frame, bg=\"yellow\", command=lambda: [root.destroy(), click()], text=\"Contact \" + str(1 + 1), width=48,\n anchor=\"center\", justify=\"center\").place(relx=0.5,rely=0.35,anchor=CENTER)\n\n Button(frame, bg=\"yellow\", command=lambda: [root.destroy(), click()], text=\"Contact \" + str(2 + 1), width=48,\n anchor=\"center\", justify=\"center\").place(relx=0.5,rely=0.4,anchor=CENTER)\n\n Button(frame, bg=\"yellow\", command=lambda: [root.destroy(), click()], text=\"Contact \" + str(3 + 1), width=48,\n anchor=\"center\", justify=\"center\").place(relx=0.5,rely=0.45,anchor=CENTER)\n\n Button(frame, bg=\"yellow\", command=lambda: [root.destroy(), click()], text=\"Contact \" + str(4 + 1), width=48,\n anchor=\"center\", justify=\"center\").place(relx=0.5,rely=0.5,anchor=CENTER)\n\n Button(frame, bg=\"yellow\", command=lambda: [root.destroy(), click()], text=\"Contact \" + str(5 + 1), width=48,\n anchor=\"center\", justify=\"center\").place(relx=0.5,rely=0.55,anchor=CENTER)\n\n Button(frame, bg=\"yellow\", command=lambda: [root.destroy(), click()], text=\"Contact \" + str(6 + 1), width=48,\n anchor=\"center\", justify=\"center\").place(relx=0.5,rely=0.6,anchor=CENTER)\n\n Button(frame, bg=\"yellow\", command=lambda: [root.destroy(), click()], text=\"Contact \" + str(7 + 1), width=48,\n anchor=\"center\", justify=\"center\").place(relx=0.5,rely=0.65,anchor=CENTER)\n\n Button(frame, bg=\"yellow\", command=lambda: [root.destroy(), click()], text=\"Contact \" + str(8 + 1), width=48,\n anchor=\"center\", justify=\"center\").place(relx=0.5,rely=0.7,anchor=CENTER)\n\n Button(frame, bg=\"yellow\", command=lambda: [root.destroy(), click()], text=\"Contact \" + str(9 + 1), width=48,\n anchor=\"center\", justify=\"center\").place(relx=0.5,rely=0.75,anchor=CENTER)\n\n rel_y = 0.25\n\n for i in range(10):\n conbi = Button(frame, image=openimg)\n rel_y += 0.05\n conbi.place(relx=0.5,rely=rel_y,anchor=CENTER)\n conbi.image = openimg\n\n canvas.create_window(0, 0, anchor='nw', window=frame)\n\n canvas.update_idletasks()\n\n canvas.configure(scrollregion=canvas.bbox('all'),\n yscrollcommand=scroll_y.set)\n\n canvas.place(relx=0.5, rely=0.3, width = 50 , height=100 ,anchor=CENTER)\n scroll_y.place(relx=0.6,rely=0.3 , height=100, width=10 ,anchor=CENTER)\n\n root.mainloop()\ndefault(\"Hello\")" } ]
9
khanderz/MME-Calculator
https://github.com/khanderz/MME-Calculator
2516104320696d2c7e6a9443e91db5f4145155be
ede3f67f59cd2125a1e348367cb4d7bcfae5934d
d127bd9314e9f00deda20582324b3afe79233b17
refs/heads/main
2023-06-16T11:48:38.238597
2021-07-12T18:50:45
2021-07-12T18:50:45
372,886,881
9
1
null
null
null
null
null
[ { "alpha_fraction": 0.6147297620773315, "alphanum_fraction": 0.6161156296730042, "avg_line_length": 30.185184478759766, "blob_id": "d0876b25d82657eb5f32289ee0cfc2fab2801df4", "content_id": "73f22a351f0cb0202bdbf85054ffc1869c9ffc5f", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 5051, "license_type": "permissive", "max_line_length": 270, "num_lines": 162, "path": "/static/js/drug-calc.js", "repo_name": "khanderz/MME-Calculator", "src_encoding": "UTF-8", "text": "\"use strict\";\n\n// Create button element, add click handler to remove associated medlist row\n// and recalculate + display MME total and clinical assessment.\nconst buildMedlistRowDeleteButton = () => {\n // Create delete button\n const button = $(document.createElement('button'));\n button.html('Delete drug');\n\n // Add event handler to delete button that deletes the button's row\n $(button).on('click', (evt) => {\n // Get the button's row and delete it\n const rowToDelete = $(button).parent();\n rowToDelete.remove();\n\n const MMETotal = calculateTotalMME();\n displayMMETotal(MMETotal);\n displayClinicalAssessment(MMETotal);\n });\n \n return button;\n};\n\n// Recalculate/update total MME based on meds in #med-list\nconst calculateTotalMME = () => {\n let MMETotal = 0;\n\n // Get all .med-list-mme <td> elements\n $('.med-list-mme').each((idx, el) => {\n console.log(el);\n\n MMETotal += Number($(el).html());\n \n console.log(`Total after loop # ${idx}: ${MMETotal}`);\n });\n\n return MMETotal;\n};\n\n// Display clinical assessment message based on total MME.\nconst displayClinicalAssessment = (MMETotal) => {\n \n const LOW_MME_MSG = 'Acceptable therapeutic range; however, always use caution when prescribing opioids at any dosage and always prescribe the lowest effect dose';\n const MED_MME_MSG = 'Use extra precautions such as: monitor and assess pain and function more frequently; discuss reducing dose or tapering and discontinuing opioids if benefits do not outweigh harms; consider non-opioid alternatives; consider prescribing naloxone';\n const HIGH_MME_MSG = 'Avoid; carefully justify dose, increase monitoring, and/or consider prescribing naloxone';\n\n if (MMETotal < 50) {\n $('#assessment').html(LOW_MME_MSG);\n document.getElementById(\"assessment\").style.color = \"green\";\n } else if (MMETotal >=50 && MMETotal < 90) {\n $('#assessment').html(MED_MME_MSG);\n document.getElementById(\"assessment\").style.color = \"black\";\n } else {\n $('#assessment').html(HIGH_MME_MSG);\n document.getElementById(\"assessment\").style.color = \"red\";\n }\n};\n\nconst displayMMETotal = (MMETotal) => {\n $('#mme-total').html(MMETotal);\n};\n\n// Add medication to medlist table\n// Args:\n// medData (object) - an object that looks like:\n// {\n// drug: drug name,\n// dose: drug dose,\n// quantity: quantity of drug,\n// days_supply: days supply \n// date_filled: date filled\n// }\nconst addMedToMedlist = (medData) => {\n\n \n // Build tr element\n const tr = $(document.createElement('tr'));\n tr.attr('class', 'medication');\n\n // Append data from medData to row\n tr.append(`\n <td class=\"med-list-drug\">${medData.drug}</td>\n <td>${medData.dose}</td>\n <td>${medData.quantity}</td>\n <td>${medData.days_supply}</td>\n <td>${medData.date_filled}</td>\n `);\n \n // Calculate MME for medData and add to #med-list table\n $.get('/results', medData, (data) => {\n tr.append(`<td class=\"med-list-mme\">${data.MME}</td>`);\n tr.append(buildMedlistRowDeleteButton());\n \n // 
Append tr to #med-list\n $('#med-list').append(tr);\n \n const MMETotal = calculateTotalMME();\n\n displayMMETotal(MMETotal);\n displayClinicalAssessment(MMETotal);\n });\n};\n\n// takes user inputs and assigns them to \"params\"\nconst handleCalculate = (event) => {\n event.preventDefault();\n\n const params = {\n 'drug': $('#drug').val(),\n 'dose': $('#dose').val(),\n 'quantity': $('#quantity').val(), \n 'days_supply': $('#days_supply').val(),\n 'date_filled': $('#date_filled').val()\n };\n\n console.log(params);\n addMedToMedlist(params);\n};\n\n// takes in user inputs and assigns it \"formData\"\nconst handleSaveList = (event) => {\n event.preventDefault();\n\n const formData = {\n 'drug': $('#drug').val(),\n 'dose': $('#dose').val(),\n 'quantity': $('#quantity').val(), \n 'days_supply': $('#days_supply').val(), \n 'date_filled': $('#date_filled').val()\n };\n\n console.log(formData);\n\n\n // adds to user.med_list in db\n $.post('/add', formData, (response) => {\n console.log(response);\n if (response.msg === 'medication added') {\n addMedToMedlist(formData);\n alert('Medication added to your database. You can now view the medication in your user dashboard.');\n } else {\n alert('Please login');\n }\n });\n};\n\n// Reset med list button\nconst clearMedList = () => {\n $('#med-list').empty();\n\n const MMETotal = calculateTotalMME();\n\n displayMMETotal(MMETotal);\n displayClinicalAssessment(MMETotal);\n};\n\n\n\n// EVENT LISTENERS\ndocument.getElementById('drug-form').addEventListener('submit', handleCalculate);\ndocument.getElementById('save-list-button').addEventListener('click', handleSaveList);\ndocument.getElementById('clear-med-list').addEventListener('click', clearMedList);" }, { "alpha_fraction": 0.700230598449707, "alphanum_fraction": 0.7332820892333984, "avg_line_length": 38.45454406738281, "blob_id": "f387dd58d518173061c5508001d07c2ce20fb485", "content_id": "d80caa72dbe7f6f861c90fa6011e1027c5a13511", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1301, "license_type": "permissive", "max_line_length": 85, "num_lines": 33, "path": "/seed_db.py", "repo_name": "khanderz/MME-Calculator", "src_encoding": "UTF-8", "text": "\"\"\"Script to seed database.\"\"\"\nfrom server import app\nfrom model import db, connect_to_db, Opioid\nimport os\n\nos.system('dropdb opioids')\nos.system('createdb opioids')\n\nconnect_to_db(app)\ndb.create_all()\n\n# opioid database\nopioids = [\n Opioid(opioid_name=\"Codeine\", conversion_factor=0.15),\n Opioid(opioid_name=\"Fentanyl transdermal\", conversion_factor=2.4),\n Opioid(opioid_name=\"Fentanyl buccal/sublingual/lozenge\", conversion_factor=0.13),\n Opioid(opioid_name=\"Fentanyl film/oral spray\", conversion_factor=0.18),\n Opioid(opioid_name=\"Fentanyl nasal spray\", conversion_factor=0.16),\n Opioid(opioid_name=\"Hydrocodone\", conversion_factor=1),\n Opioid(opioid_name=\"Hydromorphone\", conversion_factor=4),\n Opioid(opioid_name=\"Methadone 1-20mg\", conversion_factor=4),\n Opioid(opioid_name=\"Methadone 21-40mg\", conversion_factor=8),\n Opioid(opioid_name=\"Methadone 41-60mg\", conversion_factor=10),\n Opioid(opioid_name=\"Methadone 61-80mg\", conversion_factor=12),\n Opioid(opioid_name=\"Morphine\", conversion_factor=1),\n Opioid(opioid_name=\"Oxycodone\", conversion_factor=1.5),\n Opioid(opioid_name=\"Oxymorphone\", conversion_factor=3),\n Opioid(opioid_name=\"Tapentadol\", conversion_factor=0.4)\n]\n\ndb.session.add_all(opioids)\ndb.session.commit()\nprint(\"Added opioids 
to DB\")" }, { "alpha_fraction": 0.48363634943962097, "alphanum_fraction": 0.6909090876579285, "avg_line_length": 15.176470756530762, "blob_id": "6d1a9a5c914482694d485650115ceea4e3d1169c", "content_id": "1180851a6d25d174c57c2dc68ffdba0ce89da2cc", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 275, "license_type": "permissive", "max_line_length": 23, "num_lines": 17, "path": "/requirements.txt", "repo_name": "khanderz/MME-Calculator", "src_encoding": "UTF-8", "text": "bcrypt==3.2.0\nblinker==1.4\ncffi==1.14.6\nclick==8.0.1\nFlask==2.0.1\nFlask-Mail==0.9.1\nFlask-SQLAlchemy==2.5.1\ngreenlet==1.1.0\nitsdangerous==2.0.1\nJinja2==3.0.1\nMarkupSafe==2.0.1\npsycopg2-binary==2.8.6\npycparser==2.20\npytz==2021.1\nsix==1.16.0\nSQLAlchemy==1.4.17\nWerkzeug==2.0.1\n" }, { "alpha_fraction": 0.7640525102615356, "alphanum_fraction": 0.7697744965553284, "avg_line_length": 40.83098602294922, "blob_id": "c844600ffd30450185ac53e70ba36d9e8639576e", "content_id": "3f3a94fdf271e9569ca9e3cbf67c01adc64bf1b9", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2971, "license_type": "permissive", "max_line_length": 468, "num_lines": 71, "path": "/README.md", "repo_name": "khanderz/MME-Calculator", "src_encoding": "UTF-8", "text": "# MME-Calculator\nMorphine Milligram Equivalent (MME) Calculator\n\n## Description:\nMME is a numerical standard for clinicians to gauge opioid addiction risk. It is the amount of morphine in milligrams equivalent to the strength of the opioid dose prescribed. Calculating total daily MME can help guide clinicians in pain management and aid in mitigating opioid addiction and accidental overdose. The calculator will calculate the total daily MME for the given medication list, and relay the appropriate clinical assessment for the MME that populates. \n\nThis web app is designed for use by clinicians and by patients.\n\nFor patients: Using this web app to calculate your MME will increase the transparency between you and your pain management. Understand your options and ways to improve your therapy by reading the clinical assessments based on your MME. Know what is an acceptable therapeutic range and when it may be time to consider a naloxone prescription, which could save your life in an accidental overdose situation. Be prepared. Know your risk. Take control back.\n\nPer CDC: Calculating the MME of opioids helps identify patients who may benefit from closer monitoring, reduction or tapering of opioids, prescribing of naloxone, or other measures to reduce risk of overdose. \n\n*There is no completely safe opioid dose, and this calculator does not substitute for clinical judgment. 
Use caution when prescribing opioids at any dosage, and prescribe the lowest effective dose.*\n\n## Deployment:\nhttps://mme-calc.com\n\n\n## Contents\n* [Tech Stack](#tech-stack)\n* [Features](#features)\n* [Installation](#installation)\n* [Links](#links)\n\n\n## <a name=\"tech-stack\"></a>Technologies:\n* Python\n* Javascript\n* Flask\n* Jinja2\n* jQuery\n* HTML\n* CSS\n* AJAX\n* SQLAlchemy\n* BootStrap\n* PostgreSQL\n* Chart.js\n\n## <a name=\"features\"></a>Features: \n\n#### Login/Logout\n![alt text](https://github.com/khanderz/MME-Calculator/blob/main/static/img/login.gif)\n\n#### Total MME Increments\n![alt text](https://github.com/khanderz/MME-Calculator/blob/main/static/img/increment.gif)\n\n#### Save a Medication to Your User Dashboard & Charts\n![alt text](https://github.com/khanderz/MME-Calculator/blob/main/static/img/save.gif)\n\nWeekly and monthly charts on the user dashboard displays 7-day and 30-day total daily MMEs. This feature allows the clinician/patient to assess when a person experienced one or more days in the last 7 days/30 days where their cumulative MME from opioid prescriptions exceeded 90 MME, increasing their risk of opioid addiction and/or accidental overdose.\n\nDate filled is a required input for the charts.\n\n\n## <a name=\"installation\"></a>Installation: \n```git clone https://github.com/khanderz/MME-Calculator.git```\n\n```cd MME-calculator```\n\n```pip3 install -r requirements.txt```\n\n```python3 seed_db.py```\n\n## Run program:\n```python3 server.py```\n\n\n## <a name=\"links\"></a>Links:\n* https://www.cdc.gov/drugoverdose/index.html\n* https://www.linkedin.com/in/khanh-mai-33190/\n\n" }, { "alpha_fraction": 0.615616500377655, "alphanum_fraction": 0.615917980670929, "avg_line_length": 22.01388931274414, "blob_id": "91f1cbfc0d41bce5858219601e05d13089c1b135", "content_id": "a776ec8187a6193b45c179cc848ebb4af7d42470", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3317, "license_type": "permissive", "max_line_length": 106, "num_lines": 144, "path": "/crud.py", "repo_name": "khanderz/MME-Calculator", "src_encoding": "UTF-8", "text": "\"\"\"CRUD operations.\"\"\"\n\nfrom model import db, User, Med, Opioid, connect_to_db\nimport decimal\nfrom datetime import datetime\n\n# Create, Read, Update, Delete functions\n# User functions\ndef create_user(email, password):\n \"\"\"Create and return a new user.\"\"\"\n\n user = User(email=email, password=password)\n\n db.session.add(user)\n db.session.commit()\n\n return user\n\ndef get_users():\n \"\"\"Return all users.\"\"\"\n\n return User.query.all()\n\ndef get_user_by_id(user_id):\n \"\"\"Return a user by primary key.\"\"\"\n\n return User.query.get(user_id)\n\ndef get_user_by_email(email):\n \"\"\"Return a user by email.\"\"\"\n\n return User.query.filter(User.email == email).first() \n\ndef get_user_password(email):\n \"\"\"Return password by user email\"\"\"\n\n user = User.query.filter(User.email == email).first()\n password = user.password\n \n return password\n\ndef get_user_by_email_and_password(email, password):\n \"\"\"Return user by email and password.\"\"\"\n \n user = User.query.filter(User.email == email).first()\n\n if user:\n if user.password == password:\n return user\n else:\n return None\n\ndef change_user_password(email, password):\n \"\"\"Change user password and save new password\"\"\"\n\n user = get_user_by_email(email) \n User.query.filter(User.email == email).update(\n {\n \"password\" : password\n }\n ) \n 
db.session.commit()\n\n    return user\n    \n# MME and drug functions\ndef get_opioid_by_name(opioid_name):\n    \"\"\"Return `Opioid` with the given `opioid_name`.\"\"\"\n    \n    return Opioid.query.filter_by(opioid_name=opioid_name).first()\n\n\ndef add_opioid_to_user_medlist(\n    user,\n    opioid,\n    drug_dose,\n    quantity,\n    days_supply,\n    daily_MME,\n    date_filled\n):\n    \"\"\"Create `Med` object associated with `user`.\"\"\"\n\n    med = Med(\n        drug_dose=drug_dose, \n        quantity=quantity, \n        days_supply=days_supply, \n        daily_MME=daily_MME,\n        date_filled=date_filled,\n    )\n    med.opioid = opioid\n    \n    user.med_list.append(med)\n    \n    db.session.add(user)\n    db.session.commit()\n\n\ndef calculate_MME(drug, dose, quantity, days_supply): \n    \"\"\"Calculate MME with unique conversion factor from db for specific drug.\n    \n    Args:\n    - drug (str): the name of a drug (must be present in database)\n    - dose (decimal.Decimal)\n    - quantity (decimal.Decimal)\n    - days_supply (decimal.Decimal)\n    \"\"\"\n    \n    if not dose or not quantity or not days_supply:\n        return 0\n\n    # Get `Opioid` from database to get its conversion factor\n    opioid = get_opioid_by_name(drug)\n\n    MME = dose * (quantity // days_supply) * opioid.conversion_factor\n\n    return MME\n\ndef get_meds():\n    \"\"\"view all meds in med list\"\"\"\n\n    return Med.query.all() \n\n\ndef get_meds_by_date_range(date_filled, end_date): \n    \"\"\"Takes in a date_filled and end_date and returns a list of med items that fit within the date range.\n    \n    Args:\n        date_filled = db.Column(db.Date, nullable=True) \n        end_date = db.Column(db.Date, nullable=True)\n    \"\"\"\n\n    med_list = Med.query.filter(\n        Med.end_date <= end_date\n    ).filter(\n        Med.date_filled >= date_filled\n    ).all()\n\n    return med_list\n\n\nif __name__ == '__main__':\n    from server import app\n    connect_to_db(app) " }, { "alpha_fraction": 0.5917007923126221, "alphanum_fraction": 0.5955533981323242, "avg_line_length": 28.17798614501953, "blob_id": "de1a13910b6f8c0e0185e2d7f389df8e05a64b06", "content_id": "aa7e3eb52f7807d84c175c5e40b429224e7e75b2", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 12459, "license_type": "permissive", "max_line_length": 146, "num_lines": 427, "path": "/server.py", "repo_name": "khanderz/MME-Calculator", "src_encoding": "UTF-8", "text": "\"\"\"Server for MME calculator app.\"\"\"\n\nfrom flask import (Flask, render_template, request, flash, session,\n                   redirect, jsonify, Markup)\nfrom model import connect_to_db, db, User, Med, Opioid\nimport crud\nimport decimal\nfrom jinja2 import StrictUndefined\nfrom datetime import date, datetime, timedelta\nimport bcrypt\nfrom flask_mail import Mail, Message\nimport os\n\napp = Flask(__name__)\napp.secret_key = \"dev\"\napp.jinja_env.undefined = StrictUndefined\n\nDATE_FORMAT = '%Y-%m-%d'\n\n# flask_mail parameters\napp.config['MAIL_SERVER']='smtp.gmail.com'\napp.config['MAIL_PORT'] = 465\napp.config['MAIL_USERNAME'] = \"mmecalc@gmail.com\"\napp.config['MAIL_PASSWORD'] = \"mmecalculation\"\napp.config['MAIL_USE_TLS'] = False\napp.config['MAIL_USE_SSL'] = True\n\n# create Mail class instance\nmail = Mail(app)\n\n\n#app routes and view functions\n@app.route('/')\ndef homepage():\n    \"\"\"View homepage.\"\"\"\n\n    # temporary flash\n    flash(Markup('New security measures have been implemented on July 10, 2021. 
Please change your password <a href=\"/change_pw_page\">here</a>.'))\n\n user_id = session.get('user_id')\n user = User.query.get(user_id)\n \n return render_template('homepage.html', user=user, user_id=user_id)\n\n\n@app.route('/about')\ndef about_page():\n \"\"\"View about page.\"\"\"\n\n user_id = session.get('user_id')\n user = User.query.get(user_id)\n \n return render_template('resources.html', user=user, user_id=user_id)\n\n\n@app.route('/updates')\ndef updates_page():\n \"\"\"View updates page.\"\"\"\n\n user_id = session.get('user_id')\n user = User.query.get(user_id)\n \n return render_template('updates.html', user=user, user_id=user_id)\n\n@app.route('/contact')\ndef contacts_page():\n \"\"\"Render contacts page.\"\"\"\n\n user_id = session.get('user_id')\n user = User.query.get(user_id)\n \n return render_template('contact.html', user=user, user_id=user_id)\n\n\n@app.route('/message_page') \ndef render_message_page():\n \"\"\"Render message page\"\"\"\n\n user_id = session.get('user_id')\n user = User.query.get(user_id)\n \n return render_template('message.html', user=user, user_id=user_id) \n\n@app.route('/message', methods=['POST'])\ndef send_message():\n \"\"\"Send message\"\"\"\n\n first_name = request.form.get(\"first_name\")\n last_name = request.form.get(\"last_name\")\n email = request.form.get(\"email\")\n # message_type = request.form.get(\"message_type\")\n message = request.form.get(\"message\")\n print(first_name, last_name, email, message, \"*******user inputs\")\n\n msg = Message(sender = email, recipients = ['mmecalc@gmail.com'])\n msg.body = \"Message from\" + \" \" + first_name + \" \" + last_name + \" \" + email + \" \" + \"message:\" + \" \" + message\n mail.send(msg)\n\n flash('Message submitted.')\n user_id = session.get('user_id')\n user = User.query.get(user_id)\n \n return render_template('message.html', user=user, user_id=user_id) \n\n\n# User routes\n@app.route('/create_user')\ndef render_create_user():\n user_id = session.get('user_id')\n user = User.query.get(user_id)\n\n return render_template('create_account.html', user_id=user_id)\n\n\n@app.route('/user_reg', methods=['POST'])\ndef register_user():\n \"\"\"Create a new user.\"\"\"\n\n email = request.form.get(\"email\")\n pw1 = request.form.get(\"password1\").encode('utf-8')\n pw2 = request.form.get(\"password2\").encode('utf-8')\n\n if pw1 == pw2:\n password_encoded = bcrypt.hashpw(pw1, bcrypt.gensalt())\n # turn hashed password back into string to be stored in db\n password = password_encoded.decode('utf-8')\n\n user = crud.get_user_by_email(email)\n\n crud.create_user(email, password)\n\n user_id = session.get('user_id')\n user = User.query.get(user_id)\n \n flash(\"Account created successfully! Please log in.\")\n return render_template('user_login.html', user_id=user_id)\n\n else:\n flash('Passwords do not match. Please try again.')\n user = crud.get_user_by_email(email)\n user_id = session.get('user_id')\n user = User.query.get(user_id)\n return render_template('create_account.html', user_id=user_id)\n \n\n\n@app.route('/login_page')\ndef render_login_page():\n\n user_id = session.get('user_id')\n user = User.query.get(user_id)\n\n if user:\n flash(f\"Hello {user.email}! 
You are already logged in.\")\n \n return render_template('user_login.html', user_id=user_id) \n\n\n@app.route('/user_login', methods=['POST'])\ndef login():\n \"\"\"Allow existing users to log in.\"\"\"\n\n email = request.form.get(\"email\")\n pw = request.form.get(\"password\").encode(\"utf-8\")\n\n user_password = crud.get_user_password(email)\n\n hashed2 = bcrypt.hashpw(pw, user_password.encode('utf-8'))\n hashed2_str = hashed2.decode('utf-8')\n\n if hashed2_str == user_password:\n print(\"Password match!\")\n # Log the user in ...\n\n user = crud.get_user_by_email_and_password(email, hashed2_str)\n\n # chart descriptions\n today = date.today()\n ago = today - timedelta(days=7)\n month = today.strftime(\"%B\")\n\n\n session[\"user_email\"] = user.email\n session[\"user_id\"] = user.user_id\n flash(f\"Hello {user.email}! You are now logged in.\")\n return render_template('user_details.html', user=user, user_id=user.user_id, ago=ago, today=today, month=month) \n\n else:\n user_id = session.get('user_id')\n user = User.query.get(user_id)\n flash(\"Please enter the correct email and password or create a new account.\")\n return render_template('user_login.html', user_id=user_id)\n\n\n@app.route('/logout')\ndef logout():\n \"\"\"Allow a logged in user to logout.\"\"\"\n\n if session:\n session.pop(\"user_id\")\n session.pop(\"user_email\")\n flash(\"You are now logged out.\")\n\n else:\n flash(\"You are not currently logged in.\")\n \n return redirect('/') \n\n\n@app.route('/users/<user_id>')\ndef show_user(user_id):\n \"\"\"Show details of a particular user\"\"\"\n if \"user_id\" in session:\n user = crud.get_user_by_id(user_id)\n \n\n # chart descriptions\n today = date.today()\n ago = today - timedelta(days=7)\n month = today.strftime(\"%B\")\n\n return render_template('user_details.html', user=user, user_id=user_id, ago=ago, today=today, month=month) \n else:\n flash(\"You are not currently logged in. Please login to view your dashboard.\")\n \n return redirect('/') \n\n\n@app.route('/reset_page')\ndef render_reset_page():\n \"\"\"Render reset password page\"\"\"\n user_id = session.get('user_id')\n user = User.query.get(user_id)\n\n return render_template('reset.html', user_id=user_id)\n\n\n@app.route('/reset', methods=['POST'])\ndef reset_password(user):\n \"\"\"Reset user password\"\"\" \n\n pass\n\n\n@app.route('/change_pw_page')\ndef render_change_pw_page():\n \"\"\"Render change password page\"\"\"\n user_id = session.get('user_id')\n user = User.query.get(user_id)\n\n return render_template('change_password.html', user_id=user_id)\n\n@app.route('/change_pw', methods=['POST'])\ndef change_password():\n \"\"\"Check old password and change it to new password\"\"\"\n email = request.form.get(\"email\")\n old_password = request.form.get(\"old_pw\")\n new_password = request.form.get(\"new_pw\").encode('utf-8')\n new_password2 = request.form.get(\"new_pw2\").encode('utf-8')\n\n user_old_password = crud.get_user_password(email)\n\n if user_old_password == old_password:\n print(\"%%%%%%%%%%%%%%%%%passwords match!%%%%%%%%%%%%%%\")\n \n if new_password == new_password2:\n password_encoded = bcrypt.hashpw(new_password, bcrypt.gensalt())\n password = password_encoded.decode('utf-8')\n\n user = crud.get_user_by_email(email)\n\n crud.change_user_password(email, password)\n\n user_id = session.get('user_id')\n user = User.query.get(user_id)\n\n flash('Password changed successfully. 
Please log in.')\n return render_template('user_login.html', user_id=user_id)\n\n else:\n user_id = session.get('user_id')\n user = User.query.get(user_id)\n flash('New passwords do not match. Please try again.')\n return render_template('change_password.html', user_id=user_id) \n\n else:\n user_id = session.get('user_id')\n user = User.query.get(user_id)\n flash(\"Please enter the correct email and old password\")\n return render_template('change_password.html', user_id=user_id) \n\n\n\n# MME and drug routes\n@app.route('/results')\ndef addMed():\n \"\"\" add med for non-logged in users\"\"\"\n\n drug = request.args.get('drug') \n dose = decimal.Decimal(request.args.get('dose'))\n quantity = decimal.Decimal(request.args.get('quantity'))\n days_supply = decimal.Decimal(request.args.get('days_supply'))\n date_filled = request.args.get('date_filled', 0)\n\n\n MME = float(crud.calculate_MME(drug=drug, dose=dose, quantity=quantity, days_supply=days_supply)) \n\n\n return jsonify({'MME': MME})\n\n\n@app.route('/add', methods=['POST'])\ndef add():\n \"\"\"Add new `Med` to user.med_list\"\"\"\n\n if \"user_id\" in session:\n # Query for logged in `User` obj from db\n user = User.query.get(session['user_id'])\n\n # Query for `Opioid` from db, by drug name (from request.form)\n drug = request.form.get('drug')\n opioid = crud.get_opioid_by_name(opioid_name=drug)\n\n # Create `Med` object, `Med` attributes:\n # drug_dose = db.Column(db.Integer)\n # quantity = db.Column(db.Integer)\n # days_supply = db.Column(db.Integer)\n # daily_MME = db.Column(db.Integer) \n\n drug_dose = decimal.Decimal(request.form.get('dose', 0))\n quantity = decimal.Decimal(request.form.get('quantity', 0))\n days_supply = decimal.Decimal(request.form.get('days_supply', 0))\n\n date_filled = None\n if request.form.get('date_filled', None) != \"\": \n date_filled = request.form.get('date_filled', None)\n\n\n MME = crud.calculate_MME(\n drug=drug,\n dose=drug_dose,\n quantity=quantity,\n days_supply=days_supply,\n )\n\n crud.add_opioid_to_user_medlist(\n user,\n opioid,\n drug_dose,\n quantity,\n days_supply,\n MME,\n date_filled,\n )\n\n\n return jsonify({'msg': 'medication added',}), 200\n else:\n return jsonify(\"unauthorized\")\n \n\n@app.route('/api/med_list')\ndef get_users_med_list():\n \"\"\"Get currently logged in user's med list.\n\n Can filter by date range.\n \"\"\"\n \n date_filled = request.args.get('date_filled')\n end_date = request.args.get('end_date')\n \n if date_filled:\n date_filled = datetime.strptime(date_filled, DATE_FORMAT).date()\n\n if end_date:\n end_date = datetime.strptime(end_date, DATE_FORMAT).date()\n \n # Get currently logged in user\n if 'user_id' in session:\n # user = User.query.options(\n # db.joinedload('med_list')\n # ).get(session['user_id'])\n user = User.query.get(session['user_id'])\n\n today = date.today()\n ago = today - timedelta(days=7)\n\n filtered_med_list = Med.query.filter(\n Med.date_filled != None, \n Med.user_id== user.user_id,\n ).all()\n\n\n if date_filled and end_date:\n med_list = user.get_meds_by_date_range(\n date_filled=date_filled,\n end_date=end_date\n )\n else:\n med_list = filtered_med_list\n \n med_list_json = []\n \n for med in med_list:\n med_list_json.append({\n 'med_id': med.med_id,\n 'user_id': med.user_id,\n 'opioid': {\n 'opioid_name': med.opioid.opioid_name,\n 'conversion_factor': float(med.opioid.conversion_factor),\n },\n 'drug_dose': med.drug_dose,\n 'quantity': med.quantity,\n 'days_supply': med.days_supply,\n 'date_filled': 
med.date_filled.strftime(DATE_FORMAT), \n 'end_date': med.end_date.strftime(DATE_FORMAT),\n 'daily_MME': float(med.daily_MME),\n })\n\n\n return jsonify(med_list_json) \n \n\n\nif __name__ == '__main__':\n connect_to_db(app)\n app.run(host='0.0.0.0', debug=False)\n" }, { "alpha_fraction": 0.5211590528488159, "alphanum_fraction": 0.5430477261543274, "avg_line_length": 21.947368621826172, "blob_id": "d2b7b6c20b8277c4795baba1c27e19ea08762712", "content_id": "e334490c2e590978e727da484192d677bc86dabd", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 4797, "license_type": "permissive", "max_line_length": 77, "num_lines": 209, "path": "/static/js/chart.js", "repo_name": "khanderz/MME-Calculator", "src_encoding": "UTF-8", "text": "\"use strict\";\n\n// [\n// {\n// \"daily_MME\": 0.75, \n// \"date_filled\": \"2021-06-17\",\n// \"days_supply\": 5, \n// \"drug_dose\": 5, \n// \"end_date\": \"2021-06-22\", \n// \"med_id\": 1, \n// \"opioid\": {\n// \"conversion_factor\": 0.15, \n// \"opioid_name\": \"Codeine\"\n// }, \n// \"quantity\": 5, \n// \"user_id\": 1\n// }\n// ]\n// moment.duration(30, \"days\").asDays();\n\n// Generate array of dates for range.\n// generateDatesForRange('2021-06-01', '2021-06-30')\n// => ['2021-06-01', ..., '2021-06-30']\nconst generateDatesForRange = (start, end) => {\n const startDate = moment(start);\n const endDate = moment(end);\n\n const duration = Math.abs(moment\n .duration(startDate.diff(endDate))\n .asDays()\n );\n\n const dates = [startDate.format('YYYY-MM-DD')];\n\n while (dates.length != duration) {\n const latestDay = dates[dates.length - 1];\n const nextDay = moment(latestDay).add(1, 'days');\n \n dates.push(nextDay.format('YYYY-MM-DD'));\n }\n \n dates.push(endDate.format('YYYY-MM-DD'));\n \n return dates;\n};\n\n\n// WEEKLY CHART\nconst convertToWeeklyChartData = (medList) => {\n const today = moment();\n const sevenDaysAgo = moment().subtract(7, 'days');\n console.log(`today: ${today}`);\n console.log(`sevenDaysAgo: ${sevenDaysAgo}`);\n const days = generateDatesForRange(\n sevenDaysAgo.format('YYYY-MM-DD'),\n today.format('YYYY-MM-DD')\n );\n\n // Create dateAndTotalMME:\n // date: totalMME\n // { 2021-06-01: 0, 2021-06-02: 0}\n const dateAndTotalMME = {};\n for (const day of days) {\n dateAndTotalMME[day] = 0;\n } \n\n // for each med in medlist\n // generate range of dates starting at date_filled, end at end_date\n for (const med of medList) {\n console.log(`med: ${med.opioid.opioid_name}`);\n\n const datesActive = generateDatesForRange(med.date_filled, med.end_date);\n console.log(`datesActive: ${datesActive}`);\n \n\n // for each date of datesActive\n // use date to index into dateAndTotalMME\n // increment value stored there by med.daily_MME\n for (const date of datesActive) {\n dateAndTotalMME[date] += med.daily_MME;\n } \n } \n \n const chartData = [];\n \n for (const [ date, totalMME ] of Object.entries(dateAndTotalMME)) {\n chartData.push({x: date, y: totalMME});\n }\n \n return chartData;\n};\n\n\n$.get('/api/med_list', (medList) => {\n const data = convertToWeeklyChartData(medList);\n\n new Chart(\n $('#week-bar-chart'),\n {\n type: 'bar',\n data: {\n datasets: [\n {\n label: 'Total Daily MME',\n data: data\n }\n ]\n },\n options: {\n datasets: {\n bar: {\n backgroundColor: () => {\n return randomColor();\n }\n }\n },\n scales: {\n xAxes: [\n {\n type: 'time',\n distribution: 'series'\n }\n ]\n }\n }\n }\n );\n});\n\n\n\n\n\n// MONTHLY CHART\nconst convertToMonthlyChartData = 
(medList) => {\n  const firstDayOfMonth = moment().date(1);\n  console.log(`firstDayOfMonth: ${firstDayOfMonth}`);\n  const days = generateDatesForRange(\n    firstDayOfMonth.format('YYYY-MM-DD'),\n    firstDayOfMonth.add(30, 'days').format('YYYY-MM-DD')\n  );\n  \n  // Create dateAndTotalMME:\n  // date: totalMME\n  // { 2021-06-01: 0, 2021-06-02: 0}\n  const dateAndTotalMME = {};\n  for (const day of days) {\n    dateAndTotalMME[day] = 0;\n  }\n  \n  // for each med in medlist\n  //   generate range of dates starting at date_filled, end at end_date\n  for (const med of medList) {\n    console.log(`med: ${med.opioid.opioid_name}`);\n\n    const datesActive = generateDatesForRange(med.date_filled, med.end_date);\n    console.log(`datesActive: ${datesActive}`);\n\n    // for each date of datesActive\n    //   use date to index into dateAndTotalMME\n    //   increment value stored there by med.daily_MME\n    for (const date of datesActive) {\n      dateAndTotalMME[date] += med.daily_MME;\n    }\n  } \n  \n  const chartData = [];\n  for (const [ date, totalMME ] of Object.entries(dateAndTotalMME)) {\n    chartData.push({x: date, y: totalMME});\n  }\n  \n  return chartData;\n};\n\n$.get('/api/med_list', (medList) => {\n  const data = convertToMonthlyChartData(medList);\n\n  new Chart(\n    $('#month-bar-chart'),\n    {\n      type: 'bar',\n      data: {\n        datasets: [\n          {\n            label: 'Total Daily MME',\n            data: data\n          }\n        ]\n      },\n      options: {\n        datasets: {\n          bar: {\n            backgroundColor: () => {\n              return randomColor();\n            }\n          }\n        },\n        scales: {\n          xAxes: [\n            {\n              type: 'time',\n              distribution: 'series'\n            }\n          ]\n        }\n      }\n    }\n  );\n});\n\n" }, { "alpha_fraction": 0.5879629850387573, "alphanum_fraction": 0.5879629850387573, "avg_line_length": 30.609756469726562, "blob_id": "5236163850d4b7b1e244d756c83fe0704e499aa2", "content_id": "8a44bfbd72a785599e1fd43cfd709e242bf81364", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3888, "license_type": "permissive", "max_line_length": 226, "num_lines": 123, "path": "/model.py", "repo_name": "khanderz/MME-Calculator", "src_encoding": "UTF-8", "text": "\"\"\"Models for MME calculator app.\"\"\"\n\nfrom flask_sqlalchemy import SQLAlchemy\nfrom datetime import datetime, timedelta\n\ndb = SQLAlchemy()\n\n#user class\nclass User(db.Model):\n    \"\"\"a user.\"\"\"\n\n    __tablename__ = 'users'\n\n    user_id = db.Column(db.Integer,\n                        autoincrement = True,\n                        primary_key = True)\n    email = db.Column(db.String, unique=True, nullable=False) \n    password = db.Column(db.String, nullable=False) \n\n    med_list = db.relationship(\"Med\", backref=\"user\") \n\n    def __repr__(self):\n        return f'<User user_id={self.user_id} email={self.email} password={self.password} med_lists={self.med_list}>'\n    \n    def get_meds_by_date_range(self, date_filled, end_date): \n        \"\"\"Takes in a date_filled and end_date and returns a list of med items that fit within the date range.\n        \n        Args:\n            date_filled (datetime.date): start date\n            end_date (datetime.date): end date\n        \"\"\"\n        \n        filtered_meds = []\n\n        for med in self.med_list:\n            # skip meds saved without a fill date, then keep those inside the range\n            if med.date_filled is not None and med.end_date <= end_date and med.date_filled >= date_filled:\n                filtered_meds.append(med)\n\n        return filtered_meds\n\n\n#med class\nclass Med(db.Model):\n    \"\"\"a med.\"\"\"\n\n    __tablename__ = 'meds'\n\n    med_id = db.Column(db.Integer, \n                        autoincrement = True,\n                        primary_key = True)\n    user_id = db.Column(db.Integer, db.ForeignKey('users.user_id')) \n    opioid_id = db.Column(db.Integer, db.ForeignKey('opioids.opioid_id'))\n\n    drug_dose = db.Column(db.Integer, nullable=False)\n    quantity = 
db.Column(db.Integer, nullable=False)\n\n days_supply = db.Column(db.Integer, nullable=False)\n date_filled = db.Column(db.Date, nullable=True) \n end_date = db.Column(db.Date, nullable=True)\n\n daily_MME = db.Column(db.Numeric)\n\n opioid = db.relationship(\"Opioid\", backref=\"med\")\n \n def __init__(self, drug_dose, quantity, days_supply, daily_MME, date_filled):\n\n # Calculate end_date based on date_filled + days_supply\n \n if date_filled:\n date_filled = datetime.strptime(date_filled, \"%Y-%m-%d\").date()\n end_date = date_filled + timedelta(days=int(days_supply))\n else:\n date_filled = None \n end_date = None\n \n new_med = super().__init__( # db.Model.__init__()\n drug_dose=drug_dose,\n quantity=quantity,\n days_supply=days_supply,\n daily_MME=daily_MME,\n date_filled=date_filled,\n end_date=end_date,\n )\n \n return new_med\n\n def __repr__(self):\n return f'<med_id={self.med_id} drug dose={self.drug_dose} quantity={self.quantity} days supply={self.days_supply} daily MME={self.daily_MME} date filled={self.date_filled} end date={self.end_date} User={self.user_id}>'\n\n\n#opioids class\nclass Opioid(db.Model):\n \"\"\"an opioid.\"\"\"\n\n __tablename__ = 'opioids'\n\n opioid_id = db.Column(db.Integer, \n autoincrement = True,\n primary_key = True)\n opioid_name = db.Column(db.String, nullable=False)\n conversion_factor = db.Column(db.Numeric, nullable=False) \n\n def __repr__(self):\n return f'<Opioid opioid_id={self.opioid_id} opioid name={self.opioid_name} MME conversion factor={self.conversion_factor}>'\n\n\n# connect to database\ndef connect_to_db(flask_app, db_uri='postgresql:///opioids', echo=False):\n flask_app.config['SQLALCHEMY_DATABASE_URI'] = db_uri\n flask_app.config['SQLALCHEMY_ECHO'] = echo\n flask_app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\n\n db.app = flask_app\n db.init_app(flask_app)\n\n print('Connected to the db!')\n\n\n\nif __name__ == '__main__':\n from server import app \n connect_to_db(app)\n" } ]
8
XiaoyongNI/KalmanNet-Dataset-1
https://github.com/XiaoyongNI/KalmanNet-Dataset-1
e005d23d0222481fb1efc0e5083070e26391a30b
1f6bf52f572912a99d3e0317dffd4452cd55afa2
da65747d17c408e291e910a6dc13cff4b04ef341
refs/heads/main
2023-08-27T20:38:12.073911
2021-10-26T18:45:08
2021-10-26T18:45:08
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.4709489047527313, "alphanum_fraction": 0.5124087333679199, "avg_line_length": 24.567163467407227, "blob_id": "063524c0242b4511900ce81e838b758614245808", "content_id": "f6a74be8fc9b1115c634d5617848b96c4eee80f8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3425, "license_type": "no_license", "max_line_length": 98, "num_lines": 134, "path": "/Simulations/Lorenz_Atractor/parameters.py", "repo_name": "XiaoyongNI/KalmanNet-Dataset-1", "src_encoding": "UTF-8", "text": "import torch\nimport math\n\nif torch.cuda.is_available():\n cuda0 = torch.device(\"cuda:0\") # you can continue going on here, like cuda:1 cuda:2....etc.\n torch.set_default_tensor_type('torch.cuda.FloatTensor')\nelse:\n cuda0 = torch.device(\"cpu\")\n print(\"Running on the CPU\")\n\n#########################\n### Design Parameters ###\n#########################\nm = 3\nn = 3\nvariance = 0\nm1x_0 = torch.ones(m, 1) \nm1x_0_design_test = torch.ones(m, 1)\nm2x_0 = 0 * 0 * torch.eye(m)\n\n#################################################\n### Generative Parameters For Lorenz Atractor ###\n#################################################\n\n# Auxiliar MultiDimensional Tensor B and C (they make A --> Differential equation matrix)\nB = torch.tensor([[[0, 0, 0],[0, 0, -1],[0, 1, 0]], torch.zeros(m,m), torch.zeros(m,m)]).float()\nC = torch.tensor([[-10, 10, 0],\n [ 28, -1, 0],\n [ 0, 0, -8/3]]).float()\n\ndelta_t_gen = 1e-5\ndelta_t = 0.02\ndelta_t_test = 0.01\nJ = 5\n\n# Decimation ratio\nratio = delta_t_gen/delta_t_test\n\n# Length of Time Series Sequence\n# T = math.ceil(3000 / ratio)\n# T_test = math.ceil(6e6 * ratio)\nT = 100\nT_test = 100\n\nH_design = torch.eye(3)\n\n## Angle of rotation in the 3 axes\nroll_deg = yaw_deg = pitch_deg = 1\n\nroll = roll_deg * (math.pi/180)\nyaw = yaw_deg * (math.pi/180)\npitch = pitch_deg * (math.pi/180)\n\nRX = torch.tensor([\n [1, 0, 0],\n [0, math.cos(roll), -math.sin(roll)],\n [0, math.sin(roll), math.cos(roll)]])\nRY = torch.tensor([\n [math.cos(pitch), 0, math.sin(pitch)],\n [0, 1, 0],\n [-math.sin(pitch), 0, math.cos(pitch)]])\nRZ = torch.tensor([\n [math.cos(yaw), -math.sin(yaw), 0],\n [math.sin(yaw), math.cos(yaw), 0],\n [0, 0, 1]])\n\nRotMatrix = torch.mm(torch.mm(RZ, RY), RX)\nH_mod = torch.mm(RotMatrix,H_design)\n\n\nH_design_inv = torch.inverse(H_design)\n\n# Noise Parameters\nr_dB = 0\nlambda_r = math.sqrt(10**(-r_dB/10))\nnx = 0\nlambda_q = lambda_r * nx\n\n# Noise Matrices\nQ_non_diag = False\nR_non_diag = False\n\nQ = (lambda_q**2) * torch.eye(m)\n\nif(Q_non_diag):\n q_d = lambda_q**2\n q_nd = (lambda_q **2)/2\n Q = torch.tensor([[q_d, q_nd, q_nd],[q_nd, q_d, q_nd],[q_nd, q_nd, q_d]])\n\nR = (lambda_r**2) * torch.eye(n)\n\nif(R_non_diag):\n r_d = lambda_r**2\n r_nd = (lambda_r **2)/2\n R = torch.tensor([[r_d, r_nd, r_nd],[r_nd, r_d, r_nd],[r_nd, r_nd, r_d]])\n\n#########################\n### Model Parameters ####\n#########################\n\nm1x_0_mod = m1x_0\nm1x_0_mod_test = m1x_0_design_test\nm2x_0_mod = 0 * 0 * torch.eye(m)\n\n# Sampling time step\ndelta_t_mod = delta_t\n\n# Length of Time Series Sequence\nT_mod = math.ceil(T * ratio)\nT_test_mod = math.ceil(T_test * ratio)\n\n##############################################\n#### Model Parameters For Lorenz Atractor ####\n##############################################\n\n# Auxiliar MultiDimensional Tensor B and C (they make A)\nB_mod = torch.tensor([[[0, 0, 0],[0, 0, -1],[0, 1, 0]], torch.zeros(m,m), torch.zeros(m,m)])\nC_mod = 
torch.tensor([[-10, 10,    0],\n                  [ 28, -1,    0],\n                  [  0,  0, -8/3]])\n\nJ_mod = 2\n\n# H_mod = torch.eye(n)\n#H_mod = H_design\nH_mod_inv = torch.inverse(H_mod)\n\n# Noise Parameters\nlambda_q_mod = 0.8\nlambda_r_mod = 1\n\n# Noise Matrices\nQ_mod = (lambda_q_mod**2) * torch.eye(m)\nR_mod = (lambda_r_mod**2) * torch.eye(n)" }, { "alpha_fraction": 0.7406962513923645, "alphanum_fraction": 0.767106831073761, "avg_line_length": 47.94117736816406, "blob_id": "43a3dad0c6bb6ec6f0901df239ad2c40ff087092", "content_id": "068d59b0207c77abb1073454b1f2d5d724539b9f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 835, "license_type": "no_license", "max_line_length": 292, "num_lines": 17, "path": "/README.md", "repo_name": "XiaoyongNI/KalmanNet-Dataset-1", "src_encoding": "UTF-8", "text": "# KalmanNet-Dataset\n\n## Parameters\n\n* F/f: the evolution model for linear/non-linear cases\n* H/h: the observation model for linear/non-linear cases\n* q: evolution noise\n* r: observation noise\n* J: the order of Taylor series approximation \n\n## Linear case\n\nFor the synthetic linear dataset, we set F and H to take the controllable canonical and inverse canonical forms, respectively. F and H could take dimensions of 2x2, 5x5 and 10x10, while the evolution noise q and observation noise r are kept at a constant gap of 20 dB.\n\n## Non-linear case\n\ndata_gen.pt includes one trajectory of length 6,000,000 of the Lorenz Attractor (LA) model with J=5 and <img src=\"https://render.githubusercontent.com/render/math?math=\\Delta t = 10^{-5}\">. The other folders include Discrete-Time datasets of the LA model of different trajectory lengths and with J=5.\n\n" } ]
2
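The README above defines J as the order of the Taylor-series approximation, and parameters.py builds the Lorenz dynamics from the tensors B and C. Below is a minimal sketch of how those pieces are typically combined into one discrete evolution step; the repository's actual evolution function is not part of this record, so the function name and the A(x) wiring should be treated as assumptions.

```python
import torch

m, J, delta_t = 3, 5, 0.02
# B and C as given in parameters.py (B[1] and B[2] are zero matrices)
B = torch.tensor([[[0., 0., 0.], [0., 0., -1.], [0., 1., 0.]],
                  [[0.] * 3] * 3,
                  [[0.] * 3] * 3])
C = torch.tensor([[-10., 10., 0.],
                  [28., -1., 0.],
                  [0., 0., -8. / 3.]])

def f(x):
    """One discrete step x_{t+1} = F(x_t) x_t, with F ~ expm(A(x) * dt)
    truncated to a J-th order Taylor series."""
    # State-dependent matrix A(x) = C + sum_i x_i * B[i],
    # so that A(x) @ x reproduces the Lorenz derivatives
    A = C + torch.einsum('i,ijk->jk', x.squeeze(), B)
    F = torch.eye(m)
    term = torch.eye(m)
    for j in range(1, J + 1):
        term = term @ (A * delta_t) / j   # accumulates (A*dt)^j / j!
        F = F + term
    return F @ x

x = torch.ones(m, 1)  # matches m1x_0 in parameters.py
print(f(x))
```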
nihadern/bank-closing-prediction
https://github.com/nihadern/bank-closing-prediction
eab94c3f24b644193cb6e4c18e7ebf596f533b68
8a7c1cbfeb2b47f5f4b303caea2d6adfdfeb24f1
243d8acbd07221fe597ecbaf81ab95d2c3aa97b3
refs/heads/master
2022-04-11T11:29:31.472488
2020-02-19T06:37:36
2020-02-19T06:37:36
241,532,889
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.719939112663269, "alphanum_fraction": 0.7229832410812378, "avg_line_length": 17.742856979370117, "blob_id": "b628cdb55dcaec60cccf0a36e52d5036f0607898", "content_id": "a8eeb825b27c4fcc3bfe0541b03312dca2c0ebff", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 657, "license_type": "no_license", "max_line_length": 55, "num_lines": 35, "path": "/Prog_Assi_01.py", "repo_name": "nihadern/bank-closing-prediction", "src_encoding": "UTF-8", "text": "# Importing the libraries\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\n# Importing the dataset\ndataset = pd.read_csv('Bank_Predictions.csv')\n\n# ------ Part-1: Data preprocessing ----------\n\n# Encoding categorical data\n\n# Splitting the dataset into the Training and Test sets\n\n# Feature Scaling\n\n# ------- Part-2: Build the ANN --------\n\n# import keras library and packages\n\n# Initializing the ANN\n\n# Adding the input layer and the first hidden layer\n\n# Adding second hidden layer\n\n# Adding output layer\n\n# Compiling the ANN\n\n# Fitting the ANN to the training set\n\n# Predicting the Test set results\n\n# Making the confusion Matrix\n\n" }, { "alpha_fraction": 0.75, "alphanum_fraction": 0.75, "avg_line_length": 25, "blob_id": "8d6ffc23bf9a7fed723913522cda61c1b2d0d4e8", "content_id": "5a98da5e988938cdf725d4b35e1bea84ad726de4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 28, "license_type": "no_license", "max_line_length": 25, "num_lines": 1, "path": "/README.md", "repo_name": "nihadern/bank-closing-prediction", "src_encoding": "UTF-8", "text": "# bank-closing-prediction\n \n" } ]
2
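Prog_Assi_01.py above is a comment-only skeleton: it names the preprocessing and ANN-building steps but implements none of them. Below is a minimal sketch of one plausible way to fill in those steps; since the column layout of Bank_Predictions.csv is not shown in this record, the feature/label split, the encoding choice, and the layer sizes are all assumptions.

```python
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import confusion_matrix
from keras.models import Sequential
from keras.layers import Dense

dataset = pd.read_csv('Bank_Predictions.csv')
# Assumption: last column is the 0/1 target, the rest are features
X = pd.get_dummies(dataset.iloc[:, :-1], drop_first=True)  # encode categorical data
y = dataset.iloc[:, -1]

# Splitting the dataset into the Training and Test sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)

# Feature scaling
sc = StandardScaler()
X_train = sc.fit_transform(X_train)
X_test = sc.transform(X_test)

# ANN with two hidden layers and a sigmoid output, as the skeleton describes
model = Sequential()
model.add(Dense(6, activation='relu', input_dim=X_train.shape[1]))
model.add(Dense(6, activation='relu'))
model.add(Dense(1, activation='sigmoid'))
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
model.fit(X_train, y_train, batch_size=10, epochs=100)

# Predicting the Test set results and making the confusion matrix
y_pred = (model.predict(X_test) > 0.5).astype(int).ravel()
print(confusion_matrix(y_test, y_pred))
```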
uros94/fake_news
https://github.com/uros94/fake_news
e7dd999601b19d4602ce6861e020fa70de2c4a8c
965c98c07682d1f601c0cda8b31bfb11e9efdb9c
17c8eb2ecaa5256e184e42556b075eb81d1bbf4f
refs/heads/master
2020-04-30T00:08:04.394868
2019-05-15T08:50:29
2019-05-15T08:50:29
176,495,470
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7656404972076416, "alphanum_fraction": 0.7725918292999268, "avg_line_length": 58.235294342041016, "blob_id": "558ad64e6a5a3875699e7e646eea500e6a7cd631", "content_id": "c90aa0f15823f70bbd63ba54cd51526f2b684663", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1007, "license_type": "no_license", "max_line_length": 289, "num_lines": 17, "path": "/scraper/README.md", "repo_name": "uros94/fake_news", "src_encoding": "UTF-8", "text": "# fake_news\nFake news work\n\nThis scraper is \"semi-automatic\". It does not scrape data automatically from a specific facebook page, but goes through already downloaded HTML of a post that should be scrapped. Reason for this is mainly because we wanted to use only data gathered from specific posts and not all of them.\n\nscraper_comments: scrapes data about a post and it's comments (users, text, date...)\n* input: HTML\n* output: json file {'page': page_name,'post_content': post_content, 'post_udate': post_udate,'reactions_link': post_reactions_link, 'shares_link': post_shares_link, 'comments': comments}\n \nscraper_reactions: scrapes data about reactions to a post (users, reactions)\n* input: HTML\n* output: json file {'reactions_link': reactions_link, 'reactions': reactions} \n\nhtml_to_json: main function, calls methods of both scrapers and connects reactions to posts\n\nData for testing this code could be found here:\nhttps://drive.google.com/drive/folders/1X2sNYPzqI86OGLZEi9BldR6Za0wesFaO?usp=sharing\n" }, { "alpha_fraction": 0.560600757598877, "alphanum_fraction": 0.5792874693870544, "avg_line_length": 45.71666717529297, "blob_id": "1d3bb7d9cd16dbd5daa0a89fb02fdfc012795b8a", "content_id": "1d4218a045836ee8a435ec86e04f39d929216cad", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5726, "license_type": "no_license", "max_line_length": 225, "num_lines": 120, "path": "/scraper/scraper_comments.py", "repo_name": "uros94/fake_news", "src_encoding": "UTF-8", "text": "from html.parser import HTMLParser\r\nfrom typing import Any\r\nimport json\r\n\r\nclass MyHTMLParser(HTMLParser):\r\n def __init__(self, *args, **kwargs):\r\n super(MyHTMLParser, self).__init__(*args, **kwargs)\r\n #data regarding post\r\n self.page_name = \"\"\r\n self.post_content = \"\"\r\n self.post_udate = \"\"\r\n self.post_reactions_link = \"\"\r\n self.post_shares_link = \"\"\r\n #data regarding comments\r\n self.comments = []\r\n self.temp_link = \"\"\r\n self.temp_name = \"\"\r\n self.temp_comment = \"\"\r\n self.temp_udate = \"\"\r\n self.temp_comment_reactions_link = \"\"\r\n\r\n #NAPISATI FUNKCIJU KOJA PREVOIDI CIRILICU U LATINICU I IYBACUJE YNAKE S 'KVACICAMA'\r\n\r\n data_mode = 0 #data print mode> 0-do not print\r\n # 1-it's users name,\r\n # 2-its a comment,\r\n # 3-finished with current tag - add it to comments list\r\n # 4-it's post content\r\n\r\n def handle_starttag(self, tag, attrs):\r\n if tag == \"div\" and attrs:\r\n if attrs[0][1] == \"_5pbx userContent _3576\": # post content is represented using div part of '_5pbx userContent _3576' class\r\n self.data_mode = 4 # letting data handler know that it should expect post content\r\n\r\n if attrs[0][1] == \"_10lo _10lp\": # if comment has reactions they are inside div part of '_10lo _10lp' class\r\n self.data_mode = 5 # in next iteration following section will catch element with link to reactions\r\n pass\r\n\r\n elif self.data_mode == 5 and tag == \"a\": # catching reactions link\r\n 
self.temp_comment_reactions_link = \"https://www.facebook.com\"+attrs[5][1]\r\n self.data_mode = 0\r\n\r\n elif tag==\"a\" and attrs: #looking for links\r\n if attrs[0][1] == \"_2x4v\": #links representing list of reactions are part of '_2x4v' class\r\n self.post_reactions_link = \"https://www.facebook.com\"+attrs[1][1]\r\n print(\"REACTIONS\", self.post_reactions_link) #link to reactions\r\n\r\n elif attrs[0][1] == \"_ipm _2x0m\": #links representing list of shares are part of '_ipm _2x0m' class\r\n self.post_shares_link = \"https://www.facebook.com\"+attrs[2][1]\r\n\r\n elif len(attrs)>2:\r\n if attrs[2][1] == \" UFICommentActorName\": #links representing user are part of ' UFICommentActorName' class\r\n self.temp_link = attrs[5][1][0:-14]\r\n print(\"USER LINK\", self.temp_link) #link to user profile\r\n self.data_mode= 1 #letting data handler know that it should expect a users name\r\n\r\n elif tag == \"img\" and attrs: # looking for link to users profile\r\n if attrs[0][1] == \"_s0 _4ooo _5xib _5sq7 _44ma _rw img\": # are part of '_s0 _4ooo _5xib _5sq7 _44ma _rw img' class\r\n self.page_name = attrs[3][1]\r\n print(\"PAGE NAME\", self.page_name) # page name\r\n\r\n elif tag == \"span\" and attrs: # looking for link to users profile\r\n if attrs[0][1] == \"UFICommentBody\": # links representing users comment are part of 'UFICommentBody' class\r\n self.data_mode = 2 #letting data handler know that it should expect a comment text\r\n\r\n elif tag == \"abbr\" and attrs: # looking for timestamp\r\n if attrs[0][1] == \"UFISutroCommentTimestamp livetimestamp\": # elements with timetamp of comment are part of \"UFISutroCommentTimestamp livetimestamp\" class\r\n self.temp_udate = attrs[2][1]\r\n print(\"UDATE\", self.temp_udate) # timestamp\r\n\r\n elif attrs[3][1] == \"_5ptz\": # elements with timetamp of comment are part of \"_5ptz\" class\r\n self.post_udate = attrs[1][1]\r\n print(\"POST UDATE\", self.post_udate)\r\n\r\n\r\n def handle_endtag(self, tag):\r\n if self.data_mode == 3:\r\n self.comments.append({'user': self.temp_name, 'user_link':self.temp_link, 'udate':self.temp_udate, 'comment':self.temp_comment, 'reactions_link':self.temp_comment_reactions_link})\r\n self.temp_comment_reactions_link=\"\"\r\n self.data_mode = 0\r\n elif self.data_mode == 4:\r\n if tag==\"div\":\r\n print (\"POST CONTENT\", self.post_content)\r\n self.data_mode = 0\r\n else:\r\n pass\r\n #print(\"Encountered an end tag :\", tag)\r\n\r\n def handle_data(self, data):\r\n if self.data_mode==1:\r\n #return data\r\n self.temp_name = data\r\n print(\"USER NAME\", self.temp_name)\r\n self.data_mode=0\r\n elif self.data_mode==2:\r\n self.temp_comment = data\r\n print(\"COMMENT\", self.temp_comment)\r\n self.data_mode=3\r\n elif self.data_mode==4:\r\n self.post_content += data\r\n #print(\"POST CONTENT\", data)\r\n else:\r\n pass\r\n\r\ndef scrape_post(html_file, json_file): #html_file - location of file containing html, json_file - destinantion where json data will be saved\r\n parser = MyHTMLParser()\r\n f = open(html_file, \"r\", encoding=\"utf-8\")\r\n if f.mode == 'r':\r\n f1 = f.read()\r\n #print(f1)\r\n parser.feed(f1)\r\n\r\n post_data = {'page': parser.page_name,'post_content': parser.post_content, 'post_udate': parser.post_udate,'reactions_link': parser.post_reactions_link, 'shares_link': parser.post_shares_link, 'comments': parser.comments}\r\n with open(json_file, 'w', encoding='utf8') as outfile:\r\n json.dump(post_data, outfile, indent=4, sort_keys=True, ensure_ascii=False)\r\n print (\"saved data to:\", 
json_file)\r\n\r\n    #HELPER SECTION: dump the post's reaction link to a file\r\n    #f = open(\"C:\\\\Users\\\\Win 10\\\\Desktop\\\\reactions.txt\", \"a\")\r\n    #f.write(parser.post_reactions_link+'\\n')\r\n" }, { "alpha_fraction": 0.8248175382614136, "alphanum_fraction": 0.8321167826652527, "avg_line_length": 44.66666793823242, "blob_id": "0fdf783af47bf08c7b0efc9c53a02d7d12752b6", "content_id": "98f04f2eba441a24bb6030899dc9b60f45f5db51", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 137, "license_type": "no_license", "max_line_length": 76, "num_lines": 3, "path": "/harmonic_blc/README.md", "repo_name": "uros94/fake_news", "src_encoding": "UTF-8", "text": "# Hoax classification via Harmonic Boolean Label Algorithm\n\nharmonic_blc_2.py contains the implementation of the Harmonic BLC algorithm.\n" }, { "alpha_fraction": 0.5372115969657898, "alphanum_fraction": 0.5697107315063477, "avg_line_length": 42.59420394897461, "blob_id": "57f4b7caf56e9471065d6591fa4f9616ec904c1a", "content_id": "fb7792f6494e9fc3cb1da6e2c68344ae1c904b0d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3077, "license_type": "no_license", "max_line_length": 145, "num_lines": 69, "path": "/scraper/scraper_reactions.py", "repo_name": "uros94/fake_news", "src_encoding": "UTF-8", "text": "from html.parser import HTMLParser\r\nfrom typing import Any\r\nimport json\r\n\r\nclass MyHTMLParser(HTMLParser):\r\n    def __init__(self, *args, **kwargs):\r\n        super(MyHTMLParser, self).__init__(*args, **kwargs)\r\n        #data regarding user reactions\r\n        self.reactions = []\r\n        self.reactions_link = \"\"\r\n        self.temp_reaction = \"\"\r\n        self.temp_name = \"\"\r\n        self.temp_user_link = \"\"\r\n\r\n\r\n    def detect_reaction(self, icon):\r\n        if icon == \"_2p78 _2p7a _9-- img sp_Erm1GyxvO7I sx_72c12d\":\r\n            return 'like'\r\n        if icon == \"_2p78 _2p7a _9-- img sp_Erm1GyxvO7I sx_52e699\":\r\n            return 'angry'\r\n        if icon == \"_2p78 _2p7a _9-- img sp_Erm1GyxvO7I sx_aef885\":\r\n            return 'sad'\r\n        if icon == \"_2p78 _2p7a _9-- img sp_Erm1GyxvO7I sx_c3c538\":\r\n            return 'haha'\r\n        if icon == \"_2p78 _2p7a _9-- img sp_Erm1GyxvO7I sx_19d7b0\":\r\n            return 'wow'\r\n        if icon == \"_2p78 _2p7a _9-- img sp_Erm1GyxvO7I sx_2c6fed\":\r\n            return 'love'\r\n        return ''\r\n\r\n    def handle_starttag(self, tag, attrs):\r\n        if tag==\"a\" and attrs: #looking for link to user's profile\r\n            if attrs[0][1] == \"_5i_s _8o _8r lfloat _ohe\": #links representing user are part of '_5i_s _8o _8r lfloat _ohe' class\r\n                self.temp_user_link=attrs[1][1][0:-36]\r\n                #print(\"USER LINK\", self.temp_user_link) #link to user profile\r\n\r\n        elif tag == \"img\" and attrs: # looking for user's name, which can be found among attrs of img\r\n            if attrs[0][1] == \"_s0 _4ooo img\": # img with user name is part of \"_s0 _4ooo img\" class\r\n                self.temp_name = attrs[3][1]\r\n                #print(\"USER NAME\", self.temp_name) # user's name\r\n\r\n        elif tag == \"i\": # looking for user's reaction, which can be detected by its emoji (icon)\r\n            icon = self.detect_reaction(attrs[0][1])\r\n            if icon != '':\r\n                self.temp_reaction = icon\r\n                self.reactions.append({'user': self.temp_name, 'user_link': self.temp_user_link, 'reaction': self.temp_reaction})\r\n                #print(\"USER REACTION\", icon) # reaction\r\n\r\n    def handle_endtag(self, tag):\r\n        pass\r\n        #print(\"Encountered an end tag :\", tag)\r\n\r\n    def handle_data(self, data):\r\n        pass\r\n        #print(\"Encountered some data :\", data)\r\n\r\ndef scrape_reactions(html_file, json_file): #html_file - location of file containing html, json_file - destination where json data will be saved\r\n    parser = MyHTMLParser()\r\n    f = open(html_file, \"r\", encoding=\"utf-8\")\r\n    if f.mode == 'r':\r\n        parser.reactions_link = f.readline()[0:-1] #last sign is '\\n' for new row, we dont need that\r\n        f1 = f.read()\r\n        print(parser.reactions_link)\r\n        parser.feed(f1)\r\n\r\n    post_data = {'reactions_link': parser.reactions_link, 'reactions': parser.reactions}\r\n    with open(json_file, 'w', encoding='utf8') as outfile:\r\n        json.dump(post_data, outfile, indent=4, sort_keys=True, ensure_ascii=False)\r\n    print (\"saved data to:\", json_file)\r\n" }, { "alpha_fraction": 0.5624397397041321, "alphanum_fraction": 0.5708775520324707, "avg_line_length": 41.0405387878418, "blob_id": "beedbea0577805f65b251f4df7690c476b900b9b", "content_id": "0b5a35d171ae54c91307c0534c51d762ba8c431a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 12444, "license_type": "no_license", "max_line_length": 121, "num_lines": 296, "path": "/harmonic_blc/harmonic_blc_2.py", "repo_name": "uros94/fake_news", "src_encoding": "UTF-8", "text": "import numpy as np\nimport random\nimport unittest\nfrom user import User\nfrom post import Post\nimport json\nimport utils\nfrom random import randrange\n\n#addresses of datasets - posts + comments\ndatasets = utils.datasets\n\nclass VotingGraph(object):\n    \"\"\"This class represents a bipartite graph of users and items. Users and items are connected via\n    edges that can be labeled either +1 (True) or -1 (False) according to whether the user thinks\n    the item is true or false. We could use booleans as edge labels, but we would lose the ability to\n    represent levels of certainty later on, so for now, we use integers.\n    It is possible to label items as +1 (True) or -1 (False), in which case the items have a known truth\n    value; alternatively, the item label can be left as None, and it can be inferred later.\"\"\"\n\n    def __init__(self, start_pos_weight=5.01, start_neg_weight=5.0, post_inertia=5.0):\n        \"\"\"Initializes the graph.\n        The user initially has a value of\n        (start_pos_weight - start_neg_weight) / (start_pos_weight + start_neg_weight),\n        and start_pos_weight and start_neg_weight essentially give the inertia with which\n        we modify the opinion about a user as new evidence accrues.\n        :param start_pos_weight: Initial positive weight on a user.\n        :param start_neg_weight: Initial negative weight on a user.\n        :param post_inertia: Inertia on a post.\n        \"\"\"\n        # Dictionary from items to users.\n        self.posts = {} # Dictionary from id to item\n        self.users = {} # Dictionary from id to user\n        self.edges = [] # To sample.\n        self.start_pos_weight = start_pos_weight\n        self.start_neg_weight = start_neg_weight\n        self.post_inertia = post_inertia\n\n    def get_hoax(self):\n        return [it.id for it in self.iter_posts() if it.true_value == -1.0]\n\n    def get_real(self):\n        return [it.id for it in self.iter_posts() if it.true_value == 1.0]\n\n    def get_user_ids(self):\n        return self.users.keys()\n\n    def get_post_ids(self):\n        return self.posts.keys()\n\n    def iter_posts(self):\n        return self.posts.values()\n\n    def iter_users(self):\n        return self.users.values()\n\n    def get_user(self, user_id):\n        return self.users.get(user_id)\n\n    def get_post(self, post_id):\n        return self.posts.get(post_id)\n\n    def perform_inference(self, num_iterations=5):\n        \"\"\"Performs inference on the graph.\"\"\"\n        for u in self.users.values():\n            u.initialize()\n        for it in self.posts.values():\n
it.initialize()\n for _ in range(num_iterations):\n [u.compute_user_value() for u in self.users.values()]\n [it.compute_post_value() for it in self.posts.values()]\n\n def print_stats(self):\n \"\"\"Prints graph statistics, mainly for testing purposes\"\"\"\n num_posts_truth_known = len([it for it in self.iter_posts() if it.true_value is not None])\n num_posts_inferred_known = len([it for it in self.iter_posts() if it.inferred_value is not None])\n num_posts_testing = len([it for it in self.iter_posts() if it.is_testing])\n print (\"Num items:\", len(self.posts))\n print (\"Num items with truth known:\", num_posts_truth_known)\n print (\"Num items with inferred known:\", num_posts_inferred_known)\n print (\"Num items testing:\", num_posts_testing)\n print (\"Min degree:\", min([it.degree() for it in self.iter_posts()]))\n print (\"Num users:\", len(self.users))\n print (\"Num likes:\", sum([len(u.posts) for u in self.users.values()]))\n\n def evaluate_inference(self, fraction=100, num_runs=50):\n \"\"\"\n Evaluation function we use.\n :param num_chunks: In how many chunks we split the data.\n :param num_eval_chunks: number of chunks used for evaluation, if different from num_chunks.\n :param learn_from_most: If True, we learn from all but one chunk (on which we test).\n If False, we learn from one chunk, test on all others.\n :return: A dictionary of values for creating plots and displaying performance.\n \"\"\"\n post_ids = list(self.get_post_ids())\n num_posts = len(post_ids)\n chunk_len = num_posts / fraction\n correct = 0.0\n tot = 0.0\n ratios = []\n # We split the items into k portions, and we cycle, considering each\n # of these portions the testing items.\n for run_idx in range(num_runs):\n self.perform_inference()\n # vg.print_stats()\n # print(\"Performed inference for chunk %d\" % chunk_idx)\n # Measures the accuracy.\n run_correct = 0.0\n run_total = 0.0\n for it_id in post_ids[int(chunk_len):]:\n it = self.get_post(it_id)\n tot_cor = it.is_correctly_classified()\n tot += tot_cor[0]\n correct += tot_cor[1]\n run_total += tot_cor[0]\n run_correct += tot_cor[1]\n run_ratio = run_correct / run_total\n ratios.append(run_ratio)\n print (\"One run result:\", run_ratio)\n # Computes the averages.\n ratio_correct = correct / tot\n return dict(\n ratio_correct=ratio_correct,\n stdev=np.std(ratios),\n )\n\n\n def evaluate_inference_given_learning(self, fun_learning):\n \"\"\"\n :param: A function fun_learning, which tells us if we have to learn from an item\n with a given id yes or no.\n :return: The ratio of correctly classified items.\n \"\"\"\n post_ids = self.get_post_ids()\n # We want to measure the accuracy for posts that have at least 1, 2, ..., LIKES_MEASURED likes.\n correct = 0.0\n tot = 0.0\n # Sets which items are learning, and which are testing.\n test_posts = []\n for it_id in post_ids:\n is_testing = not fun_learning(it_id)\n self.get_post(it_id).set_is_testing(is_testing)\n if is_testing:\n test_posts.append(it_id)\n # Performs the inference.\n self.perform_inference()\n # vg.print_stats()\n # print(\"Performed inference for chunk %d\" % chunk_idx)\n # Measures the accuracy.\n for it_id in test_posts:\n it = self.get_post(it_id)\n tot_cor = it.is_correctly_classified()\n tot += tot_cor[0]\n correct += tot_cor[1]\n return correct / tot if tot > 0 else 1\n\n def evaluate_inference_selecting_prop_likes(self, frac=0.1):\n \"\"\"\n Evaluates the accuracy over ONE run, selecting a fraction frac of items, where each item\n is selected with probability proportional to the number 
of links.\n        :param frac: Fraction of items considered.\n        :return: The ratio of correct items. \n        \"\"\"\n        learn_posts = set()\n        post_ids = self.get_post_ids()\n        # How many items do we have to pick?\n        num_posts = max(1, int(0.5 + frac * len(post_ids)))\n        # How many we have picked already?\n        num_picked = 0\n        while num_picked < num_posts:\n            it_id, _ = random.choice(self.edges)\n            if it_id not in learn_posts:\n                num_picked += 1\n                learn_posts.add(it_id)\n        # Ok, now we do the learning.\n        for it_id in post_ids:\n            self.get_post(it_id).set_is_testing(it_id not in learn_posts)\n        self.perform_inference()\n        correct = 0.0\n        tot = 0.0\n        for it_id in post_ids:\n            it = self.get_post(it_id)\n            tot_cor = it.is_correctly_classified()\n            tot += tot_cor[0]\n            correct += tot_cor[1]\n        return correct / tot if tot > 0 else 1.0\n\n    #Create graph\n    def create_graph(self, datasets=utils.datasets, mode=0):\n        \"\"\"\n\n        \"\"\"\n        posts_out = []\n        size = len(datasets)\n        if mode == 1:\n            posts_out.append(randrange(size))\n\n        for post_file in datasets:\n            with open(post_file, encoding='utf-8') as f:\n                post = json.load(f)\n                pid = utils.post2pid(post[\"page\"] + post[\"post_udate\"]) # for now, a post id is PAGE_NAME + UDATE\n                if pid: # can't identify the post - skip\n                    if mode ==1 and posts_out[0]==pid:\n                        p = Post(pid, post[\"page\"], post[\"post_content\"], post[\"post_udate\"], None, True)\n                        p.compute_post_value()\n                    else:\n                        p = Post(pid, post[\"page\"], post[\"post_content\"], post[\"post_udate\"], utils.t_vector[pid], False)\n                    self.posts[pid] = p\n                    if \"reactions\" in post:\n                        for reaction in post[\"reactions\"]:\n                            uid = utils.user2uid(reaction[\"user_link\"])\n                            if uid:\n                                u = self.users.get(uid)\n                                if u is None:\n                                    u = User(uid, pos_weight=self.start_pos_weight, neg_weight=self.start_neg_weight)\n                                    self.users[uid] = u\n                                u.add_post(p, utils.classify_reaction(reaction[\"reaction\"]))\n                                p.add_user(u, utils.classify_reaction(reaction[\"reaction\"])) #TODO: change polarity\n                                self.edges.append((pid, uid))\n                    if \"comments\" in post:\n                        for comment in post[\"comments\"]:\n                            uid = utils.user2uid(comment[\"user_link\"])\n                            if uid:\n                                u = self.users.get(uid)\n                                if u is None:\n                                    u = User(uid, pos_weight=self.start_pos_weight, neg_weight=self.start_neg_weight)\n                                    self.users[uid] = u\n                                u.add_post(p, 1)\n                                p.add_user(u, 1) #TODO: change polarity\n                                self.edges.append((pid, uid))\n        print (\"graph built\")\n        return posts_out\n\ndef test_1(g): # First, we do the analysis of leave-one-page-out.\n    frac_correct_all = [] # On complete graph\n    for pg in g.posts:\n        #print (\"post:\", g.get_post(pg).page, g.get_post(pg).udate)\n        # Creates the function that classifies items.\n        def is_learning(itid):\n            return itid != pg\n        fc = g.evaluate_inference_given_learning(is_learning)\n        print (\"For all, correctness:\", fc)\n        frac_correct_all.append(fc)\n        #frac_correct_w.append(posts_per_page[pg])\n    print (\"Final average correctness for leave-one-page-out all:\", np.average(frac_correct_all))\n    print (\"Standard deviation:\", np.std(frac_correct_all))\n\ndef test_2(g):\n    # Now, let's try to keep HALF of the pages out.\n    # Now, we do the analysis in which we randomly select two\n    # pages hoax and two non-hoax, and we learn from those alone.\n    frac_correct_all_half = [] # On complete graph\n    fraction_pages = 0.5\n    hoax_pages = g.get_hoax()\n    nonhoax_pages = g.get_real()\n    # First, for all.\n    num_hoax_in = max(1, int(0.5 + len(hoax_pages) * fraction_pages))\n    num_nonhoax_in = max(1, int(0.5 + len(nonhoax_pages) * fraction_pages))\n    for _ in range(50):\n        # Picks pages in and out.\n        
random.shuffle(hoax_pages)\n random.shuffle(nonhoax_pages)\n learn_pages = hoax_pages[:num_hoax_in] + nonhoax_pages[:num_nonhoax_in]\n\n # Defines the function.\n def is_learning(itid):\n return itid in learn_pages\n\n fc = g.evaluate_inference_given_learning(is_learning)\n print (\"Learning from 50% of each kind, all:\", fc)\n frac_correct_all_half.append(fc)\n\n print (\"Final average correctness for learning from half of each kind, all:\", np.average(frac_correct_all_half))\n print (\"avg\", np.average(frac_correct_all_half))\n print (\"stdev\", np.std(frac_correct_all_half))\n\ng = VotingGraph()\nout = g.create_graph(datasets)\ng.print_stats()\n\n\"\"\"TESTS = [10, 20, 40, 100, 200, 400, 1000]\nresults_all_x = []\nresults_all_y = []\nresults_all_err = []\nfor f in TESTS[:2]:\n d = g.evaluate_inference(fraction=f)\n print (f, d['ratio_correct'])\n results_all_x.append(f)\n results_all_y.append(d['ratio_correct'])\n results_all_err.append(d['stdev'])\nprint (results_all_x)\nprint (results_all_y)\nprint (results_all_err)\"\"\"\n\ntest_1(g)\ntest_2(g)\n" }, { "alpha_fraction": 0.586624801158905, "alphanum_fraction": 0.5897666215896606, "avg_line_length": 40.25925827026367, "blob_id": "adc6987148c74bcea95e425dcc59dca1b595b7dc", "content_id": "ee5a249d55eade992ea5abbfe792278be8624b65", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2228, "license_type": "no_license", "max_line_length": 113, "num_lines": 54, "path": "/harmonic_blc/user.py", "repo_name": "uros94/fake_news", "src_encoding": "UTF-8", "text": "import numpy as np\nimport random\nimport unittest\n\nclass User(object):\n \"\"\"Class representing a user. See VotingGraph for general comments.\"\"\"\n\n def __init__(self, id, known_value=None, neg_weight=1.0, pos_weight=1.5):\n \"\"\"Initializes a user.\n :param known_value: None if we don't know the goodness of the user, otherwise, the goodness of the\n user as a number between 0 and 1.\n :param pos_weight: Initial positive weight of a user.\n :param neg_weight: Initial (offset) negative weight of a user. These two weights correspond to how many\n correct and wrong likes we have seen the user do in the past, and is used to initialize the algorithm\n so we need automatically some evidence before we believe a user is right or wrong, with a weak\n initial positive bias.\n \"\"\"\n self.id = id\n self.initial_pos_weight = pos_weight\n self.initial_neg_weight = neg_weight\n self.known_value = known_value\n self.inferred_value = known_value\n self.posts = []\n\n def __repr__(self):\n return repr(dict(\n id=self.id,\n known_value=self.known_value,\n inferred_value=self.inferred_value,\n posts=[post.id for _, post in self.posts]\n ))\n\n def add_post(self, post, pol):\n \"\"\" Adds an item it with polarity pol to the user. 
\"\"\"\n self.posts.append((pol, post))\n\n def initialize(self):\n self.inferred_value = None\n\n def compute_user_value(self):\n \"\"\"Performs one step of inference on the user.\"\"\"\n pos_w = float(self.initial_pos_weight)\n neg_w = float(self.initial_neg_weight)\n # Iterates over the items.\n for pol, post in self.posts:\n if post.inferred_value is not None:\n delta = pol * post.inferred_value\n # print \" User\", self.id, \"from item\", it.id, \"polarity\", pol, \"delta:\", delta # debug\n if delta > 0:\n pos_w += delta\n else:\n neg_w -= delta\n self.inferred_value = (pos_w - neg_w) / (pos_w + neg_w)\n #print (\"User\", self.id, \"inferred value:\", pos_w, neg_w, self.inferred_value)\n" }, { "alpha_fraction": 0.5644153356552124, "alphanum_fraction": 0.5736387968063354, "avg_line_length": 39.48192596435547, "blob_id": "36501185f46c67957d11694f72c7d64064aee598", "content_id": "a80e166f0b1afd293fe50c4aaaeae27ab76c670e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3361, "license_type": "no_license", "max_line_length": 107, "num_lines": 83, "path": "/harmonic_blc/post.py", "repo_name": "uros94/fake_news", "src_encoding": "UTF-8", "text": "\nimport numpy as np\nimport random\nimport unittest\n\n\nclass Post(object):\n \"\"\"Class representing an item. See VotingGraph for general comments.\"\"\"\n\n def __init__(self, id, page, content, udate, true_value=None, is_testing=False, inertia=1.0):\n \"\"\"\n Initializes an item.\n :param known_value: None if we don't know the truth value of the item; +1 for True, -1 for False.\n :param true_value: The true value, if known.\n :param is_testing: If true, then we don't use the true value in the inference.\n \"\"\"\n self.id = id\n self.page = page\n self.udate = udate\n self.content = content\n self.inferred_value = None # Value computed by the inference.\n # True value (ground truth)\n self.true_value = 0.0 if true_value is None else 1.0 if true_value else -1.0\n self.is_testing = is_testing\n # Inertia for changing the belief.\n self.inertia = inertia\n self.users = [] # List of users who voted on the item.\n\n def __repr__(self):\n return repr(dict(\n id=self.id,\n inferred_value=self.inferred_value,\n true_value=self.true_value,\n is_testing=self.is_testing,\n correct=self.is_correctly_classified(),\n users=[u.id for _, u in self.users]\n ))\n\n def add_user(self, u, pol):\n \"\"\"Add user u with polarity pol to the item.\"\"\"\n self.users.append((pol, u))\n\n def set_is_testing(self, is_testing):\n self.is_testing = is_testing\n\n def set_true_value(self, true_value):\n self.true_value = None if true_value is None else 1.0 if true_value else -1.0\n\n def is_correctly_classified(self):\n \"\"\"Returns (t, c), where t is 1 whenever we can measure whether the\n classification is correct, and c is the correctness (0 = wrong, 1 = correct).\n \"\"\"\n if (not self.is_testing) or self.true_value is None or self.inferred_value is None:\n return (0.0, 0.0)\n else:\n return (1.0, 1.0) if self.inferred_value * self.true_value > 0 else (1.0, 0.0)\n\n def initialize(self):\n \"\"\"Initializes the item, setting its inferred_value to the known value\n unless we are testing.\"\"\"\n self.inferred_value = None if self.is_testing else self.true_value\n\n def compute_post_value(self):\n \"\"\"Performs one step of inference for the item.\"\"\"\n if (self.true_value is None) or self.is_testing:\n # Performs actual inference\n pos_w = neg_w = self.inertia\n for pol, u in self.users:\n if 
u.inferred_value is not None:\n                    delta = pol * u.inferred_value\n                    # print \"  Item \", self.id, \"from user\", u.id, \"polarity\", pol, \"delta:\", delta # debug\n                    if delta > 0:\n                        pos_w += delta\n                    else:\n                        neg_w -= delta\n            self.inferred_value = (pos_w - neg_w) / (pos_w + neg_w)\n            #print (\"Item\", self.id, \"inferred value\", pos_w, neg_w, self.inferred_value) # debug\n        else:\n            # The value is known, and we are allowed to use it.\n            self.inferred_value = self.true_value\n            # print \"Item\", self.id, \"inferred = truth\", self.inferred_value\n\n    def degree(self):\n        return len(self.users)\n" }, { "alpha_fraction": 0.5300314426422119, "alphanum_fraction": 0.5355756878852844, "avg_line_length": 33.149349212646484, "blob_id": "427524c3dc9160a96fa44da2d90b167b2c684d38", "content_id": "cbc042f0da3d3e7425d434367abb5e124c83deee", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5411, "license_type": "no_license", "max_line_length": 99, "num_lines": 154, "path": "/harmonic_blc/utils.py", "repo_name": "uros94/fake_news", "src_encoding": "UTF-8", "text": "from html.parser import HTMLParser\r\nfrom typing import Any\r\nimport json\r\nimport os\r\nimport requests\r\nimport numpy as np\r\nfrom scipy.sparse import dok_matrix, csr_matrix\r\nfrom sklearn import linear_model\r\nfrom sklearn import metrics\r\nfrom sklearn.model_selection import cross_val_score\r\nimport csv\r\nimport random\r\nfrom random import randrange\r\n\r\n#addresses of datasets - posts + comments\r\nreal_data = 'C:\\\\Users\\\\Win 10\\\\Desktop\\\\dataset\\\\real_json\\\\'\r\nfake_data = 'C:\\\\Users\\\\Win 10\\\\Desktop\\\\dataset\\\\fake_json\\\\'\r\ndatasets = []\r\nfor dir in [real_data, fake_data]:\r\n    for post_file in os.listdir(dir):\r\n        datasets.append(dir+post_file)\r\n\r\n#create vectors of posts, users, t_vector\r\ndef create_vectors(datasets=datasets, mode=0):\r\n    \"\"\" mode is used to split the dataset in different ways (used for testing accuracy)\r\n    0 - normal (all posts used)\r\n    1 - one out (all posts except one random post are used, and the skipped one is returned as posts_out)\r\n    2 - half out (half of the posts are used, and the skipped ones are returned as posts_out)\"\"\"\r\n\r\n    users = []\r\n    posts = []\r\n    t_vector = [] # vector of truthfulness for posts\r\n    all = []\r\n\r\n    posts_out = []\r\n    size = len(datasets)\r\n    if mode == 1:\r\n        posts_out.append(randrange(size))\r\n    elif mode == 2:\r\n        #TODO: modify so that FAKE/2 and REAL/2 are held out, not (REAL+FAKE)/2\r\n        posts_out = random.sample(range(size), int(size/2))\r\n        posts_out.sort()\r\n\r\n    for post_file, cnt in zip(datasets, range(size)):\r\n        with open(post_file, encoding='utf-8') as f:\r\n            post = json.load(f)\r\n            post_id = post[\"page\"]+post[\"post_udate\"] #for now, a post id is PAGE_NAME + UDATE\r\n\r\n            if posts_out and posts_out[0]==cnt:\r\n                posts_out.remove(cnt)\r\n                posts_out.append(post_file)\r\n                continue\r\n\r\n            if post_id!= '' and not post_id in posts:\r\n                posts.append(post_id)\r\n                t_vector.append(post_file[0:len(real_data)] == real_data)\r\n\r\n            if \"reactions\" in post:\r\n                for reaction in post[\"reactions\"]:\r\n                    user = reaction[\"user_link\"]\r\n                    all.append(user)\r\n                    if not user in users:\r\n                        users.append(user)\r\n            if \"comments\" in post:\r\n                for comment in post[\"comments\"]:\r\n                    user = comment[\"user_link\"]\r\n                    all.append(user)\r\n                    if not user in users:\r\n                        users.append(user)\r\n    ### LEFTOVER DEBUG OUTPUT\r\n    print(len(users),\"USERS\")\r\n    print(len(all),\"ALL\")\r\n    print(len(posts),\"POSTS\")\r\n    #for i in range(len(posts)):\r\n    #    print(t_vector[i], posts[i])\r\n\r\n    return posts, users, t_vector, posts_out\r\n\r\n\r\ndef create_cut_vectors(datasets, min_post_like=10, min_user_like=30, print_results=False):\r\n    posts = []\r\n    t_vector = []\r\n    users = []\r\n    all = [] # temporary\r\n    \"\"\"returns the dataset filtered with these parameters:\r\n    min_post_like: posts with at least n likes\r\n    min_user_like: users that have given at least n likes\r\n    print_results: if True, prints the filtering effect\r\n    output: sparse like_matrix and page/hoax label columns\r\n    \"\"\"\r\n    for dir in datasets: # going through data directories\r\n        for post_file in os.listdir(dir):\r\n            with open(dir + post_file, encoding='utf-8') as f:\r\n                post = json.load(f)\r\n\r\n                # posts filtering\r\n                if ((len(post[\"comments\"]) + len(post[\"comments\"])) >= min_post_like):\r\n                    post_id = post[\"page\"] + post[\"post_udate\"] # for now, a post id is PAGE_NAME + UDATE\r\n                    if post_id != '' and not post_id in posts:\r\n                        posts.append(post_id)\r\n                        t_vector.append(dir == real_data)\r\n\r\n                    if \"reactions\" in post:\r\n                        for reaction in post[\"reactions\"]:\r\n                            user = reaction[\"user_link\"]\r\n                            all.append(user)\r\n                            if not user in users:\r\n                                users.append(user)\r\n                    if \"comments\" in post:\r\n                        for comment in post[\"comments\"]:\r\n                            user = comment[\"user_link\"]\r\n                            all.append(user)\r\n                            if not user in users:\r\n                                users.append(user)\r\n    ### LEFTOVER DEBUG OUTPUT\r\n    print(len(users))\r\n    print(len(all))\r\n    # users filtering\r\n    for u in users:\r\n        if (all.count(u) < min_user_like):\r\n            users.remove(u)\r\n    print(len(users))\r\n\r\n    return posts, users, t_vector\r\n\r\n#get user id based on its user_link\r\ndef user2uid(user_link):\r\n    try:\r\n        return int(users.index(user_link))\r\n    except:\r\n        return False\r\n\r\n#get post id based on its data\r\ndef post2pid(post):\r\n    try:\r\n        return int(posts.index(post))\r\n    except:\r\n        return False\r\n\r\ndef classify_reaction(reaction):\r\n    if reaction == \"like\":\r\n        return 1\r\n    elif reaction == \"love\":\r\n        return 1\r\n    elif reaction == \"haha\":\r\n        return -1\r\n    elif reaction == \"wow\":\r\n        return 1\r\n    elif reaction == \"angry\":\r\n        return 1\r\n    elif reaction == \"sad\":\r\n        return -1\r\n\r\nposts, users, t_vector, posts_out = create_vectors()" }, { "alpha_fraction": 0.5041288733482361, "alphanum_fraction": 0.5091376900672913, "avg_line_length": 30.328227996826172, "blob_id": "258aa2dfe73ce16b96d2af70b3ba8b9f15b381e1", "content_id": "6190d15a84c22853aa8a4a48c61b834abfabb9fa", "detected_licenses": [], "is_generated": false, "is_vendor":
false, "language": "Python", "length_bytes": 14799, "license_type": "no_license", "max_line_length": 131, "num_lines": 457, "path": "/scraper 2.0/scraper.py", "repo_name": "uros94/fake_news", "src_encoding": "UTF-8", "text": "import getpass\r\nimport calendar\r\nimport os\r\nimport platform\r\nimport sys\r\nimport urllib.request\r\nimport codecs\r\nimport json\r\n\r\nfrom selenium import webdriver\r\nfrom selenium.common.exceptions import TimeoutException\r\nfrom selenium.webdriver.chrome.options import Options\r\nfrom selenium.webdriver.common.by import By\r\nfrom selenium.webdriver.support import expected_conditions as EC\r\nfrom selenium.webdriver.support.ui import WebDriverWait\r\nfrom selenium.webdriver.common.keys import Keys\r\n\r\n# -------------------------------------------------------------\r\n# -------------------------------------------------------------\r\n\r\n# Global Variables\r\ndriver = None\r\ndriverTemp=None\r\n\r\nlist=[]\r\n# -------------------------------------------------------------\r\n# -------------------------------------------------------------\r\ndef get_post(driver):\r\n p = get_page_neme(driver)\r\n c = get_post_content(driver)\r\n t = get_post_timestamp(driver)\r\n return p,c, t\r\n\r\ndef get_post_content(driver):\r\n try:\r\n content = driver.find_element_by_xpath(\".//div[@class='_5pbx userContent _3576']\").text\r\n return content\r\n except:\r\n try:\r\n content = driver.find_element_by_xpath(\".//div[contains(@class, '_1rg-')]\").text #_5aj7\r\n return content\r\n except:\r\n return \" \"\r\n\r\ndef get_page_neme(driver):\r\n try:\r\n page_name = driver.find_element_by_xpath(\".//span[contains(@class,'fwb fcg')]\").text\r\n return page_name\r\n except:\r\n try:\r\n page_name = driver.find_element_by_xpath(\".//a[contains(@class, 'profileLink')]\").text\r\n return page_name\r\n except:\r\n return \" \"\r\n\r\ndef get_post_timestamp(driver):\r\n try:\r\n timestamp = driver.find_element_by_xpath(\".//abbr[@class='_5ptz']\").get_attribute('data-utime')\r\n return timestamp\r\n except:\r\n try:\r\n timestamp = driver.find_element_by_xpath(\".//abbr[@class='_13db timestamp']\").get_attribute('data-utime')\r\n return timestamp\r\n except:\r\n return \" \"\r\n\r\n# -------------------------------------------------------------\r\n\r\n# --Helper Functions for Reactions\r\ndef get_reactions(driver):\r\n try:\r\n react = driver.find_element_by_xpath(\"//*[div[@class='_66lg']]\")\r\n return react\r\n except:\r\n try:\r\n react = driver.find_element_by_xpath(\"//*[div[@class='_ipp']]\")\r\n return react\r\n except:\r\n return \" \"\r\n\r\ndef get_reactions_links(divv, tag):\r\n try:\r\n return divv.find_element_by_tag_name(tag)\r\n except:\r\n return \"\"\r\n\r\ndef get_types_of_reactions():\r\n try:\r\n names=driverTemp.find_elements_by_xpath(\".//div[@class='_3p56']\")\r\n return names\r\n except:\r\n return \"\"\r\n\r\ndef scroll_reactions(kinds):\r\n try:\r\n while kinds.find_element_by_xpath(\".//div[@class='clearfix mtm uiMorePager stat_elem _52jv']\"):\r\n kinds.find_element_by_xpath(\".//div[@class='clearfix mtm uiMorePager stat_elem _52jv']\").click()\r\n scroll_reactions(kinds)\r\n return \" \"\r\n except:\r\n return \" \"\r\n\r\ndef get_divs_with_reactions():\r\n try:\r\n divs = driverTemp.find_elements_by_xpath(\".//div[@class='_5i_p']\")\r\n for d in divs:\r\n scroll_reactions(d)\r\n return divs\r\n except:\r\n return \"\"\r\n\r\ndef get_persons_who_reacted(kinds):\r\n try:\r\n scroll_reactions(kinds)\r\n persons = 
kinds.find_elements_by_xpath(\".//div[@class='_5j0e fsl fwb fcb']\")\r\n        return persons\r\n    except:\r\n        return \" \"\r\n\r\ndef get_person_link(divv, tag):\r\n    try:\r\n        return divv.find_element_by_tag_name(tag)\r\n    except:\r\n        return \"\"\r\n\r\n# -------------------------------------------------------------\r\n# -------------------------------------------------------------\r\n\r\n# --Helper Functions for Comments\r\n\r\ndef scroll_comments(divv):\r\n    try:\r\n        while (divv.find_element_by_xpath(\".//div[@class='_4sxd']\")):\r\n            divv.find_element_by_xpath(\".//div[@class='_4sxd']\").click()\r\n            scroll_comments(divv)\r\n        return \" \"\r\n    except:\r\n        return \" \"\r\n\r\ndef get_comments(driver):\r\n    try:\r\n        comments = driver.find_element_by_xpath(\"//*[div[@class='_3w53']]\")\r\n        scroll_comments(comments)\r\n        coms = comments.find_elements_by_xpath(\".//div[contains(@class, '_4eek')]\")\r\n        return coms\r\n    except:\r\n        try:\r\n            comments = driver.find_element_by_xpath(\"//*[div[@class='_3b-9 _j6a']]\")\r\n            scroll_comments(comments)\r\n            coms = comments.find_elements_by_xpath(\".//div[contains(@class, 'UFICommentContentBlock')]\")\r\n            return coms\r\n        except:\r\n            return \" \"\r\n\r\ndef get_a_person_who_commented(d):\r\n    try:\r\n        commentator=d.find_element_by_xpath(\".//a[@class='_6qw4']\")\r\n        tmp = commentator.text\r\n        return commentator.text\r\n    except:\r\n        try:\r\n            commentator = d.find_element_by_xpath(\".//a[contains(@class, 'UFICommentActorName')]\")\r\n            tmp = commentator.text\r\n            return commentator.text\r\n        except:\r\n            return \" \"\r\n\r\ndef get_a_content_of_the_comment(d):\r\n    try:\r\n        content=d.find_element_by_xpath(\".//span[@class='_3l3x']/span\")\r\n        return content.text\r\n    except:\r\n        try:\r\n            content = d.find_element_by_xpath(\".//span[@class='UFICommentBody']/span\")\r\n            return content.text\r\n        except:\r\n            return \" \"\r\n\r\n#def get_timestamp_of_the_comment(d):\r\n\r\ndef get_reactions_on_the_comment(d):\r\n    try:\r\n        reaction_div=d.find_element_by_xpath(\".//div[@class='_6cuq']\")\r\n        return reaction_div\r\n    except:\r\n        return \" \"\r\n\r\ndef get_comments_reactions_link(divv, tag):\r\n    try:\r\n        solution=\"\"\r\n        if (divv.find_element_by_tag_name(tag)):\r\n            solution=divv.find_element_by_tag_name(tag)\r\n        return solution\r\n    except:\r\n        return \"\"\r\n\r\n# -----------------------------------------------------------------------------\r\n# -----------------------------------------------------------------------------\r\ndef replace(react):\r\n    if react == \"Свиђа ми се\":\r\n        return \"like\"\r\n    elif react == \"Љут\":\r\n        return \"angry\"\r\n    elif react == \"Хаха\":\r\n        return \"haha\"\r\n    elif react == \"Тужан\":\r\n        return \"sad\"\r\n    elif react == \"Wow\":\r\n        return \"wow\"\r\n    elif react == \"Воли\":\r\n        return \"love\"\r\n    else:\r\n        return react\r\n\r\ndef extract_and_write_posts(post_link, post_content, reactions, comments):\r\n\r\n    try:\r\n        data = {}\r\n        reaction_json = []\r\n        comments_json=[]\r\n\r\n        data['page_name']=post_content[0]\r\n        data['post_content']=post_content[1]\r\n        data['post_utime']= post_content[2]\r\n        data['post_link'] = post_link\r\n\r\n        # reactions links\r\n        reactions_link = get_reactions_links(reactions, \"a\").get_attribute('href')\r\n\r\n        driverTemp.get(reactions_link)\r\n\r\n        # people who reacted\r\n        types_of_reaction = get_types_of_reactions()\r\n        big_divs = get_divs_with_reactions()\r\n\r\n\r\n        j = 0\r\n        for reaction_name in types_of_reaction:\r\n\r\n            persons = get_persons_who_reacted(big_divs[j])\r\n            number_of_reactions = 
str(len(persons))\r\n\r\n\r\n for person in persons:\r\n\r\n user={}\r\n\r\n person_link = get_person_link(person, \"a\").get_attribute('href')\r\n\r\n user['reaction']=replace(reaction_name.text)\r\n user['user']=person.text\r\n user['user_link']=person_link.split(\"fref\")[0][:-1]\r\n reaction_json.append(user)\r\n j += 1\r\n\r\n data['reactions']=reaction_json\r\n\r\n # comments\r\n #all_comments = get_a_comment_divs(comments)\r\n all_comments = comments\r\n number_of_comments = str(len(all_comments))\r\n\r\n\r\n for one_comment in all_comments:\r\n\r\n commentator = get_a_person_who_commented(one_comment)\r\n commentator_link = get_person_link(one_comment, \"a\").get_attribute('href')\r\n content = get_a_content_of_the_comment(one_comment)\r\n\r\n comment_reactions_json=[]\r\n com={}\r\n com['user']=commentator\r\n com['user_link'] =commentator_link.split(\"fref\")[0][:-1]\r\n com['content'] =content\r\n\r\n #comments_json.append(com)\r\n react_div = get_reactions_on_the_comment(one_comment)\r\n react_link = get_comments_reactions_link(react_div, \"a\")\r\n if (react_link != \"\"):\r\n react_link = react_link.get_attribute('href')\r\n\r\n # reactions on a comment\r\n driverTemp.get(react_link)\r\n # people who reacted\r\n types_of_react = get_types_of_reactions()\r\n b_d = get_divs_with_reactions()\r\n\r\n l = 0\r\n for react_name in types_of_react:\r\n\r\n per = get_persons_who_reacted(b_d[l])\r\n num_of_react = str(len(per))\r\n\r\n for p in per:\r\n\r\n u={}\r\n p_link = get_person_link(p, \"a\").get_attribute('href')\r\n\r\n u['reaction']=replace(react_name.text)\r\n u['user'] =p.text\r\n u['user_link'] =p_link.split(\"fref\")[0][:-1]\r\n comment_reactions_json.append(u)\r\n\r\n l += 1\r\n com['reactions']=comment_reactions_json\r\n else:\r\n com['reactions']=[]\r\n comments_json.append(com)\r\n\r\n\r\n data['comments']=comments_json\r\n #fjson_name = filename + \".json\"\r\n\r\n\r\n except:\r\n print(\"Exception (extract_and_write_posts)\", \"Status =\", sys.exc_info()[0])\r\n\r\n return data\r\n\r\n# -------------------------------------------------------------\r\n# -------------------------------------------------------------\r\n\r\ndef save_to_file(post_link, post_content, reactions, comments):\r\n try:\r\n # dealing with Posts\r\n data = extract_and_write_posts(post_link, post_content, reactions, comments)\r\n tmp = post_link.replace(':','').replace('?','').split('/')\r\n\r\n #DESTINATION\r\n #json_name = \"C:\\\\Users\\\\Win 10\\\\Desktop\\\\fake_news_project\\\\Data\\\\real\\\\\" + tmp[3] + tmp[5] + \".json\"\r\n json_name = \"C:\\\\Users\\\\Win 10\\\\Desktop\\\\fake_news_project\\\\Data\\\\real\\\\\" + data[\"page_name\"]+ data[\"post_utime\"] + \".json\"\r\n\r\n with open(json_name, 'w', encoding='utf8') as outfile:\r\n json.dump(data, outfile, indent=4, sort_keys=True, ensure_ascii=False)\r\n print (\"saved data to:\", json_name)\r\n return\r\n except:\r\n print(\"Exception (save_to_file)\", sys.exc_info()[0])\r\n\r\n return\r\n\r\n\r\ndef create_original_link(url):\r\n if url.find(\".php\") != -1:\r\n original_link = \"https://en-gb.facebook.com/\" + ((url.split(\"=\"))[1])\r\n\r\n if original_link.find(\"&\") != -1:\r\n original_link = original_link.split(\"&\")[0]\r\n\r\n elif url.find(\"fnr_t\") != -1:\r\n original_link = \"https://en-gb.facebook.com/\" + ((url.split(\"/\"))[-1].split(\"?\")[0])\r\n elif url.find(\"_tab\") != -1:\r\n original_link = \"https://en-gb.facebook.com/\" + (url.split(\"?\")[0]).split(\"/\")[-1]\r\n else:\r\n original_link = url\r\n\r\n 
return original_link\r\n\r\n\r\ndef scrap_data():\r\n \"\"\"Given some parameters, this function can scrap friends/photos/videos/about/posts(statuses) of a profile\"\"\"\r\n print(\"Posts:\")\r\n\r\n #SOURCE\r\n file_posts = codecs.open(os.path.join(os.path.dirname(__file__), \"real.txt\"), \"r\", \"utf-8\")\r\n\r\n lines=file_posts.readlines()\r\n file_posts.close()\r\n\r\n for adr in lines:\r\n page = adr[:-2]\r\n\r\n try:\r\n driver.get(page)\r\n\r\n comments=get_comments(driver)\r\n reactions=get_reactions(driver)\r\n post_data = get_post(driver)\r\n except:\r\n print(\"Exception (scrap_data)\", sys.exc_info()[0])\r\n\r\n save_to_file(page, post_data, reactions, comments)\r\n\r\ndef login(email, password):\r\n \"\"\" Logging into our own profile \"\"\"\r\n\r\n try:\r\n global driver\r\n global driverTemp\r\n\r\n options = Options()\r\n\r\n # Code to disable notifications pop up of Chrome Browser\r\n options.add_argument(\"--disable-notifications\")\r\n options.add_argument(\"--disable-infobars\")\r\n options.add_argument(\"--mute-audio\")\r\n # options.add_argument(\"headless\")\r\n\r\n try:\r\n platform_ = platform.system().lower()\r\n if platform_ in ['linux', 'darwin']:\r\n driver = webdriver.Chrome(executable_path=\"./chromedriver\", options=options)\r\n driverTemp = webdriver.Chrome(executable_path=\"./chromedriver\", options=options)\r\n else:\r\n driver = webdriver.Chrome(executable_path=\"./chromedriver.exe\", options=options)\r\n driverTemp = webdriver.Chrome(executable_path=\"./chromedriver.exe\", options=options)\r\n except:\r\n print(\"Kindly replace the Chrome Web Driver with the latest one from \"\r\n \"http://chromedriver.chromium.org/downloads\"\r\n \"\\nYour OS: {}\".format(platform_)\r\n )\r\n exit()\r\n\r\n driver.get(\"https://en-gb.facebook.com\")\r\n driver.maximize_window()\r\n # filling the form\r\n driver.find_element_by_name('email').send_keys(email)\r\n driver.find_element_by_name('pass').send_keys(password)\r\n\r\n # clicking on login button\r\n driver.find_element_by_id('loginbutton').click()\r\n\r\n #the same for driverTemp\r\n driverTemp.get(\"https://en-gb.facebook.com\")\r\n driverTemp.find_element_by_name('email').send_keys(email)\r\n driverTemp.find_element_by_name('pass').send_keys(password)\r\n driverTemp.find_element_by_id('loginbutton').click()\r\n\r\n except Exception as e:\r\n print(\"There's some error in log in.\")\r\n print(sys.exc_info()[0])\r\n exit()\r\n\r\n\r\n# -----------------------------------------------------------------------------\r\n# -----------------------------------------------------------------------------\r\n\r\ndef main():\r\n # Getting email and password from user to login into his/her profile\r\n email = \"uros.ng@gmail.com\"\r\n # password = getpass.getpass()\r\n password = \"\"\r\n\r\n print(\"\\nStarting Scraping...\")\r\n\r\n login(email, password)\r\n scrap_data()\r\n driver.close()\r\n driverTemp.close()\r\n\r\n# -------------------------------------------------------------\r\n# -------------------------------------------------------------\r\n# -------------------------------------------------------------\r\n\r\nif __name__ == '__main__':\r\n # get things rolling\r\n main()\r\n" }, { "alpha_fraction": 0.5587635636329651, "alphanum_fraction": 0.5723505616188049, "avg_line_length": 44, "blob_id": "b340ca5ebd4a6d828da51211281e48154ec83648", "content_id": "ac5f56fbcd952767596a859ce9fc382679ced350", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2944, 
"license_type": "no_license", "max_line_length": 135, "num_lines": 64, "path": "/scraper/htm_to_json.py", "repo_name": "uros94/fake_news", "src_encoding": "UTF-8", "text": "from html.parser import HTMLParser\r\nfrom typing import Any\r\nimport json\r\nimport scraper_comments\r\nimport scraper_reactions\r\nimport os\r\nimport urllib.parse as urllib\r\n\r\n#post + comments\r\nreal = 'C:\\\\Users\\\\Win 10\\\\Desktop\\\\real\\\\'\r\nfake = 'C:\\\\Users\\\\Win 10\\\\Desktop\\\\fake\\\\'\r\nreal_data = 'C:\\\\Users\\\\Win 10\\\\Desktop\\\\real_json\\\\'\r\nfake_data = 'C:\\\\Users\\\\Win 10\\\\Desktop\\\\fake_json\\\\'\r\n\r\n#scrape post + comments\r\ndef get_post_data():\r\n for filename in os.listdir(real):\r\n scraper_comments.scrape_post(real+filename, real_data+filename)\r\n for filename in os.listdir(fake):\r\n scraper_comments.scrape_post(fake+filename, fake_data+filename)\r\n\r\n#reactions\r\nreactions_html = 'C:\\\\Users\\\\Win 10\\\\Desktop\\\\reactions_html\\\\'\r\nreactions_data = 'C:\\\\Users\\\\Win 10\\\\Desktop\\\\reactions_data\\\\'\r\n\r\n#scrape reactions\r\ndef get_reactions_data():\r\n for filename in os.listdir(reactions_html):\r\n scraper_reactions.scrape_reactions(reactions_html+filename, reactions_data+filename)\r\n\r\n#replace 'reaction_link' with actual list of reactions\r\ndef add_reactions_to_post_data():\r\n for post_file in os.listdir(real_data):\r\n with open(real_data+post_file, encoding='utf-8') as f:\r\n post = json.load(f)\r\n if post[\"reactions_link\"] != \"\":\r\n for reactions_file in os.listdir(reactions_data):\r\n with open(reactions_data+reactions_file, encoding='utf-8') as f1:\r\n reactions = json.load(f1)\r\n a = post[\"reactions_link\"][-35:]\r\n b = reactions[\"reactions_link\"][-35:]\r\n if post[\"reactions_link\"][-35:] == reactions[\"reactions_link\"][-35:]:\r\n with open(real_data+post_file, 'a', encoding='utf8') as outfile:\r\n json.dump({\"reactions\": reactions[\"reactions\"]}, outfile, indent=4, sort_keys=True, ensure_ascii=False)\r\n print(\"OKE\", a, b)\r\n\r\n for post_file in os.listdir(fake_data):\r\n with open(fake_data+post_file, encoding='utf-8') as f:\r\n post = json.load(f)\r\n if post[\"reactions_link\"] != \"\":\r\n for reactions_file in os.listdir(reactions_data):\r\n with open(reactions_data+reactions_file, encoding='utf-8') as f1:\r\n reactions = json.load(f1)\r\n a = post[\"reactions_link\"][-35:]\r\n b = reactions[\"reactions_link\"][-35:]\r\n if post[\"reactions_link\"][-35:] == reactions[\"reactions_link\"][-35:]:\r\n with open(fake_data+post_file, 'a', encoding='utf8') as outfile:\r\n json.dump({\"reactions\": reactions[\"reactions\"]}, outfile, indent=4, sort_keys=True, ensure_ascii=False)\r\n print(\"OKE\", a, b)\r\n\r\n#START\r\nget_post_data()\r\nget_reactions_data()\r\nadd_reactions_to_post_data()\r\n" }, { "alpha_fraction": 0.513830840587616, "alphanum_fraction": 0.5227860808372498, "avg_line_length": 36.961238861083984, "blob_id": "c521e9a859673e8f6ff8747636a86668e3c1c473", "content_id": "88283641346774d4f143a20ced43e7ba7b71e108", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 10050, "license_type": "no_license", "max_line_length": 99, "num_lines": 258, "path": "/neural_network/nn_keras.py", "repo_name": "uros94/fake_news", "src_encoding": "UTF-8", "text": "from html.parser import HTMLParser\r\nfrom typing import Any\r\nimport json\r\nimport os\r\nimport requests\r\nimport numpy as np\r\nfrom scipy.sparse import dok_matrix, csr_matrix\r\nimport 
csv\r\nimport random\r\nfrom random import randrange\r\nfrom keras.models import Sequential\r\nfrom keras.layers import Dense\r\n\r\n#addresses of datasets - posts + comments\r\nreal_data = 'C:\\\\Users\\\\Win 10\\\\Desktop\\\\dataset\\\\real_json\\\\'\r\nfake_data = 'C:\\\\Users\\\\Win 10\\\\Desktop\\\\dataset\\\\fake_json\\\\'\r\ndatasets = []\r\nfor dir in [real_data, fake_data]:\r\n for post_file in os.listdir(dir):\r\n datasets.append(dir+post_file)\r\n\r\n#create vectors of posts, users, t_vector\r\ndef create_vectors(datasets=datasets, mode=0):\r\n \"\"\" mode is used to split dataset in different ways (used for testing accuraccy)\r\n 0 - normal (all posts used)\r\n 1 - one out (all posts except one random are used, and the skiped one is returned as posts_out)\r\n 2 - half out (half of the posts are used, and the skiped ones are returned as posts_out)\"\"\"\r\n\r\n users = []\r\n posts = []\r\n t_vector = [] # vector of truthfulness for posts\r\n all = []\r\n\r\n posts_out = []\r\n size = len(datasets)\r\n if mode == 1:\r\n posts_out.append(randrange(size))\r\n elif mode == 2:\r\n #MODIFIKOVATI TAKO DA SE IYBACUJE FAKE/2 I REAL/2 A NE (REAL+FAKE)/2\r\n posts_out = random.sample(range(size), int(size/2))\r\n posts_out.sort()\r\n\r\n for post_file, cnt in zip(datasets, range(size)):\r\n with open(post_file, encoding='utf-8') as f:\r\n post = json.load(f)\r\n post_id = post[\"page\"]+post[\"post_udate\"] #ZA SADA TO JE PAGE_NAME + UDATE\r\n\r\n if posts_out and posts_out[0]==cnt:\r\n posts_out.remove(cnt)\r\n posts_out.append(post_file)\r\n continue\r\n\r\n if post_id!= '' and not post_id in posts:\r\n posts.append(post_id)\r\n t_vector.append(post_file[0:len(real_data)] == real_data)\r\n\r\n if \"reactions\" in post:\r\n for reaction in post[\"reactions\"]:\r\n user = reaction[\"user_link\"]\r\n all.append(user)\r\n if not user in users:\r\n users.append(user)\r\n if \"comments\" in post:\r\n for comment in post[\"comments\"]:\r\n user = comment[\"user_link\"]\r\n all.append(user)\r\n if not user in users:\r\n users.append(user)\r\n ### VISAK\r\n print(len(users),\"USERS\")\r\n print(len(all),\"ALL\")\r\n print(len(posts),\"POSTS\")\r\n #for i in range(len(posts)):\r\n # print(t_vector[i], posts[i])\r\n\r\n return posts, users, t_vector, posts_out\r\n\r\n\r\ndef create_cut_vectors(datasets, min_post_like=10, min_user_like=30, print_results=False):\r\n posts = []\r\n t_vector = []\r\n users = []\r\n all = [] # temporary\r\n \"\"\"returns the dataset filtered with these parameters:\r\n min_post_like: post with at least n likes\r\n min_user_like: users that have given at least n likes\r\n print_results: if True, prints the filtering effect\r\n output: sparse like_matrix and page/hoax label columns\r\n \"\"\"\r\n for dir in datasets: # going through data directories\r\n for post_file in os.listdir(dir):\r\n with open(dir + post_file, encoding='utf-8') as f:\r\n post = json.load(f)\r\n\r\n # posts filtering\r\n if ((len(post[\"comments\"]) + len(post[\"comments\"])) >= min_post_like):\r\n post_id = post[\"page\"] + post[\"post_udate\"] # ZA SADA TO JE PAGE_NAME + UDATE\r\n if post_id != '' and not post_id in posts:\r\n posts.append(post_id)\r\n t_vector.append(dir == real_data)\r\n\r\n if \"reactions\" in post:\r\n for reaction in post[\"reactions\"]:\r\n user = reaction[\"user_link\"]\r\n all.append(user)\r\n if not user in users:\r\n users.append(user)\r\n if \"comments\" in post:\r\n for comment in post[\"comments\"]:\r\n user = comment[\"user_link\"]\r\n all.append(user)\r\n if not 
user in users:\r\n users.append(user)\r\n ### VISAK\r\n print(len(users))\r\n print(len(all))\r\n # users filtering\r\n for u in users:\r\n if (all.count(u) < min_user_like):\r\n users.remove(u)\r\n print(len(users))\r\n\r\n return posts, users, t_vector\r\n\r\n#get user id based on its user_link\r\ndef user2uid(users, user_link):\r\n try:\r\n return int(users.index(user_link))\r\n except:\r\n return False\r\n\r\n#get post id based on its data\r\ndef post2pid(posts, post):\r\n try:\r\n return int(posts.index(post))\r\n except:\r\n return False\r\n\r\ndef classify_reaction(reaction):\r\n if reaction == \"like\":\r\n return 0\r\n elif reaction == \"love\":\r\n return 1\r\n elif reaction == \"haha\":\r\n return 2\r\n elif reaction == \"wow\":\r\n return 3\r\n elif reaction == \"angry\":\r\n return 4\r\n elif reaction == \"sad\":\r\n return 5\r\n\r\n#create csv table representing data for linear regression\r\ndef create_matrix(datasets, users, posts, t_vector, create_csv=False):\r\n \"\"\"we have to increase the dimension of users because we have 6 possible reactions + comment\r\n [ uid ]\r\n [like][love][haha][wow ][angr][sad ][comm] \"\"\"\r\n like_matrix = np.zeros((len(users)*7, len(posts)), dtype=bool)\r\n\r\n for post_file in datasets:\r\n with open(post_file, encoding='utf-8') as f:\r\n post = json.load(f)\r\n post_id = post[\"page\"] + post[\"post_udate\"] # ZA SADA TO JE PAGE_NAME + UDATE\r\n if post2pid(posts, post_id): #cant identify?? - skip\r\n j = post2pid(posts, post_id)\r\n if \"reactions\" in post:\r\n for reaction in post[\"reactions\"]:\r\n user = reaction[\"user_link\"]\r\n if user2uid(users, user): # cant identify?? - skip\r\n i = user2uid(users, user) * 7 + classify_reaction(reaction[\"reaction\"])\r\n like_matrix[i][j] = True\r\n if \"comments\" in post:\r\n for comment in post[\"comments\"]:\r\n user = comment[\"user_link\"]\r\n if user2uid(users, user): # cant identify?? - skip\r\n i = user2uid(users, user) * 7 + 6\r\n like_matrix[i][j] = True\r\n if create_csv:\r\n with open('C:\\\\Users\\\\Win 10\\\\Desktop\\\\dataset\\\\logreg.csv', 'w') as csvfile:\r\n wr = csv.writer(csvfile, quoting=csv.QUOTE_ALL)\r\n wr.writerows(like_matrix)\r\n #for row in range(len()):\r\n # wr.writerow(like_matrix.getrow(row).toarray())\r\n csvfile.close()\r\n return like_matrix\r\n\r\ndef create_model_1(X, Y):\r\n model = Sequential()\r\n model.add(Dense(12, input_dim=X.shape[1], activation='relu'))\r\n model.add(Dense(8, activation='relu'))\r\n model.add(Dense(1, activation='sigmoid'))\r\n\r\n # Compile model\r\n model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])\r\n # Fit the model\r\n model.fit(X, Y, epochs=150, batch_size=10)\r\n # Evaluate model\r\n scores = model.evaluate(X, Y)\r\n print(\"\\n%s: %.2f%%\" % (model.metrics_names[1], scores[1] * 100))\r\n return model\r\n\r\ndef predict(X, model):\r\n #create array\r\n predictions = model.predict(X)\r\n rounded = [round(x[0]) for x in predictions]\r\n #print(rounded)\r\n return rounded\r\n\r\ndef create_kword(post1, users, datasets=datasets):\r\n X = np.zeros((len(users)*7, 1), dtype=bool)\r\n for post_file in datasets:\r\n with open(post_file, encoding='utf-8') as f:\r\n post = json.load(f)\r\n post_id = post[\"page\"] + post[\"post_udate\"] # ZA SADA TO JE PAGE_NAME + UDATE\r\n if post_id == post1:\r\n if \"reactions\" in post:\r\n for reaction in post[\"reactions\"]:\r\n user = reaction[\"user_link\"]\r\n if user2uid(users, user): # cant identify?? 
- skip\r\n i = user2uid(users, user) * 7 + classify_reaction(reaction[\"reaction\"])\r\n X[i] = True\r\n if \"comments\" in post:\r\n for comment in post[\"comments\"]:\r\n user = comment[\"user_link\"]\r\n if user2uid(users, user): # cant identify?? - skip\r\n i = user2uid(users, user) * 7 + 6\r\n X[i] = True\r\n return X\r\n\r\ndef json2kword(post1, users, datasets=datasets):\r\n X = np.zeros((len(users)*7, 1), dtype=bool)\r\n with open(post1, encoding='utf-8') as f:\r\n post = json.load(f)\r\n if \"reactions\" in post:\r\n for reaction in post[\"reactions\"]:\r\n user = reaction[\"user_link\"]\r\n if user2uid(users, user): # cant identify?? - skip\r\n i = user2uid(users, user) * 7 + classify_reaction(reaction[\"reaction\"])\r\n X[i] = True\r\n if \"comments\" in post:\r\n for comment in post[\"comments\"]:\r\n user = comment[\"user_link\"]\r\n if user2uid(users, user): # cant identify?? - skip\r\n i = user2uid(users, user) * 7 + 6\r\n X[i] = True\r\n return X\r\n\r\n\r\n#START\r\nposts, users, t_vector, posts_out = create_vectors(datasets, 2)\r\nlike_matrix = create_matrix(datasets, users, posts, t_vector)\r\n\r\nmodel = create_model_1(like_matrix.transpose(), t_vector)\r\n\r\nfor p in posts_out:\r\n X = json2kword(p, users)\r\n Y = predict(X.transpose(), model)\r\n print (p, Y)" } ]
12
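The like-matrix built in nn_keras.py above gives each user seven rows (six reaction types plus a comment slot) and looks rows up with `user2uid`, whose bare truthiness check (`if user2uid(...)`) silently drops user 0 and, via `post2pid`, post 0. Below is a minimal sketch of the same indexing scheme with a `None` sentinel instead; the helper name and the two-user example are illustrative, not part of the repository:

```python
import numpy as np

# reaction order matches classify_reaction() above; slot 6 is the comment slot
REACTIONS = ["like", "love", "haha", "wow", "angry", "sad"]

def user_slot(users, user_link, reaction=None):
    """Row index for a user/interaction pair in the (len(users)*7, n_posts) matrix.

    Returns None on a miss, so a valid index of 0 is never mistaken for False.
    """
    try:
        uid = users.index(user_link)
    except ValueError:
        return None
    offset = 6 if reaction is None else REACTIONS.index(reaction)
    return uid * 7 + offset

users = ["/user.a", "/user.b"]
like_matrix = np.zeros((len(users) * 7, 1), dtype=bool)
row = user_slot(users, "/user.a", reaction="love")
if row is not None:  # explicit sentinel check instead of `if row:`
    like_matrix[row, 0] = True
```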
Teres-augustine/Python-Tasks
https://github.com/Teres-augustine/Python-Tasks
2f13d530e58777ff724cdb047dfe55351ee51a9c
80491b74f941fef3aff738fb7814fa2a551717da
763849ddf1b8326fc8edb736a88320eaade60142
refs/heads/master
2023-02-22T10:22:15.196835
2023-02-02T10:31:52
2023-02-02T10:31:52
230,195,164
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5121951103210449, "alphanum_fraction": 0.5691056847572327, "avg_line_length": 22.600000381469727, "blob_id": "66673e56115e03a011d97d648fae5202f6eba7a7", "content_id": "aad85b68eeb0bb4f74ff7791cfdb87a17212396b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 246, "license_type": "no_license", "max_line_length": 46, "num_lines": 10, "path": "/task9.py", "repo_name": "Teres-augustine/Python-Tasks", "src_encoding": "UTF-8", "text": "number=(1,2,3,4,5,6,7,8,9);\r\ncount_odd=0;\r\ncount_even=0;\r\nfor x in number:\r\n if not x%2:\r\n count_even+=1;\r\n else :\r\n count_odd+=1;\r\nprint(\"Number of even numbers : \",count_even);\r\nprint(\"Number of odd numbers : \",count_odd);\r\n" }, { "alpha_fraction": 0.4651162922382355, "alphanum_fraction": 0.5193798542022705, "avg_line_length": 18.83333396911621, "blob_id": "201e7c4eae9b2ce3cffc6fb4804fa6ac8ef269e8", "content_id": "4300214e35354582d79780df9b4a54051bad95d6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 129, "license_type": "no_license", "max_line_length": 31, "num_lines": 6, "path": "/task8.py", "repo_name": "Teres-augustine/Python-Tasks", "src_encoding": "UTF-8", "text": "def multiply(numbers):\r\n prd=1;\r\n for n in numbers:\r\n prd*=n;\r\n return prd\r\nprint(multiply((1,2,3,4,5,6)));\r\n\r\n\r\n" }, { "alpha_fraction": 0.5933734774589539, "alphanum_fraction": 0.5933734774589539, "avg_line_length": 28.18181800842285, "blob_id": "03cbd56abdb391e965379c6d2a37f61e6016389c", "content_id": "a450e70d5d9dff35063f2e84ce5e9626c6e57b99", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 332, "license_type": "no_license", "max_line_length": 49, "num_lines": 11, "path": "/task10.py", "repo_name": "Teres-augustine/Python-Tasks", "src_encoding": "UTF-8", "text": "class rectangle():\r\n def __init__(self, l,w):\r\n self.length=l;\r\n self.width=w;\r\n def area(self):\r\n return self.length*self.width;\r\na=int(input(\"Enter length of the rectangle : \"));\r\nb=int(input(\"Enter width of the rectangle : \"));\r\nobj=rectangle(a,b)\r\nprint(\"Area of rectangle : \",obj.area());\r\nprint();\r\n" }, { "alpha_fraction": 0.6404494643211365, "alphanum_fraction": 0.6404494643211365, "avg_line_length": 27.66666603088379, "blob_id": "c17065d0b14f27e400a065752be1f0935c6a58fd", "content_id": "b24944f2e32c01c3290de9d4d692a51de9137682", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 89, "license_type": "no_license", "max_line_length": 37, "num_lines": 3, "path": "/task4.py", "repo_name": "Teres-augustine/Python-Tasks", "src_encoding": "UTF-8", "text": "user_in=input(\"Enter any string : \");\r\nprint(user_in.upper());\r\nprint(user_in.lower());\r\n" }, { "alpha_fraction": 0.4887780547142029, "alphanum_fraction": 0.4887780547142029, "avg_line_length": 19.105262756347656, "blob_id": "bbce7e3bb3c25a45cdc3e3293b49b6ec08763acf", "content_id": "5de0c6a970763f06b7eafb5eaec186f7c7aae1a4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 401, "license_type": "no_license", "max_line_length": 45, "num_lines": 19, "path": "/task7.py", "repo_name": "Teres-augustine/Python-Tasks", "src_encoding": "UTF-8", "text": "a=float(input(\"Input the first number : \"));\r\nb=float(input(\"Input the second number : \"));\r\nc=float(input(\"Input the third number : \"));\r\nif 
a>b:\r\n    if a<c:\r\n        median = a;\r\n    elif b>c:\r\n        median =b;\r\n    else :\r\n        median =c;\r\nelse :\r\n    if a>c:\r\n        median =a;\r\n    elif b<c:\r\n        median = b;\r\n    else:\r\n        median =c;\r\nprint(\"The median is : \");\r\nprint(median);\r\n" }, { "alpha_fraction": 0.619140625, "alphanum_fraction": 0.634765625, "avg_line_length": 49.20000076293945, "blob_id": "b9ce275f05374caea993b9c880e70dfe5e901b91", "content_id": "a86344a38c4af373c3a2e867697c841f11699da7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 512, "license_type": "no_license", "max_line_length": 124, "num_lines": 10, "path": "/task5.py", "repo_name": "Teres-augustine/Python-Tasks", "src_encoding": "UTF-8", "text": "print(\"List of months : January, February, March, April, May, June, July, August, September, October, November, December\");\r\nmonth_name = input(\"Input the name of the month : \");\r\nif month_name=='February' :\r\n    print(\"No. of days = 28/29 days \");\r\nelif month_name in('April','June','September','November') :\r\n    print(\"No. of days = 30 days \");\r\nelif month_name in('January','March','May','July','August','October','December') :\r\n    print(\"No. of days = 31 days \");\r\nelse :\r\n    print(\"Wrong Month name\");\r\n" } ]
6
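The nested if/elif chain in task7.py above picks the median of three inputs branch by branch. An equivalent one-line check, handy for verifying each branch of that chain (a sketch, not part of the repository):

```python
def median_of_three(a, b, c):
    # sorting three values and taking the middle one matches the
    # comparison tree in task7.py for every ordering of a, b, c
    return sorted((a, b, c))[1]

assert median_of_three(3.0, 1.0, 2.0) == 2.0
assert median_of_three(1.0, 3.0, 2.0) == 2.0
assert median_of_three(2.0, 2.0, 5.0) == 2.0
```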
futurecolors/django-email-login
https://github.com/futurecolors/django-email-login
4db9e5a9a567a4384bbe8441ceb99c28262af150
8251abbc47c3b6df15df0eb78497021a3420be13
291f4700e9a6c6585bb85b37219d3ae7fb6c0af4
refs/heads/master
2021-01-10T20:46:54.303820
2013-04-23T12:19:42
2013-04-23T12:19:42
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6152597665786743, "alphanum_fraction": 0.6185064911842346, "avg_line_length": 22.69230842590332, "blob_id": "c7ecd0704c1cf6becf4446292173578fac4cd394", "content_id": "47abd3743917bf8c9710477303e73fe485417c3b", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 616, "license_type": "permissive", "max_line_length": 72, "num_lines": 26, "path": "/test_settings.py", "repo_name": "futurecolors/django-email-login", "src_encoding": "UTF-8", "text": "# coding: utf-8\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.sqlite3',\n 'NAME': ':memory:',\n },\n}\n\nINSTALLED_APPS = (\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.sessions',\n 'django.contrib.contenttypes',\n 'django.contrib.staticfiles',\n 'email_login',\n 'test_app'\n)\n\nSECRET_KEY = '_'\nROOT_URLCONF = 'test_app.urls'\n\nSTATIC_ROOT = '/var/www/localhost/htdocs/mysite/static/'\nSTATIC_URL = '/static/'\n\nAUTHENTICATION_BACKENDS = ('email_login.auth_backend.EmailBackend',\n 'django.contrib.auth.backends.ModelBackend',)\n" }, { "alpha_fraction": 0.6443014740943909, "alphanum_fraction": 0.6452205777168274, "avg_line_length": 46.30434799194336, "blob_id": "fd8482451f6fc6974e1ebd4b758e39b705b00ec1", "content_id": "ea0318d53e9c7ae626761ad6d66fa1cfbeb99e8b", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1088, "license_type": "permissive", "max_line_length": 111, "num_lines": 23, "path": "/email_login/tests.py", "repo_name": "futurecolors/django-email-login", "src_encoding": "UTF-8", "text": "# coding: utf-8\nfrom django.conf import settings\nfrom django.contrib.auth.models import User\nfrom django.core.urlresolvers import reverse\nfrom django.test import TestCase\n\n\nclass TestEmailAuthenticationFormClean(TestCase):\n\n def test_empty(self):\n response = self.client.post(reverse('auth_login'), data={})\n self.assertFormError(response, 'form', 'email', [u'This field is required.'])\n self.assertFormError(response, 'form', 'password', [u'This field is required.'])\n\n def test_wrong_email(self):\n response = self.client.post(reverse('auth_login'), data={'email': '1@1.ru', 'password': 'pass'})\n self.assertFormError(response, 'form', None, [u'Please enter a correct email address and password.'])\n\n def test_success(self):\n User.objects.create_user('john', 'lennon@thebeatles.com', 'johnpassword')\n response = self.client.post(reverse('auth_login'), data={'email': 'lennon@thebeatles.com',\n 'password': 'johnpassword'})\n self.assertRedirects(response, settings.LOGIN_REDIRECT_URL)\n" }, { "alpha_fraction": 0.7385159134864807, "alphanum_fraction": 0.7455830574035645, "avg_line_length": 27.299999237060547, "blob_id": "a6145358ac7a8c6e7b847db3e5b8d2cafd16b64a", "content_id": "262cec1492a669cc48b8fdebbd30e5ef1f4ac957", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 566, "license_type": "permissive", "max_line_length": 89, "num_lines": 20, "path": "/test_app/urls.py", "repo_name": "futurecolors/django-email-login", "src_encoding": "UTF-8", "text": "from django.conf.urls.defaults import *\n\nfrom django.contrib import admin\nfrom django.views.generic import TemplateView\n\nadmin.autodiscover()\n\n# Insert email_login overrides\nfrom email_login import adminsite\nsite = adminsite.EmailLoginAdminSite()\n\n# duplicate the normal admin's registry until ticket #8500 get's fixed\nsite._registry = 
admin.site._registry\n\n\nurlpatterns = patterns('',\n    (r'^admin/', include(site.urls)),\n    (r'^accounts/', include('email_login.urls')),\n    (r'^accounts/profile/', TemplateView.as_view(template_name='email_login/base.html')),\n)\n" }, { "alpha_fraction": 0.6430020332336426, "alphanum_fraction": 0.6430020332336426, "avg_line_length": 40.08333206176758, "blob_id": "5cea6f89e6edcdd0b15cd96d1ce48c153dae49f5", "content_id": "7fbb19492db3ed44b1df173472b92197ee782319", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 493, "license_type": "permissive", "max_line_length": 96, "num_lines": 12, "path": "/email_login/urls.py", "repo_name": "futurecolors/django-email-login", "src_encoding": "UTF-8", "text": "# We need to override login view, the rest is ok.\nfrom django.conf.urls.defaults import *\nfrom django.contrib.auth.urls import urlpatterns as auth_urls\nfrom forms import EmailAuthenticationForm\n\nurlpatterns = patterns('',\n    url(r'^login/$', 'django.contrib.auth.views.login',\n        {'template_name': 'email_login/login.html',\n         'authentication_form': EmailAuthenticationForm}, name='auth_login'),\n)\n\nurlpatterns += auth_urls\n" }, { "alpha_fraction": 0.6616235375404358, "alphanum_fraction": 0.6636601686477661, "avg_line_length": 34.07143020629883, "blob_id": "edd9360e03752f91388a39d2d1a7324849f508cf", "content_id": "d3727a2e6265fc32212569e4491a958df3dab90e", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 3437, "license_type": "permissive", "max_line_length": 170, "num_lines": 98, "path": "/README.rst", "repo_name": "futurecolors/django-email-login", "src_encoding": "UTF-8", "text": "django-email-login\n==================\n\nLog in via email for django 1.4. Forked from `django-email-login`_\n\n.. image:: https://travis-ci.org/futurecolors/django-email-login.png?branch=master\n   :target: https://travis-ci.org/futurecolors/django-email-login\n\n.. image:: https://coveralls.io/repos/futurecolors/django-email-login/badge.png?branch=master\n   :target: https://coveralls.io/r/futurecolors/django-email-login/\n\n.. _django-email-login: https://bitbucket.org/tino/django-email-login\n\nGoals\n=====\n\nThe goal of this app is to easily transform django's auth system to allow\nlogin with an email address, instead of a username. This should work inside the\nadmin as well as outside. Therefore, email addresses need to be unique.\n\nThe username of the User will be the hash of its email address. As it means\nnothing, it will be hidden in the admin changelist view.\n\nInstall\n=======\n\nInstall with ``pip install django-email-login`` or check out from Bitbucket ``hg clone https://bitbucket.org/tino/django-email-login`` and run ``python setup.py install``.\n\nUsage\n=====\n\n1. Append ``'email_login'`` to your ``INSTALLED_APPS`` setting\n#. Insert ``'email_login.auth_backend.EmailBackend'`` as the first entry in the\n   ``AUTHENTICATION_BACKENDS`` settings tuple.\n#. Add the following in your root ``urls.py`` *after* ``admin.autodiscover()``::\n\n    # Insert email_login overrides\n    from email_login import useradmin, adminsite\n    site = adminsite.EmailLoginAdminSite()\n    # duplicate the normal admin's registry until ticket #8500 get's fixed\n    site._registry = admin.site._registry\n    \n#. 
Instead of using::\n\n    # Uncomment the next line to enable the admin:\n    (r'^admin/', include(admin.site.urls)),\n\n   use::\n\n    # Uncomment the next line to enable the admin:\n    (r'^admin/', include(site.urls)),\n\n   to include the admin in your root ``urls.py``.\n   \n#. To use login outside of the admin, add::\n   \n    (r'^account/', include('email_login.urls')),\n   \n   to your ``urls.py``.\n\n.. note:: \n   Your admin account needs to have an email address, otherwise you won't be\n   able to sign in!\n   \n.. note::\n   The admin will display the username in the top right corner of the logged\n   in user if the user has no first name. If you want to override that,\n   override the ``admin/base.html`` template.\n   \nIn conjunction with django-user-creation\n========================================\n\nIf you want to use this app in conjunction with `django-user-creation`_, you\nhave to create your own ``ModelAdmin`` for ``User``. You may do so by adding a\n``useradmin.py`` file to your project with the following contents::\n\n    from django.contrib import admin\n    from django.contrib.auth.models import User\n    from user_creation.forms import EmailAccountCreationForm\n    from email_login.useradmin import EmailLoginAdmin\n\n\n    class MyUserAdmin(EmailLoginAdmin):\n        add_form = EmailAccountCreationForm\n        add_fieldsets = (\n            (None, {\n                'classes': ('wide',),\n                'fields': ('email', 'password1', 'password2', 'email_user')}\n            ),\n        )\n\n    admin.site.unregister(User)\n    admin.site.register(User, MyUserAdmin)\n\nand adding the line ``import useradmin`` to your ``urls.py`` **after** the\noverrides described above.\n\n.. _django-user-creation: http://bitbucket.org/tino/django-user-creation\n" } ]
5
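The README above inserts `email_login.auth_backend.EmailBackend` at the head of `AUTHENTICATION_BACKENDS`. A minimal sketch of what such an email backend typically looks like, assuming unique email addresses as the Goals section requires; this is illustrative and may differ from the package's actual implementation (the Django 1.4-era `authenticate` also lacks the `request` argument added in later versions):

```python
from django.contrib.auth.backends import ModelBackend
from django.contrib.auth.models import User

class EmailBackend(ModelBackend):
    """Authenticate against User.email instead of User.username."""

    def authenticate(self, email=None, password=None, **kwargs):
        try:
            user = User.objects.get(email__iexact=email)  # relies on unique emails
        except User.DoesNotExist:
            return None
        return user if user.check_password(password) else None
```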
mathismathis/mathismathis-automatically-facebook-login-and-sign-in
https://github.com/mathismathis/mathismathis-automatically-facebook-login-and-sign-in
96d4ab752ea404ef7ad2d132f7d6db51beb9426b
b6af6f16c76d7ced10cbe251eef3ecc1b241ca45
d99f9b71d1642afff19f43226bf37bf85a69a2fc
refs/heads/master
2020-12-02T05:11:17.281233
2019-12-30T11:09:58
2019-12-30T11:09:58
230,900,518
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.8703703880310059, "alphanum_fraction": 0.8703703880310059, "avg_line_length": 53, "blob_id": "a95c4781cc1ba90edbfbe052389869b899998e80", "content_id": "41a75202afe077045832e4d0571cc6d5c32da55e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 54, "license_type": "no_license", "max_line_length": 53, "num_lines": 1, "path": "/README.md", "repo_name": "mathismathis/mathismathis-automatically-facebook-login-and-sign-in", "src_encoding": "UTF-8", "text": "mathismathis-automatically-facebook-login-and-sign-in\n" }, { "alpha_fraction": 0.6961652040481567, "alphanum_fraction": 0.6961652040481567, "avg_line_length": 29.727272033691406, "blob_id": "d02cd9511c01f8901bccd6c739f8df3d04977f65", "content_id": "4ac1e040e312247f029bceee6d77612a3abf3361", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 339, "license_type": "no_license", "max_line_length": 75, "num_lines": 11, "path": "/automactically facebook log.py", "repo_name": "mathismathis/mathismathis-automatically-facebook-login-and-sign-in", "src_encoding": "UTF-8", "text": "\n#automactically facebook log\n\nfrom selenium import webdriver\n\na=webdriver.Firefox()\n\na.get(\"https://www.facebook.com/\")\na.find_element_by_xpath(\"//input[@id='email']\").send_keys(\"enter email id\")\na.find_element_by_xpath(\"//input[@id='pass']\").send_keys(\"enter password\")\na.find_element_by_xpath(\"//label[@id='loginbutton']\").click()\na.quit()\n" } ]
2
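The script above fires `find_element_by_xpath` calls immediately after `get()`, which can race against page load. Here is a variant of the same login flow using Selenium's explicit waits; the field ids are the ones the script targets, and since Facebook's markup changes over time the selectors should be treated as illustrative:

```python
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC

driver = webdriver.Firefox()
try:
    driver.get("https://www.facebook.com/")
    wait = WebDriverWait(driver, timeout=10)
    # block until the login form is actually present before typing
    wait.until(EC.presence_of_element_located((By.ID, "email"))).send_keys("enter email id")
    driver.find_element(By.ID, "pass").send_keys("enter password")
    driver.find_element(By.ID, "loginbutton").click()
finally:
    driver.quit()
```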
hornom/2018-robot
https://github.com/hornom/2018-robot
4b800dc42411eb317ac45e99a3949bd4a50bf731
26c2d5ad7f0803b0508259622964c6804b7c54e3
6619587ac00aa9cb75f30d084ce6ffb49843e798
refs/heads/master
2021-04-28T13:21:35.800649
2018-02-19T13:31:18
2018-02-19T13:31:18
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5337726473808289, "alphanum_fraction": 0.5428336262702942, "avg_line_length": 21.88679313659668, "blob_id": "e8bca56ac4655a840dbf69e860d2f29d16afa1fe", "content_id": "bdf4cd15a7951d4bf95dd9645edfa44315c93b73", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1214, "license_type": "no_license", "max_line_length": 43, "num_lines": 53, "path": "/components/elevator.py", "repo_name": "hornom/2018-robot", "src_encoding": "UTF-8", "text": "\nimport wpilib\n\nclass Elevator:\n\n elevator_motor = wpilib.Talon\n\n elevator_top = wpilib.DigitalInput\n elevator_mid = wpilib.DigitalInput\n elevator_bottom = wpilib.DigitalInput\n\n active = False # gotoMiddle var name\n activeTop = False # gotoTop var name\n\n speed = 0\n\n def up(self):\n self.speed = -1\n\n def down(self):\n self.speed = 1\n\n def gotoTop(self):\n self.activeTop = True\n\n def gotoMiddle(self):\n self.active = True\n\n def gotoBottom(self):\n if self.active:\n self.spepd = -1\n if self.elevator_top.get():\n self.speed = 0\n self.active = False\n\n def execute(self): # This is the start\n if self.active: # gotoMiddle action\n self.speed = -1\n if self.elevator_mid.get():\n self.speed = 0\n self.active = False\n\n self.elevator_motor.set(self.speed)\n self.speed = 0\n\n # gotoTop action\n if self.activeTop:\n self.speed = -1\n if self.elevator_top.get():\n self.speed = 0\n self.active = False\n\n self.elevator_motor.set(self.speed)\n self.speed = 0\n" }, { "alpha_fraction": 0.6145952343940735, "alphanum_fraction": 0.6259977221488953, "avg_line_length": 22.078947067260742, "blob_id": "095498c6840154adfc725e1fa883da9aca5fe94a", "content_id": "13a1513566dfe267920bfcbb75adef1f9555f9ab", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 877, "license_type": "no_license", "max_line_length": 53, "num_lines": 38, "path": "/robot.py", "repo_name": "hornom/2018-robot", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n\nimport magicbot\nimport wpilib\n\nimport components.elevator\n\n\nclass Robot(magicbot.MagicRobot):\n\n elevator = components.elevator.Elevator\n\n def createObjects(self):\n\n # Elevator motor/sensors\n self.elevator_motor = wpilib.Talon(1)\n self.elevator_top = wpilib.DigitalInput(1)\n self.elevator_mid = wpilib.DigitalInput(2)\n self.elevator_bottom = wpilib.DigitalInput(3)\n\n self.joy = wpilib.Joystick(0)\n\n def teleopInit(self):\n pass\n\n def teleopPeriodic(self):\n if self.joy.getRawButton(1):\n self.elevator.up()\n if self.joy.getRawButton(2):\n self.elevator.down()\n if self.joy.getRawButton(4):\n self.elevator.gotoTop()\n if self.joy.getRawButton(3):\n self.elevator.gotoMiddle()\n\n\nif __name__ == '__main__':\n wpilib.run(Robot)\n" } ]
2
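The Elevator component above is a polled state machine: each goto* method latches a flag, and MagicBot calls `execute()` once per control loop to drive the motor until the matching limit switch trips. Below is a plain-Python sketch of the same logic with injected sensors so it can be exercised off-robot; the class and parameter names are illustrative:

```python
class ElevatorLogic:
    """Target-seeking elevator logic: -1 drives up, +1 drives down, 0 holds,
    matching the speed convention of up()/down() in components/elevator.py."""

    def __init__(self, at_top, at_mid, at_bottom):
        # sensors are zero-argument callables that return True at the limit switch
        self.sensors = {"top": at_top, "mid": at_mid, "bottom": at_bottom}
        self.target = None

    def goto(self, target):
        self.target = target  # latched, like the active/activeTop flags

    def execute(self):
        """Return this control cycle's motor speed."""
        if self.target is None:
            return 0
        if self.sensors[self.target]():
            self.target = None  # arrived: stop and clear the request
            return 0
        return 1 if self.target == "bottom" else -1

# off-robot check: a request for the middle position stops at the middle switch
logic = ElevatorLogic(at_top=lambda: False, at_mid=lambda: True, at_bottom=lambda: False)
logic.goto("mid")
assert logic.execute() == 0
```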
rjonnal/octoblob
https://github.com/rjonnal/octoblob
fa15ad5fc0384b9a73175d5b064f10bca8b60766
680cd0cb3d8da47726d46c1285ff0ebd215cec6f
589fa0b489269a87b577874423dc1bc1a7662e47
refs/heads/main
2023-07-21T03:21:16.333278
2023-07-12T14:29:33
2023-07-12T14:29:33
327,063,294
4
1
null
2021-01-05T16:58:10
2021-09-22T00:49:09
2021-09-25T16:02:35
Jupyter Notebook
[ { "alpha_fraction": 0.604938268661499, "alphanum_fraction": 0.6296296119689941, "avg_line_length": 21.404254913330078, "blob_id": "d93bfd213e0981eb51f945ee9fa820e6ebcb129c", "content_id": "037e43d8f7dc4d8b45efb102256bdcfbadbce6d1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1053, "license_type": "no_license", "max_line_length": 73, "num_lines": 47, "path": "/test_flatten_volume.py", "repo_name": "rjonnal/octoblob", "src_encoding": "UTF-8", "text": "import numpy as np\nimport octoblob.functions as blobf\nimport octoblob.diagnostics_tools as blobd\nfrom matplotlib import pyplot as plt\nimport os,sys\n\nmake_data = False\nfolder_name = 'examples/test_2_bscans'\n\nd = blobd.Diagnostics(folder_name)\n\nif make_data:\n fake_data_folder = 'fake_bscans'\n os.makedirs(fake_data_folder,exist_ok=True)\n\n z = np.arange(100)\n\n a = []\n for k in range(10):\n a.append(np.sin(z*0.5*np.random.randn()))\n\n a = np.sum(a,axis=0)\n\n a[np.where(a>=0)] = 1\n a[np.where(a<0)] = 0\n\n b = np.array([a]*250).T\n\n\n motion = []\n for k in range(10):\n motion.append(np.sin(z*0.1*np.random.randn()))\n motion = np.sum(motion,axis=0)\n motion = np.round(motion).astype(int)\n\n for idx,m in enumerate(motion):\n print(m)\n temp = np.roll(b,m,axis=0)\n temp = temp + np.random.randn(temp.shape[0],temp.shape[1])\n np.save(os.path.join(fake_data_folder,'bscan_%05d.npy'%idx),temp)\n\n plt.figure()\n plt.plot(motion)\n\n\n\nblobf.flatten_volume(folder_name,diagnostics=d)\n" }, { "alpha_fraction": 0.7307692170143127, "alphanum_fraction": 0.7307692170143127, "avg_line_length": 19.22222137451172, "blob_id": "a82316f880f076f3958e3003a05caee61a0dd561", "content_id": "201867d41147d23f85d553799532db3cc35b97a6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 182, "license_type": "no_license", "max_line_length": 34, "num_lines": 9, "path": "/examples/optopol_interface/reset.sh", "repo_name": "rjonnal/octoblob", "src_encoding": "UTF-8", "text": "#! /usr/bin/bash\n\nrm -rvf *_bscans\nrm -rvf *_diagnostics\nrm -rvf *Amp.bin\nrm -rvf *Phase.bin\nrm -rvf processing_parameters.json\nrm -rvf layer_velocities_results\nrm -rvf octoblob.log\n" }, { "alpha_fraction": 0.7733137607574463, "alphanum_fraction": 0.7844575047492981, "avg_line_length": 91.16216278076172, "blob_id": "c1ca3b08387a1b81ac9cf1678ec1f625c8d5e314", "content_id": "fc8f83652dbf64a34904ea5d5b940ce21bab3f63", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 3410, "license_type": "no_license", "max_line_length": 786, "num_lines": 37, "path": "/examples/handling_bscan_artifacts/README.md", "repo_name": "rjonnal/octoblob", "src_encoding": "UTF-8", "text": "# B-scan processing for data with artifacts such as lens reflexes\n\nStandard B-scan processing uses an automated algorithm for cropping the retina out of B-scans. This algorithm computes the axial centroid of the B-scan *intensity*, and selects 160 pixels on either side of that center of mass as the relevant portion of the B-scan. This works fine for retinal B-scans, but for other kinds of data, especially if they contain artifacts such as lens or mirror reflexes, automatic cropping will center the image about those artifacts instead of the features of interest. This script employs a B-scan generating function that does not automatically crop B-scans. The function is called `spectra_to_bscan_nocrop`. 
Instead of cropping around the center of mass of the image, it just crops out half of the B-scan--that is, one of the complex conjugate pairs. \n\nIn addition, this example illustrates how to use the `x1` and `x2` parameters of the `DataSource` constructor and the `get_source` convenience function. When a dataset has significant artifacts not generated by the OCT sample (i.e. generated by lens reflections or stray light), and if these artifacts are similar in amplitude to the sample image, automated optimization of mapping and dispersion parameters will fail. They will result in parameters that maximize the brightness/sharpness of the artifact instead of the sample. Thus it is convenient to crop the B-scans laterally, and this can be done by passing parameters `x1` and `x2` to `get_source`, thus specifying the starting and ending columns of the B-scan to be used for optimization.\n\nThis version of `process.py` creates a single rough figure (seen below) in `figs/artifact_example.png` that allows the user to set values of `x1` and `x2` correctly. The user must know that the feature to be seen (in this case a mirror behind 40 dB of attenuation) is located toward the left side of the image, and observe that the artifact begins after the 200th column of the B-scan. This will allow the user to choose values of 0 and 200 for `x1` and `x2` respectively.\n\n![Rough spectra and B-scan visualizations to help set lateral cropping parameters x1 and x2.](./figs/artifact_example.png)\n\n## Folder contents\n\n* process.py: OCT/ORG processing script\n\n* reset.sh: a bash script for deleting all of the downloaded and processed data, mainly for cleaning up this folder before pushing changes to github\n\n## Download test data\n\nTo run this example you must download the test data from the links below:\n\n* artifacts.unp: the spectral data stored in raw binary 16 bit unsigned integer format. \n\n  > Download [artifacts.unp](https://www.dropbox.com/s/5qk7gbfbx1gg62i/artifacts.unp?dl=0).\n\n* artifacts.xml: acquisition parameters stored by the OCT instrumentation software during acquisition. \n\n  > Download [artifacts.xml](https://www.dropbox.com/s/6syd272xlebtubm/artifacts.xml?dl=0).\n\nAfter downloading, put them into the `examples/handling_bscan_artifacts` folder.\n\n\n## B-scan processing\n\n1. (Optional) Edit the file `process.py`, and edit the value assigned to `data_filename`.\n\n2. Using the Anaconda terminal (command prompt), change into the `octoblob/examples/handling_bscan_artifacts` folder and run the program by issuing `python process.py` at the command prompt. 
If you've skipped step 1, you'll need to specify the `.unp` filename at the command prompt, e.g., `python process.py artifacts.unp`.\n" }, { "alpha_fraction": 0.6733668446540833, "alphanum_fraction": 0.6783919334411621, "avg_line_length": 18.899999618530273, "blob_id": "ec3e197e4d0e4d0346dec255895fd1c69bd20fd8", "content_id": "8321962f2b91e1172aaf0d4bbb368a2968c92a23", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 199, "license_type": "no_license", "max_line_length": 44, "num_lines": 10, "path": "/browse.py", "repo_name": "rjonnal/octoblob", "src_encoding": "UTF-8", "text": "from octoblob import data_browser\nimport sys\n\ntry:\n file_filters = sys.argv[1:]\nexcept:\n file_filters=['*.npy','*.png']\n\nb = data_browser.Browser()\nb.browse(root='.',file_filters=file_filters)\n" }, { "alpha_fraction": 0.5861690640449524, "alphanum_fraction": 0.5942187905311584, "avg_line_length": 28.0744686126709, "blob_id": "4a82ee6cd139352aac297020844aafaba688bcc7", "content_id": "cb2601776f28c0ba3d4f9257aedf6ba1ca4f6b5d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2733, "license_type": "no_license", "max_line_length": 79, "num_lines": 94, "path": "/plotting_functions.py", "repo_name": "rjonnal/octoblob", "src_encoding": "UTF-8", "text": "import numpy as np\nfrom matplotlib import pyplot as plt\nimport sys,os\nfrom matplotlib import animation\n\nprint_dpi = 300\nscreen_dpi = 100\nIPSP = 2\n\ndef mdsavefig(fn,dpi=print_dpi):\n plt.savefig(fn,dpi=dpi)\n print('![](%s)'%fn)\n\ndef pad(ax=None,frac=0.1):\n \"\"\"Add some vertical padding to a plot.\"\"\"\n if ax is None:\n ax = plt.gca()\n ymin = np.inf\n ymax = -np.inf\n for line in ax.lines:\n yd = line.get_ydata()\n if yd.min()<ymin:\n ymin = yd.min()\n if yd.max()>ymax:\n ymax = yd.max()\n \n yr = ymax-ymin\n ylim = ((ymin-yr*frac,ymax+yr*frac))\n ax.set_ylim(ylim)\n \n\ndef dots(ax=None,border_fraction=0.03,markersize=4):\n if ax is None:\n ax = plt.gca()\n \n ylim = ax.get_ylim()\n xlim = ax.get_xlim()\n\n lines = ax.lines\n\n xmax = -np.inf\n ymax = -np.inf\n xmin = np.inf\n ymin = np.inf\n\n ymin,ymax = ylim\n xmin,xmax = xlim\n \n doty = (ymin+ymax)/2.0\n xr = xmax-xmin\n leftx = np.linspace(xmin+xr*border_fraction*0.1,xmin+xr*border_fraction,3)\n rightx = np.linspace(xmax-xr*border_fraction*0.1,xmax-xr*border_fraction,3)\n for lx in leftx:\n ax.plot(lx,doty,'k.',markersize=markersize)\n print(lx,doty)\n for rx in rightx:\n ax.plot(rx,doty,'k.',markersize=markersize)\n \n \ndef despine(ax=None,do='tr'):\n \"\"\"Remove the spines from a plot. 
(These are the lines drawn\n around the edge of the plot.)\"\"\"\n if ax is None:\n ax = plt.gca()\n if 't' in do:\n ax.spines[\"top\"].set_visible(False)\n if 'r' in do:\n ax.spines[\"right\"].set_visible(False)\n if 'b' in do:\n ax.spines[\"bottom\"].set_visible(False)\n if 'l' in do:\n ax.spines[\"left\"].set_visible(False)\n\ndef setup_plots(mode='paper',style='seaborn-deep',font_size=9,font='Arial'):\n \n if font=='Times New Roman':\n plt.rc('font', family='serif')\n plt.rc('font', serif=['Times New Roman'])\n elif font=='Arial':\n plt.rc('font', family='sans-serif')\n plt.rcParams['font.sans-serif'] = 'Arial'\n\n plt.style.use(style)\n plt.rc('font', size=font_size) # controls default text sizes\n plt.rc('axes', titlesize=font_size) # fontsize of the axes title\n plt.rc('axes', labelsize=font_size) # fontsize of the x and y labels\n plt.rc('xtick', labelsize=font_size) # fontsize of the tick labels\n plt.rc('ytick', labelsize=font_size) # fontsize of the tick labels\n plt.rc('legend', fontsize=font_size) # legend fontsize\n plt.rc('figure', titlesize=font_size) # fontsize of the figure title\n\ndef get_color_cycle():\n color_cycle = plt.rcParams['axes.prop_cycle'].by_key()['color']\n return color_cycle\n" }, { "alpha_fraction": 0.666323184967041, "alphanum_fraction": 0.6824660897254944, "avg_line_length": 35.85443115234375, "blob_id": "ef391b0e5cf42c22ec4a8966fe87ed2f4653ceed", "content_id": "b2684a19226f93ab49acbdfd19caa9bbcc60d807", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5823, "license_type": "no_license", "max_line_length": 106, "num_lines": 158, "path": "/examples/quick_optimize/mdopt.py", "repo_name": "rjonnal/octoblob", "src_encoding": "UTF-8", "text": "from octoblob.data_source import DataSource\nimport octoblob.functions as blobf\nimport logging\nfrom matplotlib import pyplot as plt\nfrom octoblob import diagnostics_tools\nfrom octoblob import parameters\nfrom octoblob import org_tools\nimport sys,os,glob\nimport numpy as np\n\nfrom octoblob import mapping_dispersion_optimizer as mdo\n#from octoblob import dispersion_optimizer as mdo\n\nfrom octoblob import file_manager\nimport pathlib\n\n\n# This example shows how to generate dispersion parameters for a specified UNP dataset using optimization.\ndata_filename = sys.argv[1]\nsrc = blobf.get_source(data_filename)\n\ntry:\n sample_index = int(sys.argv[2])\nexcept IndexError:\n sample_index = src.n_samples//2\n \n# Create a diagnostics object for inspecting intermediate processing steps\ndiagnostics = diagnostics_tools.Diagnostics(data_filename)\n\n# New prototype fbg_align function, which uses cross-correlation instead of feature-\n# based alignment of spectra.\n# Set a limit on the maximum index where the FBG trough could possibly be located.\n# This is a critical parameter, as it avoids cross correlation of spectra based on\n# structural information; this would prevent the FBG features from dominating the\n# cross-correlation and introduce additional phase noise.\n# Correlation threshold is the minimum correlation required to consider two spectra\n# to be in phase with one another\ndef fbg_align(spectra,fbg_max_index=150,correlation_threshold=0.9,diagnostics=None):\n # crop the frame to the FBG region\n f = spectra[:fbg_max_index,:].copy()\n\n if not diagnostics is None:\n fig = diagnostics.figure(figsize=(6,4))\n axes = fig.subplots(2,2)\n axes[0][0].imshow(f,aspect='auto')\n for k in range(f.shape[1]):\n axes[0][1].plot(f[:,k])\n\n # group the spectra by 
amount of shift\n # this step avoids having to perform cross-correlation operations on every\n # spectrum; first, we group them by correlation with one another\n # make a list of spectra to group\n to_do = list(range(f.shape[1]))\n # make a list for the groups of similarly shifted spectra\n groups = []\n ref = 0\n\n # while there are spectra left to group, do the following loop:\n while(True):\n groups.append([ref])\n to_do.remove(ref)\n for tar in to_do:\n c = np.corrcoef(f[:,ref],f[:,tar])[0,1]\n if c>correlation_threshold:\n groups[-1].append(tar)\n to_do.remove(tar)\n if len(to_do)==0:\n break\n ref = to_do[0]\n\n subframes = []\n for g in groups:\n subf = f[:,g]\n subframes.append(subf)\n\n # now decide how to shift the groups of spectra by cross-correlating their means\n # we'll use the first group as the reference group:\n group_shifts = [0]\n ref = np.mean(subframes[0],axis=1)\n # now, iterate through the other groups, compute their means, and cross-correlate\n # with the reference. keep track of the cross-correlation peaks in the list group_shifts\n for taridx in range(1,len(subframes)):\n tar = np.mean(subframes[taridx],axis=1)\n xc = np.fft.ifft(np.fft.fft(ref)*np.fft.fft(tar).conj())\n shift = np.argmax(xc)\n if shift>len(xc)//2:\n shift = shift-len(xc)\n group_shifts.append(shift)\n\n # now, use the groups and the group_shifts to shift all of the spectra according to their\n # group membership:\n for g,s in zip(groups,group_shifts):\n for idx in g:\n spectra[:,idx] = np.roll(spectra[:,idx],s)\n f[:,idx] = np.roll(f[:,idx],s)\n\n if not diagnostics is None:\n axes[1][0].imshow(f,aspect='auto')\n for k in range(f.shape[1]):\n axes[1][1].plot(f[:,k])\n diagnostics.save(fig)\n\n return spectra\n\n\ndef spectra_to_bscan(mdcoefs,spectra,diagnostics=None):\n # only the fbg_align function is called locally (from this script);\n # most of the OCT processing is done by blob functions (blobf.XXXX)\n spectra = fbg_align(spectra,diagnostics=diagnostics)\n spectra = blobf.dc_subtract(spectra,diagnostics=diagnostics)\n spectra = blobf.crop_spectra(spectra,diagnostics=diagnostics)\n\n if len(mdcoefs)==4:\n spectra = blobf.k_resample(spectra,mdcoefs[:2],diagnostics=diagnostics)\n spectra = blobf.dispersion_compensate(spectra,mdcoefs[2:],diagnostics=None)\n elif len(mdcoefs)==2:\n spectra = blobf.dispersion_compensate(spectra,mdcoefs,diagnostics=None)\n \n spectra = blobf.gaussian_window(spectra,sigma=0.9,diagnostics=None)\n\n # Now generate the bscan by FFT:\n bscan = np.fft.fft(spectra,axis=0)\n # remove the upper half of the B-scan and leave only the bottom half:\n bscan = bscan[bscan.shape[0]//2:,:]\n\n # could additionally crop the B-scan if desired;\n # for example, could remove the top 10 rows, bottom 50 rows, and 10 columns\n # from the left and right edges:\n # bscan = bscan[10:-50,10:-10]\n\n # it; we'll also remove 50 rows near the DC (bottom of the image):\n bscan = bscan[:-50,:]\n \n if not diagnostics is None:\n fig = diagnostics.figure()\n axes = fig.subplots(1,1)\n im = axes.imshow(20*np.log10(np.abs(bscan)),aspect='auto')\n plt.colorbar(im)\n diagnostics.save(fig)\n return bscan\n\n\nsample = src.get_frame(sample_index)\n\n\n# modify the next line to use the local spectra_to_bscan function by removing 'blobf.':\ncoefs = mdo.optimize(sample,spectra_to_bscan,show=False,verbose=False,diagnostics=diagnostics)\nprint(coefs)\n\nplt.figure()\nplt.subplot(1,2,1)\nunoptimized = 
spectra_to_bscan([0,0,0,0],sample)\nplt.imshow(20*np.log10(np.abs(unoptimized)),cmap='gray',clim=(40,80))\nplt.subplot(1,2,2)\noptimized = spectra_to_bscan(coefs,sample)\nplt.imshow(20*np.log10(np.abs(optimized)),cmap='gray',clim=(40,80))\nplt.suptitle(coefs)\nplt.show()\n" }, { "alpha_fraction": 0.75, "alphanum_fraction": 0.75, "avg_line_length": 19, "blob_id": "e1b7a22ee03673968fcba1e3b662f083921ddd83", "content_id": "1b63d6f9ad410c55e993304fd3c645e8b2c04179", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 120, "license_type": "no_license", "max_line_length": 34, "num_lines": 6, "path": "/examples/single_flash_general_org/reset.sh", "repo_name": "rjonnal/octoblob", "src_encoding": "UTF-8", "text": "#! /usr/bin/bash\n\nrm -rvf test*\nrm -rvf processing_parameters.json\nrm -rvf plot_velocities_results\nrm -rvf octoblob.log\n" }, { "alpha_fraction": 0.7307926416397095, "alphanum_fraction": 0.7370768189430237, "avg_line_length": 41.153846740722656, "blob_id": "01fead7acc426b7a9f9b6fc9329d8b94bc6e4653", "content_id": "bb33f088d21ba66f64f23b1460c37d420c541e3c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4933, "license_type": "no_license", "max_line_length": 138, "num_lines": 117, "path": "/examples/handling_bscan_artifacts/process.py", "repo_name": "rjonnal/octoblob", "src_encoding": "UTF-8", "text": "from octoblob.data_source import DataSource\nimport octoblob.functions as blobf\nimport logging\nfrom matplotlib import pyplot as plt\nfrom octoblob import diagnostics_tools\nfrom octoblob import parameters\nfrom octoblob import org_tools\nimport sys,os,glob\nimport numpy as np\nfrom octoblob import mapping_dispersion_optimizer as mdo\nfrom octoblob import file_manager\nimport pathlib\n\n# This example illustrates how to process a dataset that has artifacts such as lens reflexes and/or laser noise.\n# The two key steps are 1) avoiding automatic cropping, which depends on the center of mass of the structure in\n# the image (i.e., the location of the retina) to determine reasonable cropping points in depth (z); 2) forcing\n# some horizontal cropping to remove the bright artifacts from the image; this latter step is critical for automatic\n# optimization of dispersion/mapping coefficients.\n\n# If we want automatic cropping (useful in most contexts) we use the function blobf.spectra_to_bscan, but in this case\n# the artifact dominates the B-scan's center of mass and we have to use the full depth of the B-scan for optimization,\n# thus we only use blobf.spectra_to_bscan_nocrop\n\nno_parallel = True\n\nuse_multiprocessing = False\ntry:\n assert not no_parallel\n import multiprocessing as mp\n use_multiprocessing = True\n n_cores_available = mp.cpu_count()\n n_cores = n_cores_available-2\n logging.info('multiprocessing imported')\n logging.info('n_cores_available: %d'%n_cores_available)\n logging.info('n_cores to be used: %d'%n_cores)\nexcept ImportError as ie:\n logging.info('Failed to import multiprocessing: %s'%ie)\n logging.info('Processing serially.')\nexcept AssertionError as ae:\n logging.info('Multiprocessing banned by no_parallel.')\n \ndata_filename = None\n\nif data_filename is None:\n try:\n data_filename = sys.argv[1]\n except IndexError as ie:\n sys.exit('Please check data_filename. 
%s not found or data_filename not passed at command line.'%data_filename)\n\n\n# Create a diagnostics object for inspecting intermediate processing steps\ndiagnostics = diagnostics_tools.Diagnostics(data_filename)\n\n# Create a parameters object for storing and loading processing parameters\nparams_filename = file_manager.get_params_filename(data_filename)\nparams = parameters.Parameters(params_filename,verbose=True)\n\n# Get a plain frame for viewing\nsrc = blobf.get_source(data_filename)\nf = src.get_frame(10)\nplt.figure()\nplt.subplot(1,2,1)\nplt.imshow(f,aspect='auto')\nplt.title('raw spectral frame')\nplt.subplot(1,2,2)\nplt.imshow(np.log10(np.abs(np.fft.fft(f,axis=0))),aspect='auto')\nplt.title('FFT in k dimension')\nos.makedirs('./figs',exist_ok=True)\nplt.savefig('figs/artifact_example.png')\n\n\n# After looking at the artifact_example.png, we can see that the first 200 columns of the image should be used for\n# dispersion optimization, while skipping the rest of the image. Thus we'll re-create our source passing x1 and x2\n# parameters of 0 and 200.\n\n# Get an octoblob.DataSource object using the filename\nsrc = blobf.get_source(data_filename,x1=0,x2=200)\n\n# try to read dispersion/mapping coefs from a local processing_parameters file, and run optimization otherwise\ntry:\n coefs = np.array(params['mapping_dispersion_coefficients'],dtype=np.float)\n logging.info('File %s mapping dispersion coefficients found in %s. Skipping optimization.'%(data_filename,params_filename))\nexcept KeyError:\n logging.info('File %s mapping dispersion coefficients not found in %s. Running optimization.'%(data_filename,params_filename))\n samples = src.get_samples(5)\n coefs = mdo.multi_optimize(samples,blobf.spectra_to_bscan_nocrop,show_all=False,show_final=True,verbose=False,diagnostics=diagnostics)\n params['mapping_dispersion_coefficients'] = coefs\n\n# get the folder name for storing bscans\nbscan_folder = file_manager.get_bscan_folder(data_filename)\n\nif __name__=='__main__':\n\n if use_multiprocessing:\n def proc(k):\n # compute the B-scan from the spectra, using the provided dispersion coefficients:\n bscan = blobf.spectra_to_bscan_nocrop(coefs,src.get_frame(k),diagnostics=diagnostics)\n\n # save the complex B-scan in the B-scan folder\n outfn = os.path.join(bscan_folder,file_manager.bscan_template%k)\n np.save(outfn,bscan)\n logging.info('Saving bscan %s.'%outfn)\n\n pool = mp.Pool(n_cores)\n pool.map(proc,range(src.n_total_frames))\n\n else:\n\n for k in range(src.n_total_frames):\n\n # compute the B-scan from the spectra, using the provided dispersion coefficients:\n bscan = blobf.spectra_to_bscan_nocrop(coefs,src.get_frame(k),diagnostics=diagnostics)\n\n # save the complex B-scan in the B-scan folder\n outfn = os.path.join(bscan_folder,file_manager.bscan_template%k)\n np.save(outfn,bscan)\n logging.info('Saving bscan %s.'%outfn)\n\n" }, { "alpha_fraction": 0.78010094165802, "alphanum_fraction": 0.782624363899231, "avg_line_length": 85.671875, "blob_id": "b243f00524066d2dc86b9dcebcac9ad0b6655e78", "content_id": "bedb6300c9cb5b17562d3902412ab81ec1695020", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 5548, "license_type": "no_license", "max_line_length": 592, "num_lines": 64, "path": "/examples/org_averaging/README.md", "repo_name": "rjonnal/octoblob", "src_encoding": "UTF-8", "text": "# Averaging ORG responses for arbitrarily chosen layers\n\n## Folder contents\n\n* plot_general_org.py: an interactive tool for visualizing 
phase changes between arbitrary, user-selected layers\n\n* compute_average_responses.py: a script for averaging responses created using `plot_general_org.py`\n\n* reset.sh: a bash script for deleting all of the downloaded and processed data, mainly for cleaning up this folder before pushing changes to github\n\n## Download test data\n\nTo run this example you must download the test data from the links below:\n\n* org_averaging_data.zip: the spectral data stored in raw binary 16 bit unsigned integer format. \n\n > Download [org_averaging_data.zip](https://www.dropbox.com/s/z9q2tiw53dg5q24/org_averaging_data.zip). (Use the `Download` button instead of exploring the contents of the zip file.)\n.\n\nAfter downloading, put it into the `examples/org_averaging` folder and unzip.\n\n\n## ORG visualization\n\n1. Run the program `plot_general_org.py` by issuing `python plot_general_org.py` at the command prompt, in the same folder. If run this way, the program searches recursively for folders called `org` in the current directory and its subdirectories. Alternatively, you may issue `python plot_general_org.py ./test_bscans` to search only that subdirectory (recursively). In these cases, the program will run on each of the `org` folders it finds. Finally, you may specify a particular org folder with `python plot_general_org.py ./test_bscans/org`, in which case it will run only on that folder.\n\n2. The input required by the user is clicking the end points of two line segments, one at a time. These line segments determine the layers between which phase velocities are computed. The user must click these line segments in a particular order--the left end of the top line segment, the right end of the top line segment, the left end of the bottom line segment, and the right end of the bottom line segment. The program will attempt to convert these line segments into arbitrary paths tracing the contour of the underlying layer by using the `refine_z` parameter:\n\n```python\n# refine_z specifies the number of pixels (+/-) over which the\n# program may search to identify a local peak. The program begins by asking\n# the user to trace line segments through two layers of interest. These layers\n# may not be smooth. From one A-scan to the next, the brightest pixel or \"peak\"\n# corresponding to the layer may be displaced axially from the intersection\n# of the line segment with the A-scan. refine_z specifies the distance (in either\n# direction, above or below that intersection) where the program may search for a\n# brighter pixel with which to compute the phase. The optimal setting here will\n# largely be determined by how isolated the layer of interest is. For a relatively\n# isolated layer, such as IS/OS near the fovea, a large value may be best. For\n# closely packed layers such as COST and RPE, smaller values may be useful. The\n# user receives immediate feedback from the program's selection of bright pixels\n# and can observe whether refine_z is too high (i.e., causing the wrong layer\n# to be segmented) or too low (i.e., missing the brightest pixels.\n```\n\nSelection of these line segments causes the $v$ plot for that region to appear in the right panel. When multiple regions are created, multiple plots are generated on the right, with the rectangles and plot lines color-coordinated for comparison. The `backspace` key deletes the last region, and clicking outside of the B-scan on the left clears all of the regions. 
The `enter` key saves the figure and associated data in two places: the working directory, in a folder called `layer_velocities_results` and in the `org` folder containing the raw ORG data.\n\n## ORG averaging\n\nTo produce averaged ORG plots, run the `compute_average_responses.py` script. Without modification, it will search for data saved in the previous *ORG visualization* step, and average all located data. It will produce a large figure with plots of the individual responses, as well as a plot of the average response. These will all be contained in the resulting `average.png` and `average.pdf` files, which will be overwritten by successive runs. The output files are meant to be manually cropped/cut for use.\n\n**Important:** because the results of plotting above are stored in multiple locations, you have to guard against averaging together multiple copies of the same responses. This can be accomplished in two ways:\n\n1. Pass a command line argument to `compute_average_responses.py`, as in `python compute_average_responses.py layer_velocity_results` or, if as described below, you have already renamed that folder to describe the ORG region of interest, something like `python compute_average_responses.py OS_results` or `python compute_average_responses.py layer_velocity_results_SRS`.\n\n## Multiple ORG types\n\nIf you are working on multiple ORG measurements in the same folder, e.g. IS, OS, and SRS, please change the names of the `layer_velocities_results` folders accordingly, e.g., `IS_results`, `OS_results`, `layer_velocity_results_SRS`, etc., and then either 1) specify the folder as an argument, e.g. `python compute_average_responses.py IS_results`, or 2) copy `compute_average_responses.py` into the results folder of interest and run it there. Otherwise, results from different layers will be averaged together, which makes no sense.\n\n## Example results\n\n### Average cone inner segment ORG responses\n\n![Cone inner segment ORG responses](./figs/average_response.png)\n\n" }, { "alpha_fraction": 0.7514845132827759, "alphanum_fraction": 0.7785353064537048, "avg_line_length": 67.8787841796875, "blob_id": "bce9ce48afc98e660d06a6e3cf9741dc4bb4e46a", "content_id": "09c03d9887b232afaf75c2c753d15cbf4bd3613c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 4547, "license_type": "no_license", "max_line_length": 566, "num_lines": 66, "path": "/examples/optopol_interface/README.md", "repo_name": "rjonnal/octoblob", "src_encoding": "UTF-8", "text": "# Basic ORG processing for Optopol data\n\nThis example illustrates ORG processing for Optopol data that have already been processed into OCT amplitude and phase, and stored in `.bin` files.\n\n## Folder contents\n\n* process.py: OCT/ORG processing script\n\n* plot_general_org.py: an interactive tool for visualizing phase changes between arbitrary, user-selected layers\n\n* reset.sh: a bash script for deleting all of the downloaded and processed data, mainly for cleaning up this folder before pushing changes to github\n\n## Download test data\n\nTo run this example you must download the test data from the links below:\n\n[amplitude bin file](https://www.dropbox.com/s/efpieltzhry23nn/GB_1905021_GB_TrackingON_20230217101223_2_101x250x2048x2_Amp.bin?dl=1)\n\n[phase bin file](https://www.dropbox.com/s/6xprkbeg8iff0xb/GB_1905021_GB_TrackingON_20230217101223_2_101x250x2048x2_Phase.bin?dl=1)\n\nAfter downloading, put them into the `examples/optopol_interface` folder.\n\n## OCT/ORG processing\n\n1. 
Edit the file `process.py`, setting the values assigned to `org_start_frame` and `org_end_frame` as needed. For single flash experiments, only a subset of the B-scans needs to be processed; see the code comment for details. \n\n2. Using the Anaconda terminal (command prompt), change into the `octoblob/examples/optopol_interface` folder and run the program by issuing `python process.py FILENAME` at the command prompt. `FILENAME` should be replaced by the name of the file containing **amplitude** data, e.g., `GB_1905021_GB_TrackingON_20230217101223_2_101x250x2048x2_Amp.bin`. This will take a few minutes. The ORG processing in particular is somewhat slow.\n\n## ORG visualization\n\n0. If necessary, edit the following parameters in `plot_general_org.py`:\n\n```python\nzlim = (400,600) # depth limits for profile plot in um\nvlim = (-10,10) # velocity limits for plotting in um/s\nz_um_per_pixel = 3.0\n```\n\n1. Run the program `plot_general_org.py` by issuing `python plot_general_org.py` at the command prompt, in the same folder. If run this way, the program searches recursively for folders called `org` in the current directory and its subdirectories. This folder will by default be a subfolder of the `FILENAME_bscans` folder.\n\n2. The input required by the user is clicking the end points of two line segments, one at a time. These line segments determine the layers between which phase velocities are computed. The user must click these line segments in a particular order--the left end of the top line segment, the right end of the top line segment, the left end of the bottom line segment, and the right end of the bottom line segment. The program will attempt to convert these line segments into arbitrary paths tracing the contour of the underlying layer by using the `refine_z` parameter:\n\n```python\n# refine_z specifies the number of pixels (+/-) over which the\n# program may search to identify a local peak. The program begins by asking\n# the user to trace line segments through two layers of interest. These layers\n# may not be smooth. From one A-scan to the next, the brightest pixel or \"peak\"\n# corresponding to the layer may be displaced axially from the intersection\n# of the line segment with the A-scan. refine_z specifies the distance (in either\n# direction, above or below that intersection) where the program may search for a\n# brighter pixel with which to compute the phase. The optimal setting here will\n# largely be determined by how isolated the layer of interest is. For a relatively\n# isolated layer, such as IS/OS near the fovea, a large value may be best. For\n# closely packed layers such as COST and RPE, smaller values may be useful. The\n# user receives immediate feedback from the program's selection of bright pixels\n# and can observe whether refine_z is too high (i.e., causing the wrong layer\n# to be segmented) or too low (i.e., missing the brightest pixels).\n```\n\nSelection of these line segments causes the $v$ plot for that region to appear in the right panel. When multiple regions are created, multiple plots are generated on the right, with the rectangles and plot lines color-coordinated for comparison. The `backspace` key deletes the last region, and clicking outside of the B-scan on the left clears all of the regions. 
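\n\nAs a rough sketch (not necessarily the program's exact implementation), the `refine_z` search can be thought of as a bounded peak search on each A-scan. Here `refine_peak` is a hypothetical name used only for illustration, `profile` is the magnitude of one A-scan, and `z0` is the depth at which the user's line segment crosses that A-scan:\n\n```python\nimport numpy as np\n\ndef refine_peak(profile, z0, refine_z=2):\n    # search only within +/- refine_z pixels of the user-traced depth z0\n    z1 = max(z0 - refine_z, 0)\n    z2 = min(z0 + refine_z + 1, len(profile))\n    # take the brightest pixel in the window as the layer position\n    return z1 + np.argmax(profile[z1:z2])\n```\n\nThe immediate feedback described above is the practical way to judge whether your `refine_z` setting is too aggressive or too conservative.\n\n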
The `enter` key saves the figure and associated data in two places: a folder called `layer_velocities_results` in the working directory, and the `org` folder containing the raw ORG data.\n\n## Example results\n\n### Cone outer segment ORG responses\n\n![Cone outer segment ORG responses](./figs/cone_os_org_optopol.png)\n\n" }, { "alpha_fraction": 0.739393949508667, "alphanum_fraction": 0.739393949508667, "avg_line_length": 19.625, "blob_id": "383f3f6fc62bcd1e5c02d6f477e6277f32126af1", "content_id": "e80ccf072cc5054144b875ff1eb92994b1193879", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 165, "license_type": "no_license", "max_line_length": 34, "num_lines": 8, "path": "/examples/processing_flipped/reset.sh", "repo_name": "rjonnal/octoblob", "src_encoding": "UTF-8", "text": "#! /usr/bin/bash\n\nrm -rvf *_bscans\nrm -rvf *_diagnostics\nrm -rvf artifacts.*\nrm -rvf processing_parameters.json\nrm -rvf plot_velocities_results\nrm -rvf octoblob.log\n" }, { "alpha_fraction": 0.5211988091468811, "alphanum_fraction": 0.5378856658935547, "avg_line_length": 35.25136947631836, "blob_id": "7878b76739fd4227234bc395b44c1c31cbf353c7", "content_id": "f54a127c38eee23520d2519eafdbb98ac93ea431", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 39672, "license_type": "no_license", "max_line_length": 179, "num_lines": 1094, "path": "/volume_tools.py", "repo_name": "rjonnal/octoblob", "src_encoding": "UTF-8", "text": "import numpy as np\nfrom matplotlib import pyplot as plt\nimport os,sys,glob\nimport time\nimport logging\ntry:\n    from .ticktock import tick, tock\nexcept ImportError:\n    tick = lambda: 0.0\n    tock = lambda x: 0.0\n    \nimport scipy.ndimage as spn\nimport scipy.interpolate as spi\nimport imageio\nimport multiprocessing as mp\nfrom itertools import repeat\n\nlogging.basicConfig(\n    level=logging.INFO,\n    format=\"%(asctime)s [%(levelname)s] %(message)s\",\n    handlers=[\n        logging.FileHandler(\"octoblob.log\"),\n        logging.StreamHandler()\n    ]\n)\n\n################################# Intro ####################################\n# This version of volume_tools is meant to be a drop-in replacement for the\n# previous version, but with a completely different underlying methodology.\n# The previous version was made faster by only registering sub-volumes, and\n# thus necessitated lots of bookkeeping. This one is going to work by registering\n# windowed volumes to a reference volume. 
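\n# In rough pseudocode (names illustrative; the real logic is in Volume.register_to below):\n#     for y0 in range(n_slow):                 # one anchor per slow position\n#         win = gaussian window centered at y0 # emphasize B-scans near y0\n#         nxc = nxc3d(reference, target*win)   # 3D (phase) cross-correlation\n#         shift[y0] = argmax(nxc)              # per-anchor (y,z,x) shift\n# 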
It'll be slower, but simpler, with\n# fewer corner cases to worry about, and fewer assumptions.\n#############################################################################\n\n\n# for loading M-scan series as volumes, we average the abs along this dimension:\nM_SCAN_DIMENSION = 2\nscreen_dpi = 100\ncol_width_inches = 2.5\nrow_height_inches = 2.5\nlarge_integer = 10000000000\ndefault_clim = (45,85)\n\n\ndef norm(im):\n    # standardize the image: subtract the mean, then divide by the standard deviation\n    return (im - np.nanmean(im))/np.nanstd(im)\n\ndef gaussian_filter(shape,sigmas):\n    f = np.zeros(shape)\n    sy,sz,sx = shape\n    wy,wz,wx = sigmas\n    ZZ,YY,XX = np.meshgrid(np.arange(sz),np.arange(sy),np.arange(sx))\n    ZZ = ZZ - sz/2.0\n    YY = YY - sy/2.0\n    XX = XX - sx/2.0\n\n    zz = ZZ**2/(2*wz**2)\n    yy = YY**2/(2*wy**2)\n    xx = XX**2/(2*wx**2)\n    g = np.exp(-(xx+yy+zz))\n\n    g = np.fft.fftshift(g)\n    return g\n\ndef rect_filter(shape,radii,diagnostics=False):\n    f = np.zeros(shape)\n    sy,sz,sx = shape\n    wy,wz,wx = radii\n    ZZ,YY,XX = np.meshgrid(np.arange(sz),np.arange(sy),np.arange(sx))\n    ZZ = ZZ - sz/2.0\n    YY = YY - sy/2.0\n    XX = XX - sx/2.0\n\n    zz = ZZ**2/(wz**2)\n    yy = YY**2/(wy**2)\n    xx = XX**2/(wx**2)\n\n    rad = np.sqrt(zz+yy+xx)\n    g = np.zeros(rad.shape)\n    g[rad<=1] = 1\n\n    if diagnostics:\n        plt.figure()\n        for k in range(sy):\n            plt.clf()\n            plt.imshow(g[k,:,:],clim=(g.min(),g.max()))\n            plt.colorbar()\n            plt.title('%s of %s'%(k+1,sy))\n            plt.pause(.1)\n        plt.close()\n\n    g = np.fft.fftshift(g)\n    return g\n\n\ndef show3d(vol,mode='center',aspect='auto'):\n    sy,sz,sx = vol.shape\n    temp = np.abs(vol)\n    ncol,nrow = 3,1\n    #plt.figure(figsize=(ncol*col_width_inches,nrow*row_height_inches),dpi=screen_dpi)\n    if mode=='center':\n        plt.subplot(1,3,1)\n        plt.imshow(temp[sy//2,:,:],cmap='gray',aspect=aspect)\n        plt.title('z-x')\n        plt.subplot(1,3,2)\n        plt.imshow(temp[:,sz//2,:],cmap='gray',aspect=aspect)\n        plt.title('y-x')\n        plt.subplot(1,3,3)\n        plt.imshow(temp[:,:,sx//2].T,cmap='gray',aspect=aspect)\n        plt.title('z-y')\n    elif mode=='average':\n        plt.subplot(1,3,1)\n        plt.imshow(temp.mean(0),cmap='gray',aspect=aspect)\n        plt.title('z-x')\n        plt.subplot(1,3,2)\n        plt.imshow(temp.mean(1),cmap='gray',aspect=aspect)\n        plt.title('y-x')\n        plt.subplot(1,3,3)\n        plt.imshow(temp.mean(2).T,cmap='gray',aspect=aspect)\n        plt.title('z-y')\n    elif mode=='max':\n        plt.subplot(1,3,1)\n        plt.imshow(np.max(temp,axis=0),cmap='gray',aspect=aspect)\n        plt.title('z-x')\n        plt.subplot(1,3,2)\n        plt.imshow(np.max(temp,axis=1),cmap='gray',aspect=aspect)\n        plt.title('y-x')\n        plt.subplot(1,3,3)\n        plt.imshow(np.max(temp,axis=2).T,cmap='gray',aspect=aspect)\n        plt.title('z-y')\n    elif mode=='nxc':\n        reg_coords = list(np.unravel_index(np.argmax(vol),vol.shape))\n        plt.subplot(1,3,1)\n        plt.imshow(temp[reg_coords[0],:,:],cmap='gray',aspect=aspect)\n        plt.plot(reg_coords[2],reg_coords[1],'g+')\n        plt.title('z-x')\n        plt.subplot(1,3,2)\n        plt.imshow(temp[:,reg_coords[1],:],cmap='gray',aspect=aspect)\n        plt.plot(reg_coords[2],reg_coords[0],'g+')\n        plt.title('y-x')\n        plt.subplot(1,3,3)\n        plt.imshow(temp[:,:,reg_coords[2]].T,cmap='gray',aspect=aspect)\n        plt.plot(reg_coords[0],reg_coords[1],'g+')\n        plt.title('z-y')\n    \n\ndef nxc3d(ref,tar,diagnostics=False):\n\n    # Differences from previous versions:\n    # 1. We should expect not to receive NaN pixels\n    # 2. 
Handle the upsampling/downsampling externally\n \n #ref = norm(ref)\n #tar = norm(tar)\n\n pref = np.zeros(ref.shape,dtype=ref.dtype)\n ptar = np.zeros(tar.shape,dtype=tar.dtype)\n\n pref[:] = norm(ref)\n ptar[:] = norm(tar)\n \n if diagnostics:\n show3d(pref)\n show3d(ptar)\n plt.show()\n \n n_slow,n_depth,n_fast = pref.shape\n\n #logging.info('Registering volumes of shape %dx%dx%d (slow x depth x fast).'%(n_slow,n_depth,n_fast))\n t0 = tick()\n\n rsx,rsz,rsy = pref.shape\n tsx,tsz,tsy = ptar.shape\n \n sx = max(rsx,tsx)\n sz = max(rsz,tsz)\n sy = max(rsy,tsy)\n \n t0 = tick()\n s = (sx,sz,sy)\n fref = np.fft.fftn(pref,s=s)\n ftar = np.fft.fftn(ptar,s=s)\n dt = tock(t0)\n #nxc = np.real(np.fft.ifftn(fref*np.conj(ftar)))\n nxc = np.real(np.fft.ifftn(fref*np.conj(ftar)/np.abs(fref*np.conj(ftar))))\n logging.info('Registration took %0.3f sec.'%dt)\n return nxc\n\n\nclass Coordinates:\n \"\"\"A Coordinates object keeps track of the 3D coordinates for each A-scan in a Volume object.\"\"\"\n def __init__(self,n_slow,n_depth,n_fast):\n self.x,self.y = np.meshgrid(np.arange(n_fast),np.arange(n_slow))\n self.z = np.zeros(self.x.shape,dtype=np.int)\n self.sy,self.sx = self.z.shape\n self.correlation = np.zeros(self.x.shape)\n \n def move_x(self,dx,boundaries):\n self.x[boundaries.y1:boundaries.y2,boundaries.x1:boundaries.x2]+=dx\n\n def move_y(self,dy,boundaries):\n self.y[boundaries.y1:boundaries.y2,boundaries.x1:boundaries.x2]+=dy\n\n def move_z(self,dz,boundaries):\n self.z[boundaries.y1:boundaries.y2,boundaries.x1:boundaries.x2]+=dz\n\n def set_correlation(self,corr,boundaries):\n self.correlation[boundaries.y1:boundaries.y2,boundaries.x1:boundaries.x2]=corr\n\n \nclass Boundaries:\n\n def __init__(self,y1,y2,z1,z2,x1,x2):\n sy = y2-y1\n sz = z2-z1\n sx = x2-x1\n self.x1 = x1\n self.x2 = x2\n self.y1 = y1\n self.y2 = y2\n self.z1 = z1\n self.z2 = z2\n self.shape = (sy,sz,sx)\n \nclass Volume:\n\n def __init__(self,bscan_folder,use_cache=False,diagnostics=False,hold_volume_in_ram=True,resampling=1,motion_correct=False,autocrop=True,cropz=150):\n\n t0 = tick()\n \n self.bscan_folder = bscan_folder\n print(self.bscan_folder)\n self.bscan_filenames = sorted(glob.glob(os.path.join(self.bscan_folder,'*.npy')))\n self.resampling=resampling\n self.motion_correct = motion_correct\n self.autocrop = autocrop\n self.cropz = cropz\n \n # determine volume shape from file list length and sample B-scan:\n self.n_slow = len(self.bscan_filenames)\n logging.info('Creating a Volume object based on %d bscans in %s.'%(self.n_slow,bscan_folder))\n \n temp = np.load(self.bscan_filenames[0])\n self.n_depth,self.n_fast = temp.shape\n\n\n \n # set default coordinates:\n self.coordinates = Coordinates(self.n_slow,self.n_depth,self.n_fast)\n\n self.moved = False\n \n self.use_cache = use_cache\n self.cache_dir = os.path.join(self.bscan_folder,'volume')\n self.cache_filename = os.path.join(self.cache_dir,'volume_%0.1f.npy'%self.resampling)\n \n os.makedirs(self.cache_dir,exist_ok=True)\n\n self.hold_volume_in_ram = hold_volume_in_ram\n\n volume = self.build_volume()\n \n self.unique_id = self.make_id(volume)\n logging.info('Initializing volume with id %s.'%self.unique_id)\n sy,sz,sx = volume.shape\n\n if self.hold_volume_in_ram:\n self.volume = volume\n\n self.ac_max_dict = {}\n self.is_reference = False\n\n self.y_grid = np.arange(0,sy)/self.resampling\n\n def make_id(self,volume):\n sy,sx,sz = volume.shape\n vals = []\n for y in range(0,sy,sy//3):\n for x in range(0,sx,sx//3):\n for z in range(0,sz,sz//3):\n val = 
volume[y,x,z]\n if np.isnan(val):\n val = 0.0\n vals.append(val)\n vals = tuple(vals)\n out = '%d'%hash(vals)\n out = out.replace('-','m')\n return out\n \n def build_volume(self,diagnostics=False):\n t0 = tick()\n\n if self.use_cache and os.path.exists(self.cache_filename):\n logging.info('Loading volume from %s.'%self.cache_filename)\n volume = np.load(self.cache_filename)\n else:\n logging.info('Building volume in %s.'%self.bscan_folder)\n temp = np.load(self.bscan_filenames[0])\n dtype = temp.dtype\n\n volume_temp = []\n for rf in self.bscan_filenames:\n temp = np.load(rf)\n is_stack = len(temp.shape)>2\n temp = np.abs(temp)\n if is_stack:\n temp = np.nanmean(temp,axis=M_SCAN_DIMENSION)\n\n if diagnostics:\n plt.cla()\n plt.imshow(temp,cmap='gray')\n plt.pause(.1)\n\n volume_temp.append(temp)\n\n volume_temp = np.array(volume_temp,dtype=dtype)\n\n #self.flythrough(0,volume=volume_temp)\n #self.flythrough(1,volume=volume_temp)\n #self.flythrough(2,volume=volume_temp)\n \n # resample volume\n if self.resampling==1:\n volume = volume_temp\n else:\n sy,sz,sx = volume_temp.shape\n \n ry_vec = np.arange(0,sy-1,1.0/self.resampling)\n rz_vec = np.arange(0,sz-1,1.0/self.resampling)\n rx_vec = np.arange(0,sx-1,1.0/self.resampling)\n\n ryy,rzz,rxx = np.meshgrid(ry_vec,rz_vec,rx_vec)\n\n y_vec = np.arange(sy)\n z_vec = np.arange(sz)\n x_vec = np.arange(sx)\n\n points = list(zip(ryy.ravel(),rzz.ravel(),rxx.ravel()))\n interpolator = spi.RegularGridInterpolator((y_vec,z_vec,x_vec),volume_temp)\n volume = interpolator(points)\n # volume = np.reshape(volume,(len(rx_vec),len(ry_vec),len(rz_vec)))\n # volume = np.reshape(volume,(len(rx_vec),len(rz_vec),len(ry_vec)))\n # volume = np.reshape(volume,(len(ry_vec),len(rx_vec),len(rz_vec)))\n # volume = np.reshape(volume,(len(ry_vec),len(rz_vec),len(rx_vec)))\n # volume = np.reshape(volume,(len(rz_vec),len(rx_vec),len(ry_vec)))\n volume = np.reshape(volume,(len(rz_vec),len(ry_vec),len(rx_vec)))\n volume = np.transpose(volume,(1,0,2))\n\n\n if self.autocrop:\n avol = np.abs(volume)\n prof = np.mean(np.mean(avol,axis=2),axis=0)\n z = np.arange(len(prof))\n com = np.sum(prof*z)/np.sum(prof)\n com = int(round(com))\n volume = volume[:,com-self.cropz:com+self.cropz,:]\n \n if self.motion_correct:\n crop_edge = 50\n nxc_threshold = 0.05\n sy,sz,sx = volume.shape\n # use the bscan with the highest mean intensity as the reference:\n mprof = np.mean(np.mean(np.abs(volume),axis=2),axis=1)\n startidx = np.argmax(mprof)\n dx_vec = np.zeros(len(mprof),dtype=int)\n dz_vec = np.zeros(len(mprof),dtype=int)\n ref = volume[startidx,:,crop_edge:-crop_edge]\n\n for y in range(startidx,-1,-1):\n tar = volume[y,:,crop_edge:-crop_edge]\n nxc = np.fft.fft2(tar)*np.conj(np.fft.fft2(ref))\n nxc = nxc/np.abs(nxc)\n nxc = np.fft.ifft2(nxc)\n nxc = np.real(nxc)\n dz,dx = np.unravel_index(np.argmax(nxc),nxc.shape)\n if dz>sz//2:\n dz = dz - sz\n if dx>sx//2:\n dx = dx - sx\n if nxc.max()>nxc_threshold and np.sqrt(dz**2+dx**2)<10:\n dz_vec[y] = dz\n dx_vec[y] = dx\n ref = tar\n \n ref = volume[startidx,:,crop_edge:-crop_edge]\n for y in range(startidx+1,sy):\n tar = volume[y,:,crop_edge:-crop_edge]\n nxc = np.fft.fft2(tar)*np.conj(np.fft.fft2(ref))\n nxc = nxc/np.abs(nxc)\n nxc = np.fft.ifft2(nxc)\n nxc = np.real(nxc)\n dz,dx = np.unravel_index(np.argmax(nxc),nxc.shape)\n if dz>sz//2:\n dz = dz - sz\n if dx>sx//2:\n dx = dx - sx\n if nxc.max()>nxc_threshold and np.sqrt(dz**2+dx**2)<10:\n dz_vec[y] = dz\n dx_vec[y] = dx\n ref = tar\n\n dx_vec = np.cumsum(dx_vec).astype(int)\n dz_vec = 
np.cumsum(dz_vec).astype(int)\n\n for y in range(sy):\n volume[y,:,:] = np.roll(volume[y,:,:],(dz_vec[y],dx_vec[y]))\n \n #self.flythrough(1,volume=volume)\n #self.flythrough(2,volume=volume)\n #sys.exit()\n np.save(self.cache_filename,volume)\n \n #self.flythrough(0,volume=volume)\n logging.info('Done; took %0.3f sec.'%tock(t0))\n \n return volume\n\n\n def write_bitmaps(self,output_folder=None,axis=1,do_dB=True,clim=default_clim,bmp_fmt='tif'):\n\n if axis==0:\n filename_format='bscan_%05d.'+bmp_fmt\n elif axis==1:\n filename_format='enface_%05d.'+bmp_fmt\n \n if output_folder is None:\n output_folder=os.path.join(self.bscan_folder,bmp_fmt)\n \n os.makedirs(output_folder,exist_ok=True)\n \n vol = self.get_volume()\n sy,sz,sx = vol.shape\n avol = np.abs(vol)\n\n if do_dB:\n avol = 20*np.log10(avol)\n \n if not clim is None:\n avol = np.clip(avol,clim[0],clim[1])\n \n vmax = np.nanmax(avol)\n vmin = np.nanmin(avol)\n\n if bmp_fmt == 'tif':\n avol = (avol - vmin)/(vmax-vmin)*(2**16-1)\n avol[np.isnan(avol)] = 0\n avol = np.round(avol).astype(np.uint16)\n elif bmp_fmt == 'png':\n avol = (avol - vmin)/(vmax-vmin)*(2**8-1)\n avol[np.isnan(avol)] = 0\n avol = np.round(avol).astype(np.uint8)\n \n if axis==0:\n for y in range(sy):\n outfn = os.path.join(output_folder,filename_format%y)\n imageio.imwrite(outfn,avol[y,:,:])\n logging.info('Writing %s to %s.'%(bmp_fmt,outfn))\n elif axis==1:\n for z in range(sz):\n outfn = os.path.join(output_folder,filename_format%z)\n imageio.imwrite(outfn,avol[:,z,:])\n logging.info('Writing %s to %s.'%(bmp_fmt,outfn))\n\n with open(os.path.join(output_folder,'raw_image_stats.txt'),'w') as fid:\n fid.write('volume max: %0.3f\\n'%vmax)\n fid.write('volume min: %0.3f\\n'%vmin)\n \n def get_volume(self,diagnostics=False):\n if self.hold_volume_in_ram:\n logging.info('get_volume returning volume in RAM.')\n return self.volume\n else:\n logging.info('get_volume returning result of build_volume().')\n return self.build_volume()\n\n def move(self,shifts,boundaries,nxc_max=0.0):\n if self.is_reference:\n try:\n assert not any(shifts)\n except AssertionError:\n logging.info('move: assertion error on reference')\n return\n self.coordinates.move_y(shifts[0],boundaries)\n self.coordinates.move_z(shifts[1],boundaries)\n self.coordinates.move_x(shifts[2],boundaries)\n self.coordinates.set_correlation(nxc_max,boundaries)\n self.moved = True\n\n\n def get_window(self,y,mode='gaussian',width=3):\n if mode=='gaussian':\n out = np.exp(-(self.y_grid-y)**2/(2*width**2))\n elif mode=='rect':\n out = np.zeros(self.y_grid.shape)\n out[np.where(np.abs(self.y_grid-y)<width)] = 1\n \n return out\n\n def flythrough(self,axis=1,volume=None):\n plt.figure()\n if volume is None:\n volume = self.get_volume()\n nframes = volume.shape[axis]\n for k in range(nframes):\n if axis==0:\n im = volume[k,:,:]\n elif axis==1:\n im = volume[:,k,:]\n elif axis==2:\n im = volume[:,:,k].T\n \n im = 20*np.log10(np.abs(im))\n plt.cla()\n plt.imshow(im,clim=(40,80),cmap='gray')\n plt.pause(.0001)\n plt.close()\n \n def register_to(self,reference_volume,sigma=10):\n\n rcache = '.registration_cache'\n os.makedirs(rcache,exist_ok=True)\n\n pair_id = reference_volume.unique_id+'_'+self.unique_id\n cache_fn = os.path.join(rcache,'reg_info_%s_%d.npy'%(pair_id,sigma))\n \n #self.flythrough(0)\n #self.flythrough(1)\n #sys.exit()\n \n t0 = tick()\n rvol = reference_volume.get_volume()\n tvol = self.get_volume()\n sy,sz,sx = tvol.shape\n \n try:\n volume_info = np.load(cache_fn)\n except:\n ac_max_key = '_'.join([str(s) 
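# one cached autocorrelation max per reference-volume shape\n                    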
for s in reference_volume.get_volume().shape])\n try:\n ac_max = self.ac_max_dict[ac_max_key]\n except Exception as e:\n nxc = nxc3d(rvol,rvol)\n ac_max = nxc.max()\n self.ac_max_dict[ac_max_key] = ac_max\n\n volume_info = []\n\n for y0 in np.arange(self.n_slow):\n temp = tvol.copy()\n temp = np.transpose(temp,(1,2,0))\n win = self.get_window(y0,'gaussian',sigma*self.resampling)\n xc_correction = float(len(win))/np.sum(win)\n temp = temp*win\n temp = np.transpose(temp,(2,0,1))\n\n #sy,sz,sx = temp.shape\n nxc = nxc3d(rvol,temp)\n\n #show3d(temp,mode='max')\n #plt.show()\n #plt.clf()\n #show3d(np.fft.fftshift(nxc),mode='max')\n #plt.pause(.1)\n #plt.show()\n #sys.exit()\n\n reg_coords = list(np.unravel_index(np.argmax(nxc),nxc.shape))\n nxc_max = np.max(nxc)/ac_max*xc_correction\n\n for idx in range(len(nxc.shape)):\n if reg_coords[idx]>nxc.shape[idx]//2:\n reg_coords[idx] = reg_coords[idx]-nxc.shape[idx]\n\n chunk_info = reg_coords+[nxc_max]\n volume_info.append(chunk_info)\n logging.info('Anchor %d of %d: %s'%(y0,self.n_slow,chunk_info))\n\n volume_info = np.array(volume_info)\n np.save(cache_fn,volume_info)\n \n for y0 in np.arange(self.n_slow):\n b = Boundaries(y0,y0+1,0,sz,0,sx)\n chunk_info = volume_info[y0]\n self.move(chunk_info[:3].astype(int),b,chunk_info[3])\n #plt.clf()\n #plt.imshow(self.coordinates.x-np.arange(sx))\n #plt.colorbar()\n #plt.pause(.0001)\n\n t1 = tock(t0)\n logging.info('register_to took %0.3f s'%t1)\n\n \n\ndef dummy_function(x):\n return x\n\n\ndef registration_function(volume,reference):\n volume.register_to(reference)\n\nclass VolumeSeries:\n\n def __init__(self,reference_folder,resampling=1.0,sigma=10,signal_function=np.abs,hold_volume_in_ram=True):\n self.volumes = []\n self.signal_function = signal_function\n self.resampling = resampling\n self.sigma = sigma\n self.hold_volume_in_ram = hold_volume_in_ram\n self.add_reference(reference_folder,hold_volume_in_ram=hold_volume_in_ram)\n self.unique_id = self.reference.unique_id\n reference_folder = reference_folder.strip('/').strip('\\\\')\n self.ref_tag = os.path.split(reference_folder)[1].strip('/').strip('\\\\')\n self.folder = os.path.join('registered/%s_%s'%(self.ref_tag,self.unique_id),'%0.1f_%0.1f'%(self.resampling,self.sigma))\n os.makedirs(self.folder,exist_ok=True)\n\n try:\n data = np.random.rand(10)\n with mp.Pool(4) as p:\n p.map(dummy_function,data)\n self.use_multiprocessing = True\n except Exception as e:\n print(e)\n self.use_multiprocessing = False\n\n def __getitem__(self,n):\n return self.volumes[n]\n\n def add_reference(self,volume_folder,hold_volume_in_ram=True):\n vol = Volume(volume_folder,resampling=self.resampling,hold_volume_in_ram=hold_volume_in_ram)\n self.reference = vol\n\n def add_target(self,volume_folder,hold_volume_in_ram=True):\n vol = Volume(volume_folder,resampling=self.resampling,hold_volume_in_ram=hold_volume_in_ram)\n self.volumes.append(vol)\n\n def register(self):\n info_folder = os.path.join(self.folder,'info')\n os.makedirs(info_folder,exist_ok=True)\n nvol = len(self.volumes)\n\n for idx,v in enumerate(self.volumes):\n t0 = tick()\n v.register_to(self.reference,sigma=self.sigma)\n dt = tock(t0)\n with open('progress.txt','a') as fid:\n fid.write('%d of %d, %0.1f s each\\n'%(idx+1,nvol,dt))\n \n def render(self,threshold_percentile=0.0,diagnostics=False,display_function=lambda x: 20*np.log10(x),display_clim=None,make_bscan_flythrough=True,make_enface_flythrough=True):\n\n bscan_png_folder = os.path.join(self.folder,'bscans_png')\n enface_png_folder = 
os.path.join(self.folder,'enface')\n bscan_folder = os.path.join(self.folder,'bscans')\n diagnostics_folder = os.path.join(self.folder,'info')\n volumes_folder = os.path.join(self.folder,'volumes')\n \n if make_bscan_flythrough:\n os.makedirs(bscan_png_folder,exist_ok=True)\n if make_enface_flythrough:\n os.makedirs(enface_png_folder,exist_ok=True)\n \n os.makedirs(bscan_folder,exist_ok=True)\n os.makedirs(diagnostics_folder,exist_ok=True)\n os.makedirs(volumes_folder,exist_ok=True)\n \n n_slow, n_depth, n_fast = self.volumes[0].get_volume().shape\n \n # find the maximum depth\n max_n_depth = np.max([v.n_depth for v in self.volumes])\n\n zmin = large_integer\n for v in self.volumes:\n #v.coordinates.z = -v.coordinates.z\n \n if v.coordinates.z.min()<zmin:\n zmin = v.coordinates.z.min()\n\n for v in self.volumes:\n v.coordinates.z = v.coordinates.z - zmin\n \n # find the new max in z\n zmax = -large_integer\n\n for v in self.volumes:\n if v.coordinates.z.max()>zmax:\n zmax = v.coordinates.z.max()\n\n sum_array = np.zeros((n_slow,zmax+max_n_depth,n_fast))\n counter_array = np.zeros((n_slow,zmax+max_n_depth,n_fast))\n\n y_slices = []\n x_slices = []\n z_slices = []\n \n for idx,v in enumerate(self.volumes):\n temp = np.zeros(sum_array.shape,dtype=np.complex128)\n single_counter = np.zeros(sum_array.shape,dtype=np.complex128)\n \n vol = v.get_volume()\n sy,sz,sx = vol.shape\n\n # plt.figure()\n # plt.imshow(v.coordinates.z,interpolation='none')\n # plt.colorbar()\n # plt.title(idx)\n # plt.show()\n\n for y in range(sy):\n for x in range(sx):\n ascan = vol[y,:,x]\n ypos = v.coordinates.y[y,x]\n xpos = v.coordinates.x[y,x]\n zpos = v.coordinates.z[y,x]\n\n if ypos>=0 and ypos<n_slow and xpos>=0 and xpos<n_fast:\n temp[ypos,zpos:zpos+sz,xpos]+=self.signal_function(ascan)\n counter_array[ypos,zpos:zpos+sz,xpos]+=1\n single_counter[ypos,zpos:zpos+sz,xpos]+=1\n \n # np.save(os.path.join(info_folder,'xcoord_%05d.npy'%idx),v.coordinates.x)\n # np.save(os.path.join(info_folder,'ycoord_%05d.npy'%idx),v.coordinates.y)\n # np.save(os.path.join(info_folder,'zcoord_%05d.npy'%idx),v.coordinates.z)\n # np.save(os.path.join(info_folder,'corr_%05d.npy'%idx),v.coordinates.correlation)\n\n # with open(os.path.join(info_folder,'bscan_source_%05d.txt'%idx),'w') as fid:\n # fid.write('%s\\n'%v.bscan_folder)\n\n \n sum_array+=self.signal_function(temp)\n # store some slices of temp for debugging:\n temp = np.abs(temp)\n \n y_slices.append(temp[temp.shape[0]//2,:,:])\n x_slices.append(temp[:,:,temp.shape[2]//2])\n z_slices.append(temp[:,temp.shape[1]//2,:])\n out = temp.copy()\n out[single_counter==0]=np.nan\n out = out/single_counter\n single_folder = os.path.join(volumes_folder,'%03d'%idx)\n os.makedirs(single_folder,exist_ok=True)\n for y in range(out.shape[0]):\n outfn = os.path.join(single_folder,'complex_bscan_%05d.npy'%y)\n np.save(outfn,out[y,:,:])\n \n sum_array[counter_array==0]=np.nan\n av = sum_array/counter_array\n\n if diagnostics:\n dB_clim = None#(40,80)\n ncol,nrow = 3,1\n for idx,(ys,zs,xs) in enumerate(zip(y_slices,z_slices,x_slices)):\n plt.figure(figsize=(ncol*col_width_inches,nrow*row_height_inches),dpi=screen_dpi)\n plt.suptitle('%s\\nvolume %d'%(self.folder,idx))\n plt.subplot(1,3,1)\n plt.imshow(ys,cmap='gray',aspect='equal')\n plt.title('z-x')\n plt.subplot(1,3,2)\n plt.imshow(zs,cmap='gray',aspect='equal')\n plt.title('y-x')\n plt.subplot(1,3,3)\n plt.imshow(xs.T,cmap='gray',aspect='equal')\n plt.title('z-y')\n 
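# save one PNG of orthogonal slices per component volume, for quick visual QC\n                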
plt.savefig(os.path.join(diagnostics_folder,'single_volume_%05d_slices.png'%idx),dpi=150)\n\n plt.figure(figsize=(ncol*col_width_inches,nrow*row_height_inches),dpi=screen_dpi)\n plt.suptitle('%s\\nfull volume projections'%self.folder)\n plt.subplot(1,3,1)\n plt.imshow(display_function(np.nanmean(av,0)),clim=display_clim,aspect='equal',cmap='gray')\n plt.colorbar()\n plt.title('z-x')\n plt.subplot(1,3,2)\n plt.imshow(display_function(np.nanmean(av,1)),clim=display_clim,aspect='equal',cmap='gray')\n plt.colorbar()\n plt.title('y-x')\n plt.subplot(1,3,3)\n plt.imshow(display_function(np.nanmean(av,2)).T,clim=display_clim,aspect='equal',cmap='gray')\n plt.colorbar()\n plt.title('z-y')\n plt.savefig(os.path.join(diagnostics_folder,'average_volume_projections.png'),dpi=150)\n\n \n plt.figure(figsize=(ncol*col_width_inches,nrow*row_height_inches),dpi=screen_dpi)\n plt.suptitle('%s\\ncentral slices'%self.folder)\n plt.subplot(1,3,1)\n plt.imshow(display_function(av[av.shape[0]//2,:,:]),clim=display_clim,aspect='equal',cmap='gray')\n plt.colorbar()\n plt.title('z-x')\n plt.subplot(1,3,2)\n plt.imshow(display_function(av[:,av.shape[1]//2,:]),clim=display_clim,aspect='equal',cmap='gray')\n plt.colorbar()\n plt.title('y-x')\n plt.subplot(1,3,3)\n plt.imshow(display_function(av[:,:,av.shape[2]//2].T),clim=display_clim,aspect='equal',cmap='gray')\n plt.colorbar()\n plt.title('z-y')\n plt.savefig(os.path.join(diagnostics_folder,'average_volume_slices.png'),dpi=150)\n\n asy,asz,asx = av.shape\n save_dpi = 100.0\n fsz = asz/save_dpi\n fsx = asx/save_dpi\n plt.close('all')\n\n valid_values = av[~np.isnan(av)]\n valid_values = display_function(valid_values)\n\n if display_clim is None:\n display_clim = np.percentile(valid_values,(1,99.9))\n \n fsz = asz/save_dpi\n fsx = asx/save_dpi\n fig = plt.figure(figsize=(fsx,fsz),dpi=save_dpi*2)\n ax = fig.add_axes([0,0,1,1])\n ax.set_xticks([])\n ax.set_yticks([])\n\n for k in range(asy):\n frame = av[k,:,:]\n np.save(os.path.join(bscan_folder,'bscan_%05d.npy'%k),frame)\n\n \n if make_bscan_flythrough:\n for k in range(asy):\n frame = av[k,:,:]\n frame[np.isnan(frame)] = display_clim[0]\n frame = display_function(frame)\n ax.clear()\n ax.imshow(frame,cmap='gray',interpolation='none',clim=display_clim)\n plt.savefig(os.path.join(bscan_png_folder,'bscan_%05d.png'%k),dpi=save_dpi)\n plt.pause(.000001)\n plt.close()\n\n fsy = asy/save_dpi\n fsx = asx/save_dpi\n fig = plt.figure(figsize=(fsx,fsy),dpi=save_dpi*2)\n ax = fig.add_axes([0,0,1,1])\n ax.set_xticks([])\n ax.set_yticks([])\n \n if make_enface_flythrough:\n for k in range(asz):\n frame = av[:,k,:]\n frame[np.isnan(frame)] = display_clim[0]\n frame = display_function(frame)\n ax.clear()\n ax.imshow(frame,cmap='gray',interpolation='none',clim=display_clim)\n plt.savefig(os.path.join(enface_png_folder,'enface_%05d.png'%k),dpi=save_dpi)\n plt.pause(.000001)\n plt.close()\n\n plt.close('all')\n\n\n\n\n\n\n\n\n\n \nclass oldSyntheticVolume:\n\n def __init__(self,n_slow,n_depth,n_fast,diagnostics=False,sphere_diameter=11,motion=None,rpower=10000,regular=False,plane_thickness=0):\n # rpower: higher numbers = sparser objects 50000 creates just a few\n self.dzf = 0.0\n self.dyf = 0.0\n self.dxf = 0.0\n \n self.dz = 0\n self.dy = 0\n self.dx = 0\n\n self.zstd = 0.03\n self.ystd = 0.02\n self.xstd = 0.03\n\n self.motion = motion\n \n self.n_fast = n_fast\n self.n_slow = n_slow\n self.n_depth = n_depth\n \n self.yscanner = 0\n self.xscanner = 0\n \n cache_dir = '.synthetic_volume_cache'\n 
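# cache the synthetic source volume so repeated runs with the same parameters\n        # can skip the slow sphere-convolution step below\n        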
os.makedirs(cache_dir,exist_ok=True)\n\n if regular:\n regstring = '_reg'\n else:\n regstring = '_rand'\n \n cache_fn = os.path.join(cache_dir,'%d_%d_%d_synthetic_source_%d%s_%d_%d.npy'%(n_slow,n_depth,n_fast,rpower,regstring,sphere_diameter,plane_thickness))\n\n try:\n self.source = np.load(cache_fn)\n except FileNotFoundError:\n source_dims = (n_slow*2,n_depth*2,n_fast*2)\n\n self.source = np.random.random(source_dims)**rpower\n self.source[np.where(self.source<0.5)] = 0\n self.source[np.where(self.source)] = 1\n\n layer_thickness = 10\n for z in range(0,n_depth*2,layer_thickness*2):\n self.source[:,z:z+layer_thickness,:] = 0\n\n #sphere_diameter = 11\n sphere = np.zeros((sphere_diameter,sphere_diameter,sphere_diameter))\n XX,YY,ZZ = np.meshgrid(np.arange(sphere_diameter),np.arange(sphere_diameter),np.arange(sphere_diameter))\n v = -1\n XX = XX-(sphere_diameter+v)/2.0\n YY = YY-(sphere_diameter+v)/2.0\n ZZ = ZZ-(sphere_diameter+v)/2.0\n rad = np.sqrt(XX**2+YY**2+ZZ**2)\n sphere[rad<sphere_diameter/2-1] = 1\n\n self.source = spn.convolve(self.source,sphere)\n self.source = (self.source-np.min(self.source))/(np.max(self.source)-np.min(self.source))\n\n peak = 6000.0\n \n self.source = self.source*peak\n \n noise = np.random.standard_normal(source_dims)*np.sqrt(peak)+5*np.sqrt(peak)\n self.source = self.source + noise\n self.source[self.source<=1] = 1.0\n\n if plane_thickness:\n self.source[:,n_depth:n_depth+plane_thickness,:] = peak\n \n np.save(cache_fn,self.source)\n \n if diagnostics:\n for k in range(self.source.shape[0]):\n plt.cla()\n plt.imshow(np.abs(self.source[k,:,:]))\n plt.title(k)\n plt.pause(.00001)\n plt.close()\n \n #self.history = [(self.dy,self.dz,self.dx)]\n self.history = []\n self.scanner_history = []\n \n def step(self,volume_rigid=False,bscan_rigid=False,motion_factor=1.0,reference_rigid=False):\n\n self.history.append((self.dy,self.dz,self.dx))\n self.scanner_history.append(np.sqrt(self.yscanner**2+self.xscanner**2))\n\n if reference_rigid and len(self.history)<self.n_slow*self.n_fast:\n reference_rigid_factor = 0.0\n else:\n reference_rigid_factor = 1.0\n \n if self.motion is None:\n self.dzf = self.dzf + np.random.randn()*self.zstd*motion_factor*reference_rigid_factor\n self.dyf = self.dyf + np.random.randn()*self.ystd*motion_factor*reference_rigid_factor\n self.dxf = self.dxf + np.random.randn()*self.xstd*motion_factor*reference_rigid_factor\n\n limit = 10\n\n if np.abs(self.dzf)>limit:\n self.dzf = 0.0\n if np.abs(self.dxf)>limit:\n self.dxf = 0.0\n if np.abs(self.dyf)>limit:\n self.dyf = 0.0\n\n else:\n self.dzf = self.dzf + self.motion[1]\n self.dyf = self.dyf + self.motion[0]\n self.dxf = self.dxf + self.motion[2]\n\n #if not volume_rigid or (self.xscanner==(self.n_fast-1) and self.yscanner==(self.n_slow-1)):\n\n make_move = False\n if not bscan_rigid and not volume_rigid:\n make_move = True\n elif not bscan_rigid and volume_rigid:\n sys.exit('bscan_rigid is False but volume_rigid is True--inconsistent.')\n elif bscan_rigid and not volume_rigid:\n make_move = self.xscanner==0\n elif bscan_rigid and volume_rigid:\n make_move = (self.xscanner==0 and self.yscanner==0)\n else:\n sys.exit('something bad has happened.')\n \n if make_move:\n self.dz = int(round(self.dzf))\n self.dy = int(round(self.dyf))\n self.dx = int(round(self.dxf))\n \n self.xscanner = (self.xscanner+1)%self.n_fast\n if self.xscanner==0:\n self.yscanner = (self.yscanner+1)%self.n_slow\n \n\n\n def 
get_bscan(self,diagnostics=False,volume_rigid=False,bscan_rigid=False,motion_factor=1.0,reference_rigid=False):\n ascans = []\n \n for k in range(self.n_fast):\n self.step(volume_rigid=volume_rigid,bscan_rigid=bscan_rigid,motion_factor=motion_factor,reference_rigid=reference_rigid)\n x = (self.xscanner-self.n_fast//2)+self.source.shape[2]//2+self.dx\n y = (self.yscanner-self.n_slow//2)+self.source.shape[0]//2+self.dy\n z1 = -self.n_depth//2+self.source.shape[1]//2+self.dz\n z2 = z1+self.n_depth\n ascans.append(self.source[y,z1:z2,x])\n \n bscan = np.array(ascans).T\n logging.info('xscanner: %d, yscanner: %d, dx: %d, dy: %d, dz: %d'%(self.xscanner,self.yscanner,self.dx,self.dy,self.dz))\n if diagnostics:\n plt.cla()\n plt.imshow(np.abs(bscan))\n plt.pause(.001)\n return bscan\n \n\n def plot_history(self):\n t = np.arange(len(self.history))\n y = [tup[0] for tup in self.history]\n z = [tup[1] for tup in self.history]\n x = [tup[2] for tup in self.history]\n scanner_zeros = np.where(np.array(self.scanner_history)==0)[0]\n \n plt.figure(figsize=(3*col_width_inches,row_height_inches),dpi=screen_dpi)\n plt.subplot(1,3,1)\n plt.plot(t,y)\n for scanner_zero in scanner_zeros:\n plt.axvline(scanner_zero,color='r')\n plt.xlabel('time')\n plt.ylabel('y')\n plt.subplot(1,3,2)\n plt.plot(t,z)\n for scanner_zero in scanner_zeros:\n plt.axvline(scanner_zero,color='r')\n plt.xlabel('time')\n plt.ylabel('z')\n plt.subplot(1,3,3)\n plt.plot(t,x)\n for scanner_zero in scanner_zeros:\n plt.axvline(scanner_zero,color='r')\n plt.xlabel('time')\n plt.ylabel('x')\n\n\n \n def save_volume(self,folder_name,diagnostics=False,volume_rigid=False,bscan_rigid=False,motion_factor=1.0,reference_rigid=False):\n os.makedirs(folder_name,exist_ok=True)\n for k in range(self.n_slow):\n outfn = os.path.join(folder_name,'complex_bscan_stack_%05d.npy'%k)\n np.save(outfn,self.get_bscan(diagnostics,volume_rigid=volume_rigid,\n bscan_rigid=bscan_rigid,motion_factor=motion_factor,\n reference_rigid=reference_rigid))\n logging.info('Saving B-scan %d to %s.'%(k,outfn))\n\n def save_volumes(self,folder_root,n,diagnostics=False,volume_rigid=False,bscan_rigid=False,motion_factor=1.0,reference_rigid=False):\n for k in range(n):\n self.save_volume('%s_%03d'%(folder_root,k),diagnostics,volume_rigid=volume_rigid,bscan_rigid=bscan_rigid,motion_factor=motion_factor,reference_rigid=reference_rigid)\n info_folder = os.path.join(os.path.split(folder_root)[0],'info')\n os.makedirs(info_folder,exist_ok=True)\n self.plot_history()\n plt.savefig(os.path.join(info_folder,'eye_movements.png'),dpi=300)\n np.save(os.path.join(info_folder,'eye_movements.npy'),np.array(self.history))\n\ndef make_simple_volume_series(folder_name):\n sx = 7\n sy = 6\n sz = 5\n\n x,y,z = 2,1,1\n width = 1\n\n n_vol = 3\n\n src = np.random.random((sy,sz,sx))*100\n\n for v in range(n_vol):\n out = np.copy(src)\n out[y+v:y+v+width,z+v:z+v+width,x+v:x+v+width] = 1000\n out_folder = os.path.join(folder_name,'synthetic_%02d'%v)\n os.makedirs(out_folder,exist_ok=True)\n for ny in range(sy):\n bscan = out[ny,:,:].astype(np.complex128)\n plt.cla()\n plt.imshow(np.abs(bscan),clim=(0,5000))\n plt.title('%d, %d'%(v,ny))\n plt.pause(.1)\n outfn = os.path.join(out_folder,'complex_bscan_stack_%05d.npy'%ny)\n np.save(outfn,bscan)\n \n" }, { "alpha_fraction": 0.6155855059623718, "alphanum_fraction": 0.6368440389633179, "avg_line_length": 33.503143310546875, "blob_id": "4b5ba7e88f3bf6731d7beba61a9c7d0bbe1ea94c", "content_id": "d1a48656934000565ca5666fa552e35b9b2d0a87", "detected_licenses": [], 
"is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 16464, "license_type": "no_license", "max_line_length": 103, "num_lines": 477, "path": "/functions.py", "repo_name": "rjonnal/octoblob", "src_encoding": "UTF-8", "text": "import numpy as np\nfrom matplotlib import pyplot as plt\nimport scipy.optimize as spo\nimport sys,os,glob,logging\nimport scipy.interpolate as spi\nimport scipy.signal as sps\n\n###################################################\n# Simplified OCT functions for exporatory analysis,\n# REPL applications, and illustration\n###################################################\n\n#######################################\n## Constants here--adjust as necessary:\n\ndB_lims = [45,85]\ncrop_height = 300 # height of viewable B-scan, centered at image z centroid (center of mass)\n\n# step sizes for incrementing/decrementing coefficients:\nmapping_steps = [1e-4,1e-2]\ndispersion_steps = [1e-10,1e-8]\n# let's start doing this explicitly with a function in this module, instead of buried inside\n# the OCTRawData class; fbg_position used to be set to 90; now set it to None and handle it separately\nfbg_position = None\nbit_shift_right = 4\nwindow_sigma = 0.9\n\nk_crop_1 = 100\nk_crop_2 = 1490\n\n\n#######################################\n\n# Now we'll define some functions for the half-dozen or so processing\n# steps:\n\ndef get_source(fn,diagnostics=None,x1=None,x2=None):\n src = None\n if os.path.splitext(fn)[1].lower()=='.unp':\n from octoblob.data_source import DataSource\n #import octoblob as blob\n print(fn)\n src = DataSource(fn,x1=x1,x2=x2)\n return src\n\ndef crop_spectra(spectra,diagnostics=None):\n if not diagnostics is None:\n fig = diagnostics.figure(figsize=(8,4))\n plt.subplot(1,2,1)\n plt.imshow(np.abs(spectra),aspect='auto')\n plt.title('pre cropping')\n spectra = spectra[k_crop_1:k_crop_2,:]\n \n if not diagnostics is None:\n plt.subplot(1,2,2)\n plt.imshow(np.abs(spectra),aspect='auto')\n plt.title('post cropping')\n diagnostics.save(fig)\n \n return spectra\n\ndef load_spectra(fn,index=0):\n ext = os.path.splitext(fn)[1]\n if ext.lower()=='.unp':\n src = get_source(fn)\n \n index = index%(n_slow*n_vol)\n spectra = src.get_frame(index)\n elif ext.lower()=='.npy':\n spectra = np.load(fn)\n else:\n sys.exit('File %s is of unknown type.'%fn)\n return spectra.astype(np.float)\n\n\ndef fbg_align(spectra,fbg_search_distance=15,noise_samples=70,diagnostics=None):\n if not diagnostics is None:\n fig = diagnostics.figure()\n plt.subplot(2,2,1)\n plt.imshow(np.abs(spectra[:150,:]),aspect='auto')\n plt.subplot(2,2,3)\n for k in range(spectra.shape[1]):\n plt.plot(np.abs(spectra[:150,k]))\n \n spectra[:noise_samples,:] = spectra[noise_samples,:]\n prof = np.nanmean(np.diff(spectra,axis=0),axis=1)\n idx = np.argmax(np.diff(prof))\n fbg_locations = np.zeros(spectra.shape[1],dtype=np.int)\n temp = np.zeros(len(prof))\n\n for k in range(spectra.shape[1]):\n temp[:] = np.diff(spectra[:,k],axis=0)\n temp[:idx-fbg_search_distance] = 0\n temp[idx+fbg_search_distance:] = 0\n fbg_locations[k] = np.argmin(temp[:])\n if False:\n plt.cla()\n plt.plot(temp)\n plt.axvline(fbg_locations[k],color='r')\n plt.xlim((70,110))\n plt.pause(.1)\n\n fbg_locations = fbg_locations - int(np.median(fbg_locations))\n for k in range(spectra.shape[1]):\n spectra[:,k] = np.roll(spectra[:,k],-fbg_locations[k])\n\n if not diagnostics is None:\n plt.subplot(2,2,2)\n plt.imshow(np.abs(spectra[:150,:]),aspect='auto')\n plt.subplot(2,2,4)\n for k in range(spectra.shape[1]):\n 
plt.plot(np.abs(spectra[:150,k]))\n diagnostics.save(fig)\n \n return spectra\n\n# We need a way to estimate and remove DC:\ndef dc_subtract(spectra,diagnostics=None):\n \"\"\"Estimate DC by averaging spectra spatially (dimension 1),\n then subtract by broadcasting.\"\"\"\n if not diagnostics is None:\n fig = diagnostics.figure()\n plt.subplot(1,2,1)\n plt.imshow(np.abs(spectra),aspect='auto')\n \n dc = spectra.mean(1)\n # Do the subtraction by array broadcasting, for efficiency.\n # See: https://numpy.org/doc/stable/user/basics.broadcasting.html\n out = (spectra.T-dc).T\n if not diagnostics is None:\n plt.subplot(1,2,2)\n plt.imshow(out,aspect='auto')\n diagnostics.save(fig)\n return out\n\n\n# Next we need a way to adjust the values of k at each sample, and then\n# interpolate into uniformly sampled k:\ndef k_resample(spectra,coefficients,diagnostics=None):\n \"\"\"Resample the spectrum such that it is uniform w/r/t k.\n Notes:\n 1. The coefficients here are for a polynomial defined on\n pixels, so they're physically meaningless. It would be\n better to define our polynomials on k, because then\n we could more easily quantify and compare the chirps\n of multiple light sources, for instance. Ditto for the\n dispersion compensation code.\n \"\"\"\n if not any(coefficients):\n return spectra\n\n if not diagnostics is None:\n fig = diagnostics.figure()\n plt.subplot(1,2,1)\n plt.imshow(np.abs(spectra),aspect='auto')\n \n coefficients = list(coefficients) + [0.0,0.0]\n # x_in specified on array index 1..N+1\n x_in = np.arange(1,spectra.shape[0]+1)\n\n # define an error polynomial, using the passed coefficients, and then\n # use this polynomial to define the error at each index 1..N+1\n error = np.polyval(coefficients,x_in)\n x_out = x_in + error\n\n # using the spectra measured at indices x_in, interpolate the spectra at indices x_out\n # See: https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.interp1d.html\n interpolator = spi.interp1d(x_in,spectra,axis=0,kind='cubic',fill_value='extrapolate')\n interpolated = interpolator(x_out)\n if not diagnostics is None:\n plt.subplot(1,2,2)\n plt.imshow(interpolated,aspect='auto')\n diagnostics.save(fig)\n \n return interpolated\n\n# Next we need to dispersion compensate; for historical reasons the correction polynomial\n# is defined on index x rather than k, but for physically meaningful numbers we should\n# use k instead\ndef dispersion_compensate(spectra,coefficients,diagnostics=None):\n if not any(coefficients):\n return spectra\n\n if not diagnostics is None:\n fig = diagnostics.figure()\n plt.subplot(1,2,1)\n plt.imshow(np.abs(spectra),aspect='auto')\n\n coefs = list(coefficients) + [0.0,0.0]\n # define index x:\n x = np.arange(1,spectra.shape[0]+1)\n # define the phasor and multiply by spectra using broadcasting:\n dechirping_phasor = np.exp(-1j*np.polyval(coefs,x))\n dechirped = (spectra.T*dechirping_phasor).T\n if not diagnostics is None:\n plt.subplot(1,2,2)\n plt.imshow(np.abs(spectra),aspect='auto')\n diagnostics.save(fig)\n \n return dechirped\n\n\n# Next we multiply the spectra by a Gaussian window, in order to reduce ringing\n# in the B-scan due to edges in the spectra:\ndef gaussian_window(spectra,sigma,diagnostics=None):\n if sigma>1e5:\n return spectra\n \n if not diagnostics is None:\n fig = diagnostics.figure()\n plt.subplot(1,2,1)\n plt.imshow(np.abs(spectra),aspect='auto')\n \n # Define a Gaussian window with passed sigma\n x = np.exp(-((np.linspace(-1.0,1.0,spectra.shape[0]))**2/sigma**2))\n x = x/np.mean(x)\n 
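# dividing by the mean keeps the window's average value at 1, so overall\n    # spectral power is approximately preserved after windowing\n    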
#plt.figure()\n    #plt.plot(x)\n    #plt.show()\n    #sys.exit()\n    # Multiply spectra by window using broadcasting:\n    out = (spectra.T*x).T\n\n    if not diagnostics is None:\n        plt.subplot(1,2,2)\n        plt.imshow(np.abs(out),aspect='auto')\n        diagnostics.save(fig)\n    \n    return out\n\n\n# # Now let's define a processing function that takes the spectra and two dispersion coefficients\n# # and produces a B-scan:\n# def process_bscan(spectra,mapping_coefficients=[0.0],dispersion_coefficients=[0.0],window_sigma=0.9):\n#     spectra = dc_subtract(spectra)\n#     # When we call dispersion_compensate, we have to pass the c3 and c2 coefficients as well as\n#     # two 0.0 values, to make clear that we want orders 3, 2, 1, 0. This enables us to use the\n#     # polyval function of numpy instead of writing the polynomial ourselves, e.g. c3*x**3+c2*x**x**2,\n#     # since the latter is more likely to cause bugs.\n#     spectra = k_resample(spectra,mapping_coefficients)\n#     spectra = dispersion_compensate(spectra,dispersion_coefficients)\n#     spectra = gaussian_window(spectra,sigma=window_sigma)\n#     bscan = np.fft.fft(spectra,axis=0)\n#     return bscan\n\n\n# Image quality metrics\ndef iq_max(im):\n    \"\"\"Image max\"\"\"\n    return np.max(im)\n\ndef iq_maxes(im):\n    \"\"\"Mean of brightest\\n1 pct of pixels\"\"\"\n    temp = im.ravel()\n    N = round(len(temp)*0.01)\n    temp = np.partition(-temp, N)\n    result = -temp[:N]\n    return np.mean(result)\n\ndef gradient_mean(im):\n    \"\"\"Mean of absolute\\nz-derivative\"\"\"\n    return np.mean(np.abs(np.diff(im,axis=0)))\n\ndef gradient_median(im):\n    \"\"\"Median of absolute\\nz-derivative\"\"\"\n    return np.median(np.abs(np.diff(im,axis=0)))\n\ndef average_aline_contrast(im):\n    \"\"\"Mean of A-scan\\nMichelson contrast\"\"\" \n    x = np.max(im,axis=0)\n    n = np.min(im,axis=0)\n    return np.mean((x-n)/(x+n))\n\ndef sharpness(im):\n    \"\"\"Image sharpness\"\"\"\n    return np.sum(im**2)/(np.sum(im)**2)\n\ndef center_sharpness(im,fraction=0.5):\n    \"\"\"Image sharpness\"\"\"\n    sy,sx = im.shape\n    mid = sy//2\n    x1 = mid-round(sx*0.5*fraction)\n    x2 = mid+round(sx*0.5*fraction)\n    return np.sum(im[:,x1:x2]**2)/(np.sum(im[:,x1:x2])**2)\n\ndef crop_bscan0(bscan,top_crop=350,bottom_crop=30,diagnostics=None):\n    sz,sx = bscan.shape\n    bscan = bscan[sz//2:,:]\n    bscan = bscan[top_crop:-bottom_crop,:]\n    return bscan\n\ndef get_bscan_boundaries(bscan,height,intensity=True):\n    sy,sx = bscan.shape\n    prof = np.mean(np.abs(bscan),axis=1)\n    prof = prof-np.min(prof)\n    if intensity:\n        prof = prof**2\n    z = np.arange(len(prof))\n    com = np.sum(prof*z)/np.sum(prof)\n    z1 = int(np.round(com))-height//2\n    z2 = int(np.round(com))+height//2\n    return z1,z2\n\n\ndef insert_bscan(bscan,z1,z2,height):\n    sy,sx = bscan.shape\n    z1 = max(0,z1)\n    z2 = min(bscan.shape[0],z2)\n    out = np.zeros((height,sx),dtype=bscan.dtype)\n    out[:(z2-z1),:] = bscan[z1:z2,:]\n    return out\n\ndef crop_bscan(bscan,height=320,complex_conjugate_present=True):\n    sy,sx = bscan.shape\n    if complex_conjugate_present:\n        bscan = bscan[sy//2:-30,:]\n    z1,z2 = get_bscan_boundaries(bscan,height)\n    out = insert_bscan(bscan,z1,z2,height)\n    return out\n\ndef dB(arr):\n    return 20*np.log10(np.abs(arr))\n\ndef threshold_mask(arr,threshold):\n    out = np.zeros(arr.shape)\n    out[np.where(arr>threshold)] = 1.0\n    return out\n\ndef percentile_mask(arr,percentile_threshold):\n    threshold = np.percentile(arr,percentile_threshold)\n    return threshold_mask(arr,threshold)\n\ndef spectra_to_bscan(mdcoefs,spectra,diagnostics=None):\n    spectra = fbg_align(spectra,diagnostics=diagnostics)\n    spectra = 
dc_subtract(spectra,diagnostics=diagnostics)\n    spectra = crop_spectra(spectra,diagnostics=diagnostics)\n    \n    if diagnostics is not None:\n        fig = diagnostics.figure(figsize=(6,4))\n        plt.subplot(2,2,1)\n        plt.imshow(dB(crop_bscan(np.fft.fft(spectra,axis=0))),aspect='auto',clim=(45,85),cmap='gray')\n        plt.title('raw B-scan')\n    \n    spectra = k_resample(spectra,mdcoefs[:2],diagnostics=None)\n\n    if diagnostics is not None:\n        plt.subplot(2,2,2)\n        plt.imshow(dB(crop_bscan(np.fft.fft(spectra,axis=0))),aspect='auto',clim=(45,85),cmap='gray')\n        plt.title('after k-resampling')\n\n    spectra = dispersion_compensate(spectra,mdcoefs[2:],diagnostics=None)\n\n    if diagnostics is not None:\n        plt.subplot(2,2,3)\n        plt.imshow(dB(crop_bscan(np.fft.fft(spectra,axis=0))),aspect='auto',clim=(45,85),cmap='gray')\n        plt.title('after dispersion_compensation')\n\n    spectra = gaussian_window(spectra,sigma=0.9,diagnostics=None)\n    \n    if diagnostics is not None:\n        plt.subplot(2,2,4)\n        plt.imshow(dB(crop_bscan(np.fft.fft(spectra,axis=0))),aspect='auto',clim=(45,85),cmap='gray')\n        plt.title('after windowing')\n        diagnostics.save(fig)\n    \n    bscan = np.fft.fft(spectra,axis=0)\n    bscan = crop_bscan(bscan)\n    return bscan\n\ndef spectra_to_bscan_nocrop(mdcoefs,spectra,diagnostics=None):\n    spectra = fbg_align(spectra,diagnostics=diagnostics)\n    spectra = dc_subtract(spectra,diagnostics=diagnostics)\n    spectra = crop_spectra(spectra,diagnostics=diagnostics)\n\n    # A stand-in for crop_bscan that removes only the complex-conjugate half and the bottom\n    # edge; it keeps a placeholder for the crop_bscan calls in the imshow functions below,\n    # making it easier to find/replace if we want to go back to cropping.\n    idfunc = lambda x: x[x.shape[0]//2:-30,:]\n    \n    if diagnostics is not None:\n        fig = diagnostics.figure(figsize=(12,8))\n        plt.subplot(2,2,1)\n        plt.imshow(dB(idfunc(np.fft.fft(spectra,axis=0))),aspect='auto',clim=(45,85),cmap='gray')\n        plt.title('raw B-scan')\n    \n    spectra = k_resample(spectra,mdcoefs[:2],diagnostics=None)\n\n    if diagnostics is not None:\n        plt.subplot(2,2,2)\n        plt.imshow(dB(idfunc(np.fft.fft(spectra,axis=0))),aspect='auto',clim=(45,85),cmap='gray')\n        plt.title('after k-resampling')\n\n    spectra = dispersion_compensate(spectra,mdcoefs[2:],diagnostics=None)\n\n    if diagnostics is not None:\n        plt.subplot(2,2,3)\n        plt.imshow(dB(idfunc(np.fft.fft(spectra,axis=0))),aspect='auto',clim=(45,85),cmap='gray')\n        plt.title('after dispersion_compensation')\n\n    spectra = gaussian_window(spectra,sigma=0.9,diagnostics=None)\n    \n    if diagnostics is not None:\n        plt.subplot(2,2,4)\n        plt.imshow(dB(idfunc(np.fft.fft(spectra,axis=0))),aspect='auto',clim=(45,85),cmap='gray')\n        plt.title('after windowing')\n        diagnostics.save(fig)\n    \n    bscan = idfunc(np.fft.fft(spectra,axis=0))\n    \n    return bscan\n\ndef flatten_volume(folder,nref=3,diagnostics=None):\n    flist = glob.glob(os.path.join(folder,'*.npy'))\n    flist.sort()\n    N = len(flist)\n    \n    # grab a section from the middle of the volume to use as a reference\n    ref_size = nref\n    ref_flist = flist[N//2-ref_size//2:N//2+ref_size//2+1]\n    ref = np.abs(np.load(ref_flist[0])).astype(np.float)\n    for f in ref_flist[1:]:\n        ref = ref + np.abs(np.load(f)).astype(np.float)\n    ref = ref/float(ref_size)\n    ref = np.mean(ref,axis=1)\n\n    coefs = []\n    shifts = []\n\n    out_folder = os.path.join(folder,'flattened')\n    os.makedirs(out_folder,exist_ok=True)\n\n    pre_corrected_fast_projection = []\n    post_corrected_fast_projection = []\n\n    for f in flist:\n        tar_bscan = np.load(f)\n        tar = np.mean(np.abs(tar_bscan).astype(np.float),axis=1)\n\n        
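# keep the pre-correction axial profile for the diagnostic comparison below\n        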
pre_corrected_fast_projection.append(tar)\n \n num = np.fft.fft(tar)*np.conj(np.fft.fft(ref))\n denom = np.abs(num)\n nxc = np.real(np.fft.ifft(num/denom))\n shift = np.argmax(nxc)\n if shift>len(nxc)//2:\n shift = shift-len(nxc)\n shifts.append(shift)\n coefs.append(np.max(nxc))\n logging.info('flatten_volume cross-correlating file %s'%f)\n \n\n shifts = np.array(shifts)\n shifts = sps.medfilt(shifts,3)\n shifts = np.round(-shifts).astype(np.int)\n \n for f,shift in zip(flist,shifts):\n tar_bscan = np.load(f)\n tar_bscan = np.roll(tar_bscan,shift,axis=0)\n\n proj = np.mean(np.abs(tar_bscan).astype(np.float),axis=1)\n post_corrected_fast_projection.append(proj)\n logging.info('flatten_volume rolling file %s by %d'%(f,shift))\n out_fn = os.path.join(out_folder,os.path.split(f)[1])\n np.save(out_fn,tar_bscan)\n\n\n if diagnostics is not None:\n pre_corrected_fast_projection = np.array(pre_corrected_fast_projection).T\n post_corrected_fast_projection = np.array(post_corrected_fast_projection).T\n fig = diagnostics.figure(figsize=(9,3))\n ax1,ax2,ax3 = fig.subplots(1,3)\n ax1.imshow(pre_corrected_fast_projection,aspect='auto',cmap='gray')\n ax2.imshow(post_corrected_fast_projection,aspect='auto',cmap='gray')\n ax3.plot(np.mean(pre_corrected_fast_projection,axis=1),label='pre')\n ax3.plot(np.mean(post_corrected_fast_projection,axis=1),label='post')\n ax3.legend()\n diagnostics.save(fig)\n\n \n" }, { "alpha_fraction": 0.7239193320274353, "alphanum_fraction": 0.7247838377952576, "avg_line_length": 37.5444450378418, "blob_id": "a9096313444f004feda29cbc7175a9230a61479c", "content_id": "453569323faf8850ada62e085ee1358621720445", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3470, "license_type": "no_license", "max_line_length": 131, "num_lines": 90, "path": "/examples/generating_bscans/process.py", "repo_name": "rjonnal/octoblob", "src_encoding": "UTF-8", "text": "from octoblob.data_source import DataSource\nimport octoblob.functions as blobf\nimport logging\nfrom matplotlib import pyplot as plt\nfrom octoblob import diagnostics_tools\nfrom octoblob import parameters\nfrom octoblob import org_tools\nimport sys,os,glob\nimport numpy as np\nfrom octoblob import mapping_dispersion_optimizer as mdo\nfrom octoblob import file_manager\nimport pathlib\n\nno_parallel = True\n\n\nuse_multiprocessing = False\ntry:\n assert not no_parallel\n import multiprocessing as mp\n use_multiprocessing = True\n n_cores_available = mp.cpu_count()\n n_cores = n_cores_available-2\n logging.info('multiprocessing imported')\n logging.info('n_cores_available: %d'%n_cores_available)\n logging.info('n_cores to be used: %d'%n_cores)\nexcept ImportError as ie:\n logging.info('Failed to import multiprocessing: %s'%ie)\n logging.info('Processing serially.')\nexcept AssertionError as ae:\n logging.info('Multiprocessing banned by no_parallel.')\n \ndata_filename = None\n\nif data_filename is None:\n try:\n data_filename = sys.argv[1]\n except IndexError as ie:\n sys.exit('Please check data_filename. 
%s not found or data_filename not passed at command line.'%data_filename)\n\n\n# Create a diagnostics object for inspecting intermediate processing steps\ndiagnostics = diagnostics_tools.Diagnostics(data_filename)\n\n# Create a parameters object for storing and loading processing parameters\nparams_filename = file_manager.get_params_filename(data_filename)\nparams = parameters.Parameters(params_filename,verbose=True)\n\n# Get an octoblob.DataSource object using the filename\nsrc = blobf.get_source(data_filename)\n\n# try to read dispersion/mapping coefs from a local processing_parameters file, and run optimization otherwise\ntry:\n coefs = np.array(params['mapping_dispersion_coefficients'],dtype=np.float)\n logging.info('File %s mapping dispersion coefficients found in %s. Skipping optimization.'%(data_filename,params_filename))\nexcept KeyError:\n logging.info('File %s mapping dispersion coefficients not found in %s. Running optimization.'%(data_filename,params_filename))\n samples = src.get_samples(5)\n coefs = mdo.multi_optimize(samples,blobf.spectra_to_bscan,show_all=False,show_final=True,verbose=False,diagnostics=diagnostics)\n params['mapping_dispersion_coefficients'] = coefs\n\n# get the folder name for storing bscans\nbscan_folder = file_manager.get_bscan_folder(data_filename)\n\nif __name__=='__main__':\n\n if use_multiprocessing:\n def proc(k):\n # compute the B-scan from the spectra, using the provided dispersion coefficients:\n bscan = blobf.spectra_to_bscan(coefs,src.get_frame(k),diagnostics=diagnostics)\n\n # save the complex B-scan in the B-scan folder\n outfn = os.path.join(bscan_folder,file_manager.bscan_template%k)\n np.save(outfn,bscan)\n logging.info('Saving bscan %s.'%outfn)\n\n pool = mp.Pool(n_cores)\n pool.map(proc,range(src.n_total_frames))\n\n else:\n\n for k in range(src.n_total_frames):\n\n # compute the B-scan from the spectra, using the provided dispersion coefficients:\n bscan = blobf.spectra_to_bscan(coefs,src.get_frame(k),diagnostics=diagnostics)\n\n # save the complex B-scan in the B-scan folder\n outfn = os.path.join(bscan_folder,file_manager.bscan_template%k)\n np.save(outfn,bscan)\n logging.info('Saving bscan %s.'%outfn)\n\n" }, { "alpha_fraction": 0.6962719559669495, "alphanum_fraction": 0.7151864171028137, "avg_line_length": 31.864864349365234, "blob_id": "a6c966e14bab0b4a165260c8e2527e133a70ff9a", "content_id": "b486da874bbd41d05f3081e3f5bdde4a64d1dfd1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3648, "license_type": "no_license", "max_line_length": 115, "num_lines": 111, "path": "/examples/org_averaging/compute_average_responses.py", "repo_name": "rjonnal/octoblob", "src_encoding": "UTF-8", "text": "from matplotlib import pyplot as plt\nimport numpy as np\nimport sys,os,glob,shutil\nimport logging\nimport octoblob.functions as blobf\nimport octoblob.org_tools as blobo\nimport octoblob.plotting_functions as blobp\nimport pathlib\n\ntry:\n root_folder = sys.argv[1]\nexcept:\n root_folder = '.'\n\n# When you run plot_general_org.py or plot_cone_org.py, you save results (using the Enter key)\n# to two locations: the working folder (wherever you run the script) and also into the *_bscans/org\n# subfolders. In both places, a subfolder called layer_velocities_results is created, and the\n# results are stored there. In other words, they are duplicated. 
\n# (I'm not sure why I left this unnecessary duplication, but now that it's there,\n# different downstream scripts assume different locations, so we have to keep it\n# this way for now.)\n\n# This program can take a list of the *_velocity.npy files and generate an average result from these.\n# The program will find all of the *_velocity.npy files below the root folder and average these. It will\n# avoid duplicates (such as those in the working folder being duplicates of those in the org subfolders)\n# and only average unique responses.\n\n# Specify root_folder as the first argument to this script call, to avoid ambiguities about which\n# data it will execute on.\n\nvelocity_files = [str(v) for v in pathlib.Path(root_folder).rglob('*velocity.npy')]\n\n# IMPORTANT! Please check that stimulus_index is set correctly. It should be set to the same value used in\n# plot_general_org.py\n\nstimulus_index = 20\n\n# in the average plot, do you want the component plots plotted too?\nplot_background = True\n\n# figure dimensions/dpi\nscreen_dpi = 50\npanel_width_in = 4.0\npanel_height_in = 4.0\n\n# parameters for the response plot lines\nmain_alpha = 1.0\nmain_linewidth = 1.5\n\nbackground_alpha = 0.25\nbackground_linewidth = 1.5\n\nsingle_color = 'k'\naverage_color = 'b'\n\ntlim = (-0.04,0.04) # time limits for plotting ORG in s\nvlim = (-5,5) # velocity limits for plotting in um/s\n\nz_um_per_pixel = 3.0\n\nsingle_responses = []\nbscans = []\nused = []\n\ndef match(a,b):\n    return a.find(b)>-1 or b.find(a)>-1\n\nfor vf in velocity_files:\n    short_fn = os.path.split(vf)[1]\n    if not any([match(short_fn,fn) for fn in used]):\n        single_responses.append(np.load(vf))\n        used.append(short_fn)\n\nsingle_responses = np.array(single_responses)\naverage_response = np.mean(single_responses,axis=0)\n\nn_files = len(single_responses)\nn_plots = n_files+1\nn_t = single_responses.shape[1]\n\nt = np.arange(n_t)-stimulus_index\nt = t*2.5e-3+10e-3\n\nplt.figure(figsize=(panel_width_in*n_plots,panel_height_in),dpi=screen_dpi)\nfor row in range(0,n_files):\n    ax = plt.subplot(1,n_plots,row+1)\n    ax.plot(t,single_responses[row,:],color=single_color,linewidth=main_linewidth,alpha=main_alpha)\n    ax.set_xlabel('time (s)')\n    ax.set_ylabel('$v_{OS}$ ($\\mu m$/s)')\n    ax.plot([0.0,0.0],[vlim[0]*.75,vlim[1]*.75],color='g',linestyle='--')\n    ax.set_xlim(tlim)\n    ax.set_ylim(vlim)\n    ax.grid(False)\n    blobp.despine(ax,'btlr')\n    \nax = plt.subplot(1,n_plots,n_files+1)\nax.plot(t,average_response,color=average_color,linewidth=main_linewidth,alpha=main_alpha)\nax.set_xlabel('time (s)')\nax.set_ylabel('$\\overline{v_{OS}}$ ($\\mu m$/s)')\nax.plot([0.0,0.0],[vlim[0]*.75,vlim[1]*.75],color='g',linestyle='--')\nax.set_xlim(tlim)\nax.set_ylim(vlim)\nax.grid(False)\nblobp.despine(ax,'btlr')\n\nif plot_background:\n    for row in range(0,n_files):\n        ax.plot(t,single_responses[row,:],color=single_color,linewidth=background_linewidth,alpha=background_alpha)\n\nplt.savefig('average_response.png',dpi=300)\nplt.savefig('average_response.pdf')\nplt.show()\n" }, { "alpha_fraction": 0.6038797497749329, "alphanum_fraction": 0.6115435361862183, "avg_line_length": 36.25893020629883, "blob_id": "e21aac291b0a1ae4805a9f50eb2804c34e336d6e", "content_id": "53f601b43f4c24aabb617465fb4a17324cdabf67", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8351, "license_type": "no_license", "max_line_length": 233, "num_lines": 224, "path": "/data_source.py", "repo_name": "rjonnal/octoblob", "src_encoding": "UTF-8", "text": "from 
builtins import *\nimport sys,os,time\nimport numpy as np\nfrom matplotlib import pyplot as plt\nimport scipy.signal as sps\nimport scipy.interpolate as spi\nimport scipy.io as sio\nimport glob\nimport shutil\nimport logging\nfrom octoblob import logger\nfrom octoblob import config_reader\nfrom octoblob import diagnostics_tools\nfrom octoblob import plotting_functions as opf\nimport json\n\nclass DataSource:\n \"\"\"An object that supplies raw OCT data from UNP files and also digests associated\n XML files specifying acquisition parameters.\"\"\"\n def __init__(self,filename,n_skip=0,x1=None,x2=None):\n cfg_filename = filename.replace('.unp','')+'.xml'\n cfg = config_reader.get_configuration(cfg_filename)\n\n self.cfg = cfg\n self.dtype = np.uint16\n self.n_vol = cfg['n_vol']\n self.n_slow = cfg['n_slow']\n self.n_fast = cfg['n_fast']\n self.n_depth = cfg['n_depth']\n self.n_repeats = cfg['n_bm_scans']\n\n if x1 is None:\n self.x1 = 0\n else:\n self.x1 = x1\n \n if x2 is None:\n self.x2 = self.n_fast\n else:\n self.x2 = x2\n \n self.bytes_per_pixel = self.dtype(1).itemsize\n\n self.n_bytes = self.n_vol*self.n_slow*self.n_fast*self.n_depth*self.bytes_per_pixel\n self.n_total_frames = self.n_vol*self.n_slow\n self.current_frame_index = 0\n self.filename = filename\n self.bit_shift_right = 4\n self.n_skip = n_skip\n\n self.saturation_value = np.iinfo(self.dtype).max\n\n file_size = os.stat(self.filename).st_size\n skip_bytes = self.n_skip*self.n_depth*self.bytes_per_pixel\n\n \n \n self.diagnostics = diagnostics_tools.Diagnostics(self.filename)\n \n try:\n assert file_size==self.n_bytes\n print('Data source established:')\n self.log_info()\n print()\n \n except AssertionError as ae:\n print('File size incorrect.\\n%d\\texpected\\n%d\\tactual'%(self.n_bytes,file_size))\n self.log_info()\n\n def has_more_frames(self):\n return self.current_frame_index<self.n_total_frames\n\n def next_frame(self,diagnostics=False):\n frame = self.get_frame(self.current_frame_index,diagnostics=diagnostics)\n self.current_frame_index+=1\n return frame\n\n\n def get_samples(self,n):\n \"\"\"Get n equally spaced samples from this data set.\"\"\"\n samples = []\n stride = self.n_total_frames//n\n for k in range(0,self.n_total_frames,stride):\n frame = self.get_frame(k)\n samples.append(frame)\n return samples\n \n def log_info(self):\n logging.info(self.get_info())\n\n def get_info(self,spaces=False):\n temp = '\\nn_vol\\t\\t%d\\nn_slow\\t\\t%d\\nn_repeats\\t%d\\nn_fast\\t\\t%d\\nn_depth\\t\\t%d\\nbytes_per_pixel\\t%d\\ntotal_expected_size\\t%d'%(self.n_vol,self.n_slow,self.n_repeats,self.n_fast,self.n_depth,self.bytes_per_pixel,self.n_bytes)\n if spaces:\n temp = temp.replace('\\t',' ')\n return temp\n \n def get_frame(self,frame_index,volume_index=0,diagnostics=False):\n '''Get a raw frame from a UNP file. 
This function will\n        try to read configuration details from a UNP file with\n        the same name but .xml extension instead of .unp.\n        Parameters:\n            frame_index: the index of the desired frame; must\n            include skipped volumes if file contains multiple\n            volumes, unless volume_index is provided\n        Returns:\n            a 2D numpy array of size n_depth x n_fast\n        '''\n        frame = None\n        # open the file and read in the b-scan\n        with open(self.filename,'rb') as fid:\n            # Identify the position (in bytes) corresponding to the start of the\n            # desired frame; maintain volume_index for compatibility with functional\n            # OCT experiments, which have multiple volumes.\n            position = volume_index * self.n_depth * self.n_fast * self.n_slow * self.bytes_per_pixel + frame_index * self.n_depth * self.n_fast * self.bytes_per_pixel + self.n_skip * self.n_depth * self.bytes_per_pixel\n            \n            # Skip to the desired position for reading.\n            fid.seek(position,0)\n\n            # Use numpy fromfile to read raw data.\n            frame = np.fromfile(fid,dtype=self.dtype,count=self.n_depth*self.n_fast)\n\n        if frame.max()>=self.saturation_value:\n            if diagnostics:\n                satfig = plt.figure(figsize=(opf.IPSP,opf.IPSP),dpi=opf.screen_dpi)\n                plt.hist(frame,bins=100)\n                plt.title('Frame saturated with pixels >= %d.'%self.saturation_value)\n                self.diagnostics.save(satfig,'saturated',frame_index)\n\n            logging.info('Frame saturated, with pixels >= %d.'%self.saturation_value)\n\n        if diagnostics:\n            bitshiftfig = plt.figure(figsize=(opf.IPSP,2*opf.IPSP),dpi=opf.screen_dpi)\n            bitshiftax1,bitshiftax2 = bitshiftfig.subplots(2,1)\n\n            bitshiftax1.hist(frame,bins=100)\n            bitshiftax1.set_title('before %d bit shift'%self.bit_shift_right)\n\n        # Bit-shift if necessary, e.g. for Axsun/Alazar data\n        if self.bit_shift_right:\n            frame = np.right_shift(frame,self.bit_shift_right)\n\n        if diagnostics:\n            bitshiftax2.hist(frame,bins=100)\n            bitshiftax2.set_title('after %d bit shift'%self.bit_shift_right)\n            self.diagnostics.save(bitshiftfig,'bit_shift',frame_index)\n\n        # Reshape into the k*x 2D array\n        frame = frame.reshape(self.n_fast,self.n_depth).T\n        frame = frame.astype(float)\n        frame = frame[:,self.x1:self.x2]\n        return frame\n\n\nclass DataSourceOptopol:\n    \"\"\"An object that supplies raw OCT data from Optopol files and also digests associated\n    XML files specifying acquisition parameters.\"\"\"\n    def __init__(self,filename,n_skip=0):\n\n        dims = np.fromfile(filename,dtype=np.int32,count=3)\n        self.n_depth,self.n_fast,self.n_slow = dims\n        self.n_vol = 1\n        self.n_repeats = 1\n        self.dtype = np.int32\n        self.bytes_per_pixel = self.dtype(1).itemsize\n        \n        self.n_bytes = self.n_vol*self.n_slow*self.n_fast*self.n_depth*self.bytes_per_pixel\n        self.n_total_frames = self.n_vol*self.n_slow\n        self.current_frame_index = 0\n        self.filename = filename\n        self.bit_shift_right = 4\n        self.n_skip = n_skip\n\n        self.saturation_value = np.iinfo(self.dtype).max\n\n        file_size = os.stat(self.filename).st_size\n        skip_bytes = self.n_skip*self.n_depth*self.bytes_per_pixel\n\n        self.diagnostics = diagnostics_tools.Diagnostics(self.filename)\n        \n        try:\n            assert file_size==self.n_bytes+4*3 # include the 3 32-bit (4-byte) header integers\n            print('Data source established:')\n            self.log_info()\n            print()\n            \n        except AssertionError as ae:\n            print('File size incorrect.\\n%d\\texpected\\n%d\\tactual'%(self.n_bytes,file_size))\n            self.log_info()\n\n    def has_more_frames(self):\n        return self.current_frame_index<self.n_total_frames\n\n    def next_frame(self,diagnostics=False):\n        frame = self.get_frame(self.current_frame_index,diagnostics=diagnostics)\n        
self.current_frame_index+=1\n return frame\n\n def get_samples(self,n):\n \"\"\"Get n equally spaced samples from this data set.\"\"\"\n samples = []\n stride = self.n_total_frames//n\n for k in range(0,self.n_total_frames,stride):\n samples.append(self.get_frame(k))\n return samples\n \n def log_info(self):\n logging.info(self.get_info())\n\n def get_info(self,spaces=False):\n temp = '\\nn_vol\\t\\t%d\\nn_slow\\t\\t%d\\nn_repeats\\t%d\\nn_fast\\t\\t%d\\nn_depth\\t\\t%d\\nbytes_per_pixel\\t%d\\ntotal_expected_size\\t%d'%(self.n_vol,self.n_slow,self.n_repeats,self.n_fast,self.n_depth,self.bytes_per_pixel,self.n_bytes)\n if spaces:\n temp = temp.replace('\\t',' ')\n return temp\n \n def get_frame(self,frame_index,volume_index=0,diagnostics=False):\n frame = None\n return frame\n\n \n\nif __name__=='__main__':\n\n df = DataSource('./data/test_1.unp')\n frame = df.next_frame(diagnostics=True)\n \n" }, { "alpha_fraction": 0.527373194694519, "alphanum_fraction": 0.5278754234313965, "avg_line_length": 27.840579986572266, "blob_id": "b18ea164942e8fd325d5a87d581517e92d2f7d9d", "content_id": "5373af475424499c84a7ea58492c9e535ec24715", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1991, "license_type": "no_license", "max_line_length": 81, "num_lines": 69, "path": "/parameters.py", "repo_name": "rjonnal/octoblob", "src_encoding": "UTF-8", "text": "import json\nimport numpy as np\n\nclass NpEncoder(json.JSONEncoder):\n def default(self, obj):\n if isinstance(obj, np.integer):\n return int(obj)\n if isinstance(obj, np.floating):\n return float(obj)\n if isinstance(obj, np.ndarray):\n return obj.tolist()\n return super(NpEncoder, self).default(obj)\n\nclass Parameters(dict):\n \"\"\"Implement a python dictionary that has persistent json storage as a record\n of processing and data analysis.\"\"\"\n def __init__(self, filename, verbose=False):\n self.filename = filename\n \n self.verbose = verbose\n try:\n temp = self.load()\n for k in temp.keys():\n self[k] = temp[k]\n except Exception as e:\n print(e)\n pass\n \n def __getitem__(self, key):\n val = dict.__getitem__(self, key)\n if self.verbose:\n print('GET', key)\n return val\n\n def __setitem__(self, key, val):\n if self.verbose:\n print('SET', key, val)\n dict.__setitem__(self, key, val)\n self.save()\n \n def __repr__(self):\n dictrepr = dict.__repr__(self)\n return '%s(%s)' % (type(self).__name__, dictrepr)\n\n def clear(self):\n keys = list(self.keys())\n for k in keys:\n dict.pop(self,k)\n self.save()\n \n \n def get_param_filename(self,filename):\n outfile = filename.replace('.unp','')+'_parameters.json'\n return outfile\n\n def load(self):\n # load a json file into a dictionary\n with open(self.filename,'r') as fid:\n dstr = fid.read()\n dictionary = json.loads(dstr)\n return dictionary\n\n def save(self):\n temp = {}\n for k in self.keys():\n temp[k] = self[k]\n dstr = json.dumps(temp,indent=4, sort_keys=True, cls=NpEncoder)\n with open(self.filename,'w') as fid:\n fid.write(dstr)\n\n" }, { "alpha_fraction": 0.6766279935836792, "alphanum_fraction": 0.6951476335525513, "avg_line_length": 37.67441940307617, "blob_id": "9e040552cf6fb126ae7de9c266e90ec03dc169df", "content_id": "d7be2555fff4884a0367aeef9ec2cac1d24dcc4b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 16631, "license_type": "no_license", "max_line_length": 121, "num_lines": 430, "path": "/examples/minimal_working_example/minimal_working_example.py", "repo_name": 
"rjonnal/octoblob", "src_encoding": "UTF-8", "text": "# Processing file 16_53_25.unp acquired using conventional Axsun OCT system\n# Minimal working example for this system\n\n# Python 3.10.9\n# Numpy 1.23.5\n# Matplotlib version 3.7.0\n# Scipy version 1.10.0\n\nimport numpy as np\nfrom matplotlib import pyplot as plt\nimport os,sys\nimport scipy.optimize as spo\nimport scipy.interpolate as spi\n\n# print library version information\nimport platform\nimport numpy\nimport scipy\nimport matplotlib\nprint('Python %s'%platform.python_version())\nprint('Numpy %s'%numpy.__version__)\nprint('Scipy %s'%scipy.__version__)\nprint('Matplotlib %s'%matplotlib.__version__)\n\n\n###################################################################\n###################################################################\n# Processing parameters\n# This section contains all of the constants used to process this\n# dataset. Some of these are derived from the XML file created during\n# acquisition. The XML file is shown in this section as well.\n\n# You have to modify the path below to point at your data:\nfilename = '/home/rjonnal/Dropbox/Data/conventional_org/flash/minimal_working_example/16_53_25.unp'\n\n# The data (.unp file, and .xml file if desired) can be downloaded from:\n# https://www.dropbox.com/scl/fo/o9nskz1bkw0mkfc6iqhir/h?rlkey=ijdhh1ta648ajlmvvqql3qu48&dl=0\n\n# Data dimensions are recorded in separate 16_53_25.xml:\n###### XML ######################################\n# <?xml version=\"1.0\" encoding=\"utf-8\"?>\n# <MonsterList>\n# <!--Program Generated Easy Monster-->\n# <Monster>\n# <Name>Goblin</Name>\n# <Time\n# Data_Acquired_at=\"9/21/2021 4:53:25 PM\" />\n# <Volume_Size\n# Width=\"1536\"\n# Height=\"250\"\n# Number_of_Frames=\"400\"\n# Number_of_Volumes=\"1\"\n# BscanWidth=\"736\"\n# BscanOffset=\"32\" />\n# <Scanning_Parameters\n# X_Scan_Range=\"1907\"\n# X_Scan_Offset=\"650\"\n# Y_Scan_Range=\"0\"\n# Y_Scan_Offset=\"-500\"\n# Number_of_BM_scans=\"1\" />\n# <Dispersion_Parameters\n# C2=\"-9E-06\"\n# C3=\"3E-09\" />\n# <Fixation_Target\n# X=\"32\"\n# Y=\"64\" />\n# </Monster>\n# </MonsterList>\n########## End XML #################################\n\n# The parameters we take from the XML file are:\n# n_vol (Number_of_Volumes) = 1\n# n_slow (Number_of_Frames) = 400\n# n_repeats (Number_of_BM_scans) = 1\n# n_fast (Height) = 250\n# n_depth (Width) = 1536\n\n# We also have the following a priori information:\n# The data type is unsigned integer (16 bit)\n# Each 16-bit integer must be right-shifted 4 bits to express the digitized value;\n# in other words, the 12 meaningful bits are put into the first 12 places in the\n# 16-bit integer, effectively multiplying each pixel by 16.\n\nn_vol = 1\nn_slow = 400\nn_repeats = 1\nn_fast = 250\nn_depth = 1536\ndtype = np.uint16\nbit_shift_right = 4\nbytes_per_pixel = 2\n\n# Describing the index of the frame we want, in terms of volume_index\n# and frame_index: each UNP file may contain multiple volumes, so to get\n# a single frame we need to index both the volume and the frame within\n# that volume\nvolume_index = 0 # this file contains only one volume, so anything >0 causes error\nframe_index = 50 # arbitrary frame between 0 and n_slow-1 (399, in this case)\n\n# Where to crop the spectra before dispersion compensation, processing\n# into B-scans, etc.\nk_crop_1 = 100\nk_crop_2 = 1490\n\n\n# For FBG alignment, specify the maximum index (in the k dimension) where the FBG\n# could be found and the correlation threshold required to assume two spectra,\n# cropped at 
that index (i.e., containing only the FBG portion and not the main\n# sample-induced fringes), are aligned with one another (i.e., requiring no shifting)\nfbg_max_index = 150\nfbg_region_correlation_threshold = 0.9\n\n# Define (and create, if necessary, a folder for figures)\nfig_folder = 'figures'\nos.makedirs(fig_folder,exist_ok=True)\n\ndB_clims = (40,None)\n\n# End of processing parameters section\n###################################################################\n###################################################################\n\n\n# Getting a single frame of raw data from the UNP file\n# The UNP file has no header information, only the spectral data\n\n\n# Calculate the entry point into the file:\nbytes_per_volume = n_depth * n_fast * n_slow * bytes_per_pixel\nbytes_per_frame = n_depth * n_fast * bytes_per_pixel\npixels_per_frame = n_depth * n_fast\nposition = volume_index * bytes_per_volume + frame_index * bytes_per_frame\n\n# Open the file in a `with` block, using numpy's convenient binary-reading\n# function `fromfile`:\nwith open(filename,'rb') as fid:\n fid.seek(position,0)\n frame = np.fromfile(fid,dtype=dtype,count=pixels_per_frame)\n\nframe = np.right_shift(frame,bit_shift_right)\n\n# Reshape the frame into the correct dimensions, transpose so that the k/lambda dimension is\n# vertical, and cast as floating point type to avoid truncation errors in downstream calculations:\nframe = np.reshape(frame,(n_fast,n_depth)).T\nframe = frame.astype(float)\n\n\n# A general note about figures in this plot. They'll all be in `if` blocks, so they can be\n# turned on and off easily. Also, we create a new figure in each block, and save the call\n# to `plt.show()` until the end of the script.\n\n# If desired, show the frame and plot its average over x (i.e., average spectrum)\nshow_figures = True\nif show_figures:\n plt.figure()\n plt.subplot(1,2,1)\n plt.imshow(frame,aspect='auto')\n plt.subplot(1,2,2)\n plt.plot(np.mean(frame,axis=1))\n plt.title('lateral mean')\n plt.suptitle('Raw data')\n plt.savefig(os.path.join(fig_folder,'raw_data.png'))\n \n# The next step is to align the spectra to their FBG features. The spectra are cropped\n# around the FBG feature (up to index 150 in the spectral scan), and the individual\n# spectral scans are cross-correlated. This could be done by cross-correlating each\n# one to a reference scan (e.g., the first one). In practice, it's faster to group them\n# by correlation and cross-correlate the groups. 
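\n#\n# (A minimal sketch of the core cross-correlation step used below, run on\n# synthetic data; the names here are illustrative only:\n#   >>> ref = np.random.randn(256)\n#   >>> tar = np.roll(ref,7) # tar is ref, circularly shifted by 7 samples\n#   >>> xc = np.fft.ifft(np.fft.fft(ref)*np.fft.fft(tar).conj())\n#   >>> shift = int(np.argmax(np.abs(xc)))\n#   >>> shift = shift-len(xc) if shift>len(xc)//2 else shift\n#   >>> np.allclose(np.roll(tar,shift),ref)\n#   True\n# )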
\n# It's a little more complicated than necessary, but it speeds things up.\n# Set a limit on the maximum index where the FBG trough could possibly be located.\n# This is a critical parameter, as it avoids cross correlation of spectra based on\n# structural information, which would prevent the FBG features from dominating the\n# cross-correlation and would introduce additional phase noise.\n# Correlation threshold is the minimum correlation required to consider two spectra\n# to be in phase with one another.\n# We'll package the FBG alignment into a function to keep things somewhat neat:\ndef fbg_align(spectra,max_index,correlation_threshold):\n    # crop the frame to the FBG region\n    f = spectra[:max_index,:].copy()\n\n    # group the spectra by amount of shift\n    # this step avoids having to perform cross-correlation operations on every\n    # spectrum; first, we group them by correlation with one another\n    # make a list of spectra to group\n    to_do = list(range(f.shape[1]))\n    # make a list for the groups of similarly shifted spectra\n    groups = []\n    ref = 0\n\n    # while there are spectra left to group, do the following loop:\n    while(True):\n        groups.append([ref])\n        to_do.remove(ref)\n        # iterate over a copy of to_do, since we remove items as we go:\n        for tar in to_do[:]:\n            c = np.corrcoef(f[:,ref],f[:,tar])[0,1]\n            if c>correlation_threshold:\n                groups[-1].append(tar)\n                to_do.remove(tar)\n        if len(to_do)==0:\n            break\n        ref = to_do[0]\n\n    subframes = []\n    for g in groups:\n        subf = f[:,g]\n        subframes.append(subf)\n\n    # now decide how to shift the groups of spectra by cross-correlating their means\n    # we'll use the first group as the reference group:\n    group_shifts = [0]\n    ref = np.mean(subframes[0],axis=1)\n    # now, iterate through the other groups, compute their means, and cross-correlate\n    # with the reference. keep track of the cross-correlation peaks in the list group_shifts\n    for taridx in range(1,len(subframes)):\n        tar = np.mean(subframes[taridx],axis=1)\n        xc = np.fft.ifft(np.fft.fft(ref)*np.fft.fft(tar).conj())\n        shift = np.argmax(xc)\n        if shift>len(xc)//2:\n            shift = shift-len(xc)\n        group_shifts.append(shift)\n\n    # now, use the groups and the group_shifts to shift all of the spectra according to their\n    # group membership:\n    for g,s in zip(groups,group_shifts):\n        for idx in g:\n            spectra[:,idx] = np.roll(spectra[:,idx],s)\n            f[:,idx] = np.roll(f[:,idx],s)\n\n    return spectra\n\n\n# Use our function to align the spectra:\nspectra = fbg_align(frame,max_index=fbg_max_index,correlation_threshold=fbg_region_correlation_threshold)\n\n# show the FBG-aligned frame:\nif show_figures:\n    plt.figure()\n    plt.subplot(1,2,1)\n    plt.imshow(spectra,aspect='auto')\n    plt.subplot(1,2,2)\n    plt.plot(np.mean(spectra,axis=1))\n    plt.title('lateral mean')\n    plt.suptitle('FBG-aligned')\n    plt.savefig(os.path.join(fig_folder,'fbg_aligned.png'))\n\n\n# Now we DC-subtract the spectra. 
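\n#\n# (Why subtract the DC? The mean spectrum contributes a strong zero-delay\n# artifact at the top of the B-scan after the DFT; removing it suppresses that\n# artifact. A toy check, with arbitrary values:\n#   >>> s = np.ones(8)*5.0 # a pure-DC \"spectrum\"\n#   >>> np.abs(np.fft.fft(s))[0]\n#   40.0\n#   >>> np.abs(np.fft.fft(s-s.mean()))[0]\n#   0.0\n# )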
\n# We estimate the DC by averaging the spectra together, and subtract it from\n# each one (using [array broadcasting](https://numpy.org/doc/stable/user/basics.broadcasting.html)).\ndc = spectra.mean(1)\n\nspectra = (spectra.T-dc).T\n\n# non-broadcasting version, for reference, which\n# does the same thing as the broadcasting version above;\n# broadcasting obviates this explicit for-loop:\n# for x in range(spectra.shape[1]):\n#     spectra[:,x] = spectra[:,x]-dc\n\n\n\n# show the DC-subtracted frame:\nif show_figures:\n    plt.figure()\n    plt.subplot(1,2,1)\n    plt.imshow(spectra,aspect='auto')\n    plt.subplot(1,2,2)\n    plt.plot(spectra[:,100])\n    plt.title('scan 100')\n    plt.suptitle('DC subtracted')\n    plt.savefig(os.path.join(fig_folder,'dc_subtracted.png'))\n\n\n\n# The next steps are optimization of mapping and dispersion coefficients. This will be\n# done using numerical optimization. But in order to do that we need to write a function\n# that takes our FBG-aligned/DC-subtracted spectra, mapping coefficients, and dispersion\n# coefficients, and produces a B-scan. We need this function first because the objective\n# function for optimization operates on the sharpness of the resulting B-scan.\n\n# Mapping correction (k_resample)\n# By \"mapping\" we mean the process by which we infer the wave number (k) at which each\n# of our spectral samples were measured. We cannot in general assume that k is a linear\n# function of sample index. This is obviously true in cases where the spectrum is sampled\n# uniformly with respect to lambda, since k=(2 pi)/lambda. In those cases, we minimally\n# require interpolation into uniformly sampled k space. However, we shouldn't generally\n# assume uniform sampling in lambda either, since swept-sources like the Broadsweeper\n# and spectrometers may not behave linearly in time/space. Even sources with k-clocks,\n# such as the Axsun swept source, may have mapping errors.\n# To correct the mapping error we do the following:\n# 1. Interpolate from lambda-space into k-space (not required for the Axsun source used\n#    to acquire these data).\n# 2. Let s(m+e(m)) be the acquired spectrum, with indexing error e(m). We determine a polynomial\n#    e(m) = c3*m^3+c2*m^2, with coefficients c3 and c2, and then we interpolate from s(m+e(m))\n#    to s(m+e(m)-e(m))=s(m).\n\n# Dispersion correction (dispersion_compensate)\n# This is a standard approach, described in multiple sources [add citations]. We define a unit\n# amplitude phasor exp[j (mc3*k^3 + mc2*k^2)] with two coefficients mc3 and mc2, and multiply this\n# by the acquired spectra.\n\ndef k_resample(spectra,coefficients):\n    # If all coefficients are 0, return the spectra w/o further computation:\n    if not any(coefficients):\n        return spectra\n\n    # the coefficients passed into this function are just the 3rd and 2nd order ones; we\n    # add zeros so that we can use convenience functions like np.polyval that handle the\n    # algebra; the input coefficients are [mc3,mc2], either a list or numpy array;\n    # cast as a list to be on the safe side.\n    coefficients = list(coefficients) + [0.0,0.0]\n\n    # For historic, MATLAB-related reasons, the index m is defined between 1 and the spectral\n    # length. 
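\n    # (A quick sanity check of this polynomial convention, with arbitrary\n    # illustration values for the coefficients:\n    #   >>> c3,c2 = 3e-9,-9e-6\n    #   >>> m = np.arange(1,6)\n    #   >>> np.allclose(np.polyval([c3,c2,0.0,0.0],m),c3*m**3+c2*m**2)\n    #   True\n    # )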
\n    # Accordingly, x_in is specified on array indices 1..N rather than 0..N-1:\n    x_in = np.arange(1,spectra.shape[0]+1)\n\n    # define an error polynomial, using the passed coefficients, and then\n    # use this polynomial to define the error at each index 1..N\n    error = np.polyval(coefficients,x_in)\n    x_out = x_in + error\n\n    # using the spectra measured at indices x_in, interpolate the spectra at indices x_out\n    # See: https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.interp1d.html\n    interpolator = spi.interp1d(x_in,spectra,axis=0,kind='cubic',fill_value='extrapolate')\n    interpolated = interpolator(x_out)\n    return interpolated\n    \n# Next we need to dispersion compensate; for historical reasons the correction polynomial\n# is defined on index x rather than k, but for physically meaningful numbers we should\n# use k instead\ndef dispersion_compensate(spectra,coefficients):\n    # If all coefficients are 0, return the spectra w/o further computation:\n    if not any(coefficients):\n        return spectra\n\n    # the coefficients passed into this function are just the 3rd and 2nd order ones; we\n    # add zeros so that we can use convenience functions like np.polyval that handle the\n    # algebra; the input coefficients are [dc3,dc2], either a list or numpy array;\n    # cast as a list to be on the safe side.\n    coefs = list(coefficients) + [0.0,0.0]\n    # define index x:\n    x = np.arange(1,spectra.shape[0]+1)\n    \n    # define the phasor and multiply by spectra using broadcasting:\n    dechirping_phasor = np.exp(-1j*np.polyval(coefs,x))\n    dechirped = (spectra.T*dechirping_phasor).T\n    \n    return dechirped\n\n\n# Now we can define our B-scan making function, which consists of:\n# 1. k-resampling\n# 2. dispersion compensation\n# 3. windowing (optionally)\n# 4. DFT\n# We package the mapping and dispersion coefficients into a single list or array,\n# in this order: 3rd order mapping coefficient, 2nd order mapping coefficient,\n# 3rd order dispersion coefficient, 2nd order dispersion coefficient\ndef spectra_to_bscan(spectra,mapping_dispersion_coefficients):\n    mapping_coefficients = mapping_dispersion_coefficients[:2]\n    dispersion_coefficients = mapping_dispersion_coefficients[2:]\n\n    spectra = k_resample(spectra,mapping_coefficients)\n    spectra = dispersion_compensate(spectra,dispersion_coefficients)\n\n    if True:\n        # use a sigma (standard deviation) equal to 0.9 times the half-width\n        # of the spectrum; this is arbitrary and was selected empirically, sort of\n        sigma = 0.9\n        window = np.exp(-((np.linspace(-1.0,1.0,spectra.shape[0]))**2/sigma**2))\n        # multiply by broadcasting:\n        spectra = (spectra.T*window).T\n\n    bscan = np.fft.fft(spectra,axis=0)\n\n    # remove one of the conjugate pairs--the top (inverted) one, by default\n    bscan = bscan[bscan.shape[0]//2:,:]\n    \n    return bscan\n\n# Let's make a B-scan with 0 for all mapping and dispersion coefficients, and show\n# it.\nbscan_uncorrected = spectra_to_bscan(spectra,[0.0,0.0,0.0,0.0])\n\nif show_figures:\n    plt.figure()\n    plt.imshow(20*np.log10(np.abs(bscan_uncorrected)),cmap='gray',clim=dB_clims,aspect='auto')\n    plt.colorbar()\n    plt.title('B-scan w/o mapping or dispersion correction')\n    plt.savefig(os.path.join(fig_folder,'bscan_uncorrected.png'))\n\n\n# Now we are ready to run a four parameter optimization of the mapping and dispersion\n# coefficients.\n\n# First we need an objective function--one to be minimized. 
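\n#\n# (The metric defined below is a normalized energy concentration: for a fixed\n# total intensity, an image that concentrates energy in fewer pixels scores\n# higher. A toy check, with arbitrary values:\n#   >>> flat = np.ones(100)\n#   >>> peaked = np.zeros(100); peaked[50] = 100.0\n#   >>> np.sum(peaked**2)/np.sum(peaked)**2 > np.sum(flat**2)/np.sum(flat)**2\n#   True\n# )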
\n# It will be based on image sharpness.\ndef sharpness(im):\n    \"\"\"Image sharpness\"\"\"\n    return np.sum(im**2)/(np.sum(im)**2)\n\ndef objective_function(mapping_dispersion_coefficients,spectra):\n    bscan = spectra_to_bscan(spectra,mapping_dispersion_coefficients)\n    bscan = np.abs(bscan)\n    bscan_sharpness = sharpness(bscan)\n    print(1.0/bscan_sharpness)\n    return 1.0/bscan_sharpness # remember this is a minimization algorithm\n\n# initial guess for mapping_dispersion_coefficients\ninitial_guess = [0.0,0.0,0.0,0.0]\n\n# run the optimizer; note that args must be a tuple, hence the trailing comma:\nresult = spo.minimize(objective_function,initial_guess,args=(spectra,))\n\n# get the optimized coefficients\ncoefs = result.x\n\nbscan_corrected = spectra_to_bscan(spectra,coefs)\n\nif show_figures:\n    plt.figure()\n    plt.imshow(20*np.log10(np.abs(bscan_corrected)),cmap='gray',clim=dB_clims,aspect='auto')\n    plt.colorbar()\n    plt.title('B-scan w/ mapping and dispersion correction')\n    plt.savefig(os.path.join(fig_folder,'bscan_corrected.png'))\n\n    \n# If the script made any figures, show them now:\nif plt.get_fignums():\n    plt.show()\n\n" }, { "alpha_fraction": 0.4428904354572296, "alphanum_fraction": 0.452913761138916, "avg_line_length": 29.571428298950195, "blob_id": "b447252b4669d5955097e0a784f6915656a704d7", "content_id": "6061b950a117743cd2069b773cca4231ee27ebde", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4290, "license_type": "no_license", "max_line_length": 91, "num_lines": 140, "path": "/data_browser.py", "repo_name": "rjonnal/octoblob", "src_encoding": "UTF-8", "text": "from matplotlib import pyplot as plt\nimport os,sys,glob,time\nimport logging\nfrom octoblob import logger\nimport pathlib\nimport numpy as np\nimport matplotlib.image as mpimg\n\nclass Browser:\n\n    def browse(self,root='.',file_filters=['*.npy','*.png'],figsize=(6,6)):\n\n        save_folder = 'browser_saves'\n        os.makedirs(save_folder,exist_ok=True)\n        \n        files = []\n        for ff in file_filters:\n            files = files + list(pathlib.Path(root).rglob(ff))\n\n        files = list(files)\n        files = sorted(files)\n\n        global npy_dB\n        global index,N\n        global projection_axis\n\n        projection_axis=0\n        \n        npy_dB = 1\n        \n        index = 0\n        N = len(files)\n\n        \n        def tree(f):\n            head = str(f)\n            items = []\n            while len(head):\n                head,tail = os.path.split(head)\n                items.append(tail)\n            items = items[::-1]\n            return items\n\n        def title(f):\n            t = tree(f)\n            out = ''\n            for item_index,item in enumerate(t):\n                out = out + item_index*' ' + item + '\\n'\n            return out\n\n        def last_modified(f):\n            epoch_time = os.path.getmtime(f)\n            return time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(epoch_time))\n\n        def temp_filename(f):\n            return '_'.join(tree(f)).strip()\n        \n        def on_press(event):\n            global index,npy_dB,projection_axis\n            print('press', event.key)\n            sys.stdout.flush()\n            if event.key in ['pageup','up','left']:\n                index = (index-1)%N\n                draw()\n            elif event.key in ['pagedown','down','right']:\n                index = (index+1)%N\n                draw()\n            if event.key in ['ctrl+pageup','ctrl+up','ctrl+left']:\n                index = (index-10)%N\n                draw()\n            elif event.key in ['ctrl+pagedown','ctrl+down','ctrl+right']:\n                index = (index+10)%N\n                draw()\n            if event.key in ['ctrl+shift+pageup','ctrl+shift+up','ctrl+shift+left']:\n                index = (index-100)%N\n                draw()\n            elif event.key in ['ctrl+shift+pagedown','ctrl+shift+down','ctrl+shift+right']:\n                index = (index+100)%N\n                draw()\n            elif event.key == 'escape':\n                plt.close('all')\n            elif event.key == 'z':\n                save()\n            elif event.key == 'd':\n                npy_dB = 1 - npy_dB\n                draw()\n            elif event.key == 'a':\n                
projection_axis = (projection_axis+1)%3\n                draw()\n            \n        def draw():\n            global index,N\n            f = files[index]\n            cstr = '(%d/%d):'%(index,N)\n            tstr = '(%s)'%last_modified(f)\n            \n            fig.suptitle(cstr+title(f)+tstr,fontsize=8,ha='left',x=0.0,y=1.0,va='top')\n            ext = os.path.splitext(f)[1]\n            if ext.lower()=='.npy':\n                print('npy')\n                npydraw(f)\n            elif ext.lower()=='.png':\n                print('png')\n                pngdraw(f)\n            \n\n        def pngdraw(pngfile):\n            img = mpimg.imread(pngfile)\n            ax.clear()\n            ax.imshow(img,aspect='auto',cmap='gray')\n            fig.canvas.draw()\n\n        def npydraw(npyfile):\n            global npy_dB,projection_axis\n            dat = np.load(npyfile)\n            dat = np.abs(dat)\n            if len(dat.shape)==3:\n                dat = np.mean(dat,projection_axis)\n            \n            if npy_dB:\n                dat = 20*np.log10(dat)\n                clim = (40,90)\n            else:\n                clim = np.percentile(dat,(5,99.5))\n            \n            ax.clear()\n            ax.imshow(dat,aspect='auto',cmap='gray',clim=clim)\n            fig.canvas.draw()\n\n        def save():\n            f = files[index]\n            ffn = os.path.join(save_folder,temp_filename(f))\n            print('Saving current image to %s.'%ffn)\n            plt.savefig(ffn,dpi=300)\n        \n        fig, ax = plt.subplots()\n        fig.set_size_inches(figsize)\n        fig.canvas.mpl_connect('key_press_event', on_press)\n        draw()\n        plt.show()\n\n    \n" }, { "alpha_fraction": 0.7748567461967468, "alphanum_fraction": 0.7805864214897156, "avg_line_length": 79.1891860961914, "blob_id": "dd46584839a84eae2f9fa624e2cf208976a2caec", "content_id": "94245a138b9602930e00c88946b53e8197d5944f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2967, "license_type": "no_license", "max_line_length": 860, "num_lines": 37, "path": "/examples/single_flash_cone_org/README.md", "repo_name": "rjonnal/octoblob", "src_encoding": "UTF-8", "text": "# Basic single flash ORG processing for automatically segmented cone OS layers\n\n## Folder contents\n\n* process.py: OCT/ORG processing script\n\n* plot_cone_org.py: an interactive tool for visualizing outer segment phase changes\n\n* reset.sh: a bash script for deleting all of the downloaded and processed data, mainly for cleaning up this folder before pushing changes to github\n\n## Download test data\n\nTo run this example you must download the test data from the links below:\n\n* test.unp: the spectral data stored in raw binary 16 bit unsigned integer format. \n\n  > Download [test.unp](https://www.dropbox.com/s/pf6b951mlntqq9l/test.unp?dl=1)\n.\n\n* test.xml: acquisition parameters stored by the OCT instrumentation software during acquisition. \n\n  > Download [test.xml](https://www.dropbox.com/s/ux5qlinqq6y1zy4/test.xml?dl=1).\n\nAfter downloading, put them into the `examples/single_flash_cone_org` folder.\n\n\n## OCT/ORG processing\n\n1. Edit the file `process.py`, and edit the values assigned to `data_filename`, `org_start_frame`, and `org_end_frame` as needed. For single flash experiments, only a subset of B-scans must be processed; see the code comment for details. For flicker experiments, the entire set of B-scans must be processed.\n\n2. Using the Anaconda terminal (command prompt), change into the `octoblob/examples/single_flash_cone_org` folder and run the program by issuing `python process.py` at the command prompt. This will take a few minutes. The ORG processing in particular is somewhat slow.\n\n## ORG visualization\n\n1. Run the program `plot_cone_org.py` by issuing `python plot_cone_org.py` at the command prompt, in the same folder. If run this way, the program searches recursively for folders called `org` in the current directory and its subdirectories. 
Alternatively, you may issue `python plot_cone_org.py ./test_bscans` to search only that subdirectory (recursively). In these cases, the program will run on each of the `org` folders it finds. Finally, you may specify a particular org folder with `python plot_cone_org.py ./test_bscans/org`, in which case it will run only on that folder.\n\n2. The input required by the user is clicking the upper left and lower right corners of a rectangle containing just the IS/OS and COST bands to be analyzed, in the B-scan on the left. Within this rectangle, the bands should be approximately equidistant, to facilitate a simple segmentation algorithm. Selection of a rectangle causes the $v_{OS}$ plot for that region to appear in the right panel. When multiple rectangles are created, multiple plots are generated on the right, with the rectangles and plot lines color-coordinated for comparison. The `backspace` key deletes the last rectangle, and clicking outside of the B-scan on the left clears all of the rectangles. The `enter` key saves the figure and associated data in two places: the working directory, in a folder called `plot_velocities_results` and in the `org` folder containing the raw ORG data.\n" }, { "alpha_fraction": 0.7468247413635254, "alphanum_fraction": 0.7603725790977478, "avg_line_length": 39.72413635253906, "blob_id": "0d66f9fbba3e5a20e42a1227d514e6681532b9c8", "content_id": "b3bc7306672bb93209780a9ec8faf6296d6ea7a8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1181, "license_type": "no_license", "max_line_length": 304, "num_lines": 29, "path": "/examples/generating_bscans/README.md", "repo_name": "rjonnal/octoblob", "src_encoding": "UTF-8", "text": "# Basic B-scan processing\n\n## Folder contents\n\n* process.py: OCT/ORG processing script\n\n* reset.sh: a bash script for deleting all of the downloaded and processed data, mainly for cleaning up this folder before pushing changes to github\n\n## Download test data\n\nTo run this example you must download the test data from the links below:\n\n* test.unp: the spectral data stored in raw binary 16 bit unsigned integer format. \n\n  > Download [test.unp](https://www.dropbox.com/s/pf6b951mlntqq9l/test.unp?dl=1)\n.\n\n* test.xml: acquisition parameters stored by the OCT instrumentation software during acquisition. \n\n  > Download [test.xml](https://www.dropbox.com/s/ux5qlinqq6y1zy4/test.xml?dl=1).\n\nAfter downloading, put them into the `examples/generating_bscans` folder.\n\n\n## B-scan processing\n\n1. (Optional) Edit the file `process.py`, and edit the value assigned to `data_filename`.\n\n2. Using the Anaconda terminal (command prompt), change into the `octoblob/examples/generating_bscans` folder and run the program by issuing `python process.py` at the command prompt. 
If you've skipped step 1, you'll need to specify the `.unp` filename at the command prompt, e.g., `python process.py test.unp`.\n" }, { "alpha_fraction": 0.7769317626953125, "alphanum_fraction": 0.7851099967956543, "avg_line_length": 72.875, "blob_id": "e49741ffc6a8a71618d63b2e7575463c3ead99e8", "content_id": "2f6e173a370ebe281f76440d6ebb02a9653b47df", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 3546, "license_type": "no_license", "max_line_length": 970, "num_lines": 48, "path": "/examples/quick_optimize/README.md", "repo_name": "rjonnal/octoblob", "src_encoding": "UTF-8", "text": "# Additional control of the FBG alignment algorithm\n\nSpectra from the Axsun 100 kHz laser in the protoclinical ORG system are not well-aligned because of a k-clock problem. Therefore we have a fiber Bragg grating (FBG) at the laser output, which generates a notch at a specific value of k. In practice, due to polarization-sensitivity of the FBG and ambiguity introduced by balanced detection, the FBG notch manifests as a series of either notches or peaks--essentially a high-frequency fringe. When the Axsun was used for OCTA applications, a simple algorithm was used to align spectra--the most negative gradient (e.g., the falling edge of the trough) was identified and all spectra were aligned to it. Recently, this algorithm has not worked well, and additional control of FBG alignment is required in top-level scripts. Default behavior identifies the largest positive gradient, because this proved more generally effective than the most negative gradient. This example illustrates how to depart from default behavior.\n\nThe new FBG alignment function provided in the `process.py` uses cross-correlation instead of feature identification to align spectra. Broadly, this should be more resistant to variation in the FBG appearance among experiments, since it doesn't require the FBG features to be consistent.\n\nUnlike most processing scripts, this `process.py` script does not call `octoblob.functions.spectra_to_bscan()` but instead defines its own `spectra_to_bscan` in order to utilize the custom FBG alignment function. Exposing the guts of the `spectra_to_bscan` function in the script offers some additional advantages: it permits users to observe the logic of the function and to specify parameters for other steps in processing such as cropping and windowing.\n\n![Example diagnostic image for cross-correlation-based FBG alignment.](./figs/fbg_example.png)\n\n\nPlease pay attention to the details of the `spectra_to_bscan` function. In particular, in this example, the cropping is hard coded in this section:\n\n```python\n # artifact.png has a lens flare artifact after the 150th column, so we'll remove\n # it; we'll also remove 50 rows near the DC (bottom of the image):\n bscan = bscan[:-50,:150]\n```\n\nIf you don't want to crop your B-scans, or if you want to crop them differently, you'll have to modify this section.\n\n## Folder contents\n\n* process.py: OCT/ORG processing script\n\n* reset.sh: a bash script for deleting all of the downloaded and processed data, mainly for cleaning up this folder before pushing changes to github\n\n## Download test data\n\nTo run this example you must download the test data from the links below:\n\n* artifacts.unp: the spectral data stored in raw binary 16 bit unsigned integer format. 
\n\n  > Download [artifacts.unp](https://www.dropbox.com/s/5qk7gbfbx1gg62i/artifacts.unp?dl=0)\n.\n\n* artifacts.xml: acquisition parameters stored by the OCT instrumentation software during acquisition. \n\n  > Download [artifacts.xml](https://www.dropbox.com/s/6syd272xlebtubm/artifacts.xml?dl=0).\n\nAfter downloading, put them into the `examples/quick_optimize` folder.\n\n\n## B-scan processing\n\n1. (Optional) Edit the file `process.py`, and edit the value assigned to `data_filename`.\n\n2. Using the Anaconda terminal (command prompt), change into the `octoblob/examples/quick_optimize` folder and run the program by issuing `python process.py` at the command prompt. If you've skipped step 1, you'll need to specify the `.unp` filename at the command prompt, e.g., `python process.py artifacts.unp`.\n" }, { "alpha_fraction": 0.7586776614189148, "alphanum_fraction": 0.766281008720398, "avg_line_length": 39.75675582885742, "blob_id": "7bff2866991fb42f2eaee2bcfc3c693010fff441", "content_id": "d4e80e9509c39810925c1fc1cd3cab2b4499315e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3025, "license_type": "no_license", "max_line_length": 131, "num_lines": 74, "path": "/projects/org_filtering/process_for_filtering_testing.py", "repo_name": "rjonnal/octoblob", "src_encoding": "UTF-8", "text": "from octoblob.data_source import DataSource\nimport octoblob.functions as blobf\nimport logging\nfrom matplotlib import pyplot as plt\nfrom octoblob import diagnostics_tools\nfrom octoblob import parameters\nfrom octoblob import org_tools\nimport sys,os,glob\nimport numpy as np\nfrom octoblob import mapping_dispersion_optimizer as mdo\nfrom octoblob import file_manager\nimport pathlib\n\ndata_filename = None\n\nif data_filename is None:\n    try:\n        data_filename = sys.argv[1]\n    except IndexError as ie:\n        sys.exit('Please check data_filename. %s not found or data_filename not passed at command line.'%data_filename)\n\n\n# For ORG processing we needn't process all the frames. 400 frames are acquired\n# in each measurement, at a rate of 400 Hz. The stimulus onset is at t=0.25 s,\n# corresponding to the 100th frame. 50 milliseconds before stimulus is sufficient\n# to establish the baseline, and the main ORG response takes place within 100\n# milliseconds of the stimulus. Thus:\norg_start_frame = 0\norg_end_frame = 140\n\norg_frames = list(range(org_start_frame,org_end_frame))\n\n# Create a diagnostics object for inspecting intermediate processing steps\ndiagnostics = diagnostics_tools.Diagnostics(data_filename)\n\n# Create a parameters object for storing and loading processing parameters\nparams_filename = file_manager.get_params_filename(data_filename)\nparams = parameters.Parameters(params_filename,verbose=True)\n\n# Get an octoblob.DataSource object using the filename\nsrc = blobf.get_source(data_filename)\n\n# try to read dispersion/mapping coefs from a local processing_parameters file, and run optimization otherwise\ntry:\n    coefs = np.array(params['mapping_dispersion_coefficients'],dtype=float)\n    logging.info('File %s mapping dispersion coefficients found in %s. Skipping optimization.'%(data_filename,params_filename))\nexcept KeyError:\n    logging.info('File %s mapping dispersion coefficients not found in %s. 
Running optimization.'%(data_filename,params_filename))\n    samples = src.get_samples(5)\n    coefs = mdo.multi_optimize(samples,blobf.spectra_to_bscan,show_all=False,show_final=True,verbose=False,diagnostics=diagnostics)\n    params['mapping_dispersion_coefficients'] = coefs\n\n# get the folder name for storing bscans\nbscan_folder = file_manager.get_bscan_folder(data_filename)\n\nfor k in range(src.n_total_frames):\n    # skip this frame if it's not in the ORG frame range\n    if k not in org_frames:\n        continue\n    # compute the B-scan from the spectra, using the provided dispersion coefficients:\n    outfn = os.path.join(bscan_folder,file_manager.bscan_template%k)\n    if os.path.exists(outfn):\n        continue\n    bscan = blobf.spectra_to_bscan(coefs,src.get_frame(k),diagnostics=diagnostics)\n\n    # save the complex B-scan in the B-scan folder\n    np.save(outfn,bscan)\n    logging.info('Saving bscan %s.'%outfn)\n\n# Skip this for now. Needs discussion.\n#blobf.flatten_volume(bscan_folder,diagnostics=diagnostics)\n\n# Process the ORG blocks\norg_tools.process_org_blocks(bscan_folder)\n    \n" }, { "alpha_fraction": 0.6523759365081787, "alphanum_fraction": 0.653854250907898, "avg_line_length": 33.562042236328125, "blob_id": "4e99a1e3ee57ac114b5766b06455a52bdee3d4df", "content_id": "1c7f2e45775bac8b0dbf9f74b1b59661e81db014", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4735, "license_type": "no_license", "max_line_length": 135, "num_lines": 137, "path": "/crawler.py", "repo_name": "rjonnal/octoblob", "src_encoding": "UTF-8", "text": "from octoblob.data_source import DataSource\nimport octoblob.functions as blobf\nimport logging\nfrom matplotlib import pyplot as plt\nfrom octoblob import diagnostics_tools\nfrom octoblob import parameters\nfrom octoblob import org_tools\nimport sys,os,glob\nimport numpy as np\nfrom octoblob import mapping_dispersion_optimizer as mdo\nfrom octoblob import file_manager\nimport pathlib\n \ntry:\n    import multiprocessing as mp\n    do_mp = True\n    n_cores_available = mp.cpu_count()\n    n_cores = n_cores_available-2\n    logging.info('n_cores_available: %d'%n_cores_available)\n    logging.info('n_cores to be used: %d'%n_cores)\nexcept Exception:\n    do_mp = False\n\ntry:\n    with open('crawler_blacklist','r') as fid:\n        crawler_blacklist = [f.strip() for f in fid.readlines()]\n    logging.info('crawler_blacklist found: %s'%crawler_blacklist)\nexcept FileNotFoundError as fnfe:\n    crawler_blacklist = []\n    logging.info('no crawler_blacklist found')\n    \norg_frames_only = True\n\n#org_frames = list(range(org_tools.org_start_frame,org_tools.org_end_frame))\n\n# just for the ./examples folder:\norg_frames = list(range(40))\n\ndo_all_frames_tag = 'fovea'\n\nstart_clean = 'clean' in sys.argv[1:]\n\n\ndef process(data_filename,do_mp=False):\n\n    data_filename = str(data_filename)\n    diagnostics = diagnostics_tools.Diagnostics(data_filename)\n\n    params_filename = file_manager.get_params_filename(data_filename)\n    params = parameters.Parameters(params_filename,verbose=True)\n\n    src = blobf.get_source(data_filename)\n\n    # get the total number of frames:\n    if org_frames_only:\n        n_total_frames = len(org_frames)\n    else:\n        n_total_frames = src.n_total_frames\n\n    # try to read dispersion/mapping coefs from a local processing_parameters file, and run optimization otherwise\n    try:\n        coefs = np.array(params['mapping_dispersion_coefficients'],dtype=float)\n        logging.info('File %s mapping dispersion coefficients found in %s. 
Skipping optimization.'%(data_filename,params_filename))\n except KeyError:\n logging.info('File %s mapping dispersion coefficients not found in %s. Running optimization.'%(data_filename,params_filename))\n samples = src.get_samples(5)\n coefs = mdo.multi_optimize(samples,blobf.spectra_to_bscan,show_all=False,show_final=True,verbose=False,diagnostics=diagnostics)\n params['mapping_dispersion_coefficients'] = coefs\n\n # get the folder name for storing bscans\n bscan_folder = file_manager.get_bscan_folder(data_filename)\n # check to see how many bscans there are in it:\n bscans = glob.glob(os.path.join(bscan_folder,'*.npy'))\n # if any are missing, reprocess:\n if len(bscans)<n_total_frames:\n logging.info('File %s missing B-scans. Re-processing.'%data_filename)\n for k in range(src.n_total_frames):\n if org_frames_only and not k in org_frames and data_filename.lower().find(do_all_frames_tag)==-1:\n continue\n bscan = blobf.spectra_to_bscan(coefs,src.get_frame(k),diagnostics=diagnostics)\n outfn = os.path.join(bscan_folder,file_manager.bscan_template%k)\n np.save(outfn,bscan)\n logging.info('Saving bscan %s.'%outfn)\n else:\n logging.info('File %s B-scans processed. Skipping.'%data_filename)\n\n\n #blobf.flatten_volume(bscan_folder,diagnostics=diagnostics)\n \n org_tools.process_org_blocks(bscan_folder)\n \n\n\nif __name__=='__main__':\n\n try:\n root_folder = sys.argv[1]\n except IndexError as ie:\n sys.exit(ie)\n\n if start_clean:\n file_manager.clean(False)\n file_manager.clean(True)\n \n unp_files_temp = pathlib.Path(root_folder).rglob('*.unp')\n unp_files_temp = [str(f) for f in unp_files_temp]\n unp_files = []\n for unp_file in unp_files_temp:\n file_blacklisted = False\n for item in crawler_blacklist:\n if unp_file[:len(item)]==item:\n logging.info('blacklisted %s for matching %s'%(unp_file,item))\n file_blacklisted = True\n if not file_blacklisted:\n unp_files.append(unp_file)\n\n logging.info('Processing these files:')\n for uf in unp_files:\n logging.info('\\t %s'%uf)\n \n def multiprocessing_function(f):\n logging.info('Crawling %s.'%f)\n try:\n process(f)\n except Exception as e:\n logging.info('Error: %s. 
Skipping %s.'%(e,f))\n        \n    if do_mp:\n        p = mp.Pool(n_cores)\n        p.map(multiprocessing_function,unp_files)\n    else:\n        for unp_file in unp_files:\n            logging.info('Crawling %s.'%unp_file)\n            try:\n                process(unp_file)\n            except Exception as e:\n                logging.info(e)\n" }, { "alpha_fraction": 0.6296296119689941, "alphanum_fraction": 0.655667781829834, "avg_line_length": 34.078739166259766, "blob_id": "8e2931c3fbd1989f596a9ecd93ae2ccbf8a9c5c6", "content_id": "43dc6c89104a25d75ff56384a1ac3b6ff89cacff", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4455, "license_type": "no_license", "max_line_length": 165, "num_lines": 127, "path": "/dispersion_optimizer.py", "repo_name": "rjonnal/octoblob", "src_encoding": "UTF-8", "text": "from octoblob import functions as blobf\nimport numpy as np\nfrom matplotlib import pyplot as plt\nimport time\nimport scipy.optimize as spo\nimport sys\nimport numba\nimport logging\nfrom time import sleep\n\ndB_clim = (45,85)\nfbg_search_distance = 11\n\ndef dB(arr):\n    return 20*np.log10(np.abs(arr))\n\ndef obj_md(mdcoefs,spectra,bscan_function,iqf,ax=None,verbose=False):\n    \"\"\"Objective function for mapping and dispersion optimization.\"\"\"\n    bscan = bscan_function(mdcoefs,spectra)\n    bscan = np.abs(bscan)\n    iq = iqf(bscan)\n    if ax is not None:\n        ax.clear()\n        show_bscan(ax,bscan)\n        plt.pause(0.0000001)\n    if verbose:\n        logging.info('Coefs %s -> %s value of %0.1e.'%(mdcoefs,iqf.__doc__,iq))\n    else:\n        sys.stdout.write('.')\n        sys.stdout.flush()\n        sleep(0.0001)\n    return 1.0/iq\n\ndef show_bscan(ax,bscan):\n    ax.imshow(dB(bscan),cmap='gray',clim=dB_clim)\n\ndef optimize(spectra,bscan_function,show=False,verbose=False,maxiters=200,diagnostics=None):\n\n    if show:\n        realtime_figure = plt.figure()\n        realtime_axis = realtime_figure.subplots(1,1)\n        realtime_axis.clear()\n    else:\n        realtime_axis = None\n    \n    # confused about bounds--documentation says they can be used with Nelder-Mead, but warnings\n    # say that they can't\n    dispersion_bounds = [(-5e-7,5e-7),(-1e-4,1e-4)]\n    bounds = dispersion_bounds\n    \n    # spo.minimize accepts an additional argument, a dictionary containing further\n    # options; we can specify an error tolerance, say about 1% of the bounds.\n    # we can also specify maximum iterations:\n    optimization_options = {'xatol':1e-6,'maxiter':maxiters,'disp':False}\n\n    # optimization algorithm:\n    # See: https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.minimize.html\n    method = 'nelder-mead'\n\n    init = [0.0,0.0]\n\n    if diagnostics is not None:\n        fig = diagnostics.figure()\n        plt.subplot(1,2,1)\n        plt.imshow(blobf.dB(bscan_function(init,spectra)),aspect='auto',clim=(45,85),cmap='gray')\n        \n    sys.stdout.write('Optimizing ')\n    \n    res = spo.minimize(obj_md,init,args=(spectra,bscan_function,blobf.sharpness,realtime_axis,verbose),bounds=bounds,method=method,options=optimization_options)\n    \n    sys.stdout.write('\\n')\n    sys.stdout.flush()\n    \n    if diagnostics is not None:\n        plt.subplot(1,2,2)\n        plt.imshow(blobf.dB(bscan_function(res.x,spectra)),aspect='auto',clim=(45,85),cmap='gray')\n        diagnostics.save(fig)\n\n    if show:\n        plt.close(realtime_figure)\n    \n    return res.x\n\n\ndef multi_optimize(spectra_list,bscan_function,show_all=False,show_final=False,verbose=False,maxiters=200,diagnostics=None):\n    results_coefficients = []\n    results_iq = []\n    \n    for spectra in spectra_list:\n        coefs = optimize(spectra,bscan_function,show=show_all,verbose=verbose,maxiters=maxiters,diagnostics=diagnostics)\n        results_coefficients.append(coefs)\n        iq = 
obj_md(coefs,spectra,bscan_function,blobf.sharpness)\n        results_iq.append(iq)\n\n    winner = np.argmin(results_iq)\n    logging.info(results_iq)\n    logging.info('winner is index %d'%winner)\n\n    \n    for rc,riq in zip(results_coefficients,results_iq):\n        logging.info('%s %s'%(rc,riq))\n\n    if diagnostics is not None:\n        for idx,(spectra,coefs,iq) in enumerate(zip(spectra_list,results_coefficients,results_iq)):\n            logging.info('iq from optimization: %0.3f'%iq)\n            logging.info('iq from obj_md: %0.3f'%obj_md(coefs,spectra,bscan_function,blobf.sharpness))\n            sfig = diagnostics.figure()\n            sax = sfig.add_subplot(1,1,1)\n            show_bscan(sax,bscan_function(coefs,spectra))\n            if idx==winner:\n                plt.title('winner %0.3f'%obj_md(coefs,spectra,bscan_function,blobf.sharpness))\n            else:\n                plt.title('loser %0.3f'%obj_md(coefs,spectra,bscan_function,blobf.sharpness))\n            diagnostics.save(sfig,ignore_limit=True)\n\n    return results_coefficients[winner]\n\n\ndef progress(count, total, status=''):\n    bar_len = 60\n    filled_len = int(round(bar_len * count / float(total)))\n\n    percents = round(100.0 * count / float(total), 1)\n    bar = '=' * filled_len + '-' * (bar_len - filled_len)\n\n    sys.stdout.write('[%s] %s%s ...%s\\r' % (bar, percents, '%', status))\n    sys.stdout.flush()  # As suggested by Rom Ruben (see: http://stackoverflow.com/questions/3173320/text-progress-bar-in-the-console/27871113#comment50529068_27871113)\n" }, { "alpha_fraction": 0.5497222542762756, "alphanum_fraction": 0.5817953944206238, "avg_line_length": 35.322784423828125, "blob_id": "4a46835bd13053c9beda0388ce24d5f4240c57ce", "content_id": "a677aa56065d15a88f55d1103c4585c22dae3697", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 16743, "license_type": "no_license", "max_line_length": 207, "num_lines": 474, "path": "/examples/single_flash_general_org/plot_general_org.py", "repo_name": "rjonnal/octoblob", "src_encoding": "UTF-8", "text": "from matplotlib import pyplot as plt\nimport numpy as np\nimport sys,os,glob,shutil\nimport logging\nimport octoblob.functions as blobf\nimport octoblob.org_tools as blobo\nimport pathlib\n# The index of the processed ORG blocks at which the stimulus was delivered.\n# A few cases:\n# 1. Typical cone ORG applications. We process B-scans 80 through 140.\n#    The stimulus flash is given at B-scan 100, which is the 20th processed\n#    B-scan. Thus, stimulus_index=20\n# 2. Noise/filtering project. We want to see all the pre-stimulus blocks, thus\n#    we process B-scans 0 through 140. The stimulus flash is given at 0.25 s\n#    (with a B-scan rate of 400 Hz and period of 2.5 ms), thus the stimulus\n#    flash is given at the 100th B-scan, and stimulus_index = 100\n\nplt.rcParams[\"font.family\"] = \"sans-serif\"\nplt.rcParams[\"font.size\"] = 10\n#plt.rcParams.update({'figure.autolayout': True})\n\nstimulus_index = 20\nfigure_dpi = 48\nfigsize_inches = (15,10)\n\nbox_alpha = 0.75\nbox_linewidth = 2.0\nbox_padding = 3.0\n\nline_alpha = 1.0\nline_linewidth = 1.0\n\norg_plot_linewidth = 1.0\norg_plot_alpha = 1.0\n\nmean_org_plot_alpha = 1.0\nmean_org_plot_linewidth = 1\n\ntlim = (-0.04,0.04) # time limits for plotting ORG in s\nzlim = (350,650) # depth limits for profile plot in um\n\nvlim = (-5,5) # velocity limits for plotting in um/s\n\nz_um_per_pixel = 3.0\n\n# refine_z specifies the number of pixels (+/-) over which the\n# program may search to identify a local peak. The program begins by asking\n# the user to trace line segments through two layers of interest. These layers\n# may not be smooth. 
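\n#\n# (In sketch form, with hypothetical names -- prof an A-scan's amplitude\n# profile and z0 the depth at which the traced segment crosses that A-scan --\n# the refinement described in this comment block amounts to:\n#   z_candidates = np.arange(z0-refine_z,z0+refine_z+1)\n#   z_refined = z_candidates[np.argmax(prof[z_candidates])]\n# )\n# 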
From one A-scan to the next, the brightest pixel or \"peak\"\n# corresponding to the layer may be displaced axially from the intersection\n# of the line segment with the A-scan. refine_z specifies the distance (in either\n# direction, above or below that intersection) where the program may search for a\n# brighter pixel with which to compute the phase. The optimal setting here will\n# largely be determined by how isolated the layer of interest is. For a relatively\n# isolated layer, such as IS/OS near the fovea, a large value may be best. For\n# closely packed layers such as COST and RPE, smaller values may be useful. The\n# user receives immediate feedback from the program's selection of bright pixels\n# and can observe whether refine_z is too high (i.e., causing the wrong layer\n# to be segmented) or too low (i.e., missing the brightest pixels.\nrefine_z = 1\n\ndef level(im):\n rv = get_level_roll_vec(im)\n return shear(im,rv)\n\ndef shear(im,roll_vec):\n out = np.zeros(im.shape)\n for idx,r in enumerate(roll_vec):\n out[:,idx] = np.roll(im[:,idx],r)\n return out\n\ndef get_roll_vec(im,row_per_col):\n sy,sx = im.shape\n roll_vec = (np.arange(sx)-sx/2.0)*row_per_col\n roll_vec = np.round(roll_vec).astype(int)\n return roll_vec\n\ndef get_level_roll_vec(im,limit=0.1,N=16):\n rpc_vec = np.linspace(-limit,limit,N)\n rotated_profiles = []\n roll_vecs = []\n for rpc in rpc_vec:\n rv = get_roll_vec(im,rpc)\n sheared = shear(im,rv)\n roll_vecs.append(rv)\n rotated_profiles.append(np.mean(sheared,axis=1))\n\n rotated_profiles = np.array(rotated_profiles)\n rpmax = np.max(rotated_profiles,axis=1)\n widx = np.argmax(rpmax)\n return roll_vecs[widx]\n\ndef path2str(f):\n head,tail = os.path.split(f)\n tails = []\n while len(head)>0:\n tails.append(tail)\n head,tail = os.path.split(head)\n tails = tails[::-1]\n return '_'.join(tails)\n \ndef collect_files(src,dst):\n flist = glob.glob(os.path.join(src,'*'))\n os.makedirs(dst,exist_ok=True)\n \n for f in flist:\n outf = os.path.join(dst,path2str(f))\n shutil.move(f,outf)\n\n\ndef phase_to_nm(phase):\n return phase/(4*np.pi*1.38)*1050.0\n\ndef nm_to_phase(nm):\n return nm*(4*np.pi*1.38)/1050.0\n\n# pay attention to the default value of stim_index, since the b-scans right after stimulus\n# determine how the data are displayed to the user; until late 2022, we've been collecting 400\n# @ 400 Hz, and the stimulus is delivered 0.25 seconds into the series, i.e. at frame 100; however\n# we only process B-scans 80-140, i.e. 
50 ms before stimulus through 100 ms after stimulus, and\n# thus the stim_index is 20\ndef plot(folder,stim_index=stimulus_index):\n\n colors = plt.rcParams['axes.prop_cycle'].by_key()['color']\n\n phase_slope_flist = glob.glob(os.path.join(folder,'*phase_slope.npy'))\n phase_slope_flist.sort()\n amplitude_flist = glob.glob(os.path.join(folder,'*amplitude.npy'))\n amplitude_flist.sort()\n\n\n # now we load the other data that may be useful for filtering:\n correlations_flist = glob.glob(os.path.join(folder,'*correlations.npy'))\n correlations_flist.sort()\n\n masked_temporal_variance_flist = glob.glob(os.path.join(folder,'*masked_temporal_variance.npy'))\n masked_temporal_variance_flist.sort()\n\n phase_slope_fitting_error_flist = glob.glob(os.path.join(folder,'*phase_slope_fitting_error.npy'))\n phase_slope_fitting_error_flist.sort()\n\n temporal_variance_flist = glob.glob(os.path.join(folder,'*temporal_variance.npy'))\n temporal_variance_flist = [f for f in temporal_variance_flist if f.find('masked')==-1]\n temporal_variance_flist.sort()\n\n #t = np.arange(len(amplitude_flist))*0.0025-0.24\n t = (-stim_index+np.arange(len(amplitude_flist)))*0.0025+10e-3\n \n display_bscan = np.load(amplitude_flist[stim_index])\n dB = 20*np.log10(display_bscan)\n dbclim = np.percentile(dB,(30,99.99))\n \n markersize = 8.0\n \n global rois,click_points,index,abscans,pbscans,tag,correlations,masked_temporal_variance,phase_slope_fitting_error_bscans,temporal_variance\n \n tag = folder.replace('/','_').replace('\\\\','_')\n roll_vec = get_level_roll_vec(display_bscan)\n display_bscan = shear(display_bscan,roll_vec)\n\n\n abscans = []\n pbscans = []\n correlations = []\n masked_temporal_variance = []\n phase_slope_fitting_error_bscans = []\n temporal_variance = []\n \n for pf,af,cf,mtvf,psfef,tvf in zip(phase_slope_flist,amplitude_flist,correlations_flist,masked_temporal_variance_flist,phase_slope_fitting_error_flist,temporal_variance_flist):\n abscans.append(shear(np.load(af),roll_vec))\n pbscans.append(shear(np.load(pf),roll_vec))\n correlations.append(np.load(cf))\n masked_temporal_variance.append(np.load(mtvf))\n phase_slope_fitting_error_bscans.append(shear(np.load(psfef),roll_vec))\n temporal_variance.append(np.load(tvf))\n #plt.figure()\n #plt.imshow(abscans[-1])\n #plt.show()\n \n abscans = np.array(abscans)\n\n pbscans = np.array(pbscans)\n correlations = np.array(correlations)\n masked_temporal_variance = np.array(masked_temporal_variance)\n phase_slope_fitting_error_bscans = np.array(phase_slope_fitting_error_bscans)\n temporal_variance = np.array(temporal_variance)\n\n \n rois = []\n click_points = []\n index = 0\n\n fig = plt.figure()\n fig.set_size_inches(figsize_inches)\n fig.set_dpi(figure_dpi)\n \n ax1 = fig.add_axes([0.03,0.03,.38,0.94])\n ax2 = fig.add_axes([0.51,0.6,0.38,0.37])\n ax3 = fig.add_axes([0.51,0.1,0.38,0.37])\n \n fig.tight_layout()\n \n ax1.set_xlim((10,235))\n ax1.set_xticks([])\n ax1.set_yticks([])\n ax1.set_aspect('auto')\n ax1.imshow(20*np.log10(display_bscan),clim=dbclim,cmap='gray',aspect='auto')\n \n ax2.set_ylim(vlim)\n ax2.set_xlim(tlim)\n ax2.set_xlabel('time (s)')\n ax2.set_ylabel('$v$ ($\\mu m$/s)')\n ax2.axhline(0,color='k',alpha=0.25)\n \n ax3.set_xlabel('depth ($\\mu m$)')\n #ax3.set_xlim(zlim)\n ax3.set_yticks([])\n ax3.set_ylabel('amplitude (ADU)')\n \n ax1.set_xlim((10,235))\n ax1.set_xticks([])\n ax1.set_yticks([])\n ax1.set_aspect('auto')\n ax1.imshow(20*np.log10(display_bscan),clim=dbclim,cmap='gray',aspect='auto')\n \n 
ax2.axvline(0.0,color='g',linestyle='--')\n plt.pause(.0001)\n\n\n def draw_rois():\n ax1.clear()\n ax1.set_xlim((10,235))\n ax1.set_xticks([])\n ax1.set_yticks([])\n ax1.set_aspect('auto')\n ax1.imshow(20*np.log10(display_bscan),clim=dbclim,cmap='gray',aspect='auto')\n \n ax3.clear()\n ax3.set_xlim(zlim)\n\n l1zmean = 500\n l2zmean = 500\n for k,roi in enumerate(rois):\n\n full_profile = roi[7]\n full_profile = full_profile-np.min(full_profile)\n full_profile_pv = np.max(full_profile)\n\n if k==0:\n offset0 = full_profile_pv*0.2\n\n offset = offset0*k\n \n z_um = np.arange(len(full_profile))*z_um_per_pixel\n\n \n #all_prof = np.mean(np.mean(abscans,axis=2),axis=0)\n #com = int(round(np.sum(all_prof*z_um)/np.sum(all_prof)))\n #zlim = (com-200,com+200)\n\n \n x1 = roi[5]\n x2 = roi[6]\n\n bx1 = x1-box_padding\n bx2 = x2+box_padding\n \n x = np.arange(x1,x2)\n\n layer_1_z = roi[3][stim_index,:]\n layer_2_z = roi[4][stim_index,:]\n\n bz1 = np.min(layer_1_z)-box_padding\n bz2 = np.max(layer_2_z)+box_padding\n \n ax1.plot(x,layer_1_z,color=colors[k%len(colors)],alpha=line_alpha,linewidth=line_linewidth)\n ax1.plot(x,layer_2_z,color=colors[k%len(colors)],alpha=line_alpha,linewidth=line_linewidth)\n\n ax1.plot([bx1,bx2,bx2,bx1,bx1],[bz1,bz1,bz2,bz2,bz1],alpha=box_alpha,linewidth=box_linewidth)\n\n ax3.plot(z_um,full_profile-offset,color=colors[k%len(colors)],alpha=line_alpha,linewidth=line_linewidth)\n\n l1zmean = np.mean(layer_1_z)*z_um_per_pixel\n l2zmean = np.mean(layer_2_z)*z_um_per_pixel\n \n ax3.axvline(l1zmean,color=colors[k%len(colors)],alpha=line_alpha,linewidth=line_linewidth,linestyle=':')\n ax3.axvline(l2zmean,color=colors[k%len(colors)],alpha=line_alpha,linewidth=line_linewidth,linestyle=':') \n \n ax2.clear()\n ax2.set_ylim(vlim)\n ax2.set_xlim(tlim)\n \n ax3.set_xlabel('depth ($\\mu m$)')\n lzmean = (l1zmean+l2zmean)/2.0\n new_zlim = (lzmean-150,lzmean+150)\n ax3.set_xlim(new_zlim)\n ax3.set_yticks([])\n\n \n osv_mat = []\n layer_amplitude_mean_mat = []\n \n for k,roi in enumerate(rois):\n layer_amplitude_mean = roi[1]\n osv = roi[2]\n \n osv_mat.append(osv)\n layer_amplitude_mean_mat.append(layer_amplitude_mean)\n \n ax2.plot(t,osv,linewidth=org_plot_linewidth,alpha=org_plot_alpha,color=colors[k%len(colors)])\n\n \n if len(rois)>1:\n osv_mat = np.array(osv_mat)\n layer_amplitude_mean_mat = np.array(layer_amplitude_mean_mat)\n mosv = np.nanmean(osv_mat,axis=0)\n mlayer_amplitude_mean = np.nanmean(layer_amplitude_mean_mat,axis=0)\n \n ax2.plot(t,mosv,color='k',alpha=mean_org_plot_alpha,linewidth=mean_org_plot_linewidth)\n\n ax2.set_xlabel('time (s)')\n ax2.set_ylabel('$v$ ($\\mu m$/s)')\n ax2.axvline(0.0,color='g',linestyle='--')\n ax3.set_ylabel('amplitude (ADU)')\n\n \n plt.pause(.1)\n \n \n def onclick(event):\n\n global rois,click_points,index,abscans,pbscans,tag,correlations,masked_temporal_variance,phase_slope_fitting_error_bscans,temporal_variance\n\n if event.button==1:\n if event.xdata is None and event.ydata is None:\n # clicked outside plot--clear everything\n print('Clearing.')\n click_points = []\n rois = []\n draw_rois()\n\n if event.inaxes==ax1:\n if event.button==1:\n xnewclick = event.xdata\n ynewclick = event.ydata\n click_points.append((int(round(xnewclick)),int(round(ynewclick))))\n\n if len(click_points)==1:\n #ax1.clear()\n #ax1.imshow(20*np.log10(display_bscan),clim=(45,85),cmap='gray')\n #ax1.plot(click_points[0][0],click_points[0][1],'bo')\n plt.pause(.1)\n\n\n if len(click_points)==2:\n x1,x2 = [a[0] for a in click_points] \n z1,z2 = [a[1] for a in 
click_points]\n ax1.plot([x1,x2],[z1,z2],'w-')\n plt.pause(.1)\n\n if len(click_points)==4:\n\n x1,x2,x3,x4 = [a[0] for a in click_points] \n z1,z2,z3,z4 = [a[1] for a in click_points]\n valid = True\n print('x1=%0.1f,x2=%0.1f,z1=%0.1f,z2=%0.1f'%(x1,x2,z1,z2))\n print('x3=%0.1f,x4=%0.1f,z3=%0.1f,z4=%0.1f'%(x3,x4,z3,z4))\n try:\n\n if True:\n layer_amplitude_mean,osv,layer_1_z,layer_2_z,x1,x2,full_profile = blobo.extract_layer_velocities_lines(abscans,pbscans,x1,x2,z1,z2,x3,x4,z3,z4,stim_index=stim_index,refine_z=refine_z)\n else:\n layer_amplitude_mean,osv,layer_1_z,layer_2_z,x1,x2,full_profile = blobo.extract_layer_velocities_region(abscans,pbscans,x1,x2,z1,z2,stim_index=stim_index,refine_z=refine_z)\n \n except Exception as e:\n print('ROI could not be processed:',e)\n valid = False\n click_points = []\n\n if valid:\n # osv is now in radians/block\n # we want it in nm/s\n # osv * blocks/sec * nm/radian\n # nm/radian = 1060.0/(2*np.pi)\n osv = 1e-3*phase_to_nm(osv)/2.5e-3\n\n rois.append((click_points,layer_amplitude_mean,osv,layer_1_z,layer_2_z,x1,x2,full_profile))\n click_points = []\n\n draw_rois()\n index+=1\n \n elif event.button==3:\n x = event.xdata\n y = event.ydata\n new_rois = []\n \n for idx,roi in enumerate(rois):\n x1,y1 = roi[0][0]\n x2,y2 = roi[0][1]\n if x1<x<x2 and y1<y<y2:\n pass\n else:\n new_rois.append(roi)\n rois = new_rois\n draw_rois()\n\n\n\n def onpress(event):\n global rois,click_points,index,tag\n if event.key=='enter':\n outfolder = os.path.join(folder,'layer_velocities_results')\n print('Saving results to %s.'%outfolder)\n os.makedirs(outfolder,exist_ok=True)\n np.save(os.path.join(outfolder,'display_bscan.npy'),display_bscan)\n nrois = len(rois)\n fx1,fx2,fx3,fx4 = [a[0] for a in rois[0][0]]\n fz1,fz2,fz3,fz4 = [a[1] for a in rois[0][0]]\n froi_tag = '%s_%d_%d_%d_%d_'%(tag,fx1,fx2,fz1,fz3)\n\n \n fig.savefig(os.path.join(outfolder,'figure_%d_rois %s.png'%(nrois,froi_tag)),dpi=300)\n fig.savefig(os.path.join(outfolder,'figure_%d_rois_%s.pdf'%(nrois,froi_tag)))\n fig.savefig(os.path.join(outfolder,'figure_%d_rois_%s.svg'%(nrois,froi_tag)))\n \n for roi in rois:\n \n x1,x2,x3,x4 = [a[0] for a in roi[0]]\n z1,z2,z3,z4 = [a[1] for a in roi[0]]\n roi_tag = '%s_%d_%d_%d_%d_'%(tag,x1,x2,z1,z3)\n fnroot = os.path.join(outfolder,roi_tag)\n np.save(fnroot+'rect_points.npy',roi[0])\n np.save(fnroot+'amplitude.npy',roi[1])\n np.save(fnroot+'velocity.npy',roi[2])\n np.save(fnroot+'layer_1_z.npy',roi[3])\n np.save(fnroot+'layer_2_z.npy',roi[4])\n\n collect_files(outfolder,'./layer_velocities_results')\n elif event.key=='backspace':\n rois = rois[:-1]\n click_points = []\n draw_rois()\n \n \n cid = fig.canvas.mpl_connect('button_press_event',onclick)\n pid = fig.canvas.mpl_connect('key_press_event',onpress)\n\n #plt.subplot(1,2,2,label='foo')\n plt.show()\n return rois\n\n\nif __name__=='__main__':\n\n\n if len(sys.argv)<2:\n folder = '.'\n else:\n folder = sys.argv[1]\n\n\n if os.path.split(folder)[1]=='org':\n plot(folder)\n else:\n org_folders = pathlib.Path(folder).rglob('org')\n org_folders = [str(f) for f in org_folders]\n org_folders.sort()\n for of in org_folders:\n print('Working on %s.'%of)\n try:\n plot(of)\n except IndexError as ie:\n continue\n" }, { "alpha_fraction": 0.6613349914550781, "alphanum_fraction": 0.6769521236419678, "avg_line_length": 37.168270111083984, "blob_id": "0a4c03e38202449fcee92079cd164c8101af8803", "content_id": "b9e102974367215fe05ffde6fbc269e80c7ae750", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": 
"Python", "length_bytes": 7940, "license_type": "no_license", "max_line_length": 127, "num_lines": 208, "path": "/examples/fast_parallel_oct_org_processing/process.py", "repo_name": "rjonnal/octoblob", "src_encoding": "UTF-8", "text": "from octoblob.data_source import DataSource\nimport octoblob.functions as blobf\nimport logging\nfrom matplotlib import pyplot as plt\nfrom octoblob import diagnostics_tools\nfrom octoblob import parameters\nfrom octoblob import org_tools\nimport sys,os,glob\nimport numpy as np\nfrom octoblob import mapping_dispersion_optimizer as mdo\nfrom octoblob import file_manager\nimport pathlib\n\n\n# This example provides a new FBG alignment function based on cross-correlation. It may prove to be more robust than\n# previous methods that used 'feature'-based alignment (e.g. aligning to the largest positive or negative gradients)\n\nno_parallel = False\n\n# default for use_multiprocessing is False; it will be changed to True if mp libraries are imported correctly and the user\n# has not banned mp by setting no_parallel to True\nuse_multiprocessing = False\ntry:\n assert not no_parallel\n import multiprocessing as mp\n use_multiprocessing = True\n n_cores_available = mp.cpu_count()\n n_cores = n_cores_available-2\n logging.info('multiprocessing imported')\n logging.info('n_cores_available: %d'%n_cores_available)\n logging.info('n_cores to be used: %d'%n_cores)\nexcept ImportError as ie:\n logging.info('Failed to import multiprocessing: %s'%ie)\n logging.info('Processing serially.')\nexcept AssertionError as ae:\n logging.info('Multiprocessing banned by no_parallel.')\n \n\n# New prototype fbg_align function, which uses cross-correlation instead of feature-\n# based alignment of spectra.\n# Set a limit on the maximum index where the FBG trough could possibly be located.\n# This is a critical parameter, as it avoids cross correlation of spectra based on\n# structural information; this would prevent the FBG features from dominating the\n# cross-correlation and introduce additional phase noise.\n# Correlation threshold is the minimum correlation required to consider two spectra\n# to be in phase with one another\ndef fbg_align(spectra,fbg_max_index=150,correlation_threshold=0.9,diagnostics=None):\n # crop the frame to the FBG region\n f = spectra[:fbg_max_index,:].copy()\n\n if not diagnostics is None:\n fig = diagnostics.figure(figsize=(6,4))\n axes = fig.subplots(2,2)\n axes[0][0].imshow(f,aspect='auto')\n for k in range(f.shape[1]):\n axes[0][1].plot(f[:,k])\n\n # group the spectra by amount of shift\n # this step avoids having to perform cross-correlation operations on every\n # spectrum; first, we group them by correlation with one another\n # make a list of spectra to group\n to_do = list(range(f.shape[1]))\n # make a list for the groups of similarly shifted spectra\n groups = []\n ref = 0\n\n # while there are spectra left to group, do the following loop:\n while(True):\n groups.append([ref])\n to_do.remove(ref)\n for tar in to_do:\n c = np.corrcoef(f[:,ref],f[:,tar])[0,1]\n if c>correlation_threshold:\n groups[-1].append(tar)\n to_do.remove(tar)\n if len(to_do)==0:\n break\n ref = to_do[0]\n\n subframes = []\n for g in groups:\n subf = f[:,g]\n subframes.append(subf)\n\n # now decide how to shift the groups of spectra by cross-correlating their means\n # we'll use the first group as the reference group:\n group_shifts = [0]\n ref = np.mean(subframes[0],axis=1)\n # now, iterate through the other groups, compute their means, and cross-correlate\n # with the 
reference. keep track of the cross-correlation peaks in the list group_shifts\n for taridx in range(1,len(subframes)):\n tar = np.mean(subframes[taridx],axis=1)\n xc = np.fft.ifft(np.fft.fft(ref)*np.fft.fft(tar).conj())\n shift = np.argmax(xc)\n if shift>len(xc)//2:\n shift = shift-len(xc)\n group_shifts.append(shift)\n\n # now, use the groups and the group_shifts to shift all of the spectra according to their\n # group membership:\n for g,s in zip(groups,group_shifts):\n for idx in g:\n spectra[:,idx] = np.roll(spectra[:,idx],s)\n f[:,idx] = np.roll(f[:,idx],s)\n\n if not diagnostics is None:\n axes[1][0].imshow(f,aspect='auto')\n for k in range(f.shape[1]):\n axes[1][1].plot(f[:,k])\n diagnostics.save(fig)\n\n return spectra\n\n\ndef spectra_to_bscan(mdcoefs,spectra,diagnostics=None):\n # only the fbg_align function is called locally (from this script);\n # most of the OCT processing is done by blob functions (blobf.XXXX)\n spectra = fbg_align(spectra,diagnostics=diagnostics)\n spectra = blobf.dc_subtract(spectra,diagnostics=diagnostics)\n spectra = blobf.crop_spectra(spectra,diagnostics=diagnostics)\n spectra = blobf.k_resample(spectra,mdcoefs[:2],diagnostics=diagnostics)\n spectra = blobf.dispersion_compensate(spectra,mdcoefs[2:],diagnostics=None)\n spectra = blobf.gaussian_window(spectra,sigma=0.9,diagnostics=None)\n\n # Now generate the bscan by FFT:\n bscan = np.fft.fft(spectra,axis=0)\n # remove the upper half of the B-scan and leave only the bottom half:\n bscan = bscan[bscan.shape[0]//2:,:]\n\n # could additionally crop the B-scan if desired;\n # for example, could remove the top 10 rows, bottom 50 rows, and 10 columns\n # from the left and right edges:\n # bscan = bscan[10:-50,10:-10]\n\n # here we remove 50 rows near the DC (bottom of the image):\n bscan = bscan[:-50,:]\n \n if not diagnostics is None:\n fig = diagnostics.figure()\n axes = fig.subplots(1,1)\n im = axes.imshow(20*np.log10(np.abs(bscan)),aspect='auto')\n plt.colorbar(im)\n diagnostics.save(fig)\n return bscan\n\n\ndef process_file(data_filename,start=None,end=None,do_org=False):\n \n src = blobf.get_source(data_filename)\n if start is None:\n start = 0\n if end is None:\n end = src.n_total_frames\n\n # Create a diagnostics object for inspecting intermediate processing steps\n diagnostics = diagnostics_tools.Diagnostics(data_filename)\n\n # Create a parameters object for storing and loading processing parameters\n params_filename = file_manager.get_params_filename(data_filename)\n params = parameters.Parameters(params_filename,verbose=True)\n\n\n ##### For processing a large amount of data, no optimization is employed because\n ##### this script assumes you have set optimization parameters correctly using another\n ##### approach (e.g., automatic optimization of a high-quality set or manual optimization)\n # example coefficients: [ 5.86980460e-04,-6.38096235e-05,1.70400294e-08,-1.67170383e-04]\n coefs = np.array(params['mapping_dispersion_coefficients'],dtype=float)\n logging.info('File %s mapping dispersion coefficients found in %s. Skipping optimization.'%(data_filename,params_filename))\n\n # get the folder name for storing bscans\n bscan_folder = file_manager.get_bscan_folder(data_filename)\n\n for k in range(start,end):\n outfn = os.path.join(bscan_folder,file_manager.bscan_template%k)\n if os.path.exists(outfn):\n continue\n # compute the B-scan from the spectra, using the provided dispersion coefficients:\n # use the local spectra_to_bscan function, not the blobf. 
version\n bscan = spectra_to_bscan(coefs,src.get_frame(k),diagnostics=diagnostics)\n\n # save the complex B-scan in the B-scan folder\n np.save(outfn,bscan)\n logging.info('Saving bscan %s.'%outfn)\n\n if do_org:\n org_tools.process_org_blocks(bscan_folder)\n \n\nif __name__=='__main__':\n\n files = glob.glob('*.unp')\n\n def proc(f):\n file_size = os.stat(f).st_size\n if file_size==307200000:\n process_file(f,start=80,end=140,do_org=True)\n else:\n process_file(f)\n\n #proc(files[2])\n \n if use_multiprocessing:\n pool = mp.Pool(n_cores)\n pool.map(proc,files)\n\n else:\n for f in files:\n process_file(f)\n\n" }, { "alpha_fraction": 0.7652581930160522, "alphanum_fraction": 0.7652581930160522, "avg_line_length": 22.66666603088379, "blob_id": "3aafb2b928dafa07ceb12996a23651c37e7b3b47", "content_id": "222781cda979a59a0e2d9e577af029e9f423f85b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 213, "license_type": "no_license", "max_line_length": 34, "num_lines": 9, "path": "/examples/org_averaging/reset.sh", "repo_name": "rjonnal/octoblob", "src_encoding": "UTF-8", "text": "#! /usr/bin/bash\n\nrm -rvf *_bscans\nrm -rvf processing_parameters.json\nrm -rvf layer_velocities_results\nrm -rvf octoblob.log\nrm -rfv average_response.png\nrm -rfv average_response.pdf\nrm -rfv org_averaging_data.zip\n" }, { "alpha_fraction": 0.5506272912025452, "alphanum_fraction": 0.5648865699768066, "avg_line_length": 32.22779083251953, "blob_id": "aa81a0d0404ee630db6d58ea878f30a8fd173457", "content_id": "d1a900c4712f16520af30cb96c65f39197a714e1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 14587, "license_type": "no_license", "max_line_length": 168, "num_lines": 439, "path": "/org_tools.py", "repo_name": "rjonnal/octoblob", "src_encoding": "UTF-8", "text": "import numpy as np\nfrom matplotlib import pyplot as plt\nimport glob,os,sys,shutil\nimport octoblob.histogram as blobh\nimport octoblob.diagnostics_tools as blobd\nimport logging\n\n# some parameters for limiting processing of B-scans\norg_stimulus_frame = 100\norg_start_frame = 80\norg_end_frame = 140\n\n\ndef get_block_filenames(folder,file_filter='*.npy',block_size=5):\n files = sorted(glob.glob(os.path.join(folder,file_filter)))\n first_first = 0\n last_first = len(files)-block_size\n out = []\n for k in range(first_first,last_first):\n out.append(list(files[k] for k in list(range(k,k+block_size))))\n return out\n\ndef get_frames(filename_list):\n stack = []\n for f in filename_list:\n stack.append(np.load(f))\n stack = np.array(stack)\n return stack\n\ndef compute_phase_velocity(stack,diagnostics=None):\n amplitude_mean = np.mean(np.abs(stack),axis=0)\n phase_stack = np.angle(stack)\n mask = blobh.make_mask(amplitude_mean,diagnostics=diagnostics)\n phase_stack = np.transpose(phase_stack,(1,2,0))\n phase_stack = blobh.bulk_motion_correct(phase_stack,mask,diagnostics=diagnostics)\n phase_stack = np.transpose(phase_stack,(2,0,1))\n\n stack = np.abs(stack)*np.exp(1j*phase_stack)\n\n if diagnostics is not None:\n fig = diagnostics.figure()\n plt.subplot(phase_stack.shape[2]+1,1,1)\n plt.imshow(amplitude_mean,aspect='auto',interpolation='none')\n\n for k in range(phase_stack.shape[2]):\n plt.subplot(phase_stack.shape[2]+1,1,k+2)\n plt.imshow(mask*phase_stack[:,:,k],aspect='auto',interpolation='none')\n\n diagnostics.save(fig)\n\n\ndef 
process_org_blocks(folder,block_size=5,signal_threshold_fraction=0.1,histogram_threshold_fraction=0.1,first_start=None,last_start=None,diagnostics=None,redo=False):\n\n bscan_files = glob.glob(os.path.join(folder,'complex*.npy'))\n bscan_files.sort()\n\n bscans = []\n for f in bscan_files:\n bscans.append(np.load(f))\n \n N = len(bscan_files)\n\n if first_start is None:\n first_start = 0\n if last_start is None:\n last_start = N-block_size\n\n out_folder = os.path.join(folder,'org')\n \n # if os.path.exists(out_folder):\n # if not redo:\n # sys.exit('%s exists; rerun process_org_blocks with redo=True or delete %s'%(out_folder,out_folder))\n # else:\n # shutil.rmtree(out_folder)\n\n os.makedirs(out_folder,exist_ok=True)\n\n #for start_index in range(first_start,last_start+1):\n def process_block(block,start_index):\n # for each block:\n # 0. an average amplitude bscan\n bscan = np.nanmean(np.abs(block),axis=0)\n outfn = os.path.join(out_folder,'block_%04d_amplitude.npy'%start_index)\n np.save(outfn,bscan)\n \n # 1. create masks for signal statistics and bulk motion correction\n histogram_mask = np.zeros(bscan.shape)\n signal_mask = np.zeros(bscan.shape)\n\n # there may be nans, so use nanmax\n histogram_threshold = np.nanmax(bscan)*histogram_threshold_fraction\n signal_threshold = np.nanmax(bscan)*signal_threshold_fraction\n\n histogram_mask = blobh.make_mask(bscan,histogram_threshold,diagnostics)\n signal_mask = blobh.make_mask(bscan,signal_threshold,diagnostics)\n \n outfn = os.path.join(out_folder,'block_%04d_signal_mask.npy'%start_index)\n np.save(outfn,signal_mask)\n outfn = os.path.join(out_folder,'block_%04d_histogram_mask.npy'%start_index)\n np.save(outfn,histogram_mask)\n\n\n # 3. do bulk-motion correction on block:\n block_phase = np.angle(block)\n\n # transpose dimension b/c bulk m.c. requires the first two\n # dims to be depth and x, and the third dimension to be\n # repeats\n transposed = np.transpose(block_phase,(1,2,0))\n\n corrected_block_phase = blobh.bulk_motion_correct(transposed,histogram_mask,diagnostics=diagnostics)\n\n corrected_block_phase = np.transpose(corrected_block_phase,(2,0,1))\n block = np.abs(block)*np.exp(1j*corrected_block_phase)\n \n # 4. estimate(s) of correlation of B-scans (single values)\n corrs = []\n for im1,im2 in zip(block[:-1],block[1:]):\n corrs.append(np.corrcoef(np.abs(im1).ravel(),np.abs(im2).ravel())[0,1])\n\n outfn = os.path.join(out_folder,'block_%04d_correlations.npy'%start_index)\n np.save(outfn,corrs)\n \n # 5. temporal variance of pixels--all pixels and bright pixels (single values)\n varim = np.nanvar(np.abs(block),axis=0)\n var = np.nanmean(varim)\n var_masked = np.nanmean(varim[np.where(signal_mask)])\n outfn = os.path.join(out_folder,'block_%04d_temporal_variance.npy'%start_index)\n np.save(outfn,var)\n outfn = os.path.join(out_folder,'block_%04d_masked_temporal_variance.npy'%start_index)\n np.save(outfn,var_masked)\n\n \n # 6. 
phase slopes and residual fitting error for all pixels (2D array)\n\n slopes = np.ones(bscan.shape)*np.nan\n fitting_error = np.ones(bscan.shape)*np.nan\n \n st,sz,sx = corrected_block_phase.shape\n t = np.arange(st)\n\n for z in range(sz):\n for x in range(sx):\n if not signal_mask[z,x]:\n continue\n phase = corrected_block_phase[:,z,x]\n # bug 0: line below does not exist in original ORG processing code:\n #phase = phase%(2*np.pi)\n \n phase = np.unwrap(phase)\n poly = np.polyfit(t,phase,1)\n\n # bug 1: line below used to say poly[1]!\n slope = poly[0]\n fit = np.polyval(poly,t)\n err = np.sqrt(np.mean((fit-phase)**2))\n slopes[z,x] = slope\n fitting_error[z,x] = err\n outfn = os.path.join(out_folder,'block_%04d_phase_slope.npy'%start_index)\n np.save(outfn,slopes)\n outfn = os.path.join(out_folder,'block_%04d_phase_slope_fitting_error.npy'%start_index)\n np.save(outfn,fitting_error)\n\n for start_index in range(first_start,last_start+1):\n logging.info('process_org_block start %d current %d end %d'%(first_start,start_index,last_start))\n # look to see if this block has already been calculated; unless redo is True,\n # if it has, then skip\n test_fn = os.path.join(out_folder,'block_%04d_phase_slope_fitting_error.npy'%start_index)\n if os.path.exists(test_fn) and not redo:\n continue\n\n block = bscans[start_index:start_index+block_size]\n block_files = bscan_files[start_index:start_index+block_size]\n logging.info('process_org_block processing files %s'%block_files)\n block = np.array(block)\n process_block(block,start_index)\n\n\n\ndef get_stacks_folder(folder):\n phase_slope_flist = glob.glob(os.path.join(folder,'*phase_slope.npy'))\n phase_slope_flist.sort()\n amplitude_flist = glob.glob(os.path.join(folder,'*amplitude.npy'))\n amplitude_flist.sort()\n\n abscans = []\n pbscans = []\n for af,pf in zip(amplitude_flist,phase_slope_flist):\n abscans.append(np.load(af))\n pbscans.append(np.load(pf))\n\n abscans = np.array(abscans)\n pbscans = np.array(pbscans)\n return abscans,pbscans\n \ndef extract_layer_velocities_folder(folder,x1,x2,z1,z2):\n abscans,pbscans = get_stacks_folder(folder)\n return extract_layer_velocities_region(abscans,pbscans,x1,x2,z1,z2)\n \n\n\ndef extract_layer_velocities_region(abscans,pbscans,x1,x2,z1,z2,stim_index=None,refine_z=0):\n if stim_index is None:\n amean = np.mean(abscans,axis=0)\n else:\n amean = np.mean(abscans[stim_index:stim_index+5,:,:],axis=0)\n\n full_profile = np.mean(amean[:,x1:x2],axis=1)\n \n isos_points = []\n cost_points = []\n #amean[:z1,:] = np.nan\n #amean[z2:,:] = np.nan\n\n temp = np.nanmean(amean[z1:z2,x1:x2],axis=1)\n mprof = np.zeros(len(temp)+2)\n mprof[1:-1] = temp\n z1-=1\n z2-=1\n \n \n left = mprof[:-2]\n center = mprof[1:-1]\n right = mprof[2:]\n thresh = np.std(amean)\n\n peaks = np.where(np.logical_and(center>left,center>right))[0]+1\n\n peakvals = [mprof[p] for p in peaks]\n height_order = np.argsort(peakvals)[::-1]\n\n peaks = peaks[height_order[:2]]\n peaks.sort()\n #peaks = peaks[:2]\n\n if False:\n plt.figure()\n plt.plot(mprof)\n for pidx in peaks:\n plt.plot(pidx,mprof[pidx],'ro')\n plt.show()\n\n dpeak = peaks[1]-peaks[0]\n\n os_velocity = []\n os_amplitude = []\n isos_z = []\n cost_z = []\n \n for idx in range(abscans.shape[0]):\n isos_p = []\n isos_a = []\n cost_p = []\n cost_a = []\n abscan = abscans[idx]\n pbscan = pbscans[idx]\n isos_z.append([])\n cost_z.append([])\n for x in range(x1,x2):\n\n if refine_z:\n dzvec = list(range(-refine_z,refine_z+1))\n amps = []\n for dz in dzvec:\n 
amps.append(abscan[z1+peaks[0]+dz,x]+abscan[z1+peaks[1]+dz,x])\n dz = dzvec[np.argmax(amps)]\n else:\n dz = 0\n\n \n zisos = z1+peaks[0]+dz\n zcost = z1+peaks[1]+dz\n\n isos_p.append(pbscans[idx][zisos,x])\n cost_p.append(pbscans[idx][zcost,x])\n isos_a.append(abscans[idx][zisos,x])\n cost_a.append(abscans[idx][zcost,x])\n isos_z[-1].append(zisos)\n cost_z[-1].append(zcost)\n \n os_p = [c-i for c,i in zip(cost_p,isos_p)]\n os_a = [(c+i)/2.0 for c,i in zip(cost_a,isos_a)]\n os_velocity.append(np.nanmean(os_p))\n os_amplitude.append(np.nanmean(os_a))\n \n os_velocity = -np.array(os_velocity)\n os_amplitude = np.array(os_amplitude)\n isos_z = np.array(isos_z)\n cost_z = np.array(cost_z)\n return os_amplitude,os_velocity,isos_z,cost_z,x1,x2,full_profile\n\n\ndef extract_layer_velocities_lines(abscans,pbscans,x1,x2,z1,z2,x3,x4,z3,z4,stim_index=None,refine_z=0):\n if stim_index is None:\n amean = np.mean(abscans,axis=0)\n else:\n amean = np.mean(abscans[stim_index:stim_index+5,:,:],axis=0)\n\n full_profile = np.mean(amean[:,x1:x2],axis=1)\n \n isos_points = []\n cost_points = []\n\n # reconcile x1, x2, x3, and x4 and reduce to x1 and x2 for both\n # lines\n x1 = max(x1,x3)\n x2 = min(x2,x4)\n\n N = x2-x1\n z_top = np.round(np.linspace(z1,z2,N)).astype(int)\n z_bottom = np.round(np.linspace(z3,z4,N)).astype(int)\n\n os_velocity = []\n os_amplitude = []\n isos_z = []\n cost_z = []\n \n for idx in range(abscans.shape[0]):\n isos_p = []\n isos_a = []\n cost_p = []\n cost_a = []\n abscan = abscans[idx]\n pbscan = pbscans[idx]\n isos_z.append([])\n cost_z.append([])\n for xidx,x in enumerate(range(x1,x2)):\n zt = z_top[xidx]\n zb = z_bottom[xidx]\n constant_os_length = False\n \n if constant_os_length:\n if refine_z:\n dzvec = list(range(-refine_z,refine_z+1))\n amps = []\n for dz in dzvec:\n amps.append(abscan[zt+dz,x]+abscan[zb+dz,x])\n dz = dzvec[np.argmax(amps)]\n else:\n dz = 0\n\n zisos = zt+dz\n zcost = zb+dz\n else:\n if refine_z:\n dzvec = list(range(-refine_z,refine_z+1))\n amps_isos = []\n for dz in dzvec:\n amps_isos.append(abscan[zt+dz,x])\n dz_isos = dzvec[np.argmax(amps_isos)]\n \n amps_cost = []\n for dz in dzvec:\n amps_cost.append(abscan[zb+dz,x])\n dz_cost = dzvec[np.argmax(amps_cost)]\n \n else:\n dz_isos = 0\n dz_cost = 0\n \n zisos = zt+dz_isos\n zcost = zb+dz_cost\n \n \n isos_p.append(pbscans[idx][zisos,x])\n cost_p.append(pbscans[idx][zcost,x])\n isos_a.append(abscans[idx][zisos,x])\n cost_a.append(abscans[idx][zcost,x])\n isos_z[-1].append(zisos)\n cost_z[-1].append(zcost)\n \n os_p = [c-i for c,i in zip(cost_p,isos_p)]\n os_a = [(c+i)/2.0 for c,i in zip(cost_a,isos_a)]\n os_velocity.append(np.nanmean(os_p))\n os_amplitude.append(np.nanmean(os_a))\n \n os_velocity = -np.array(os_velocity)\n os_amplitude = np.array(os_amplitude)\n isos_z = np.array(isos_z)\n cost_z = np.array(cost_z)\n return os_amplitude,os_velocity,isos_z,cost_z,x1,x2,full_profile\n\n\n\n\ndef extract_layer_velocities_rows(abscans,pbscans,x1,x2,z1,z2):\n amean = np.mean(abscans,axis=0)\n isos_points = []\n cost_points = []\n \n os_velocity = []\n os_amplitude = []\n isos_z = []\n cost_z = []\n \n for idx in range(abscans.shape[0]):\n isos_p = []\n isos_a = []\n cost_p = []\n cost_a = []\n abscan = abscans[idx]\n pbscan = pbscans[idx]\n isos_z.append([])\n cost_z.append([])\n for x in range(x1,x2):\n zisos = z1\n zcost = z2\n isos_p.append(pbscans[idx][zisos,x])\n cost_p.append(pbscans[idx][zcost,x])\n isos_a.append(abscans[idx][zisos,x])\n cost_a.append(abscans[idx][zcost,x])\n isos_z[-1].append(zisos)\n 
cost_z[-1].append(zcost)\n \n os_p = [c-i for c,i in zip(cost_p,isos_p)]\n os_a = [(c+i)/2.0 for c,i in zip(cost_a,isos_a)]\n os_velocity.append(np.nanmean(os_p))\n os_amplitude.append(np.nanmean(os_a))\n \n os_velocity = -np.array(os_velocity)\n os_amplitude = np.array(os_amplitude)\n isos_z = np.array(isos_z)\n cost_z = np.array(cost_z)\n return os_amplitude,os_velocity,isos_z,cost_z\n\nif __name__=='__main__':\n \n unp_files = glob.glob('./examples/*.unp')\n for unp_file in unp_files:\n d = blobd.Diagnostics(unp_file)\n folder = unp_file.replace('.unp','')+'_bscans'\n process_org_blocks(folder)\n\n \n # block_filenames = get_block_filenames(folder)\n # for bf in block_filenames:\n # print(bf)\n # frames = get_frames(bf)\n # compute_phase_velocity(frames,diagnostics=d)\n" }, { "alpha_fraction": 0.6078431606292725, "alphanum_fraction": 0.6102941036224365, "avg_line_length": 27.33333396911621, "blob_id": "770783c03379d69960ed3b3538295f846942895b", "content_id": "18e8ad1761698a56756192f192f9b436d7447de6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2040, "license_type": "no_license", "max_line_length": 115, "num_lines": 72, "path": "/file_manager.py", "repo_name": "rjonnal/octoblob", "src_encoding": "UTF-8", "text": "import os,sys\nimport pathlib\nimport shutil\nfrom octoblob import logger\nimport logging\n\ndef get_params_filename(data_filename):\n params_filename = os.path.join(os.path.split(data_filename)[0],'processing_parameters.json')\n return params_filename\n\n\ndef get_bscan_folder(data_filename,make=True):\n ext = os.path.splitext(data_filename)[1]\n bscan_folder = data_filename.replace(ext,'')+'_bscans'\n \n if make:\n os.makedirs(bscan_folder,exist_ok=True)\n return bscan_folder\n\n\ndef get_org_folder(data_filename,make=True):\n ext = os.path.splitext(data_filename)[1]\n bscan_folder = data_filename.replace(ext,'')+'_org'\n \n if make:\n os.makedirs(bscan_folder,exist_ok=True)\n return bscan_folder\n\nbscan_template = 'complex_bscan_%05d.npy'\n\n\ndef cleanup_folders(folder_filters=[],delete=False):\n folders = []\n for ff in folder_filters:\n temp = list(pathlib.Path('.').rglob(ff))\n for item in temp:\n if os.path.isdir(item):\n folders.append(item)\n\n for f in folders:\n if not delete:\n logging.info('Would delete %s.'%f)\n \n else:\n logging.info('Deleting %s.'%f)\n shutil.rmtree(f)\n\n\ndef cleanup_files(file_filters=[],delete=False):\n files = []\n for ff in file_filters:\n temp = list(pathlib.Path('.').rglob(ff))\n for item in temp:\n if os.path.isfile(item):\n files.append(item)\n\n for f in files:\n if not delete:\n logging.info('Would delete %s.'%f)\n \n else:\n logging.info('Deleting %s.'%f)\n os.remove(f)\n\ndef clean(delete=False):\n ans = input('Are you sure you want to delete all processing output and intermediates below this level? 
[y/N] ')\n if ans.lower()=='y':\n cleanup_folders(['*_diagnostics','*_bscans','*_org'],delete=delete)\n cleanup_files(['processing_parameters.json'],delete=delete)\n cleanup_files(['octoblob.log'],delete=delete)\n else:\n sys.exit('Exiting.')\n" }, { "alpha_fraction": 0.6056838631629944, "alphanum_fraction": 0.6284784078598022, "avg_line_length": 31.171428680419922, "blob_id": "6cf55b82d2ecb0c524aa7d9fde6ec9ef62de3fa7", "content_id": "aa69467a668a2dad76debf7f844618fe52796cf5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3378, "license_type": "no_license", "max_line_length": 77, "num_lines": 105, "path": "/config_reader.py", "repo_name": "rjonnal/octoblob", "src_encoding": "UTF-8", "text": "import os,sys\nfrom datetime import datetime\nimport logging\nfrom xml.etree import ElementTree as ET\n\n\nXML_DICT = {}\n# populate XML_DICT with required parameters from Yifan's XML grammar\n# keys of this dictionary [x,y] are x = element tag and y = element attribute\n# the values of this dictionary (x,y) are x = our new name for the data and\n# y = the data type (i.e. a function that we can cast the output with)\nXML_DICT['Time','Data_Acquired_at'] = ('time_stamp',str)\nXML_DICT['Volume_Size','Width'] = ('n_depth',int)\nXML_DICT['Volume_Size','Height'] = ('n_fast',int)\nXML_DICT['Volume_Size','Number_of_Frames'] = ('n_slow',int)\nXML_DICT['Volume_Size','Number_of_Volumes'] = ('n_vol',int)\nXML_DICT['Scanning_Parameters','X_Scan_Range'] = ('x_scan_mv',int)\nXML_DICT['Scanning_Parameters','X_Scan_Offset'] = ('x_offset_mv',int)\nXML_DICT['Scanning_Parameters','Y_Scan_Range'] = ('y_scan_mv',int)\nXML_DICT['Scanning_Parameters','Y_Scan_Offset'] = ('y_offset_mv',int)\nXML_DICT['Scanning_Parameters','Number_of_BM_scans'] = ('n_bm_scans',int)\n\n\n\ndef get_configuration(filename):\n\n ''' Pull configuration parameters from Yifan's\n config file. An example configuration file is shown\n below. Calling get_configuration('temp.xml') returns\n a dictionary of parameters useful for processing the OCT\n stack, e.g. 
numbers of scans in x and y directions,\n voltage range of scanners, etc.\n\n Example XML config file:\n\n <?xml version=\"1.0\" encoding=\"utf-8\"?>\n <MonsterList>\n <!--Program Generated Easy Monster-->\n <Monster>\n <Name>Goblin</Name>\n <Time\n Data_Acquired_at=\"1/30/2018 12:21:22 PM\" />\n <Volume_Size\n Width=\"2048\"\n Height=\"400\"\n Number_of_Frames=\"800\"\n Number_of_Volumes=\"1\" />\n <Scanning_Parameters\n X_Scan_Range=\"3000\"\n X_Scan_Offset=\"0\"\n Y_Scan_Range=\"0\"\n Y_Scan_Offset=\"0\"\n Number_of_BM_scans=\"2\" />\n <Dispersion_Parameters\n C2=\"0\"\n C3=\"0\" />\n </Monster>\n </MonsterList>\n\n Example output dictionary:\n\n {'y_offset_mv': 0, 'x_offset_mv': 0, 'n_fast': 400, \n 'y_scan_mv': 0, 'n_slow': 800, 'n_vol': 1, \n 'x_scan_mv': 3000, 'time_stamp': '1/30/2018 12:21:22 PM', \n 'n_bm_scans': 2, 'n_depth': 2048}\n\n '''\n \n # append extension if it's not there\n if not filename[-4:].lower()=='.xml':\n filename = filename + '.xml'\n\n \n # use Python's ElementTree to get a navigable XML tree\n temp = ET.parse(filename).getroot()\n\n # start at the root, called 'Monster' for whatever reason:\n tree = temp.find('Monster')\n\n # make an empty output dictionary\n config_dict = {}\n\n # iterate through keys of specification (XML_DICT)\n # and find corresponding settings in the XML tree.\n # as they are found, insert them into config_dict with\n # some sensible but compact names, casting them as\n # necessary:\n for xml_key in XML_DICT.keys():\n node = tree.find(xml_key[0])\n config_value = node.attrib[xml_key[1]]\n xml_value = XML_DICT[xml_key]\n config_key = xml_value[0]\n config_cast = xml_value[1]\n config_dict[config_key] = config_cast(config_value)\n \n return config_dict\n \ndef make_configuration():\n\n config = {}\n for xml_value in XML_DICT.values():\n config_key = xml_value[0]\n config[config_key] = None\n\n return config\n" }, { "alpha_fraction": 0.706573486328125, "alphanum_fraction": 0.7268974184989929, "avg_line_length": 31.040817260742188, "blob_id": "48d6edb65a722fe47ce40f51c3ce5e52496c2c1e", "content_id": "d31b90917b3a070ce09d1ea701a771ad899af9c2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3149, "license_type": "no_license", "max_line_length": 116, "num_lines": 98, "path": "/examples/optopol_interface/process.py", "repo_name": "rjonnal/octoblob", "src_encoding": "UTF-8", "text": "from octoblob.data_source import DataSource\nimport octoblob.functions as blobf\nimport logging\nfrom matplotlib import pyplot as plt\nfrom octoblob import diagnostics_tools\nfrom octoblob import parameters\nfrom octoblob import org_tools\nimport sys,os,glob\nimport numpy as np\nfrom octoblob import mapping_dispersion_optimizer as mdo\nfrom octoblob import file_manager\nimport pathlib\n\n\n# For ORG processing we needn't process all the frames. 400 frames are acquired\n# in each measurememnt, at a rate of 400 Hz. The stimulus onset is at t=0.25 s,\n# corresponding to the 100th frame. 50 milliseconds before stimulus is sufficient\n# to establish the baseline, and the main ORG response takes place within 100\n# milliseconds of the stimulus. Thus:\norg_start_frame = 0\norg_end_frame = 250\n\n# Enter this here or pass at command line\namp_filename = None\nbscan_height = 400\n\nif amp_filename is None:\n try:\n amp_filename = sys.argv[1]\n except IndexError as ie:\n sys.exit('Please check amp_filename. 
%s not found or amp_filename not passed at command line.'%amp_filename)\n\nphase_filename = amp_filename.replace('_Amp.bin','_Phase.bin')\n\norg_frames = list(range(org_start_frame,org_end_frame))\n\n# Create a diagnostics object for inspecting intermediate processing steps\ndiagnostics = diagnostics_tools.Diagnostics(amp_filename)\n\n# Create a parameters object for storing and loading processing parameters\nparams_filename = file_manager.get_params_filename(amp_filename)\nparams = parameters.Parameters(params_filename,verbose=True)\n\n# get the folder name for storing bscans\nbscan_folder = file_manager.get_bscan_folder(amp_filename)\n\n\ndims = np.fromfile(amp_filename,dtype=np.int32,count=3)\n\nn_depth,n_fast,n_slow = dims\n\nassert org_start_frame<n_slow\nassert org_end_frame<=n_slow\n\ndef get_cube(fn,show_hist=False):\n dat = np.fromfile(fn,dtype=np.dtype('<f4'),offset=12,count=n_depth*n_fast*n_slow)\n if show_hist:\n plt.hist(dat)\n plt.show()\n dat = np.reshape(dat,dims[::-1])\n dat = np.transpose(dat,(0,2,1))\n dat = dat[:,::-1,:]\n return dat.astype(float)\n\namp = get_cube(amp_filename)\nphase = get_cube(phase_filename)\n\nbmean = np.mean(amp,axis=0)\nz1,z2 = blobf.get_bscan_boundaries(bmean,bscan_height,intensity=False)\nplt.figure()\nplt.imshow(20*np.log10(bmean),aspect='auto',cmap='gray',clim=(40,90))\nplt.colorbar()\nplt.pause(.1)\n\nfor k in range(org_start_frame,org_end_frame):\n bamp = blobf.insert_bscan(amp[k,:,:],z1,z2,bscan_height)\n bphase = blobf.insert_bscan(phase[k,:,:],z1,z2,bscan_height)\n bscan = bamp*np.exp(bphase*1j)\n bscan = bscan.astype(np.complex64)\n #bscan = amp[k,:,:]\n dB = blobf.dB(bscan)\n bphase = np.angle(bscan)\n plt.clf()\n plt.imshow(dB,clim=(45,90),cmap='gray')\n plt.colorbar()\n plt.pause(.01)\n outfn = os.path.join(bscan_folder,file_manager.bscan_template%k)\n np.save(outfn,bscan)\n \n print(bscan.dtype)\n \n# Skip this for now. Needs discussion.\n#blobf.flatten_volume(bscan_folder,diagnostics=diagnostics)\n\n# Process the ORG blocks\norg_tools.process_org_blocks(bscan_folder)\n \n" }, { "alpha_fraction": 0.6786623001098633, "alphanum_fraction": 0.6873863935470581, "avg_line_length": 39.45098114013672, "blob_id": "bdfeda190bc5d40bb5e9fc1e84298a3f391425f52d4f88dc2", "content_id": "38698e081d5716ed22dff1269a98302910a9f08e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8253, "license_type": "no_license", "max_line_length": 130, "num_lines": 204, "path": "/examples/fbg_alignment/process.py", "repo_name": "rjonnal/octoblob", "src_encoding": "UTF-8", "text": "from octoblob.data_source import DataSource\nimport octoblob.functions as blobf\nimport logging\nfrom matplotlib import pyplot as plt\nfrom octoblob import diagnostics_tools\nfrom octoblob import parameters\nfrom octoblob import org_tools\nimport sys,os,glob\nimport numpy as np\nfrom octoblob import mapping_dispersion_optimizer as mdo\nfrom octoblob import file_manager\nimport pathlib\n\n\n# This example provides a new FBG alignment function based on cross-correlation. It may prove to be more robust than\n# previous methods that used 'feature'-based alignment (e.g. 
aligning to the largest positive or negative gradients)\n\nno_parallel = True\n\nuse_multiprocessing = False\ntry:\n assert not no_parallel\n import multiprocessing as mp\n use_multiprocessing = True\n n_cores_available = mp.cpu_count()\n n_cores = n_cores_available-2\n logging.info('multiprocessing imported')\n logging.info('n_cores_available: %d'%n_cores_available)\n logging.info('n_cores to be used: %d'%n_cores)\nexcept ImportError as ie:\n logging.info('Failed to import multiprocessing: %s'%ie)\n logging.info('Processing serially.')\nexcept AssertionError as ae:\n logging.info('Multiprocessing banned by no_parallel.')\n \ndata_filename = None\n\nif data_filename is None:\n try:\n data_filename = sys.argv[1]\n except IndexError as ie:\n sys.exit('Please check data_filename. %s not found or data_filename not passed at command line.'%data_filename)\n\nsrc = blobf.get_source(data_filename)\n\n# Create a diagnostics object for inspecting intermediate processing steps\ndiagnostics = diagnostics_tools.Diagnostics(data_filename)\n\n# Create a parameters object for storing and loading processing parameters\nparams_filename = file_manager.get_params_filename(data_filename)\nparams = parameters.Parameters(params_filename,verbose=True)\n\n\n# New prototype fbg_align function, which uses cross-correlation instead of feature-\n# based alignment of spectra.\n# Set a limit on the maximum index where the FBG trough could possibly be located.\n# This is a critical parameter, as it avoids cross correlation of spectra based on\n# structural information; this would prevent the FBG features from dominating the\n# cross-correlation and introduce additional phase noise.\n# Correlation threshold is the minimum correlation required to consider two spectra\n# to be in phase with one another\ndef fbg_align(spectra,fbg_max_index=150,correlation_threshold=0.9,diagnostics=None):\n # crop the frame to the FBG region\n f = spectra[:fbg_max_index,:].copy()\n\n if not diagnostics is None:\n fig = diagnostics.figure(figsize=(6,4))\n axes = fig.subplots(2,2)\n axes[0][0].imshow(f,aspect='auto')\n for k in range(f.shape[1]):\n axes[0][1].plot(f[:,k])\n\n # group the spectra by amount of shift\n # this step avoids having to perform cross-correlation operations on every\n # spectrum; first, we group them by correlation with one another\n # make a list of spectra to group\n to_do = list(range(f.shape[1]))\n # make a list for the groups of similarly shifted spectra\n groups = []\n ref = 0\n\n # while there are spectra left to group, do the following loop:\n while(True):\n groups.append([ref])\n to_do.remove(ref)\n for tar in to_do:\n c = np.corrcoef(f[:,ref],f[:,tar])[0,1]\n if c>correlation_threshold:\n groups[-1].append(tar)\n to_do.remove(tar)\n if len(to_do)==0:\n break\n ref = to_do[0]\n\n subframes = []\n for g in groups:\n subf = f[:,g]\n subframes.append(subf)\n\n # now decide how to shift the groups of spectra by cross-correlating their means\n # we'll use the first group as the reference group:\n group_shifts = [0]\n ref = np.mean(subframes[0],axis=1)\n # now, iterate through the other groups, compute their means, and cross-correlate\n # with the reference. 
keep track of the cross-correlation peaks in the list group_shifts\n for taridx in range(1,len(subframes)):\n tar = np.mean(subframes[taridx],axis=1)\n xc = np.fft.ifft(np.fft.fft(ref)*np.fft.fft(tar).conj())\n shift = np.argmax(xc)\n if shift>len(xc)//2:\n shift = shift-len(xc)\n group_shifts.append(shift)\n\n # now, use the groups and the group_shifts to shift all of the spectra according to their\n # group membership:\n for g,s in zip(groups,group_shifts):\n for idx in g:\n spectra[:,idx] = np.roll(spectra[:,idx],s)\n f[:,idx] = np.roll(f[:,idx],s)\n\n if not diagnostics is None:\n axes[1][0].imshow(f,aspect='auto')\n for k in range(f.shape[1]):\n axes[1][1].plot(f[:,k])\n diagnostics.save(fig)\n\n return spectra\n\n\ndef spectra_to_bscan(mdcoefs,spectra,diagnostics=None):\n # only the fbg_align function is called locally (from this script);\n # most of the OCT processing is done by blob functions (blobf.XXXX)\n spectra = fbg_align(spectra,diagnostics=diagnostics)\n spectra = blobf.dc_subtract(spectra,diagnostics=diagnostics)\n spectra = blobf.crop_spectra(spectra,diagnostics=diagnostics)\n spectra = blobf.k_resample(spectra,mdcoefs[:2],diagnostics=diagnostics)\n spectra = blobf.dispersion_compensate(spectra,mdcoefs[2:],diagnostics=None)\n spectra = blobf.gaussian_window(spectra,sigma=0.9,diagnostics=None)\n\n # Now generate the bscan by FFT:\n bscan = np.fft.fft(spectra,axis=0)\n # remove the upper half of the B-scan and leave only the bottom half:\n bscan = bscan[bscan.shape[0]//2:,:]\n\n # could additionally crop the B-scan if desired;\n # for example, could remove the top 10 rows, bottom 50 rows, and 10 columns\n # from the left and right edges:\n # bscan = bscan[10:-50,10:-10]\n\n # artifact.png has a lens flare artifact after the 150th column, so we'll remove\n # it; we'll also remove 50 rows near the DC (bottom of the image):\n bscan = bscan[:-50,:150]\n \n if not diagnostics is None:\n fig = diagnostics.figure()\n axes = fig.subplots(1,1)\n im = axes.imshow(20*np.log10(np.abs(bscan)),aspect='auto')\n plt.colorbar(im)\n diagnostics.save(fig)\n return bscan\n\n\n# try to read dispersion/mapping coefs from a local processing_parameters file, and run optimization otherwise\ntry:\n coefs = np.array(params['mapping_dispersion_coefficients'],dtype=np.float)\n logging.info('File %s mapping dispersion coefficients found in %s. Skipping optimization.'%(data_filename,params_filename))\nexcept KeyError:\n logging.info('File %s mapping dispersion coefficients not found in %s. Running optimization.'%(data_filename,params_filename))\n samples = src.get_samples(3)\n # modify the next line to use the local spectra_to_bscan function by removing 'blobf.':\n coefs = mdo.multi_optimize(samples,spectra_to_bscan,show_all=False,show_final=True,verbose=True,diagnostics=diagnostics)\n params['mapping_dispersion_coefficients'] = coefs\n\n# get the folder name for storing bscans\nbscan_folder = file_manager.get_bscan_folder(data_filename)\n\nif __name__=='__main__':\n\n if use_multiprocessing:\n def proc(k):\n # compute the B-scan from the spectra, using the provided dispersion coefficients;\n # use the local spectra_to_bscan function, not the blobf. 
version\n bscan = spectra_to_bscan(coefs,src.get_frame(k),diagnostics=diagnostics)\n\n # save the complex B-scan in the B-scan folder\n outfn = os.path.join(bscan_folder,file_manager.bscan_template%k)\n np.save(outfn,bscan)\n logging.info('Saving bscan %s.'%outfn)\n\n pool = mp.Pool(n_cores)\n pool.map(proc,range(src.n_total_frames))\n\n else:\n\n for k in range(src.n_total_frames):\n\n # compute the B-scan from the spectra, using the provided dispersion coefficients:\n # use the local spectra_to_bscan function, not the blobf. version\n bscan = spectra_to_bscan(coefs,src.get_frame(k),diagnostics=diagnostics)\n\n # save the complex B-scan in the B-scan folder\n outfn = os.path.join(bscan_folder,file_manager.bscan_template%k)\n np.save(outfn,bscan)\n logging.info('Saving bscan %s.'%outfn)\n\n" }, { "alpha_fraction": 0.786821722984314, "alphanum_fraction": 0.8139534592628479, "avg_line_length": 50.599998474121094, "blob_id": "62bcbe3cfda687255c0cb578270dab09f232b9e6", "content_id": "e148888f3fc733a63af5db6db04a265816ee7005", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 258, "license_type": "no_license", "max_line_length": 171, "num_lines": 5, "path": "/examples/optopol_interface/NOTE.md", "repo_name": "rjonnal/octoblob", "src_encoding": "UTF-8", "text": "Download data from here:\n\nhttps://ucdavis.box.com/s/ghvrg2w7xxpd6hmfun8ws9ecybwr5bv9\n\nThe problem is that the wrong axial region comes into view when the plot window shows up, and then the clicked regions have nothing to do with the extracted phase profile.\n" }, { "alpha_fraction": 0.7719168066978455, "alphanum_fraction": 0.7819464802742004, "avg_line_length": 80.57575988769531, "blob_id": "6084843cb4995f8e426ef86242e09380236039ea", "content_id": "58a5ce9fd8fef6c4baac35cecedc9c4238ec9ad2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2692, "license_type": "no_license", "max_line_length": 971, "num_lines": 33, "path": "/examples/flicker_org/README.md", "repo_name": "rjonnal/octoblob", "src_encoding": "UTF-8", "text": "# Basic flicker ORG processing\n\n## Folder contents\n\n* process.py: OCT/ORG processing script\n\n* reset.sh: a bash script for deleting all of the downloaded and processed data, mainly for cleaning up this folder before pushing changes to github\n\n## Download test data\n\nTo run this example you must download the test data from the links below:\n\n* test_flicker.unp: the spectral data stored in raw binary 16 bit unsigned integer format. \n\n > Download [test_flicker.unp](https://www.dropbox.com/s/fbms4ekrwvngt0a/test_flicker.unp?dl=0)\n.\n\n* test_flicker.xml: acquisition parameters stored by the OCT instrumetation software during acquisition. \n\n > Download [test_flicker.xml](https://www.dropbox.com/s/hmny5xafcizj67q/test_flicker.xml?dl=0)\n\nAfter downloading, put them into the `examples/flicker_org` folder.\n\n\n## OCT/ORG processing\n\n1. Using the Anaconda terminal (command prompt), change into the `octoblob/examples/flicker_org` folder and run the program by issuing `python process.py` at the command prompt. This may take 20-30 minutes. The ORG processing is slow, and 1600-2000 blocks must be processed.\n\n## ORG visualization\n\n1. Run the program `plot_velocities.py` by issuing `python plot_velocities.py` at the command prompt, in the same folder. If run this way, the program searches recursively for folders called `org` in the current directory and its subdirectories. 
Alternatively, you may issue `python plot_velocities.py ./test_bscans` to search only that subdirectory (recursively). In these cases, the program will run on each of the `org` folders it finds. Finally, you may specify a particular org folder with `python plot_velocities.py ./test_bscans/org`, in which case it will run only on that folder.\n\n2. The input required by the user is clicking the upper left and lower right corners of a rectangle containing just the IS/OS and COST bands to be analyzed, in the B-scan on the left. Within this rectangle, the bands should be approximately equidistant, to facilitate a simple segmentation algorithm. Selection of a rectangle causes the $v_{OS}$ plot for that region to appear in the center panel and the power spectrum of $v_{OS}$ plotted in log scale in the right panel, limited to the range [0 Hz, 30 Hz]. When multiple rectangles are created, multiple plots are generated on the right, with the rectangles and plot lines color-coordinated for comparison. The `backspace` key deletes the last rectangle, and clicking outside of the B-scan on the left clears all of the rectangles. The `enter` key saves the figure and associated data in two places: the working directory, in a folder called `plot_velocities_results` and in the `org` folder containing the raw ORG data.\n" }, { "alpha_fraction": 0.7737002968788147, "alphanum_fraction": 0.7774137258529663, "avg_line_length": 68.36363983154297, "blob_id": "494263b445c2a51419b314178f3f8aac96220329", "content_id": "4c1348364be013ec55c61ba2a869a017bdbffd7f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 4578, "license_type": "no_license", "max_line_length": 592, "num_lines": 66, "path": "/examples/single_flash_general_org/README.md", "repo_name": "rjonnal/octoblob", "src_encoding": "UTF-8", "text": "# Basic single flash ORG processing for arbitrarily chosen layers\n\n## Folder contents\n\n* process.py: OCT/ORG processing script\n\n* plot_general_org.py: an interactive tool for visualizing phase changes between arbitrary, user-selected layers\n\n* reset.sh: a bash script for deleting all of the downloaded and processed data, mainly for cleaning up this folder before pushing changes to github\n\n## Download test data\n\nTo run this example you must download the test data from the links below:\n\n* test.unp: the spectral data stored in raw binary 16 bit unsigned integer format. \n\n > Download [test.unp](https://www.dropbox.com/s/pf6b951mlntqq9l/test.unp?dl=1).\n\n* test.xml: acquisition parameters stored by the OCT instrumentation software during acquisition. \n\n > Download [test.xml](https://www.dropbox.com/s/ux5qlinqq6y1zy4/test.xml?dl=1).\n\nAfter downloading, put them into the `examples/single_flash_general_org` folder.\n\n\n## OCT/ORG processing\n\n1. Edit the file `process.py`, and edit the values assigned to `data_filename`, `org_start_frame`, and `org_end_frame` as needed. For single flash experiments, only a subset of B-scans must be processed; see the code comment for details. For flicker experiments, the entire set of B-scans must be processed.\n\n2. Using the Anaconda terminal (command prompt), change into the `octoblob/examples/single_flash_general_org` folder and run the program by issuing `python process.py` at the command prompt. This will take a few minutes. The ORG processing in particular is somewhat slow.\n\n## ORG visualization\n\n1. 
Run the program `plot_general_org.py` by issuing `python plot_general_org.py` at the command prompt, in the same folder. If run this way, the program searches recursively for folders called `org` in the current directory and its subdirectories. Alternatively, you may issue `python plot_general_org.py ./test_bscans` to search only that subdirectory (recursively). In these cases, the program will run on each of the `org` folders it finds. Finally, you may specify a particular org folder with `python plot_general_org.py ./test_bscans/org`, in which case it will run only on that folder.\n\n2. The input required by the user is clicking the end points of two line segments, one at a time. These line segments determine the layers between which phase velocities are computed. The user must click these line segments in a particular order--the left end of the top line segment, the right end of the top line segment, the left end of the bottom line segment, and the right end of the bottom line segment. The program will attempt to convert these line segments into arbitrary paths tracing the contour of the underlying layer by using the `refine_z` parameter:\n\n```python\n# refine_z specifies the number of pixels (+/-) over which the\n# program may search to identify a local peak. The program begins by asking\n# the user to trace line segments through two layers of interest. These layers\n# may not be smooth. From one A-scan to the next, the brightest pixel or \"peak\"\n# corresponding to the layer may be displaced axially from the intersection\n# of the line segment with the A-scan. refine_z specifies the distance (in either\n# direction, above or below that intersection) where the program may search for a\n# brighter pixel with which to compute the phase. The optimal setting here will\n# largely be determined by how isolated the layer of interest is. For a relatively\n# isolated layer, such as IS/OS near the fovea, a large value may be best. For\n# closely packed layers such as COST and RPE, smaller values may be useful. The\n# user receives immediate feedback from the program's selection of bright pixels\n# and can observe whether refine_z is too high (i.e., causing the wrong layer\n# to be segmented) or too low (i.e., missing the brightest pixels.\n```\n\nSelection of these line segments causes the $v$ plot for that region to appear in the right panel. When multiple regions are created, multiple plots are generated on the right, with the rectangles and plot lines color-coordinated for comparison. The `backspace` key deletes the last region, and clicking outside of the B-scan on the left clears all of the regions. 
The `enter` key saves the figure and associated data in two places: the working directory, in a folder called `layer_velocities_results` and in the `org` folder containing the raw ORG data.\n\n## Example results\n\n### Cone outer segment ORG responses\n\n![Cone outer segment ORG responses](./figs/cone_os_org.png)\n\n### Subretinal space ORG responses\n\n![Subretinal space ORG responses](./figs/subretinal_org.png)\n" }, { "alpha_fraction": 0.7097586989402771, "alphanum_fraction": 0.7306638360023499, "avg_line_length": 46.332637786865234, "blob_id": "b7eea189ba4ea57da1ee9ce316398ce642e4d6f0", "content_id": "f7ddbd2992e305396f72ad0dc32bc4e0d0087c71", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 22626, "license_type": "no_license", "max_line_length": 797, "num_lines": 478, "path": "/README.md", "repo_name": "rjonnal/octoblob", "src_encoding": "UTF-8", "text": "# Octoblob: some Python tools for doing OCT/OCTA processing\n\nThis document contains instructions for installing, interactively running OCTA processing, and creating batch scripts. It is meant to illustrate the architecture of the processing tool chain, the distinction between the OCT/OCTA libraries and processing scripts, and other important (and confusing) issues. In actuality, the OCTA data will be processed using Python scripts (i.e., batch processing), with no interaction with the user.\n\n## Prerequisites\n\nUnfortunately there are two versions of Python currently in widespread use--2.7 and 3.7+ (referred to as Python 2 and Python 3). Currently, Octoblob is written in such a way that it is compatible with Python 2 and 3, but this is unlikely to continue to be the case. Here are instructions for a few cases; all downloads are located [here](https://www.anaconda.com/products/individual).\n\n1. You don't have Python installed at all, and know of no reason to have a Python 2 installation. In this case, just install the Python 3 Anaconda distribution.\n\n2. You use Python 2 extensively and don't want to mess anything up. In this case, you'll need to verify that you have Numpy, Scipy, and Matplotlib installed. Basic octoblob functions will work in Python 2.\n\n3. You use Python 2 but want to create a virtual Python 3 environment. You can do this by executing the following (assuming you want to call the virtual environment \"python3\"; you can call it whatever you want).\n\n ```conda create -n python3 python=3```\n \n ```source activate python3``` or ```conda activate python3```, depending on your OS.\n \n ```conda install numpy scipy matplotlib ipython jupyter```\n \n If you choose this option, you will have to remember to switch to your python3 environment using ```source activate python3``` before running any octoblob scripts.\n \n4. You don't need Anaconda of course. If you know what you're doing you can just install Python, Numpy, Scipy, Matplotlib, Ipython, Jupyter, and a virtual environment manager if you wish.\n\n## Environmental variables\n\nGenerally speaking, you should have one folder on your computer where you store Python modules--both those that you create and those that you download. As a convention, on the machines in our lab we always use ```C:\\code```, but anywhere is fine. Look at the environmental variables on your machine, and see if ```PYTHONPATH``` is defined. If it is, and ```C:\\code``` isn't part of it, add it to the path (using the correct delimiter, e.g. ';', for your system). 
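\n\nA quick way to check what Python will actually see (assuming Python is already installed; the ```C:\\code``` location is just the convention described above):\n\n```python\nimport os,sys\n\n# show the PYTHONPATH environment variable, if it is defined:\nprint(os.environ.get('PYTHONPATH','PYTHONPATH is not defined'))\n\n# show the full module search path; C:\\code should appear here once the\n# variable is set correctly:\nfor p in sys.path:\n    print(p)\n```\n\n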
If it's not defined, then create a new environment variable ```PYTHONPATH``` and set it to ```C:\\code```.\n\n* [Linux instructions](https://stackoverflow.com/questions/45502996/how-to-set-environment-variable-in-linux-permanently)\n\n* [Mac instructions](https://stackoverflow.com/questions/63622010/how-to-set-environment-variables-permanently-in-mac-os-10-15-6)\n\n* [Windows instructions](https://stackoverflow.com/questions/17312348/how-do-i-set-windows-environment-variables-permanently)\n\n## Git and cloning\n\nThere are ways to proceed without installing Git, but Git is the best. Install Git, making sure to install the command line tools if you're using Windows. Then, using your terminal, change into ```C:\\code``` and issue:\n\n```git clone https://github.com/rjonnal/octoblob```\n\nIf your Python is installed and configured correctly, you can change directories into ```octoblob/tests```, type ```python test_basic.py```, and watch your computer make some B-scans. Try it, it's fun!\n\n## Getting started\n\n```octoblob``` is the unfortunate name I've chosen for the OCT/OCTA processing libraries. It is a descendant of the now obsolete ```octopod``` and ```cuttlefish``` libraries my lab has used in the past. We could have imported all the classes and functions from octoblob with ```from octoblob import *```, but it's better practice to keep the module name around, so that when module functions are called (e.g. ```bscan = blob.make_bscan(data)```), it's clear that the function is coming from the octoblob package, and clear where one needs to go to find the definition of the function.\n\n\n```python\nfrom __future__ import (absolute_import, division,\n                        print_function, unicode_literals)\nfrom builtins import *\nimport numpy as np\nfrom matplotlib import pyplot as plt\nimport octoblob as blob\n```\n\n## Some architectural principles\n\n1. One reasonable way to think about scientific software is to split it into two categories: **libraries** and **scripts**. Libraries are collections of functions (and *classes*--more on that later) where each function and class has a well-defined goal, and the implementations have been extensively tested or otherwise verified to be correct. We *really* don't want any errors in a library. Scripts are the day-to-day programs we run. Some are batch scripts that process lots of data autonomously, and others are exploratory, where we run them to see what the data looks like, often in order to help us design the next step in the processing pipeline. Sometimes a portion of a script becomes used so frequently that it makes sense to turn it into a library function and thus simplify the script.\n\n2. Specifically with regard to the OCT/OCTA processing pipeline, I believe the libraries should be split into two parts: 1) a library for reading raw data and getting it organized, and 2) a library for turning raw data into OCT/OCTA images. The first of these is handled by a *class*, and the second is handled by a set of *functions*.\n\n3. **Classes**. If you're not familiar with object-oriented programming, all you need to know about a class is that it is a specification for an *object*, i.e. a list of functions and variables that are stored together and somehow insulated from the rest of the code. The raw OCT data is handled by a class, because it needs to keep track of lots of information about the data. We create an ```OCTRawData``` class, and it keeps track of how many bytes there are per pixel, how many pixels per spectrum, how many spectra per B-scan, etc. 
By implementing this with a class, we don't have to specify how to get the raw data every time we need a new frame. We just instantiate the object and then ask it for frames, which will be illustrated below.\n\n## Parameters for the ```OCTRawData``` class\n\nThe ```OCTRawData``` class needs to know how to get a frame out of the file, and to do that it needs a bunch of parameters. Let's specify these first. They should be self-explanatory, but trailing comments may clarify in some cases.\n\n\n```python\n# PARAMETERS FOR RAW DATA SOURCE\nfilename = '../octa_test_set.unp' # name of the raw data file\nn_vol = 1 # number of volumes\nn_slow = 4 # number of B-scans in each volume\nn_repeats = 5 # number of repeats for OCTA data\nn_fast = 2500 # number of A-scans per B-scan x number of repeats\nn_skip = 500 # number of A-scans to skip at the start\nn_depth = 1536 # number of pixels per spectrum\nbit_shift_right = 4 # ignore for now\ndtype=np.uint16 # the data type of the raw data\n\nfbg_position = 148 # if there is an FBG, approximately where is it located\nspectrum_start = 159 # where does the spectral data start (i.e. after FBG)\nspectrum_end = 1459 # where does the spectral data end (i.e., before any dead/dark time at the end)\n```\n\nNow we can instantiate the ```OCTRawData``` object, which will later be used to get frames.\n\n\n```python\nsrc = blob.OCTRawData(filename,n_vol,n_slow,n_fast,n_depth,n_repeats,\n fbg_position=fbg_position,\n spectrum_start=spectrum_start,spectrum_end=spectrum_end,\n bit_shift_right=bit_shift_right,\n n_skip=n_skip,dtype=dtype)\n```\n\n File size incorrect.\n 30720000\texpected\n 32256000\tactual\n n_vol\t\t1\n n_slow\t\t4\n n_repeats\t5\n n_fast\t\t2500\n n_depth\t\t1536\n bytes_per_pixel\t2\n total_expected_size\t30720000\n\n\nThe \"File size incorrect\" warning is just telling us that there are more bytes in the file than we need. This is because using Yifan's software and the Axsun source, there's no synchronization between the slow scanner and the data acquisition, such that the first set of N repeats can begin on any of the first N frames.\n\n## Parameters for OCT/OCTA processing\n\nIn addition to the raw data parameters, the code needs to know how to process the OCT data. These parameters are of greater interest to OCT scientists, and are subject to continual revision and refinement.\n\n\n```python\n# PROCESSING PARAMETERS\nmapping_coefficients = [12.5e-10,-12.5e-7,0.0,0.0]\ndispersion_coefficients = [0.0,1.5e-6,0.0,0.0]\n\nfft_oversampling_size = 4096\n\n# Cropping parameters:\nbscan_z1 = 3147\nbscan_z2 = -40 # negative indices are relative to the end of the array\nbscan_x1 = 0\nbscan_x2 = -100 # negative indices are relative to the end of the array\n```\n\n## Pulling and processing an OCTA frame\n\nLet's say we want to process one OCTA frame, using the OCTRawData object ```src``` defined above.\n\nFirst, we need to get the raw spectra. Let's adopt the convention of calling these **frames**. A frame has dimensions ```n_k * n_x```, where ```n_k``` is the number of points in the k-dimension (the vertical/first dimension, by convention) and ```n_x``` is the number of points in the fast scan dimension, including repeats. Our B-scans are 500 pixels wide, and we have 5 repeats, so a single frame will contain 2500 A-scans. Remember that Python, like every sane programming language, begins indices with 0, not 1. 
We'll get the first frame and see what it looks like.\n\n\n```python\nframe = src.get_frame(0)\nplt.figure(dpi=150)\nplt.imshow(frame,aspect='auto')\nplt.colorbar()\nplt.show()\n```\n\n\n![png](./figs/output_12_0.png)\n\n\n## OCT processing pipeline\n\nThe next steps in the process are 1) DC-subtraction, 2) k-resampling, 3) dispersion compensation, 4) windowing, and 5) FFTing (and oversampling) the spectra into a B-scan. These are illustrated next.\n\n\n```python\nframe = blob.dc_subtract(frame)\nplt.figure(dpi=150)\nplt.imshow(frame,aspect='auto')\nplt.colorbar()\nplt.show()\n```\n\n\n![png](./figs/output_14_0.png)\n\n\n\n```python\nframe = blob.k_resample(frame,mapping_coefficients)\nplt.figure(dpi=150)\nplt.imshow(frame,aspect='auto')\nplt.colorbar()\nplt.show()\n```\n\n\n![png](./figs/output_15_0.png)\n\n\n\n```python\nframe = blob.dispersion_compensate(frame,dispersion_coefficients)\nplt.figure(dpi=150)\nplt.imshow(np.abs(frame),aspect='auto') # need 'abs' because dispersion compensation introduces imaginary component\nplt.colorbar()\nplt.show()\n```\n\n\n![png](./figs/output_16_0.png)\n\n\n\n```python\nframe = blob.gaussian_window(frame,0.9)\nplt.figure(dpi=150)\nplt.imshow(np.abs(frame),aspect='auto') # need 'abs' because dispersion compensation introduces imaginary component\nplt.colorbar()\nplt.show()\n```\n\n\n![png](./figs/output_17_0.png)\n\n\n### Let's have a look at the Gaussian window, just for fun, by running it on a vector of ones\n\n\n```python\nwindow_shape = blob.gaussian_window(np.ones(frame.shape[0]),0.9)\nplt.figure(dpi=150)\nplt.plot(window_shape)\nplt.show()\n```\n\n\n![png](./figs/output_19_0.png)\n\n\n### Now we generate a B-scan from the spectra\n\n\n```python\nbscan = blob.spectra_to_bscan(frame,oversampled_size=fft_oversampling_size,z1=bscan_z1,z2=bscan_z2)\n# need 'abs' because dispersion compensation introduces imaginary component\ndB_bscan = 20*np.log10(np.abs(bscan)) \n# define rough contrast lims--if our sensitivity is 90 dB and our dynamic range is 45 dB, then (45,90) will work.\nclim = (45,90)\nplt.figure(dpi=150)\nplt.imshow(dB_bscan,clim=(45,90),aspect='auto') \nplt.colorbar()\nplt.show()\n```\n\n\n![png](./figs/output_21_0.png)\n\n\n### Now we have to reshape the compound B-scan into a stack of 5 (n_repeats) individual B-scans\n\nWe'll check the shape of the stack (3D array), and then we'll visualize the first one in the stack, as sanity checks.\n\n\n```python\nstack_complex = blob.reshape_repeats(bscan,n_repeats,x1=bscan_x1,x2=bscan_x2)\nprint(stack_complex.shape)\n# remember that the original array bscan was complex; we used abs and log10 to visualize it before\ndB_first_bscan = 20*np.log10(np.abs(stack_complex[:,:,0]))\n# define rough contrast lims--if our sensitivity is 90 dB and our dynamic range is 45 dB, then (45,90) will work.\nplt.figure(dpi=150)\nplt.imshow(dB_first_bscan,clim=(45,90),aspect='auto') # need 'abs' because dispersion compensation introduces imaginary component\nplt.colorbar()\nplt.show()\n```\n\n (909, 400, 5)\n\n\n\n![png](./figs/output_23_1.png)\n\n\n### Lastly, we'll convert this stack of complex repeats into an angiogram\n\n\n```python\nphase_variance = blob.make_angiogram(stack_complex)\nplt.figure(dpi=150)\nplt.imshow(phase_variance,clim=(0,0.2*np.pi),aspect='auto') # need 'abs' because dispersion compensation introduces imaginary component\nplt.colorbar()\nplt.show()\n```\n\n![png](./figs/output_25_0.png)\n\n\n### Processing parameters\n\nNo processing parameters are stored alongside processing functions. 
They are all stored in the file ```processing_parameters.py``` at the top level. This improves transparency of the pipeline, and forces values to be codified in releases, which is good. That way, for a given release of the software, the user will automatically get the parameters that were used by developers to process the test data and produce example images.\n\nHere's the full parameter file, with comments:\n\n```python\n# This file contains parameters for the processing\n# pipeline. It is meant to be frozen in releases of\n# octoblob, such that processing can be reproduced\n# perfectly without having to fiddle with them.\n\n# coefficients for resampling lambda into k\n# these coefficients c specify a polynomial p:\n# p(x) = c_0*x^3 + c_1*x^2 + c_2*x + c_3\n# p(x) is the sampling error in x,\n# and the measured spectra are interpolated from\n# x -> x+p(x)\nk_resampling_coefficients = [12.5e-10,-12.5e-7,0,0]\n\n# these are the coefficients for the unit-amplitude\n# phasor used for removing dispersion chirp; if the\n# coefficients are c, then\n# p(x) = c_0*x^3 + c_1*x^2 + c_2*x + c_3\n# the dechirping phasor D is given by:\n# D = e^[-i*p(x)]\n# the spectra are dechirped by:\n# dechirped_spectrum = spectra*D\ndispersion_coefficients = [0.0,1.5e-6,0.0,0.0]\n\n# the width of the window for gaussian windowing:\ngaussian_window_sigma = 0.9\n\n# parameters for bulk motion estimation, including\n# smoothing by shifting bin edges; see Makita, 2006\n# for a detailed description of the approach;\n# in short, we do a histogram of the B-scan to B-scan\n# phase error, with a fixed number of bins (n_bins);\n# then, we shift the bin edges by a fraction of a\n# bin width and recompute the histogram; the fractional\n# shift is equal to 1/resample_factor\nbulk_motion_n_bins = 16\nbulk_motion_resample_factor = 24\nbulk_motion_n_smooth = 5\n\n# critical parameters: thresholds for bulk motion correction\n# and phase variance computation\nbulk_correction_threshold = 0.3\nphase_variance_threshold = 0.43\n```\n\n### The OCTA processing functions\n\nObviously a lot of the work is buried in the OCTA processing functions, and we'll eventually document all of those clearly as well. Here, for example, is the dispersion compensation function. ```pp.dispersion_coefficients``` refers to the value set in ```processing_parameters.py```.\n\n\n```python\ndef dispersion_compensate(spectra,coefficients=pp.dispersion_coefficients):\n    # x_in specified on 1..N+1 to accord w/ Justin's code\n    # fix this later, ideally as part of a greater effort\n    # to define our meshes for mapping and dispersion compensation\n    # on k instead of integer index\n    x = np.arange(1,spectra.shape[0]+1)\n    dechirping_phasor = np.exp(-1j*np.polyval(coefficients,x))\n    return (spectra.T*dechirping_phasor).T\n```\n\n## The ```processors``` module and a complete example\n\nAn alternative to calling the core OCT library functions is to use the ```processors``` module. The ```examples/oct_processing``` folder contains a complete example. The point of this alternative example is to abstract away from the low-level detail and use high-level commands to process data. 
It contains the following:\n\n* Two ```.unp``` raw data files along with the XML files containing acquisition parameters\n\n* A Python script ```do_processing.py``` that illustrates the basic processing pipeline (see below)\n\n* A bash script ```cleanup.sh``` that will return the folder to its original, unprocessed state\n\nHere's the contents of ```do_processing.py```, along with explanatory comments:\n\n```python\n# import the relevant modules; by convention import octoblob as blob\nimport octoblob as blob\nimport multiprocessing as mp\nimport glob,sys,os,shutil\nfrom matplotlib import pyplot as plt\n\n# use glob to get a list of the .unp files:\nfiles = glob.glob('./*.unp')\n\n# construct the corresponding bscan folders, to check whether\n# data have been processed already in the for loop below\nbscan_folders = [fn.replace('.unp','_bscans') for fn in files]\n\n# loop through the data files and bscan folders:\nfor fn,bscan_folder in zip(files,bscan_folders):\n    # only process if the bscans folder doesn't exist:\n    if not os.path.exists(bscan_folder):\n        # setup_processing reads data from the ```unp``` and ```xml``` files and\n        # generates previews of the spectra and B-scan, while allowing the user to\n        # use these previews in order to set processing parameters.\n        # The end result is a ```.json``` file containing all the necessary basic\n        # processing parameters. For advanced processing (such as OCTA, ORG, and\n        # volume averaging), additional parameters are required, and these should\n        # be stored in the same JSON file.\n        # copy_existing=True causes the setup script to run only if there's no other\n        # _parameters.json file in the folder. If there is, it will simply copy the\n        # existing one instead of running the setup script. Use copy_existing=True if\n        # all of the data in the folder were acquired with the same parameters--file sizes,\n        # scan angles, retinal eccentricity, FBG characteristics, etc.\n        blob.processors.setup_processing(fn,copy_existing=True)\n\n        # the parameters file follows the X.unp -> X_parameters.json naming convention:\n        param_fn = fn.replace('.unp','_parameters.json')\n\n        # if this data file's _parameters.json file contains dispersion and mapping\n        # coefficients (i.e., if the optimizer has already been run, or if a sister\n        # file's _parameters.json has been copied), then we skip the optimization step.\n        try:\n            # load the _parameters.json file\n            d = blob.processors.load_dict(param_fn)\n            # check that the keys 'c2', 'c3', 'm2', 'm3' are present in the resulting\n            # dictionary:\n            assert all([k in d.keys() for k in ['c2','c3','m2','m3']])\n        except AssertionError:\n            # if those keys were not all present, run the optimization:\n            # optimize_mapping_dispersion uses the processing parameters set above\n            # to run a numerical optimization of third and second order dispersion\n            # and mapping coefficients; these are then written to the JSON file as well\n            blob.processors.optimize_mapping_dispersion(fn,mode='brightness',diagnostics=True)\n\n        # finally, process the B-scans\n        # process_bscans generates B-scans from the raw spectra\n        # when diagnostics is set to False, the diagnostics are written\n        # only for the first B-scan processed, and the rest are processed\n        # quickly\n        blob.processors.process_bscans(fn,diagnostics=False,start_frame=20,end_frame=60)\n\n    plt.close('all')\n```\n\nThe ```setup_processing``` function requires responses from the user. 
Below are the correct responses for the example datasets:\n\n```\nPlease enter a value for eye (RE or LE) [RE]: \nPlease enter a value for ecc_horizontal (degrees, negative for nasal, positive for temporal) [0.0]: 2.0\nPlease enter a value for ecc_vertical (degrees, negative for superior, positive for inferior) [0.0]: 0.0\nPlease enter a value for fbg_position (-1 for no FBG alignment) [85]: \nPlease enter a value for fbg_region_height (ignored if fbg_position is -1) [60]: \nPlease enter a value for spectrum_start [159]: \nPlease enter a value for spectrum_end [1459]: \nPlease enter a value for bscan_z1 [1000]: \nPlease enter a value for bscan_z2 [1300]: \nPlease enter a value for bscan_x1 [0]: \nPlease enter a value for bscan_x2 [250]: \nPlease enter a value for fft_oversampling_size [-1]: \nPlease enter a value for notes []: jonnal_0001, no retinal disease, ORG experiment, 66% bleaching\n```\n\nIf multiple data sets were acquired with the same acquisition parameters, copies of the first JSON file can be made. For a UNP file called ```X.unp```, the corresponding JSON file should be called ```X_parameters.json```. This is done automatically if the argument ```copy_existing=True``` is passed to the ```setup_processing``` function, as shown above.\n\n## A few notes about data from the EyePod\n\nThe OCT acquisition software on different systems behaves differently. There is an XML file stored with each UNP data file, and the way in which acquisition parameters are written to this file, and the logic of the acquisition parameters, differs among systems. The way in which octoblob digests the XML file and uses the resulting parameters is system-dependent. To try to address this inconsistency, I've added a file ```system_label.py``` that should contain one of the following assignments. Just comment out the one you don't want:\n\n```python\nsystem_label = 'clinical_org'\n# system_label = 'eyepod'\n```\n\nAlso, for some reason in the XML files generated on the EyePod system, when the number of repeats is >1, the parameter ```Number_of_Frames``` is set to the number of B-scan positions multiplied by ```Number_of_BM_scans```. For example, if 360 B-scan positions are acquired and the number of BM/repeats is set to 3, ```Number_of_Frames``` will be 1080. Processing B-scans this way will give 3 B-scans in each saved file. Of course, they can be cropped later, but to avoid this issue from the start, change ```Number_of_BM_scans``` to 1 and divide ```Number_of_Frames``` by the original number of BM scans. 
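\n\nIf many EyePod XML files need this correction, the edit can be scripted. Here is a minimal sketch using only the Python standard library; the function name ```fix_eyepod_xml``` is hypothetical (not part of octoblob), and it assumes the attribute names shown in the XML excerpts that follow:\n\n```python\nimport xml.etree.ElementTree as ET\n\ndef fix_eyepod_xml(fn):\n    # parse the acquisition XML and locate the two relevant elements\n    tree = ET.parse(fn)\n    vol = tree.getroot().find('.//Volume_Size')\n    scan = tree.getroot().find('.//Scanning_Parameters')\n    n_bm = int(scan.get('Number_of_BM_scans'))\n    if n_bm > 1:\n        # divide the frame count by the number of BM scans and reset BM to 1:\n        n_frames = int(vol.get('Number_of_Frames'))\n        vol.set('Number_of_Frames', str(n_frames // n_bm))\n        scan.set('Number_of_BM_scans', '1')\n        tree.write(fn)\n```\n\n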
See below:\n\n### Original XML\n\n```xml\n <Volume_Size\n Width=\"1280\"\n Height=\"360\"\n Number_of_Frames=\"1080\"\n Number_of_Volumes=\"1\"\n BscanWidth=\"608\"\n BscanOffset=\"32\" />\n <Scanning_Parameters\n X_Scan_Range=\"1750\"\n X_Scan_Offset=\"0\"\n Y_Scan_Range=\"1750\"\n Y_Scan_Offset=\"0\"\n Number_of_BM_scans=\"3\" />\n```\n\n### Corrected XML\n```xml\n <Volume_Size\n Width=\"1280\"\n Height=\"360\"\n Number_of_Frames=\"360\"\n Number_of_Volumes=\"1\"\n BscanWidth=\"608\"\n BscanOffset=\"32\" />\n <Scanning_Parameters\n X_Scan_Range=\"1750\"\n X_Scan_Offset=\"0\"\n Y_Scan_Range=\"1750\"\n Y_Scan_Offset=\"0\"\n Number_of_BM_scans=\"1\" />\n```\n\n" }, { "alpha_fraction": 0.6924038529396057, "alphanum_fraction": 0.6933416724205017, "avg_line_length": 37.08333206176758, "blob_id": "8f8cad4aa58f11589699c3025b186dd980f06fc9", "content_id": "44c8d1ea18a2d13f2191e799fed8dd179c8b5e25", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3199, "license_type": "no_license", "max_line_length": 135, "num_lines": 84, "path": "/examples/flicker_org/process.py", "repo_name": "rjonnal/octoblob", "src_encoding": "UTF-8", "text": "from octoblob.data_source import DataSource\nimport octoblob.functions as blobf\nimport logging\nfrom matplotlib import pyplot as plt\nfrom octoblob import diagnostics_tools\nfrom octoblob import parameters\nfrom octoblob import org_tools\nimport sys,os,glob\nimport numpy as np\nfrom octoblob import mapping_dispersion_optimizer as mdo\nfrom octoblob import file_manager\nimport pathlib\n\ndef process_dataset(data_filename):\n # Create a diagnostics object for inspecting intermediate processing steps\n diagnostics = diagnostics_tools.Diagnostics(data_filename)\n\n # Create a parameters object for storing and loading processing parameters\n params_filename = file_manager.get_params_filename(data_filename)\n params = parameters.Parameters(params_filename,verbose=True)\n\n # Get an octoblob.DataSource object using the filename\n src = blobf.get_source(data_filename)\n\n # try to read dispersion/mapping coefs from a local processing_parameters file, and run optimization otherwise\n try:\n coefs = np.array(params['mapping_dispersion_coefficients'],dtype=np.float)\n logging.info('File %s mapping dispersion coefficients found in %s. Skipping optimization.'%(data_filename,params_filename))\n except KeyError:\n logging.info('File %s mapping dispersion coefficients not found in %s. Running optimization.'%(data_filename,params_filename))\n samples = src.get_samples(5)\n coefs = mdo.multi_optimize(samples,blobf.spectra_to_bscan,show_all=False,show_final=True,verbose=False,diagnostics=diagnostics)\n params['mapping_dispersion_coefficients'] = coefs\n\n # get the folder name for storing bscans\n bscan_folder = file_manager.get_bscan_folder(data_filename)\n\n \n \n while src.has_more_frames():\n\n k = src.current_frame_index\n outfn = os.path.join(bscan_folder,file_manager.bscan_template%k)\n frame = src.next_frame()\n \n if not os.path.exists(outfn):\n # compute the B-scan from the spectra, using the provided dispersion coefficients:\n bscan = blobf.spectra_to_bscan(coefs,frame,diagnostics=diagnostics)\n\n # save the complex B-scan in the B-scan folder\n np.save(outfn,bscan)\n logging.info('Saving bscan %s.'%outfn)\n else:\n logging.info('%s exists. Skipping.'%outfn)\n\n # Skip this for now. 
Needs discussion.\n blobf.flatten_volume(bscan_folder,diagnostics=diagnostics)\n\n flattened_folder = os.path.join(bscan_folder,'flattened')\n \n # Process the ORG blocks\n org_tools.process_org_blocks(flattened_folder,redo=True)\n \n\nif __name__=='__main__':\n unp_file_list = glob.glob('*.unp')\n unp_file_list.sort()\n\n\n files_to_process = unp_file_list[:1]\n \n try:\n import multiprocessing as mp\n do_mp = True\n n_cores_available = mp.cpu_count()\n n_cores = n_cores_available-2\n logging.info('n_cores_available: %d'%n_cores_available)\n logging.info('n_cores to be used: %d'%n_cores)\n pool = mp.Pool(n_cores)\n pool.map(process_dataset,files_to_process)\n \n except ImportError as ie:\n for f in files_to_process:\n process_dataset(f)\n" }, { "alpha_fraction": 0.5715835094451904, "alphanum_fraction": 0.5802602767944336, "avg_line_length": 31.263158798217773, "blob_id": "c664ebca9b2ccad913bdf52781f1d61bc4e1f54c", "content_id": "fa0028a0df6349cabbfe2d7cc4e2a02c5cb4ec8c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1844, "license_type": "no_license", "max_line_length": 71, "num_lines": 57, "path": "/diagnostics_tools.py", "repo_name": "rjonnal/octoblob", "src_encoding": "UTF-8", "text": "from matplotlib import pyplot as plt\nimport os,sys,glob\nimport logging\nfrom octoblob import logger\nimport inspect\n\nclass Diagnostics:\n\n def __init__(self,filename,limit=3):\n self.filename = filename\n ext = os.path.splitext(filename)[1]\n self.folder = filename.replace(ext,'')+'_diagnostics'\n os.makedirs(self.folder,exist_ok=True)\n self.limit = limit\n self.dpi = 150\n self.figures = {}\n self.labels = {}\n self.counts = {}\n self.done = []\n #self.fig = plt.figure()\n\n def log(self,title,header,data,fmt,clobber):\n print(title)\n print(header)\n print(fmt%data)\n \n def save(self,figure_handle,label=None,ignore_limit=False):\n label = inspect.currentframe().f_back.f_code.co_name\n if label in self.done:\n return\n \n subfolder = os.path.join(self.folder,label)\n index = self.counts[label]\n \n if index<self.limit or ignore_limit:\n outfn = os.path.join(subfolder,'%s_%05d.png'%(label,index))\n plt.figure(label)\n plt.suptitle(label)\n plt.savefig(outfn,dpi=self.dpi)\n logging.info('Saving %s.'%outfn)\n self.counts[label]+=1\n else:\n self.done.append(label)\n plt.close(figure_handle.number)\n \n\n def figure(self,figsize=(4,4),dpi=100,label=None):\n label = inspect.currentframe().f_back.f_code.co_name\n subfolder = os.path.join(self.folder,label)\n if not label in self.counts.keys():\n self.counts[label] = 0\n os.makedirs(subfolder,exist_ok=True)\n fig = plt.figure(label)\n fig.clear()\n fig.set_size_inches(figsize[0],figsize[1], forward=True)\n #out = plt.figure(figsize=figsize,dpi=dpi)\n return fig\n \n" }, { "alpha_fraction": 0.6098548173904419, "alphanum_fraction": 0.6268507242202759, "avg_line_length": 36.84684753417969, "blob_id": "f728462bfaaa7b1f68f47130005bb23f572b60c6", "content_id": "1695dd43d784ed156c0f9025ed66261a2cc8a503", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 21005, "license_type": "no_license", "max_line_length": 151, "num_lines": 555, "path": "/dispersion_ui.py", "repo_name": "rjonnal/octoblob", "src_encoding": "UTF-8", "text": "#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n\nfrom PyQt5.QtCore import Qt\nfrom PyQt5.QtGui import QImage, QPixmap, QPalette, QPainter\nfrom PyQt5.QtPrintSupport import QPrintDialog, QPrinter\nfrom PyQt5.QtWidgets import 
QLabel, QSizePolicy, QScrollArea, QMessageBox, QMainWindow, QMenu, QAction, \\\n qApp, QFileDialog\n\nimport numpy as np\nfrom matplotlib import pyplot as plt\nimport scipy.optimize as spo\nimport sys,os,glob\nimport scipy.interpolate as spi\n\n#######################################\n## Constants here--adjust as necessary:\n\ndB_lims = [45,85]\ncrop_height = 300 # height of viewable B-scan, centered at image z centroid (center of mass)\n\n# step sizes for incrementing/decrementing coefficients:\nmapping_steps = [1e-9,1e-6]\ndispersion_steps = [1e-9,1e-8]\nfbg_position = 90\nbit_shift_right = 4\nwindow_sigma = 0.9\n\nui_width = 400\nui_height = 600\n#######################################\n\n# Now we'll define some functions for the half-dozen or so processing\n# steps:\n\ndef load_spectra(fn,index=50):\n ext = os.path.splitext(fn)[1]\n if ext.lower()=='.unp':\n from octoblob import config_reader,data_source\n import octoblob as blob\n \n src = data_source.DataSource(fn)\n\n index = index%(src.n_slow*src.n_vol)\n spectra = src.get_frame(index)\n elif ext.lower()=='.npy':\n spectra = np.load(fn)\n else:\n sys.exit('File %s is of unknown type.'%fn)\n return spectra\n\n# We need a way to estimate and remove DC:\ndef dc_subtract(spectra):\n \"\"\"Estimate DC by averaging spectra spatially (dimension 1),\n then subtract by broadcasting.\"\"\"\n dc = spectra.mean(1)\n # Do the subtraction by array broadcasting, for efficiency.\n # See: https://numpy.org/doc/stable/user/basics.broadcasting.html\n out = (spectra.T-dc).T\n return out\n\n\n# Next we need a way to adjust the values of k at each sample, and then\n# interpolate into uniformly sampled k:\ndef k_resample(spectra,coefficients):\n \"\"\"Resample the spectrum such that it is uniform w/r/t k.\n Notes:\n 1. The coefficients here are for a polynomial defined on\n pixels, so they're physically meaningless. It would be\n better to define our polynomials on k, because then\n we could more easily quantify and compare the chirps\n of multiple light sources, for instance. 
Ditto for the\n dispersion compensation code.\n \"\"\"\n coefficients = coefficients + [0.0,0.0]\n # x_in specified on array index 1..N+1\n x_in = np.arange(1,spectra.shape[0]+1)\n\n # define an error polynomial, using the passed coefficients, and then\n # use this polynomial to define the error at each index 1..N+1\n error = np.polyval(coefficients,x_in)\n x_out = x_in + error\n\n # using the spectra measured at indices x_in, interpolate the spectra at indices x_out\n # See: https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.interp1d.html\n interpolator = spi.interp1d(x_in,spectra,axis=0,kind='cubic',fill_value='extrapolate')\n interpolated = interpolator(x_out)\n return interpolated\n\n# Next we need to dispersion compensate; for historical reasons the correction polynomial\n# is defined on index x rather than k, but for physically meaningful numbers we should\n# use k instead\ndef dispersion_compensate(spectra,coefficients):\n coefs = list(coefficients) + [0.0,0.0]\n # define index x:\n x = np.arange(1,spectra.shape[0]+1)\n # define the phasor and multiply by spectra using broadcasting:\n dechirping_phasor = np.exp(-1j*np.polyval(coefs,x))\n dechirped = (spectra.T*dechirping_phasor).T\n return dechirped\n\n\n# Next we multiply the spectra by a Gaussian window, in order to reduce ringing\n# in the B-scan due to edges in the spectra:\ndef gaussian_window(spectra,sigma):\n # Define a Gaussian window with passed sigma\n x = np.exp(-((np.linspace(-1.0,1.0,spectra.shape[0]))**2/sigma**2))\n # Multiply spectra by window using broadcasting:\n out = (spectra.T*x).T\n return out\n\n\n# Now let's define a processing function that takes the spectra and two dispersion coefficients\n# and produces a B-scan:\ndef process_bscan(spectra,mapping_coefficients=[0.0],dispersion_coefficients=[0.0],window_sigma=0.9):\n spectra = dc_subtract(spectra)\n # When we call dispersion_compensate, we have to pass the c3 and c2 coefficients as well as\n # two 0.0 values, to make clear that we want orders 3, 2, 1, 0. This enables us to use the\n # polyval function of numpy instead of writing the polynomial ourselves, e.g. c3*x**3+c2*x**x**2,\n # since the latter is more likely to cause bugs.\n spectra = k_resample(spectra,mapping_coefficients)\n spectra = dispersion_compensate(spectra,dispersion_coefficients)\n spectra = gaussian_window(spectra,sigma=window_sigma)\n bscan = np.fft.fft(spectra,axis=0)\n return bscan\n\n\n\n# An example of optimizing dispersion:\n\n# First, we need an objective function that takes the two dispersion coefficients and outputs\n# a single value to be minimized; for simplicity, we'll use the reciprocal of the brightest\n# pixel in the image. An oddity here is that the function can see outside its scope and thus\n# has access to the variable 'spectra', defined at the top by loading from the NPY file. We\n# then call our process_bscans function, using the coefficients passed into this objective\n# function. From the resulting B-scan, we calculate our value to be minimized:\ndef obj_func(coefs,save=False):\n bscan = process_bscan(spectra,coefs)\n # we don't need the complex conjugate, so let's determine the size of the B-scan and crop\n # the bottom half (sz//2:) for use. 
(// means integer division--we can't index with floats;\n # also, the sz//2: is implied indexing to the bottom of the B-scan:\n sz,sx = bscan.shape\n bscan = bscan[sz//2:,:]\n # we also want to avoid DC artifacts from dominating the image brightness or gradients,\n # so let's remove the bottom, using negative indexing.\n # See: https://numpy.org/devdocs/user/basics.indexing.html\n bscan = bscan[:-50,:]\n # Finally let's compute the amplitude (modulus) max and return its reciprocal:\n bscan = np.abs(bscan)\n bscan = bscan[-300:] # IMPORTANT--THIS WON'T WORK IN GENERAL, ONLY ON THIS DATA SET 16_53_25\n out = 1.0/np.max(bscan)\n \n # Maybe we want to visualize it; change to False to speed things up\n if True:\n # clear the current axis\n plt.cla()\n # show the image:\n plt.imshow(20*np.log10(bscan),cmap='gray',clim=dB_lims)\n # pause:\n plt.pause(0.001)\n\n if save:\n order = len(coefs)+1\n os.makedirs('dispersion_compensation_results',exist_ok=True)\n plt.cla()\n plt.imshow(20*np.log10(bscan),cmap='gray',clim=dB_lims)\n plt.title('order %d\\n %s'%(order,list(coefs)+[0.0,0.0]),fontsize=10)\n plt.colorbar()\n plt.savefig('dispersion_compensation_results/order_%d.png'%order,dpi=150)\n return out\n\n\n# Now we can define some settings for the optimization:\n\ndef optimize_dispersion(spectra,obj_func,initial_guess):\n\n # spo.minimize accepts an additional argument, a dictionary containing further\n # options; we want can specify an error tolerance, say about 1% of the bounds.\n # we can also specify maximum iterations:\n optimization_options = {'xatol':1e-10,'maxiter':10000}\n\n # optimization algorithm:\n # See: https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.minimize.html\n method = 'nelder-mead'\n\n # Now we run it; Nelder-Mead cannot use bounds, so we pass None\n res = spo.minimize(obj_func,initial_guess,method='nelder-mead',bounds=None,options=optimization_options)\n \n\nif len(sys.argv)>=2:\n if sys.argv[1]=='0':\n spectra = np.load('./spectra_00100.npy')\n bscan = process_bscan(spectra,window_sigma=900)\n plt.imshow(20*np.log10(np.abs(bscan))[-300:-100],cmap='gray',clim=dB_lims)\n plt.show()\n\n sys.exit()\n\nclass QImageViewer(QMainWindow):\n def __init__(self):\n super().__init__()\n\n self.printer = QPrinter()\n self.scaleFactor = 0.0\n self.mapping_coefficients = [0.0,0.0]\n self.dispersion_coefficients = [0.0,0.0]\n \n self.mapping_steps = mapping_steps\n self.dispersion_steps = dispersion_steps\n\n self.cropz1 = None\n self.cropz2 = None\n \n self.imageLabel = QLabel()\n self.imageLabel.setBackgroundRole(QPalette.Base)\n self.imageLabel.setSizePolicy(QSizePolicy.Ignored, QSizePolicy.Ignored)\n self.imageLabel.setScaledContents(True)\n\n self.scrollArea = QScrollArea()\n self.scrollArea.setBackgroundRole(QPalette.Dark)\n self.scrollArea.setWidget(self.imageLabel)\n self.scrollArea.setVisible(False)\n\n self.setCentralWidget(self.scrollArea)\n\n self.createActions()\n self.createMenus()\n\n self.setWindowTitle(\"Image Viewer\")\n self.resize(ui_width, ui_height)\n\n self.spectra = None\n self.crop_half_height = crop_height//2\n self.bscan_max_gradient = 0.0\n self.bscan_max_amplitude = 0.0\n self.window_sigma = window_sigma\n self.dB_lims = dB_lims\n self.bscan_max_amplitude_original = None\n self.image_loaded = False\n self.frame_index = 0\n \n if len(sys.argv)>=2:\n print(sys.argv)\n fn = sys.argv[1]\n self.spectra = load_spectra(fn,self.frame_index)\n self.image_loaded = True\n \n self.update_image()\n self.filename = fn\n 
self.fitToWindowAct.setEnabled(True)\n\n    \n\n    def update_image(self):\n        image = self.get_bscan_qimage(self.spectra,mapping_coefficients=self.mapping_coefficients,dispersion_coefficients=self.dispersion_coefficients)\n        self.imageLabel.setPixmap(QPixmap.fromImage(image))\n        self.scaleFactor = 1.0\n        \n        self.scrollArea.setVisible(True)\n        self.updateActions()\n\n        if not self.fitToWindowAct.isChecked():\n            self.imageLabel.adjustSize()\n\n        def f(L):\n            return ['%0.3e'%k for k in L]\n        \n        print('mapping coefficients: %s'%(f(self.mapping_coefficients)))\n        print('dispersion coefficients: %s'%(f(self.dispersion_coefficients)))\n        print('dB contrast lims: %s'%self.dB_lims)\n        print('spectral Gaussian window sigma: %0.3f'%self.window_sigma)\n        print('frame index: %d'%self.frame_index)\n        print('bscan max amplitude: %0.1f'%self.bscan_max_amplitude)\n        print('bscan max gradient: %0.1f'%self.bscan_max_gradient)\n        print('bscan mean gradient: %0.1f'%self.bscan_mean_gradient)\n        n_hash = min(round(self.bscan_max_amplitude/self.bscan_max_amplitude_original*100.0),120)\n        print('max: '+'#'*n_hash)\n        \n    def open(self):\n        options = QFileDialog.Options()\n        # fileName = QFileDialog.getOpenFileName(self, \"Open File\", QDir.currentPath())\n        fileName, _ = QFileDialog.getOpenFileName(self, 'QFileDialog.getOpenFileName()', '',\n                                                  'Images (*.npy *.unp)', options=options)\n        if fileName:\n            # self.spectra = np.load(fileName)\n            self.spectra = load_spectra(fileName)\n            self.image_loaded = True\n            #self.dispersion_coefficients = [0.0,0.0]\n            #self.mapping_coefficients = [0.0,0.0]\n            \n            self.update_image()\n            self.fitToWindowAct.setEnabled(True)\n            self.filename = fileName\n            \n    def zoomIn(self):\n        self.scaleImage(1.25)\n\n    def zoomOut(self):\n        self.scaleImage(0.8)\n\n    def normalSize(self):\n        self.imageLabel.adjustSize()\n        self.scaleFactor = 1.0\n\n    def fitToWindow(self):\n        fitToWindow = self.fitToWindowAct.isChecked()\n        self.scrollArea.setWidgetResizable(fitToWindow)\n        if not fitToWindow:\n            self.normalSize()\n\n        self.updateActions()\n\n    def about(self):\n        QMessageBox.about(self, \"OCT Viewer\",\n                          \"<p>For the following adjustments, use CTRL for 10x and SHIFT-CTRL for 100x:<br>\"\n                          \"Y/H keys increase/decrease 3rd order mapping coefficient<br>\"\n                          \"U/J keys increase/decrease 2nd order mapping coefficient<br>\"\n                          \"I/K keys increase/decrease 3rd order dispersion coefficient<br>\"\n                          \"O/L keys increase/decrease 2nd order dispersion coefficient<br></p>\"\n                          \"<p>R/F increase/decrease lower contrast limit (dB)</p>\"\n                          \"<p>T/G increase/decrease upper contrast limit (dB)</p>\"\n                          \"<p>E/D increase/decrease spectral Gaussian window sigma</p>\"\n                          \"<p>Z sets dispersion and mapping coefficients to all zeros</p>\"\n                          \"<p>See menus for other options.</p>\")\n\n    def createActions(self):\n        self.openAct = QAction(\"O&pen...\", self, shortcut=\"Ctrl+P\", triggered=self.open)\n        self.exitAct = QAction(\"E&xit\", self, shortcut=\"Ctrl+Q\", triggered=self.close)\n        self.zoomInAct = QAction(\"Zoom &In (25%)\", self, shortcut=\"Ctrl+=\", enabled=False, triggered=self.zoomIn)\n        self.zoomOutAct = QAction(\"Zoom &Out (25%)\", self, shortcut=\"Ctrl+-\", enabled=False, triggered=self.zoomOut)\n        self.normalSizeAct = QAction(\"&Normal Size\", self, shortcut=\"Ctrl+S\", enabled=False, triggered=self.normalSize)\n        self.fitToWindowAct = QAction(\"&Fit to Window\", self, enabled=False, checkable=True, shortcut=\"Ctrl+F\",\n                                      triggered=self.fitToWindow)\n        self.aboutAct = QAction(\"&About OCTView\", self, triggered=self.about)\n        \n\n    def c3up(self,multiplier=1.0):\n        
self.dispersion_coefficients[0]+=self.dispersion_steps[0]*multiplier\n self.update_image()\n def c3down(self,multiplier=1.0):\n self.dispersion_coefficients[0]-=self.dispersion_steps[0]*multiplier\n self.update_image()\n def c2up(self,multiplier=1.0):\n self.dispersion_coefficients[1]+=self.dispersion_steps[1]*multiplier\n self.update_image()\n def c2down(self,multiplier=1.0):\n self.dispersion_coefficients[1]-=self.dispersion_steps[1]*multiplier\n self.update_image()\n\n\n def m3up(self,multiplier=1.0):\n self.mapping_coefficients[0]+=self.mapping_steps[0]*multiplier\n self.update_image()\n def m3down(self,multiplier=1.0):\n self.mapping_coefficients[0]-=self.mapping_steps[0]*multiplier\n self.update_image()\n def m2up(self,multiplier=1.0):\n self.mapping_coefficients[1]+=self.mapping_steps[1]*multiplier\n self.update_image()\n def m2down(self,multiplier=1.0):\n self.mapping_coefficients[1]-=self.mapping_steps[1]*multiplier\n self.update_image()\n\n\n def ulimup(self):\n self.dB_lims[1]+=1\n self.update_image()\n\n def ulimdown(self):\n self.dB_lims[1]-=1\n self.update_image()\n\n def llimup(self):\n self.dB_lims[0]+=1\n self.update_image()\n\n def llimdown(self):\n self.dB_lims[0]-=1\n self.update_image()\n \n\n def wsup(self):\n self.window_sigma*=1.1\n self.update_image()\n\n def wsdown(self):\n self.window_sigma/=1.1\n self.update_image()\n\n def keyPressEvent(self, e):\n if not self.image_loaded:\n return\n \n mod = e.modifiers()\n\n if (mod & Qt.ControlModifier) and (mod & Qt.ShiftModifier):\n multiplier = 100.0\n elif (mod & Qt.ControlModifier):\n multiplier = 10.0\n else:\n multiplier = 1.0\n \n \n # if e.modifiers() == Qt.ControlModifier:\n # multiplier = 10.0\n # elif e.modifiers() == (Qt.ControlModifier | Qt.ShiftModifier):\n # multiplier = 100.0\n # else:\n # multiplier = 1.0\n \n if e.key() == Qt.Key_Y:\n self.m3up(multiplier)\n elif e.key() == Qt.Key_H:\n self.m3down(multiplier)\n \n elif e.key() == Qt.Key_U:\n self.m2up(multiplier)\n elif e.key() == Qt.Key_J:\n self.m2down(multiplier)\n\n elif e.key() == Qt.Key_I:\n self.c3up(multiplier)\n elif e.key() == Qt.Key_K:\n self.c3down(multiplier)\n \n elif e.key() == Qt.Key_O:\n self.c2up(multiplier)\n elif e.key() == Qt.Key_L:\n self.c2down(multiplier)\n\n elif e.key() == Qt.Key_R:\n self.llimup()\n elif e.key() == Qt.Key_F:\n self.llimdown()\n\n elif e.key() == Qt.Key_T:\n self.ulimup()\n\n elif e.key() == Qt.Key_G:\n self.ulimdown()\n \n elif e.key() == Qt.Key_E:\n self.wsup()\n elif e.key() == Qt.Key_D:\n self.wsdown()\n\n elif e.key() == Qt.Key_PageUp:\n self.frame_index+=1\n self.spectra = load_spectra(self.filename,self.frame_index)\n self.update_image()\n\n elif e.key() == Qt.Key_PageDown:\n self.frame_index-=1\n self.spectra = load_spectra(self.filename,self.frame_index)\n self.update_image()\n\n elif e.key() == Qt.Key_Z:\n self.dispersion_coefficients = [0.0,0.0]\n self.mapping_coefficients = [0.0,0.0]\n self.update_image()\n \n elif e.key() == Qt.Key_Escape:\n self.close()\n \n\n def createMenus(self):\n self.fileMenu = QMenu(\"&File\", self)\n self.fileMenu.addAction(self.openAct)\n self.fileMenu.addSeparator()\n self.fileMenu.addAction(self.exitAct)\n\n self.viewMenu = QMenu(\"&View\", self)\n self.viewMenu.addAction(self.zoomInAct)\n self.viewMenu.addAction(self.zoomOutAct)\n self.viewMenu.addAction(self.normalSizeAct)\n self.viewMenu.addSeparator()\n self.viewMenu.addAction(self.fitToWindowAct)\n\n self.helpMenu = QMenu(\"&Help\", self)\n self.helpMenu.addAction(self.aboutAct)\n\n 
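# attach the populated menus to the main window's menu bar:\n        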
self.menuBar().addMenu(self.fileMenu)\n self.menuBar().addMenu(self.viewMenu)\n self.menuBar().addMenu(self.helpMenu)\n\n def updateActions(self):\n self.zoomInAct.setEnabled(not self.fitToWindowAct.isChecked())\n self.zoomOutAct.setEnabled(not self.fitToWindowAct.isChecked())\n self.normalSizeAct.setEnabled(not self.fitToWindowAct.isChecked())\n\n def scaleImage(self, factor):\n self.scaleFactor *= factor\n self.imageLabel.resize(self.scaleFactor * self.imageLabel.pixmap().size())\n\n self.adjustScrollBar(self.scrollArea.horizontalScrollBar(), factor)\n self.adjustScrollBar(self.scrollArea.verticalScrollBar(), factor)\n\n self.zoomInAct.setEnabled(self.scaleFactor < 3.0)\n self.zoomOutAct.setEnabled(self.scaleFactor > 0.333)\n\n def adjustScrollBar(self, scrollBar, factor):\n scrollBar.setValue(int(factor * scrollBar.value()\n + ((factor - 1) * scrollBar.pageStep() / 2)))\n\n def get_bscan_qimage(self,spectra,mapping_coefficients=[0.0],dispersion_coefficients=[0.0],window_sigma=0.9):\n bscan = process_bscan(spectra,mapping_coefficients,dispersion_coefficients,self.window_sigma)\n bscan = bscan[bscan.shape[0]//2:,:]\n bscan = np.abs(bscan)\n self.bscan_max_amplitude = np.max(bscan)\n abs_grad = np.abs(np.diff(bscan,axis=0))\n self.bscan_max_gradient = np.max(abs_grad)\n self.bscan_mean_gradient = np.mean(abs_grad)\n\n if self.bscan_max_amplitude_original is None:\n self.bscan_max_amplitude_original = self.bscan_max_amplitude\n\n bscan = 20*np.log10(bscan)\n #bscan = bscan.T\n\n\n if self.cropz1 is None:\n bprof = np.mean(bscan,axis=1)\n bprof = bprof - np.min(bprof)\n z = np.arange(len(bprof))\n com = int(round(np.sum(bprof*z)/np.sum(bprof)))\n bscan = bscan[com-self.crop_half_height:com+self.crop_half_height,:]\n self.cropz1 = com-self.crop_half_height\n self.cropz2 = com+self.crop_half_height\n else:\n bscan = bscan[self.cropz1:self.cropz2]\n\n bscan = np.clip(bscan,*self.dB_lims)\n\n bscan = (bscan-np.min(bscan))/(np.max(bscan)-np.min(bscan))\n bscan = bscan*255\n img = np.round(bscan).astype(np.uint8)\n\n img = np.zeros((bscan.shape[0], bscan.shape[1]), dtype=np.uint8)\n img[:] = bscan[:]\n # Turn up red channel to full scale\n #img[...,0] = 255\n qimage = QImage(img.data, img.shape[1], img.shape[0], img.shape[1], QImage.Format_Grayscale8)\n\n #qimage = QImage(bscan,bscan.shape[1],bscan.shape[0],QImage.Format_RGB888)\n return qimage\n\n\nif __name__ == '__main__':\n import sys\n from PyQt5.QtWidgets import QApplication\n\n app = QApplication(sys.argv)\n imageViewer = QImageViewer()\n imageViewer.show()\n sys.exit(app.exec_())\n" }, { "alpha_fraction": 0.7845723628997803, "alphanum_fraction": 0.7873700857162476, "avg_line_length": 85.27586364746094, "blob_id": "8213dd6bd66fdee28d808e0b2ec98fc1b215dbbb", "content_id": "b20331670a0665790eb5d9a19d643c9e641cb74c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2502, "license_type": "no_license", "max_line_length": 808, "num_lines": 29, "path": "/examples/processing_flipped/README.md", "repo_name": "rjonnal/octoblob", "src_encoding": "UTF-8", "text": "# Processing flipped data\n\nLike the `examples/fbg_alignment` example, this example exposes more of the steps in OCT post-processing to the user, this time in order to permit 1) flipping of B-scans collected in the so-called \"EDI\" (extended depth imaging) mode. This simply means matching the reference arm length with the choroid, such that the outer retinal OCT is acquired with higher SNR than the inner retina. 
In our Axsun system, this results in the retina being inverted in the positive frequency DFT--the side of the DFT we ordinarily process. These B-scans must be flipped in order to do subsequent ORG processing.\n\nThe general goal of exposing more of the OCT post-processing to the user is to enable more flexibility. Here, instead of calling `octoblob.functions.spectra_to_bscan`, a local function `spectra_to_bscan` is called, which itself calls functions from the `octoblob.functions` library. Similar to `examples/fbg_alignment`, this version employs cross-correlation to align spectra to the FBG features. This approach appears, for the time being, to be as robust as the older method that aligned by most positive or most negative gradient, and has the additional benefit of not requiring the FBG features to be optimized (via the polarization controller just downstream of the source). As long as the FBG has some clear effect on the acquired spectra, they can be aligned and made phase stable by cross-correlation.\n\nThe flipping of the B-scan is done with a single line of code in the `spectra_to_bscan` function:\n\n```python\n# Flip the B-scan, since it was acquired near the top of the view, inverted\nbscan = bscan[::-1,:]\n```\n\nPlease see documentation on [extended slices](https://docs.python.org/release/2.3.5/whatsnew/section-slices.html) and the effect of negative strides for an explanation of how this works.\n\n## Folder contents\n\n* process.py: OCT/ORG processing script\n\n* plot_general_org.py: ORG visualization (see `examples/single_flash_org_general` for more information)\n\n* reset.sh: a bash script for deleting all of the downloaded and processed data, mainly for cleaning up this folder before pushing changes to github\n\n\n## B-scan processing \n\n1. Using the Anaconda terminal (command prompt), change into the `octoblob/examples/processing_flipped_bscans` folder and run the program by issuing `python process.py XXX.unp`, where `XXX.unp` contains raw data from inverted B-scan acquisition.\n\n2. 
By default, this script does ORG processing on the resulting B-scans as well.\n" }, { "alpha_fraction": 0.5422713756561279, "alphanum_fraction": 0.5626505017280579, "avg_line_length": 37.83213424682617, "blob_id": "89c34ffd17defc2ab97d9f5760ebcb68bc576ebd", "content_id": "0e4863a37a31843e45a73ecab3c0860e86f8b802", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 16193, "license_type": "no_license", "max_line_length": 139, "num_lines": 417, "path": "/histogram.py", "repo_name": "rjonnal/octoblob", "src_encoding": "UTF-8", "text": "import sys,os,time\nimport numpy as np\nfrom matplotlib import pyplot as plt\nimport scipy.signal as sps\nimport scipy.interpolate as spi\nimport scipy.io as sio\n\n\ndef centers_to_edges(bin_centers):\n # convert an array of bin centers to bin edges, using the mean\n # spacing of the centers to determine bin width\n\n # check if sorted:\n assert all(bin_centers[1:]>bin_centers[:-1])\n\n bin_width = np.mean(np.diff(bin_centers))\n half_width = bin_width/2.0\n first_edge = bin_centers[0]-half_width\n last_edge = bin_centers[-1]+half_width\n return np.linspace(first_edge,last_edge,len(bin_centers)+1)\n\ndef bin_shift_histogram(vals,bin_centers,resample_factor=1,diagnostics=None):\n shifts = np.linspace(bin_centers[0]/float(len(bin_centers)),\n bin_centers[-1]/float(len(bin_centers)),resample_factor)\n\n #print('shifts:')\n #print(shifts)\n\n #print('bin centers:')\n #print(bin_centers)\n \n n_shifts = len(shifts)\n n_bins = len(bin_centers)\n\n all_counts = np.zeros((n_shifts,n_bins))\n all_edges = np.zeros((n_shifts,n_bins+1))\n\n for idx,s in enumerate(shifts):\n edges = centers_to_edges(bin_centers+s)\n all_counts[idx,:],all_edges[idx,:] = np.histogram(vals,edges)\n\n all_centers = (all_edges[:,:-1]+all_edges[:,1:])/2.0\n all_counts = all_counts/float(resample_factor)\n all_centers = all_centers\n\n if diagnostics is not None:\n bin_edges = centers_to_edges(bin_centers)\n bin_width = np.mean(np.diff(bin_edges))\n shift_size = np.mean(np.diff(shifts))\n \n fig = diagnostics.figure(figsize=(3*1,1),dpi=100)\n plt.subplot(1,3,1)\n plt.imshow(all_counts)\n plt.title('counts')\n plt.xlabel('bins')\n plt.ylabel('shifts')\n \n #plt.gca().set_yticks(np.arange(0,n_shifts,3))\n #plt.gca().set_yticklabels(['%0.2f'%s for s in shifts])\n\n #plt.gca().set_xticks(np.arange(0,n_bins,3))\n #plt.gca().set_xticklabels(['%0.2f'%bc for bc in bin_centers])\n plt.colorbar()\n\n all_counts = all_counts.T\n all_centers = all_centers.T.ravel()\n\n plt.subplot(1,3,2)\n plt.hist(vals,bin_edges,width=bin_width*0.8)\n plt.title('standard histogram')\n plt.subplot(1,3,3)\n plt.bar(all_centers.ravel(),all_counts.ravel(),width=shift_size*0.8)\n plt.title('bin shifted histogram')\n\n \n #diagnostics.save(fig)\n\n return all_counts.T.ravel(),all_centers.T.ravel()\n\ndef test_bin_shift_histogram(N=1000,mu=2.5,sigma=30.0):\n\n s1 = np.random.rand(N)*sigma\n s2 = s1+mu\n noise1 = np.random.randn(N)*2.0\n noise2 = np.random.randn(N)*2.0\n s1 = s1%(np.pi*2)\n s2 = s2%(np.pi*2)\n\n s1 = s1 + noise1\n s2 = s2 + noise2\n\n \n vals = s2-s1\n \n resample_factor = 4\n bin_edges_sparse = np.linspace(0,2*np.pi,16)\n bin_edges_dense = np.linspace(0,2*np.pi,16*resample_factor)\n #vals = (vals+np.pi)%(2*np.pi)-np.pi\n\n counts,centers = bin_shift_histogram(vals,bin_edges_sparse,resample_factor)\n fig = plt.figure()\n plt.subplot(1,3,1)\n plt.hist(vals,bin_edges_sparse)\n plt.title('sparse histogram')\n plt.xlim((0,2*np.pi))\n plt.subplot(1,3,2)\n 
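# for comparison, a conventional histogram over the same data with densely spaced fixed bins:\n    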
plt.hist(vals,bin_edges_dense)\n plt.title('dense histogram')\n plt.xlim((0,2*np.pi))\n plt.subplot(1,3,3)\n plt.bar(centers,counts)\n plt.title('resampled histogram')\n plt.xlim((0,2*np.pi))\n \n#test_bin_shift_histogram()\n\ndef wrap_into_range(arr,phase_limits=(-np.pi,np.pi)):\n lower,upper = phase_limits\n above_range = np.where(arr>upper)\n below_range = np.where(arr<lower)\n arr[above_range]-=2*np.pi\n arr[below_range]+=2*np.pi\n return arr\n\n\ndef make_mask(im,threshold=None,diagnostics=None):\n if threshold is None:\n threshold = np.percentile(im,90)\n \n mask = np.zeros(im.shape)\n mask[np.where(im>threshold)] = 1\n\n if diagnostics is not None:\n fig = diagnostics.figure()\n ax = fig.subplots(1,1)\n ax.imshow(mask)\n diagnostics.save(fig)\n \n return mask\n\ndef get_phase_jumps(phase_stack,mask,\n n_bins=16,\n resample_factor=24,\n n_smooth=5,polynomial_smoothing=True,diagnostics=None):\n\n # Take a stack of B-scan phase arrays, with dimensions\n # (z,x,repeats), and return a bulk-motion corrected\n # version\n #phase_stack = np.transpose(phase_stack,(1,2,0))\n n_depth = phase_stack.shape[0]\n n_fast = phase_stack.shape[1]\n n_reps = phase_stack.shape[2]\n \n d_phase_d_t = np.diff(phase_stack,axis=2)\n # multiply each frame of the diff array by\n # the mask, so that only valid values remain;\n # Then wrap any values above pi or below -pi into (-pi,pi) interval.\n d_phase_d_t = wrap_into_range(d_phase_d_t)\n\n\n if diagnostics is not None:\n fig = diagnostics.figure(figsize=((n_reps-1)*1,2*1),dpi=100)\n plt.suptitle('phase shifts between adjacent frames in cluster')\n for rep in range(1,n_reps):\n plt.subplot(2,n_reps-1,rep)\n plt.imshow(d_phase_d_t[:,:,rep-1],aspect='auto')\n if rep==1:\n plt.ylabel('unmasked')\n if rep==n_reps-1:\n plt.colorbar()\n plt.title(r'$d\\theta_{%d,%d}$'%(rep,rep-1))\n plt.xticks([])\n plt.yticks([])\n plt.subplot(2,n_reps-1,rep+(n_reps-1))\n plt.imshow(mask*d_phase_d_t[:,:,rep-1],aspect='auto')\n if rep==1:\n plt.ylabel('masked')\n if rep==n_reps-1:\n plt.colorbar()\n plt.xticks([])\n plt.yticks([])\n diagnostics.save(fig)\n \n d_phase_d_t = np.transpose(np.transpose(d_phase_d_t,(2,0,1))*mask,(1,2,0))\n\n \n bin_edges = np.linspace(-np.pi,np.pi,n_bins)\n \n # The key idea here is from Makita, 2006, where it is well explained. In\n # addition to using the phase mode, we also do bin-shifting, in order to\n # smooth the histogram. Again departing from Justin's approach, let's\n # just specify the top level bins and a resampling factor, and let the\n # histogram function do all the work of setting the shifted bin edges.\n\n b_jumps = np.zeros((d_phase_d_t.shape[1:]))\n bin_counts = np.zeros((d_phase_d_t.shape[1:]))\n\n if diagnostics is not None:\n fig = diagnostics.figure(figsize=((n_reps-1)*1,1*1),dpi=100)\n total_bins = n_bins*resample_factor\n hist_sets = np.zeros((n_reps-1,n_fast,total_bins))\n\n for f in range(n_fast):\n valid_idx = np.where(mask[:,f])[0]\n for r in range(n_reps-1):\n vals = d_phase_d_t[valid_idx,f,r]\n if diagnostics is not None:\n # RSJ, 23 March 2020:\n # We'll add a simple diagnostic here--a printout of the number of samples and the\n # interquartile range. These can be used to determine the optimal bin width, following\n # Makita et al. 2006 \"Optical coherence angiography\" eq. 
3.\n try:\n q75,q25 = np.percentile(vals,(75,25))\n IQ = q75-q25\n m = float(len(vals))\n h = 2*IQ*m**(-1/3)\n n_bins = np.ceil(2*np.pi/h)\n bscan_indices = '%01d-%01d'%(r,r+1)\n #diagnostics.log('histogram_optimal_bin_width',\n # header=['bscan indices','ascan index','IQ','m','h','n_bins'],\n # data=[bscan_indices,f,IQ,m,h,n_bins],\n # fmt=['%s','%d','%0.3f','%d','%0.3f','%d'],clobber=f==0)\n except IndexError:\n pass\n \n # if it's the first rep of the first frame, and diagnostics are requested, print the histogram diagnostics\n if f==0 and r==0:\n [counts,bin_centers] = bin_shift_histogram(vals,bin_edges,resample_factor,diagnostics=diagnostics)\n else:\n [counts,bin_centers] = bin_shift_histogram(vals,bin_edges,resample_factor,diagnostics=None)\n \n if diagnostics is not None:\n hist_sets[r,f,:] = counts\n bulk_shift = bin_centers[np.argmax(counts)]\n bin_count = np.max(counts)\n b_jumps[f,r] = bulk_shift\n bin_counts[f,r] = bin_count\n\n #if polynomial_smoothing:\n # polynomial_smooth_phase(bin_counts,b_jumps)\n \n if diagnostics is not None:\n for idx,hist_set in enumerate(hist_sets):\n plt.subplot(1,n_reps-1,idx+1)\n plt.imshow(hist_set,interpolation='none',aspect='auto',extent=(np.min(bin_centers),np.max(bin_centers),0,n_fast-1),cmap='gray')\n plt.yticks([])\n plt.xlabel(r'$d\\theta_{%d,%d}$'%(idx+1,idx))\n if idx==0:\n plt.ylabel('fast scan index')\n plt.colorbar()\n plt.autoscale(False)\n plt.plot(b_jumps[:,idx],range(n_fast)[::-1],'g.',alpha=0.2)\n plt.suptitle('shifted bulk motion histograms (count)')\n diagnostics.save(fig)\n\n # now let's show some examples of big differences between adjacent A-scans\n legendfontsize = 8\n lblfmt = 'r%d,x%d'\n pts_per_example = 10\n max_examples = 16\n dtheta_threshold = np.pi/2.0\n fig = diagnostics.figure(figsize=(1*4,1*4))\n example_count = 0\n for rep_idx,hist_set in enumerate(hist_sets):\n temp = np.diff(b_jumps[:,rep_idx])\n worrisome_indices = np.where(np.abs(temp)>dtheta_threshold)[0]\n # index n in the diff means the phase correction difference between\n # scans n+1 and n\n for bad_idx,scan_idx in enumerate(worrisome_indices):\n example_count = example_count + 1\n if example_count>max_examples:\n break\n plt.subplot(4,4,example_count)\n mask_line = mask[:,scan_idx]\n voffset = False\n vals = np.where(mask_line)[0][:pts_per_example]\n plt.plot(phase_stack[vals,scan_idx,rep_idx],'rs',label=lblfmt%(rep_idx,scan_idx))\n plt.xticks([])\n plt.plot(phase_stack[vals,scan_idx,rep_idx+1]+voffset*2*np.pi,'gs',label=lblfmt%(rep_idx+1,scan_idx))\n plt.xticks([])\n plt.plot(phase_stack[vals,scan_idx+1,rep_idx]+voffset*4*np.pi,'bs',label=lblfmt%(rep_idx,scan_idx+1))\n plt.xticks([])\n plt.plot(phase_stack[vals,scan_idx+1,rep_idx+1]+voffset*6*np.pi,'ks',label=lblfmt%(rep_idx+1,scan_idx+1))\n plt.xticks([])\n plt.legend(bbox_to_anchor=(0,-0.2,1,0.2), loc=\"upper left\",\n mode=\"expand\", borderaxespad=0, ncol=4, fontsize=legendfontsize)\n plt.title(r'$d\\theta=%0.1f$ rad'%temp[scan_idx])\n if example_count>=max_examples:\n break\n plt.suptitle('scans involved in each mode jump')\n diagnostics.save(fig)\n\n\n # now let's show some examples of small differences between adjacent A-scans\n dtheta_threshold = np.pi/20.0\n fig = diagnostics.figure(figsize=(1*4,1*4))\n example_count = 0\n for rep_idx,hist_set in enumerate(hist_sets):\n temp = np.diff(b_jumps[:,rep_idx])\n good_indices = np.where(np.abs(temp)<dtheta_threshold)[0]\n # index n in the diff means the phase correction difference between\n # scans n+1 and n\n for bad_idx,scan_idx in 
enumerate(good_indices):\n example_count = example_count + 1\n if example_count>max_examples:\n break\n plt.subplot(4,4,example_count)\n mask_line = mask[:,scan_idx]\n voffset = False\n vals = np.where(mask_line)[0][:pts_per_example]\n plt.plot(phase_stack[vals,scan_idx,rep_idx],'rs',label=lblfmt%(rep_idx,scan_idx))\n plt.xticks([])\n plt.plot(phase_stack[vals,scan_idx,rep_idx+1]+voffset*2*np.pi,'gs',label=lblfmt%(rep_idx+1,scan_idx))\n plt.xticks([])\n plt.plot(phase_stack[vals,scan_idx+1,rep_idx]+voffset*4*np.pi,'bs',label=lblfmt%(rep_idx,scan_idx+1))\n plt.xticks([])\n plt.plot(phase_stack[vals,scan_idx+1,rep_idx+1]+voffset*6*np.pi,'ks',label=lblfmt%(rep_idx+1,scan_idx+1))\n plt.xticks([])\n plt.legend(bbox_to_anchor=(0,-0.2,1,0.2), loc=\"upper left\",\n mode=\"expand\", borderaxespad=0, ncol=4, fontsize=legendfontsize)\n plt.title(r'$d\\theta=%0.1f$ rad'%temp[scan_idx])\n if example_count>=max_examples:\n break\n plt.suptitle('scans involved in each mode jump')\n diagnostics.save(fig)\n \n \n # Now unwrap to prevent discontinuities (although this may not impact complex variance)\n b_jumps = np.unwrap(b_jumps,axis=0)\n\n # Smooth by convolution. Don't forget to divide by kernel size!\n # b_jumps = sps.convolve2d(b_jumps,np.ones((n_smooth,1)),mode='same')/float(n_smooth)\n\n return b_jumps\n\ndef bulk_motion_correct(phase_stack,mask,\n n_bins=16,\n resample_factor=24,\n n_smooth=5,diagnostics=None):\n\n # Take a stack of B-scan phase arrays, with dimensions\n # (z,x,repeats), and return a bulk-motion corrected\n # version\n\n n_reps = phase_stack.shape[2]\n\n b_jumps = get_phase_jumps(phase_stack,mask,\n n_bins=n_bins,\n resample_factor=resample_factor,\n n_smooth=n_smooth,\n diagnostics=diagnostics)\n\n # Now, subtract b_jumps from phase_stack, not including the first repeat\n # Important: this is happening by broadcasting--it requires that the\n # last two dimensions of phase_stack[:,:,1:] be equal in size to the two\n # dimensions of b_jumps\n out = np.copy(phase_stack)\n\n if diagnostics is not None:\n #err_clim = (np.min(np.sum(b_jumps,axis=1)),np.max(np.sum(b_jumps,axis=1)))\n phase_clim = (-np.pi,np.pi)\n err_clim = [-np.pi-np.min(-np.sum(b_jumps,axis=1)),np.pi+np.max(-np.sum(b_jumps,axis=1))]\n if err_clim[1]<err_clim[0]:\n err_clim = [-ec for ec in err_clim]\n fig = diagnostics.figure(figsize=((n_reps-1)*1,2*1),dpi=100)\n plt.subplot(2,n_reps+1,1)\n plt.imshow(mask*phase_stack[:,:,0],clim=phase_clim,aspect='auto',interpolation='none')\n plt.xticks([])\n plt.yticks([])\n plt.xlabel('frame 0')\n plt.ylabel('before correction')\n \n plt.subplot(2,n_reps+1,n_reps+2)\n plt.imshow(mask*out[:,:,0],clim=err_clim,aspect='auto',interpolation='none')\n plt.xticks([])\n plt.yticks([])\n plt.xlabel('frame 0')\n plt.ylabel('after correction')\n \n errs = []\n for rep in range(1,n_reps):\n # for each rep, the total error is the sum of\n # all previous errors\n err = np.sum(b_jumps[:,:rep],axis=1)\n errs.append(err)\n out[:,:,rep] = out[:,:,rep]-err\n if diagnostics:\n plt.subplot(2,n_reps+1,rep+1)\n plt.imshow(mask*phase_stack[:,:,rep],clim=phase_clim,aspect='auto',interpolation='none')\n plt.xlabel('frame %d'%rep)\n plt.xticks([])\n plt.yticks([])\n if rep==n_reps-1:\n plt.colorbar()\n\n plt.subplot(2,n_reps+1,n_reps+rep+2)\n plt.imshow(mask*out[:,:,rep],clim=err_clim,aspect='auto',interpolation='none')\n plt.xlabel('frame %d'%rep)\n plt.xticks([])\n plt.yticks([])\n if rep==n_reps-1:\n plt.colorbar()\n \n\n if diagnostics is not None:\n plt.subplot(2,n_reps+1,n_reps+1)\n for idx,err in 
enumerate(errs):\n plt.plot(err,label='f%d'%(idx+1))\n plt.legend()\n diagnostics.save(fig)\n \n out = wrap_into_range(out)\n\n return out\n\nif __name__=='__main__':\n test_bin_shift_histogram()\n" }, { "alpha_fraction": 0.5044264793395996, "alphanum_fraction": 0.539260983467102, "avg_line_length": 31.17337417602539, "blob_id": "1be4fcb3101765cfe404574d05f2392166491ef7", "content_id": "d28386b5c835d31004c0716e6a4a2a0f27274781", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 10392, "license_type": "no_license", "max_line_length": 132, "num_lines": 323, "path": "/examples/flicker_org/plot_velocities_flicker.py", "repo_name": "rjonnal/octoblob", "src_encoding": "UTF-8", "text": "from matplotlib import pyplot as plt\nimport numpy as np\nimport sys,os,glob,shutil\nimport logging\nimport octoblob.functions as blobf\nimport octoblob.org_tools as blobo\nimport pathlib\nplt.rcParams[\"font.family\"] = \"serif\"\nplt.rcParams[\"font.size\"] = 9\n\nbox_alpha = 0.75\nplot_alpha = 0.5\nbox_linewidth = 0.75\nplot_linewidth = 0.75\nmplot_alpha = 1.0\nmplot_linewidth = 1\nflim = (0,30)\n\ndef level(im):\n rv = get_level_roll_vec(im)\n return shear(im,rv)\n\ndef shear(im,roll_vec):\n out = np.zeros(im.shape)\n for idx,r in enumerate(roll_vec):\n out[:,idx] = np.roll(im[:,idx],r)\n return out\n\ndef get_roll_vec(im,row_per_col):\n sy,sx = im.shape\n roll_vec = (np.arange(sx)-sx/2.0)*row_per_col\n roll_vec = np.round(roll_vec).astype(int)\n return roll_vec\n\ndef get_level_roll_vec(im,limit=0.1,N=16):\n rpc_vec = np.linspace(-limit,limit,N)\n rotated_profiles = []\n roll_vecs = []\n for rpc in rpc_vec:\n rv = get_roll_vec(im,rpc)\n sheared = shear(im,rv)\n roll_vecs.append(rv)\n rotated_profiles.append(np.mean(sheared,axis=1))\n\n rotated_profiles = np.array(rotated_profiles)\n rpmax = np.max(rotated_profiles,axis=1)\n widx = np.argmax(rpmax)\n return roll_vecs[widx]\n\ndef path2str(f):\n head,tail = os.path.split(f)\n tails = []\n while len(head)>0:\n tails.append(tail)\n head,tail = os.path.split(head)\n tails = tails[::-1]\n return '_'.join(tails)\n \ndef collect_files(src,dst):\n flist = glob.glob(os.path.join(src,'*'))\n os.makedirs(dst,exist_ok=True)\n \n for f in flist:\n outf = os.path.join(dst,path2str(f))\n shutil.copyfile(f,outf)\n\n\ndef phase_to_nm(phase):\n return phase/(4*np.pi*1.38)*1050.0\n\ndef nm_to_phase(nm):\n return nm*(4*np.pi*1.38)/1050.0\n\n# pay attention to the default value of stim_index, since the b-scans right after stimulus\n# determine how the data are displayed to the user; until late 2022, we've been collecting 400\n# @ 400 Hz, and the stimulus is delivered 0.25 seconds into the series, i.e. at frame 100; however\n# we only process B-scans 80-140, i.e. 
50 ms before stimulus through 100 ms after stimulus, and\n# thus the stim_index is 20\ndef plot(folder,stim_index=20):\n\n colors = plt.rcParams['axes.prop_cycle'].by_key()['color']\n\n phase_slope_flist = glob.glob(os.path.join(folder,'*phase_slope.npy'))\n phase_slope_flist.sort()\n amplitude_flist = glob.glob(os.path.join(folder,'*amplitude.npy'))\n amplitude_flist.sort()\n\n t = np.arange(len(amplitude_flist))*0.0025-0.04\n \n display_bscan = np.load(amplitude_flist[stim_index])\n dB = 20*np.log10(display_bscan)\n dbclim = np.percentile(dB,(30,99.99))\n \n markersize = 8.0\n \n global rois,click_points,index,abscans,pbscans\n\n roll_vec = get_level_roll_vec(display_bscan)\n display_bscan = shear(display_bscan,roll_vec)\n\n\n abscans = []\n pbscans = []\n for af,pf in zip(amplitude_flist,phase_slope_flist):\n abscans.append(shear(np.load(af),roll_vec))\n pbscans.append(shear(np.load(pf),roll_vec))\n\n tlim = (-0.05,float(len(amplitude_flist))*2.5e-3)\n\n \n abscans = np.array(abscans)\n pbscans = np.array(pbscans)\n \n rois = []\n click_points = []\n index = 0\n\n fig = plt.figure()\n fig.set_size_inches((6,3))\n fig.set_dpi(300)\n\n ax1 = fig.add_axes([0.03,0.03,.28,0.94])\n ax2 = fig.add_axes([0.36,0.15,0.28,0.82])\n ax3 = fig.add_axes([0.7,0.15,0.28,0.82])\n \n ax1.set_xlim((10,235))\n ax1.set_xticks([])\n ax1.set_yticks([])\n ax1.set_aspect('auto')\n ax1.imshow(20*np.log10(display_bscan),clim=dbclim,cmap='gray',aspect='auto')\n \n ax2.set_ylim((-8,5))\n ax2.set_xlim(tlim)\n ax2.set_xlabel('time (s)')\n ax2.set_ylabel('$v_{OS}$ ($\\mu m$/s)')\n\n ax3.set_xlim(flim)\n ax3.set_xlabel('frequency (Hz)')\n ax3.set_ylabel('Power')\n \n plt.pause(.0001)\n\n\n def draw_rois():\n ax1.clear()\n ax1.set_xlim((10,235))\n ax1.set_xticks([])\n ax1.set_yticks([])\n ax1.set_aspect('auto')\n ax1.imshow(20*np.log10(display_bscan),clim=dbclim,cmap='gray',aspect='auto')\n for k,roi in enumerate(rois):\n x1,x2 = [a[0] for a in roi[0]]\n z1,z2 = [a[1] for a in roi[0]]\n ax1.plot([x1,x2,x2,x1,x1],[z1,z1,z2,z2,z1],color=colors[k%len(colors)],alpha=box_alpha,linewidth=box_linewidth)\n\n ax2.clear()\n ax2.set_ylim((-8,5))\n ax2.set_xlim(tlim)\n osv_mat = []\n for k,roi in enumerate(rois):\n osv = roi[2]\n osv_mat.append(osv)\n ax2.plot(t,osv,linewidth=plot_linewidth,alpha=plot_alpha,color=colors[k%len(colors)])\n if len(rois)>1:\n osv_mat = np.array(osv_mat)\n mosv = np.nanmean(osv_mat,axis=0)\n ax2.plot(t,mosv,color='k',alpha=mplot_alpha,linewidth=mplot_linewidth)\n ax2.set_xlabel('time (s)')\n ax2.set_ylabel('$v_{OS}$ ($\\mu m$/s)')\n\n ax3.clear()\n #ax3.set_ylim((-8,5))\n ax3.set_xlim(flim)\n ps_mat = []\n for k,roi in enumerate(rois):\n osv = roi[2]\n power_spectrum = np.fft.fftshift(np.abs(np.fft.fft(osv))**2)\n ps_mat.append(power_spectrum)\n freq = np.fft.fftshift(np.fft.fftfreq(len(osv),2.5e-3))\n ax3.semilogy(freq,power_spectrum,linewidth=plot_linewidth,alpha=plot_alpha,color=colors[k%len(colors)])\n if len(rois)>1:\n ps_mat = np.array(ps_mat)\n mps = np.nanmean(ps_mat,axis=0)\n ax3.plot(freq,mps,color='k',alpha=mplot_alpha,linewidth=mplot_linewidth)\n ax3.set_xlabel('frequency (Hz)')\n ax3.set_ylabel('Power')\n \n\n \n plt.pause(.1)\n \n \n def onclick(event):\n\n global rois,click_points,index,abscans,pbscans\n\n if event.button==1:\n if event.xdata is None and event.ydata is None:\n # clicked outside plot--clear everything\n print('Clearing.')\n click_points = []\n rois = []\n draw_rois()\n # ax1.clear()\n # ax1.imshow(20*np.log10(display_bscan),clim=(45,90),cmap='gray',aspect='auto')\n # 
ax2.clear()\n                # ax2.axvline(0.0,color='g',linestyle='--')\n                # ax1.set_xticks([])\n                # ax1.set_yticks([])\n                # plt.pause(.001)\n\n            if event.inaxes==ax1:\n                if event.button==1:\n                    xnewclick = event.xdata\n                    ynewclick = event.ydata\n                    click_points.append((int(round(xnewclick)),int(round(ynewclick))))\n\n                if len(click_points)==1:\n                    #ax1.clear()\n                    #ax1.imshow(20*np.log10(display_bscan),clim=(45,85),cmap='gray')\n                    #ax1.plot(click_points[0][0],click_points[0][1],'bo')\n                    plt.pause(.1)\n\n                if len(click_points)==2:\n\n                    x1,x2 = [a[0] for a in click_points] \n                    z1,z2 = [a[1] for a in click_points]\n                    #ax1.clear()\n                    #ax1.imshow(20*np.log10(display_bscan),clim=(45,90),cmap='gray')\n                    valid = True\n                    try:\n                        osa,osv,isos_z,cost_z = blobo.extract_layer_velocities_region(abscans,pbscans,x1,x2,z1,z2,stim_index=stim_index)\n                    except Exception as e:\n                        print('ROI could not be processed:',e)\n                        valid = False\n                        click_points = []\n\n                    if valid:\n                        # osv is in radians per block here; convert it to um/s:\n                        # phase_to_nm (defined above, with lambda = 1050 nm and n = 1.38)\n                        # converts radians to nm, 1e-3 converts nm to um, and dividing\n                        # by the 2.5 ms block period (400 Hz B-scan rate) gives um/s\n\n                        osv = 1e-3*phase_to_nm(osv)/2.5e-3\n\n                        rois.append((click_points,osa,osv,isos_z,cost_z))\n                        click_points = []\n\n                        draw_rois()\n                        index+=1\n            \n        elif event.button==3:\n            x = event.xdata\n            y = event.ydata\n            new_rois = []\n            \n            for idx,roi in enumerate(rois):\n                x1,y1 = roi[0][0]\n                x2,y2 = roi[0][1]\n                if x1<x<x2 and y1<y<y2:\n                    pass\n                else:\n                    new_rois.append(roi)\n            rois = new_rois\n            draw_rois()\n\n\n\n    def onpress(event):\n        global rois,click_points,index\n        if event.key=='enter':\n            outfolder = os.path.join(folder,'plot_velocities')\n            print('Saving results to %s.'%outfolder)\n            os.makedirs(outfolder,exist_ok=True)\n            np.save(os.path.join(outfolder,'display_bscan.npy'),display_bscan)\n            nrois = len(rois)\n            fx1,fx2 = [a[0] for a in rois[0][0]]\n            fz1,fz2 = [a[1] for a in rois[0][0]]\n            froi_tag = '%d_%d_%d_%d_'%(fx1,fx2,fz1,fz2)\n\n            \n            fig.savefig(os.path.join(outfolder,'figure_%d_rois_%s.png'%(nrois,froi_tag)),dpi=300)\n            fig.savefig(os.path.join(outfolder,'figure_%d_rois_%s.pdf'%(nrois,froi_tag)))\n            for roi in rois:\n                \n                x1,x2 = [a[0] for a in roi[0]]\n                z1,z2 = [a[1] for a in roi[0]]\n                roi_tag = '%d_%d_%d_%d_'%(x1,x2,z1,z2)\n                fnroot = os.path.join(outfolder,roi_tag)\n                np.save(fnroot+'rect_points.npy',roi[0])\n                np.save(fnroot+'outer_segment_amplitude.npy',roi[1])\n                np.save(fnroot+'outer_segment_velocity.npy',roi[2])\n                np.save(fnroot+'isos_z.npy',roi[3])\n                np.save(fnroot+'cost_z.npy',roi[4])\n\n            collect_files(outfolder,'./plot_velocities_results')\n        elif event.key=='backspace':\n            rois = rois[:-1]\n            click_points = []\n            draw_rois()\n        \n        \n    cid = fig.canvas.mpl_connect('button_press_event',onclick)\n    pid = fig.canvas.mpl_connect('key_press_event',onpress)\n\n    #plt.subplot(1,2,2,label='foo')\n    plt.show()\n    return rois\n\n\nif __name__=='__main__':\n\n\n    if len(sys.argv)<2:\n        folder = '.'\n    else:\n        folder = sys.argv[1]\n\n    org_folders = pathlib.Path(folder).rglob('org')\n    org_folders = [str(f) for f in org_folders]\n    org_folders.sort()\n    for of in org_folders:\n        print('Working on %s.'%of)\n        plot(of)\n" } ]
43
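The `bin_shift_histogram` function in `histogram.py` above follows the Makita et al. (2006) approach of estimating bulk motion as the mode of a histogram of wrapped phase differences, smoothing the estimate by re-binning with sub-bin shifts of the bin edges. The following is a minimal, self-contained sketch of that idea on synthetic data; it is not the octoblob routine itself, and the names `n_bins` and `resample_factor` merely mirror its parameters:

```python
import numpy as np

# synthetic wrapped phase differences with a known bulk shift
rng = np.random.default_rng(0)
true_shift = 0.7  # radians, the value we try to recover
d_phase = (true_shift + 0.1 * rng.standard_normal(500) + np.pi) % (2 * np.pi) - np.pi

n_bins, resample_factor = 16, 24
edges = np.linspace(-np.pi, np.pi, n_bins + 1)
bin_width = edges[1] - edges[0]

# histogram the same values on several sub-bin-shifted grids and keep the
# bin center with the highest count seen on any grid (the histogram mode)
best_mode, best_count = 0.0, -1
for shift in np.linspace(0, bin_width, resample_factor, endpoint=False):
    counts, shifted_edges = np.histogram(d_phase, edges + shift)
    centers = 0.5 * (shifted_edges[:-1] + shifted_edges[1:])
    if counts.max() > best_count:
        best_count, best_mode = counts.max(), centers[np.argmax(counts)]

print('estimated bulk shift: %.3f rad (true: %.3f)' % (best_mode, true_shift))
```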
geo2france/thesaurus_builder
https://github.com/geo2france/thesaurus_builder
16c5b3000b278351a4229f16d6110d917e35df60
f871820516190f536eaa74e1de92ac706398e916
5e42a126d2c29b6066c3af85f1ccb795f7057ab1
refs/heads/master
2021-09-06T15:45:42.110775
2018-02-08T06:12:44
2018-02-08T06:12:44
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.8500000238418579, "alphanum_fraction": 0.875, "avg_line_length": 5.833333492279053, "blob_id": "8825a99440641eca4ed8b36f831bb56870f497d1", "content_id": "1041c1fd7e156af4376747b7f17b035e5fc14a6f", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 40, "license_type": "permissive", "max_line_length": 7, "num_lines": 6, "path": "/requirements.txt", "repo_name": "geo2france/thesaurus_builder", "src_encoding": "UTF-8", "text": "pyproj\nshapely\nfiona\nclick\npyyaml\njinja2" }, { "alpha_fraction": 0.6290598511695862, "alphanum_fraction": 0.6367521286010742, "avg_line_length": 29.389610290527344, "blob_id": "d1726a30db963c3921eaa504472ec6cd26d92f2d", "content_id": "c836c4d0735c6f8032c5b9b83db017d9e18dc54b", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2340, "license_type": "permissive", "max_line_length": 115, "num_lines": 77, "path": "/utils.py", "repo_name": "geo2france/thesaurus_builder", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\n# Standard imports\nimport os.path\nimport xml.dom.minidom as minidom\n\n# Non standard imports (see requirements.txt)\nfrom shapely.geometry import shape\nfrom shapely.ops import unary_union\n# Fiona should be imported after shapely - see https://github.com/Toblerity/Shapely/issues/288\nimport fiona\n\n\nclass Bunch:\n \"\"\"\n See http://code.activestate.com/recipes/52308-the-simple-but-handy-collector-of-a-bunch-of-named/?in=user-97991\n \"\"\"\n def __init__(self, **kwds):\n self.__dict__.update(kwds)\n\n\ndef u(s):\n \"\"\"\n decodes utf8\n \"\"\"\n if isinstance(s, unicode):\n return s.encode(\"utf-8\")\n if isinstance(s, str):\n return s.decode(\"utf-8\")\n # fix this, item may be unicode\n elif isinstance(s, list):\n return [i.decode(\"utf-8\") for i in s]\n\n\ndef get_geometry_from_file(input_file_path):\n \"\"\"\n Get the union of all the geometries contained in one shapefile.\n\n :param input_file_path: the path of the shapefile from which the geometry is computed\n :return: the geomtry resulting in the union of the geometries of the shapefile\n \"\"\"\n with fiona.open(input_file_path) as input_layer:\n geoms = [shape(feat['geometry']) for feat in input_layer]\n geom = unary_union(geoms)\n return geom\n\n\ndef prettify_xml(xml_string, minify=False, indent=\" \", newl=os.linesep):\n \"\"\"\n Function prettifying or minifying an xml string\n\n :param xml_string: The XML string to prettify or minify\n :param minify: True for minification and False for prettification\n :param indent: String used for indentation\n :param newl: String used for new lines\n :return: An XML string\n \"\"\"\n\n # Function used to remove XML blank nodes\n def remove_blanks(node):\n for x in node.childNodes:\n if x.nodeType == minidom.Node.TEXT_NODE:\n if x.nodeValue:\n x.nodeValue = x.nodeValue.strip()\n elif x.nodeType == minidom.Node.ELEMENT_NODE:\n remove_blanks(x)\n\n xml = minidom.parseString(u(xml_string))\n remove_blanks(xml)\n xml.normalize()\n\n if minify:\n pretty_xml_as_string = xml.toxml()\n else:\n pretty_xml_as_string = xml.toprettyxml(indent=indent, newl=newl)\n\n return pretty_xml_as_string\n" }, { "alpha_fraction": 0.5779596567153931, "alphanum_fraction": 0.5803583860397339, "avg_line_length": 33.740196228027344, "blob_id": "5138fa94182a21deab01b915864f74a39b057611", "content_id": "78601fc6b5f2b279b60531e2e27f08e3ca40d454", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, 
"language": "Python", "length_bytes": 7087, "license_type": "permissive", "max_line_length": 115, "num_lines": 204, "path": "/build_thesaurus_from_simple_shp.py", "repo_name": "geo2france/thesaurus_builder", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\n# Standard imports\nimport datetime\nimport os\nimport os.path\nimport codecs\n\n# Non standard imports (see requirements.txt)\nimport click\nimport jinja2\nfrom shapely.geometry import shape\nfrom shapely.ops import transform\n# Fiona should be imported after shapely - see https://github.com/Toblerity/Shapely/issues/288\nimport fiona\nimport pyproj\nfrom functools import partial\nimport yaml\n\nfrom utils import Bunch\nfrom utils import prettify_xml\n\n\n@click.command()\n@click.option('-v', '--verbose', is_flag=True, default=False,\n help='Enables verbose mode')\n@click.option('--overwrite/--no-overwrite', default=False,\n help='Allows to overwrite an existing thesaurus file')\n@click.option('--compact/--no-compact', default=False,\n help='Write compact rdf file')\n@click.option('--cfg-path', type=click.Path(exists=True, dir_okay=False), default=\"config_simple_shp.yml\",\n help='Path of a config file.')\n@click.argument('output-dir', nargs=1, type=click.Path(exists=True, dir_okay=True, file_okay=False, writable=True))\ndef create_thesauri(\n verbose,\n overwrite,\n compact,\n output_dir,\n cfg_path):\n \"\"\"\n This command creates a SKOS thesaurus for the french municipalities based on the ADMIN EXPRESS dataset from IGN\n (french mapping national agency). The created thesaurus can be used in Geonetwork.\n\n Examples:\\n\n python build_thesaurus_from_simple_shp.py output\\n\n python build_thesaurus_from_simple_shp.py --verbose --overwrite output\\n\n python build_thesaurus_from_simple_shp.py --cfg-path ./temp/config_simple_shp.yml --overwrite temp/out\\n\n \"\"\"\n\n thesauri_builder = ShpThesauriBuilder(\n verbose=verbose,\n overwrite=overwrite,\n compact=compact,\n output_dir=output_dir,\n cfg_path=cfg_path)\n\n thesauri_builder.create_thesauri()\n\n click.echo(u\"Done. Goodbye\")\n\n\nclass ShpThesauriBuilder(object):\n\n def __init__(self,\n verbose,\n overwrite,\n compact,\n output_dir,\n cfg_path):\n\n self.verbose = verbose\n self.overwrite = overwrite\n self.compact = compact\n self.output_dir = output_dir\n\n with open(cfg_path, 'r') as yaml_file:\n self.cfg = yaml.load(yaml_file)\n\n # Configure the templates dir variables\n templates_dir_path = os.path.join(os.path.dirname(__file__), self.cfg['template_dir_name'])\n template_loader = jinja2.FileSystemLoader(searchpath=templates_dir_path)\n self.template_env = jinja2.Environment(\n loader=template_loader,\n trim_blocks=True,\n lstrip_blocks=True)\n\n # Thesauri list\n self.thesauri_list = self.cfg[\"thesauri\"].keys()\n click.echo(u\"Thesauri to be produced: {}\".format(\", \".join(self.thesauri_list)))\n\n def create_thesauri(self):\n\n for thesaurus_name in self.thesauri_list:\n self.create_thesaurus(thesaurus_name)\n\n def create_thesaurus(self, thesaurus_name):\n\n click.echo(u\"Thesaurus creation: {}\".format(thesaurus_name))\n\n thesaurus_cfg = self.cfg[\"thesauri\"].get(thesaurus_name, None)\n if not thesaurus_cfg:\n click.echo(u\" Unknown thesaurus name: {}.\".format(thesaurus_name))\n return\n\n # Output file name and path\n rdf_file_name = thesaurus_cfg.get('out')\n if rdf_file_name:\n rdf_file_path = os.path.join(self.output_dir, rdf_file_name)\n else:\n click.echo(u\" Output rdf file name not found. 
Stop here\")\n return\n\n if self.verbose:\n click.echo(u\" Output file path: {}\".format(rdf_file_path))\n\n # Test if the rdf file already exists when --no-overwrite\n if not self.overwrite and os.path.isfile(rdf_file_path):\n click.echo(u\" Output file {} already exists. Won't be overwritten.\".format(rdf_file_path))\n click.echo(u\" Add option --overwrite to overwrite it.\")\n return\n\n # Read the template file using the environment object\n if self.verbose:\n click.echo(u\" Loading template {}\".format(thesaurus_cfg['template']))\n\n try:\n template = self.template_env.get_template(thesaurus_cfg['template'])\n except Exception as e:\n click.echo(u\" Template {} not found. Stop here.\".format(thesaurus_cfg['template']))\n return\n\n # URI template\n uri_scheme = thesaurus_cfg.get('uri_scheme')\n uri_template = \"{}#{}\".format(uri_scheme, \"{}\")\n\n # Create the list of territories\n terr_list = []\n # depts = []\n # depts_geom = None\n check_fields = True\n shp_path = thesaurus_cfg['shp']\n\n if self.verbose:\n click.echo(u\" Read shapefile {}\".format(shp_path))\n\n with fiona.open(shp_path, 'r') as shp:\n # func to reproject geom\n project = partial(\n pyproj.transform,\n pyproj.Proj(shp.crs),\n pyproj.Proj(init='EPSG:4326'))\n\n for feat in shp:\n f_props = feat['properties']\n f_geom = shape(feat['geometry'])\n f_geom_wgs84 = transform(project, f_geom)\n lon_min, lat_min, lon_max, lat_max = f_geom_wgs84.bounds\n\n # On first item only, check ADE fields\n fields = thesaurus_cfg['fields']\n if check_fields:\n for f in fields:\n if not fields[f] in f_props:\n click.echo(u\" Fatal error: field {} not found in shapefile.\".format(fields[f]))\n return\n check_fields = False\n\n name = f_props[fields['name']].strip().replace(\"&\", \"&amp;\")\n code = f_props[fields['code']].strip()\n uri = None\n if uri_template:\n uri = uri_template.format(code)\n\n terr = Bunch(name=name,\n lon_min=lon_min, lat_min=lat_min, lon_max=lon_max, lat_max=lat_max,\n code=code,\n uri=uri)\n\n # if filter_geom and filter_dept:\n terr_list.append(terr)\n\n terr_list.sort(key=lambda t: t.code)\n\n # data passed to the template\n data = {\n \"title\": thesaurus_cfg[\"title\"],\n \"uri_scheme\": uri_scheme,\n \"date\": datetime.date.today().isoformat(),\n \"terr_list\": terr_list,\n \"thesaurus\": thesaurus_name\n }\n\n # Finally, process the template to produce our final text.\n rdf_content = template.render(data)\n rdf_content = prettify_xml(rdf_content, minify=self.compact)\n\n if self.verbose:\n click.echo(u\" Write output file {}\".format(rdf_file_path))\n\n with codecs.open(rdf_file_path, \"w\", \"utf-8\") as f:\n f.write(rdf_content)\n\nif __name__ == '__main__':\n create_thesauri()\n" }, { "alpha_fraction": 0.5636546611785889, "alphanum_fraction": 0.5684471130371094, "avg_line_length": 41.4163932800293, "blob_id": "0386903f884481473917607eddddb0be31f28980", "content_id": "ac15daa1f33abd637bb04cfcd71c25119ad2794a", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 12937, "license_type": "permissive", "max_line_length": 135, "num_lines": 305, "path": "/build_thesaurus_from_ade.py", "repo_name": "geo2france/thesaurus_builder", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\n# Standard imports\nimport datetime\nimport os\nimport os.path\nimport codecs\n\n# Non standard imports (see requirements.txt)\nimport click\nimport jinja2\nfrom shapely.geometry import shape\nfrom shapely.ops import unary_union\nfrom shapely.ops import 
transform\n# Fiona should be imported after shapely - see https://github.com/Toblerity/Shapely/issues/288\nimport fiona\nimport pyproj\nfrom functools import partial\nimport yaml\n\nfrom utils import Bunch\nfrom utils import get_geometry_from_file\nfrom utils import prettify_xml\n\n\n@click.command()\n@click.option('-v', '--verbose', is_flag=True, default=False,\n help='Enables verbose mode')\n@click.option('--overwrite/--no-overwrite', default=False,\n help='Allows to overwrite an existing thesaurus file')\n@click.option('--compact/--no-compact', default=False,\n help='Write compact rdf file')\n@click.option('--dept-filter',\n help='List of departement numbers or names used to filter the municipalities.')\n@click.option('--filter-shp-path', type=click.Path(exists=True, dir_okay=False),\n help='Path of a shapefile used to spatially filter the entities.')\n@click.option('--cfg-path', type=click.Path(exists=True, dir_okay=False), default=\"config_ade.yml\",\n help='Path of a config file.')\n@click.option('--thesaurus', multiple=True, type=click.Choice(['commune', 'region', 'departement', 'epci']),\n help='Selection of the type of thesaurus to produce')\n@click.argument('output-dir', nargs=1, type=click.Path(exists=True, dir_okay=True, file_okay=False, writable=True))\ndef create_thesauri(\n verbose,\n overwrite,\n compact,\n thesaurus,\n output_dir,\n dept_filter=None,\n filter_shp_path=None,\n cfg_path=None):\n \"\"\"\n This command creates a SKOS thesaurus for the french municipalities based on the ADMIN EXPRESS dataset from IGN\n (french mapping national agency). The created thesaurus can be used in Geonetwork.\n\n Examples:\\n\n python build_thesaurus_from_ade.py output\\n\n python build_thesaurus_from_ade.py --verbose --overwrite output\\n\n python build_thesaurus_from_ade.py --verbose --overwrite --dept-filter \"60,02,somme\" output\\n\n python build_thesaurus_from_ade.py --verbose --overwrite --dept-filter \"60,02,somme\" output\\n\n python build_thesaurus_from_ade.py --dept-filter \"02,60,80\" output\\n\n python build_thesaurus_from_ade.py --dept-filter \" 02, oise, SOMME\" output\\n\n python build_thesaurus_from_ade.py --dept-filter \"02,60,80\" --filter-shp-path my_filter.shp output\\n\n python build_thesaurus_from_ade.py --cfg-path config_ade.yml --dept-filter \"02,60,80\" --overwrite temp\\n\n python build_thesaurus_from_ade.py --cfg-path ./temp/config.yml --dept-filter \"02,60,80\" --overwrite --thesaurus departement temp\\n\n \"\"\"\n\n thesauri_builder = AdeThesauriBuilder(\n verbose=verbose,\n overwrite=overwrite,\n compact=compact,\n thesaurus=thesaurus,\n output_dir=output_dir,\n dept_filter=dept_filter,\n filter_shp_path=filter_shp_path,\n cfg_path=cfg_path)\n\n thesauri_builder.create_thesauri()\n\n click.echo(u\"Done. 
Goodbye\")\n\n\nclass AdeThesauriBuilder(object):\n\n def __init__(self,\n verbose,\n overwrite,\n compact,\n thesaurus,\n output_dir,\n cfg_path,\n dept_filter=None,\n filter_shp_path=None):\n\n self.verbose = verbose\n self.overwrite = overwrite\n self.compact = compact\n self.thesaurus = thesaurus\n self.output_dir = output_dir\n self.filter_shp_path = filter_shp_path\n\n with open(cfg_path, 'r') as yaml_file:\n self.cfg = yaml.load(yaml_file)\n\n # Configure the templates dir variables\n templates_dir_path = os.path.join(os.path.dirname(__file__), self.cfg['template_dir_name'])\n template_loader = jinja2.FileSystemLoader(searchpath=templates_dir_path)\n self.template_env = jinja2.Environment(\n loader=template_loader,\n trim_blocks=True,\n lstrip_blocks=True)\n\n # Get the geometry of the spatial filter\n if verbose:\n click.echo(u\"Shapefile filter: {}\".format(filter_shp_path))\n self.spatial_filter_geom = None\n try:\n if filter_shp_path is not None:\n self.spatial_filter_geom = get_geometry_from_file(filter_shp_path)\n except Exception as e:\n click.echo(u\"The shapefile specified for spatial filtering could not be opened. \"\n u\"No spatial filter will be applied.\")\n\n # Create the list of departements\n self.dept_list = None\n if dept_filter is not None:\n self.dept_list = [dept.strip().lower() for dept in dept_filter.split(\",\")]\n if verbose:\n click.echo(u\"Departements filter: {}\".format('|'.join(self.dept_list)))\n\n if not self.thesaurus:\n self.thesaurus = (\"commune\", \"departement\", \"region\", \"epci\")\n\n click.echo(u\"Thesauri to be produced: {}\".format(\", \".join(self.thesaurus)))\n\n def create_thesauri(self):\n for thesaurus_type in self.thesaurus:\n self.create_thesaurus(thesaurus_type)\n\n def create_thesaurus(self, thesaurus_type):\n\n click.echo(u\"Thesaurus creation: {}\".format(thesaurus_type))\n\n thesaurus_cfg = self.cfg.get(thesaurus_type, None)\n if not thesaurus_cfg:\n click.echo(u\" Unknown thesaurus type: {}.\".format(thesaurus_type))\n return\n\n # Output file name and path\n rdf_file_name = thesaurus_cfg.get('out')\n if rdf_file_name:\n rdf_file_path = os.path.join(self.output_dir, rdf_file_name)\n else:\n click.echo(u\" Output rdf file name not found. Stop here\")\n return\n\n if self.verbose:\n click.echo(u\" Output file path: {}\".format(rdf_file_path))\n\n # Test if the rdf file already exists when --no-overwrite\n if not self.overwrite and os.path.isfile(rdf_file_path):\n click.echo(u\" Output file {} already exists. Won't be overwritten.\".format(rdf_file_path))\n click.echo(u\" Add option --overwrite to overwrite it.\")\n return\n\n # Read the template file using the environment object\n if self.verbose:\n click.echo(u\" Loading template {}\".format(thesaurus_cfg['template']))\n\n try:\n template = self.template_env.get_template(thesaurus_cfg['template'])\n except Exception as e:\n click.echo(u\" Template {} not found. 
Stop here.\".format(thesaurus_cfg['template']))\n return\n\n # Create the list of territories\n terr_list = []\n depts = []\n depts_geom = None\n check_ade = True\n ade_shp_path = os.path.join(self.cfg['ade_dir_name'], thesaurus_cfg['shp'])\n\n if self.verbose:\n click.echo(u\" Read shapefile {}\".format(ade_shp_path))\n\n if thesaurus_type in (\"region\", \"epci\"):\n\n # Reading departement shapefile to get departements list for each region\n dept_shp_file_name = self.cfg['departement']['shp']\n dept_shp_file_path = os.path.join(self.cfg['ade_dir_name'], dept_shp_file_name)\n\n if os.path.isfile(dept_shp_file_path):\n if self.verbose:\n click.echo(u\" Read shapefile {}\".format(dept_shp_file_path))\n else:\n click.echo(u\" Shapefile {} not found. Mandatory to list departements in regions. Stop here.\".format(\n dept_shp_file_path))\n return\n\n with fiona.open(dept_shp_file_path, 'r') as dept_shp:\n for d in dept_shp:\n dept = {\n \"dept_name\": d['properties'][self.cfg['departement']['fields']['nom']].strip(),\n \"dept_code\": d['properties'][self.cfg['departement']['fields']['code']].strip(),\n \"reg_code\": d['properties'][self.cfg['departement']['fields']['codereg']].strip(),\n \"geometry\": d['geometry'],\n }\n depts.append(dept)\n\n if self.dept_list:\n depts_geoms = [shape(dept['geometry']) for dept in depts\n if dept[\"dept_name\"].lower() in self.dept_list or\n dept[\"dept_code\"] in self.dept_list]\n depts_geom = unary_union(depts_geoms)\n\n with fiona.open(ade_shp_path, 'r') as shp:\n # func to reproject geom\n project = partial(\n pyproj.transform,\n pyproj.Proj(shp.crs),\n pyproj.Proj(init='EPSG:4326'))\n\n for feat in shp:\n f_props = feat['properties']\n f_geom = shape(feat['geometry'])\n f_geom_wgs84 = transform(project, f_geom)\n lon_min, lat_min, lon_max, lat_max = f_geom_wgs84.bounds\n\n # On first item only, check ADE fields\n fields = thesaurus_cfg['fields']\n if check_ade:\n for f in fields:\n if not fields[f] in f_props:\n click.echo(u\" Fatal error: field {} not found in shapefile.\".format(fields[f]))\n return\n check_ade = False\n\n name = f_props[fields['nom']].strip().replace(\"&\", \"&amp;\")\n code = f_props[fields['code']].strip()\n\n dept_name = ''\n dept_code = ''\n reg_code = ''\n reg_dept_codes = None\n\n # If municipalities, get dept infos for filter\n if thesaurus_type == 'commune':\n dept_name = f_props[fields['nomdept']].strip()\n dept_code = f_props[fields['codedept']].strip()\n # If departement, get region code\n elif thesaurus_type == 'departement':\n reg_code = f_props[fields['codereg']].strip()\n # If region, get departement list\n elif thesaurus_type == 'region':\n reg_dept_codes = [dept['dept_code'] for dept in depts\n if dept['reg_code'] == f_props[fields['code']].strip()]\n\n terr = Bunch(name=name,\n lon_min=lon_min, lat_min=lat_min, lon_max=lon_max, lat_max=lat_max,\n code=code, reg=reg_code, dept_reg=reg_dept_codes)\n\n # Add the object to the list of territories if non spatial filter, else we only add it to the list\n # if its geometry intersects the spatial filter\n filter_geom = self.spatial_filter_geom is None or f_geom.relate(self.spatial_filter_geom)[0] != 'F'\n\n if thesaurus_type == 'commune':\n filter_dept = self.dept_list is None or len(self.dept_list) == 0 or \\\n dept_name.lower() in self.dept_list or dept_code in self.dept_list\n elif thesaurus_type == 'epci':\n filter_dept = self.dept_list is None or len(self.dept_list) == 0 or \\\n depts_geom is None or f_geom.relate(depts_geom)[0] == '2'\n elif thesaurus_type == 
'departement':\n                    filter_dept = self.dept_list is None or len(self.dept_list) == 0 or \\\n                                  name.lower() in self.dept_list or code in self.dept_list\n                elif thesaurus_type == 'region':\n                    filter_dept = self.dept_list is None or len(self.dept_list) == 0 or \\\n                                  len(set(self.dept_list).intersection(reg_dept_codes)) > 0\n                else:\n                    filter_dept = self.dept_list is None or len(self.dept_list) == 0 or \\\n                                  depts_geom is None or f_geom.relate(depts_geom)[0] == '2'\n\n                if filter_geom and filter_dept:\n                    terr_list.append(terr)\n\n        terr_list.sort(key=lambda t: t.code)\n\n        # data passed to the template\n        data = {\n            \"title\": thesaurus_cfg[\"title\"],\n            \"date\": datetime.date.today().isoformat(),\n            \"terr_list\": terr_list,\n            \"thesaurus\": thesaurus_type\n        }\n\n        # Finally, process the template to produce our final text.\n        rdf_content = template.render(data)\n        rdf_content = prettify_xml(rdf_content, minify=self.compact)\n\n        if self.verbose:\n            click.echo(u\"  Write output file {}\".format(rdf_file_path))\n\n        with codecs.open(rdf_file_path, \"w\", \"utf-8\") as f:\n            f.write(rdf_content)\n\nif __name__ == '__main__':\n    create_thesauri()\n" }, { "alpha_fraction": 0.7551164627075195, "alphanum_fraction": 0.7674664855003357, "avg_line_length": 42.599998474121094, "blob_id": "ceb4dae1268a6de4e9672603aab3a1fb0d74161f", "content_id": "d39235bfc063df84cddae65ecf44e90618397e73", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2884, "license_type": "permissive", "max_line_length": 129, "num_lines": 65, "path": "/README.md", "repo_name": "geo2france/thesaurus_builder", "src_encoding": "UTF-8", "text": "# thesaurus_builder\nThesaurus generation tools for GeoNetwork.\n\n* build_thesaurus_from_ade.py: builds thesauri from the IGN ADMINEXPRESS shapefiles\n* build_thesaurus_from_simple_shp.py: builds thesauri from an arbitrary shapefile.\n\nRequires the modules listed in requirements.txt:\n * jinja2\n * fiona\n * shapely\n * click\n * pyproj\n * pyyaml\nThese modules can be installed with the following command:\n<pre>\npip install -r requirements.txt\n</pre>\n\n## build_thesaurus_from_ade.py\n\nThe command's argument is the directory in which the thesauri in RDF format are created.\n\nOptions:\n* --cfg-path: configuration file describing the thesauri to create. By default this is the config_ade.yml file\n at the root of the project\n* --thesaurus: type of thesaurus to create (commune, epci, departement or region). This option is repeatable. If it\n is not given, all the thesauri described in the configuration file are created.\n* --verbose or -v: verbose mode\n* --overwrite/--no-overwrite: forbids or allows overwriting existing output files\n* --dept-filter: list of departement names or numbers restricting the geographic area covered by\nthe thesaurus\n* --filter-shp-path: path to a shapefile restricting the geographic area covered by the thesaurus\n\nExamples:\n\n<pre>\npython build_thesaurus_from_ade.py output\npython build_thesaurus_from_ade.py --verbose --overwrite output\npython build_thesaurus_from_ade.py --verbose --overwrite --dept-filter \"60,02,somme\" output\npython build_thesaurus_from_ade.py --dept-filter \"02,60,80\" output\npython build_thesaurus_from_ade.py --dept-filter \" 02, oise, SOMME\" output\npython build_thesaurus_from_ade.py --dept-filter \"02,60,80\" --filter-shp-path my_filter.shp output\npython build_thesaurus_from_ade.py --cfg-path config_ade.yml --dept-filter \"02,60,80\" --overwrite temp\npython build_thesaurus_from_ade.py --cfg-path ./temp/config.yml --dept-filter \"02,60,80\" --overwrite --thesaurus departement temp\n</pre>\n\n\n## build_thesaurus_from_simple_shp.py\n\nThe command's argument is the directory in which the thesauri in RDF format are created.\n\nOptions:\n* --cfg-path: configuration file describing the thesauri to create. By default this is the\n config_simple_shp.yml file at the root of the project\n* --verbose or -v: verbose mode\n* --overwrite/--no-overwrite: forbids or allows overwriting existing output files\n\nExamples:\n\n<pre>\npython build_thesaurus_from_simple_shp.py output\npython build_thesaurus_from_simple_shp.py --verbose --overwrite output\npython build_thesaurus_from_simple_shp.py --cfg-path ./temp/config.yml --overwrite temp/out\n</pre>\n" } ]
5
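Both thesaurus_builder scripts above pass a `data` dictionary to a Jinja2 template and write the rendered RDF to disk. The sketch below illustrates only that rendering step, using a small invented inline template; the project's real SKOS templates ship in its template directory and are not reproduced here:

```python
import jinja2

# invented, simplified template for illustration only; the real templates are
# selected through the 'template' key of the YAML configuration
template = jinja2.Template(
    '<rdf:RDF xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"\n'
    '         xmlns:skos="http://www.w3.org/2004/02/skos/core#">\n'
    '{% for terr in terr_list %}'
    '  <skos:Concept rdf:about="{{ terr.uri }}">\n'
    '    <skos:prefLabel xml:lang="fr">{{ terr.name }}</skos:prefLabel>\n'
    '  </skos:Concept>\n'
    '{% endfor %}'
    '</rdf:RDF>')

# hypothetical territories; the real ones are read from the shapefile
terr_list = [
    {'uri': 'http://example.org/territoire#59350', 'name': 'Lille'},
    {'uri': 'http://example.org/territoire#80021', 'name': 'Amiens'},
]
print(template.render(terr_list=terr_list))
```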
rasgr6-2017/simple_detection_temporary
https://github.com/rasgr6-2017/simple_detection_temporary
debffba294216e37a9122c64925b063f99bd38d2
4c208b037754ca2ea6aa7d3b37f2e5674dc90b7c
f37686e1a516690b94d8ebe89b000fd69dbcf23d
refs/heads/master
2021-07-08T11:03:05.503847
2017-10-04T21:35:54
2017-10-04T21:35:54
105,818,533
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.8409090638160706, "alphanum_fraction": 0.8409090638160706, "avg_line_length": 43, "blob_id": "c954e93a167f0a3c2d5a2e15a482747e372f0508", "content_id": "93a6eb1f873f1cc95f2e0873f35dbbd5fd7cf64b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 88, "license_type": "no_license", "max_line_length": 58, "num_lines": 2, "path": "/README.md", "repo_name": "rasgr6-2017/simple_detection_temporary", "src_encoding": "UTF-8", "text": "# simple_detection_temporary\nthis is a temporary repository for simple object detection\n" }, { "alpha_fraction": 0.6900354623794556, "alphanum_fraction": 0.7325718998908997, "avg_line_length": 29.214284896850586, "blob_id": "a21df546bbff0b5fc4c830a4a7a886b23df40a9d", "content_id": "a5285ef7389d0029546dc369e6c3ba4ddcf93829", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2539, "license_type": "no_license", "max_line_length": 107, "num_lines": 84, "path": "/simple_segmentation_and_blob_detection/RAS_RGB_sub_seg_test1.py", "repo_name": "rasgr6-2017/simple_detection_temporary", "src_encoding": "UTF-8", "text": "import numpy as np\nimport cv2\n\nimg = cv2.imread('RAS_RGB_img/1.jpg')\n\nhimg = cv2.cvtColor(img, cv2.COLOR_BGR2HSV_FULL)\n\nheight, width = himg.shape[:2]\nhimg = cv2.resize(himg, (int(0.3*width), int(0.3*height)), interpolation=cv2.INTER_CUBIC)\nimg = cv2.resize(img, (int(0.3*width), int(0.3*height)), interpolation=cv2.INTER_CUBIC)\nZ = himg.reshape((-1, 3))\nY = img.reshape((-1, 3))\n\n# convert to np.float32\nZ = np.float32(Z)\nY = np.float32(Y)\n\n# define criteria, number of clusters(K) and apply kmeans()\ncriteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 10, 1.0)\nret1, label1, center1 = cv2.kmeans(Z, 12, None, criteria, 10, cv2.KMEANS_RANDOM_CENTERS)\nret2, label2, center2 = cv2.kmeans(Y, 10, None, criteria, 10, cv2.KMEANS_RANDOM_CENTERS)\n\n# Now convert back into uint8, and make original image\ncenter1 = np.uint8(center1)\nres = center1[label1.flatten()]\nres1 = res.reshape(himg.shape)\n\ncenter2 = np.uint8(center2)\nres = center2[label2.flatten()]\nres2 = res.reshape(img.shape)\n\ncv2.imshow('HSV-kmeans', res1)\ncv2.imshow('RGB-kmeans', res2)\ncv2.imshow('original', img)\ncv2.imshow('HSV-original', himg)\n#cv2.imshow('grey-HSV-kmeans', gimg)\n\n\"\"\"get an idea about what the following lines are from \nhttps://www.learnopencv.com/blob-detection-using-opencv-python-c/\"\"\"\n\n# Setup SimpleBlobDetector parameters.\nparams = cv2.SimpleBlobDetector_Params()\n\n# Change thresholds\nparams.minThreshold = 1;\nparams.maxThreshold = 255;\n\n# Filter by Area.\nparams.filterByArea = True\nparams.minArea = 500\n\n# Filter by Circularity\nparams.filterByCircularity = True\nparams.minCircularity = 0.05\n\n# Filter by Convexity\nparams.filterByConvexity = True\nparams.minConvexity = 0.3\n\n# Filter by Inertia\nparams.filterByInertia = True\nparams.minInertiaRatio = 0.3\n\n# Create a detector with the parameters\ndetector = cv2.SimpleBlobDetector_create(params)\n\ngimg = cv2.cvtColor(cv2.cvtColor(res1, cv2.COLOR_HSV2BGR_FULL), cv2.COLOR_BGR2GRAY)\n\n# Detect blobs.\nkeypoints = detector.detect(gimg)\n\n# Draw detected blobs as red circles.\n# cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS ensures the size of the circle corresponds to the size of blob\nim_with_keypoints = cv2.drawKeypoints(gimg, keypoints, np.array([]), (0, 0, 255),\n cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)\n\n# Show 
keypoints\ncv2.imshow(\"Keypoints\", im_with_keypoints)\n\n\"\"\"above blob detection only implement grey image blob detection, you can find roughly \nhow to do that from http://www.shervinemami.info/blobs.html\"\"\"\n\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n\n" } ]
2
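The segmentation script above quantizes image colors with `cv2.kmeans` before running blob detection. The following is a self-contained sketch of just that quantization step, run on a random stand-in image so that no input file is required:

```python
import numpy as np
import cv2

# random stand-in for a BGR image, so the sketch needs no input file
img = np.random.randint(0, 256, (120, 160, 3), dtype=np.uint8)
Z = np.float32(img.reshape((-1, 3)))  # one row per pixel, as float32

# same criteria/flags pattern as in RAS_RGB_sub_seg_test1.py
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 10, 1.0)
ret, labels, centers = cv2.kmeans(Z, 8, None, criteria, 10, cv2.KMEANS_RANDOM_CENTERS)

# repaint every pixel with the color of its cluster center
quantized = np.uint8(centers)[labels.flatten()].reshape(img.shape)
print(quantized.shape, 'quantized to', len(centers), 'colors')
```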
hiran75/deeplearning
https://github.com/hiran75/deeplearning
53653f5e59d3aa35070a7443c8099fce4e223cd6
075a550141548237283d5c1b272bfeee095610f4
15a5b84b096ddb5e3063d3ff087142da804d31f6
refs/heads/master
2021-07-07T17:01:00.984628
2020-11-14T09:42:00
2020-11-14T09:42:00
203,717,684
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5479753613471985, "alphanum_fraction": 0.5633803009986877, "avg_line_length": 32.42424392700195, "blob_id": "7516f845b5abf06170cc29036dd15b404d3b6dc0", "content_id": "52e2ae5a2799ba1d3ec2eb9ec5d10e76e416bea4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7224, "license_type": "no_license", "max_line_length": 115, "num_lines": 198, "path": "/mylib.py", "repo_name": "hiran75/deeplearning", "src_encoding": "UTF-8", "text": "import os\r\n\r\ndef setting(model_name):\r\n # ๋ชจ๋ธ ์ €์žฅ ํด๋” ์„ค์ •\r\n MODEL_DIR = './model/' + model_name\r\n #print(MODEL_DIR)\r\n if not os.path.exists(MODEL_DIR):\r\n os.mkdir(MODEL_DIR)\r\n return 1\r\n\r\nMODEL_DIR=\"\"\r\n\r\n\r\ndef recall_m(y_true, y_pred):\r\n true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))\r\n possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))\r\n recall = true_positives / (possible_positives + K.epsilon())\r\n return recall\r\n\r\n\r\ndef precision_m(y_true, y_pred):\r\n true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))\r\n predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))\r\n precision = true_positives / (predicted_positives + K.epsilon())\r\n return precision\r\n\r\n\r\ndef f1_m(y_true, y_pred):\r\n precision = precision_m(y_true, y_pred)\r\n recall = recall_m(y_true, y_pred)\r\n return 2 * ((precision * recall) / (precision + recall + K.epsilon()))\r\n\r\n\r\n\r\nfrom keras.models import Sequential\r\nfrom keras.layers import Dense\r\n\r\ndef make_network(features, hiddenlayer):\r\n # ๋ชจ๋ธ ๋„คํŠธ์›Œํฌ ๊ตฌ์„ฑ\r\n if hiddenlayer == 2:\r\n model = Sequential()\r\n model.add(Dense(30, input_dim=features, activation='relu'))\r\n model.add(Dense(30, activation='relu'))\r\n model.add(Dense(1, activation='sigmoid'))\r\n\r\n\r\n if hiddenlayer == 3:\r\n model = Sequential()\r\n model.add(Dense(28, input_dim=features, activation='relu'))\r\n model.add(Dense(15, activation='relu'))\r\n model.add(Dense(5, activation='relu'))\r\n model.add(Dense(1, activation='sigmoid'))\r\n\r\n if hiddenlayer == 4:\r\n model = Sequential()\r\n model.add(Dense(28, input_dim=features, activation='relu'))\r\n model.add(Dense(20, activation='relu'))\r\n model.add(Dense(15, activation='relu'))\r\n model.add(Dense(5, activation='relu'))\r\n model.add(Dense(1, activation='sigmoid'))\r\n\r\n if hiddenlayer == 5:\r\n model = Sequential()\r\n model.add(Dense(28, input_dim=features, activation='relu'))\r\n model.add(Dense(22, activation='relu'))\r\n model.add(Dense(18, activation='relu'))\r\n model.add(Dense(12, activation='relu'))\r\n model.add(Dense(5, activation='relu'))\r\n model.add(Dense(1, activation='sigmoid'))\r\n\r\n if hiddenlayer == 6:\r\n model = Sequential()\r\n model.add(Dense(30, input_dim=features, activation='relu'))\r\n model.add(Dense(28, input_dim=features, activation='relu'))\r\n model.add(Dense(20, input_dim=features, activation='relu'))\r\n model.add(Dense(12, activation='relu'))\r\n model.add(Dense(8, activation='relu'))\r\n model.add(Dense(1, activation='sigmoid'))\r\n #model.add(Dense(1, activation='softmax'))\r\n\r\n return model\r\n\r\n\r\n# pandas ๋ผ์ด๋ธŒ๋Ÿฌ๋ฆฌ๋ฅผ ๋ถˆ๋Ÿฌ์˜ต๋‹ˆ๋‹ค.\r\nimport pandas as pd\r\nimport matplotlib.pyplot as plt\r\nimport seaborn as sns\r\n\r\nclass visualizer:\r\n def visualizer_main(df):\r\n colormap = plt.cm.gist_heat #๊ทธ๋ž˜ํ”„์˜ ์ƒ‰์ƒ ๊ตฌ์„ฑ์„ ์ •ํ•ฉ๋‹ˆ๋‹ค.\r\n plt.figure(figsize=(12,12)) #๊ทธ๋ž˜ํ”„์˜ ํฌ๊ธฐ๋ฅผ ์ •ํ•ฉ๋‹ˆ๋‹ค.\r\n\r\n # ๊ทธ๋ž˜ํ”„์˜ ์†์„ฑ์„ ๊ฒฐ์ •ํ•ฉ๋‹ˆ๋‹ค. 
vmax์˜ ๊ฐ’์„ 0.5๋กœ ์ง€์ •ํ•ด 0.5์— ๊ฐ€๊นŒ์šธ ์ˆ˜๋ก ๋ฐ์€ ์ƒ‰์œผ๋กœ ํ‘œ์‹œ๋˜๊ฒŒ ํ•ฉ๋‹ˆ๋‹ค.\r\n sns.heatmap(df.corr(),linewidths=0.1,vmax=0.5, cmap=colormap, linecolor='white', annot=True)\r\n plt.show()\r\n\r\n grid = sns.FacetGrid(df, col='class')\r\n grid.map(plt.hist, 'plasma', bins=10)\r\n plt.show()\r\n\r\n def fit(model, X_train, y_train, model_name, v_epoches, v_batch_size):\r\n\r\n MODEL_DIR = './model/' + model_name\r\n # ๋ชจ๋ธ ์ €์žฅ ์กฐ๊ฑด ์„ค์ •\r\n modelpath = MODEL_DIR + \"/{epoch:02d}-{val_loss:.4f}.hdf5\"\r\n checkpointer = ModelCheckpoint(filepath=modelpath, monitor='val_loss', verbose=0, save_best_only=True)\r\n\r\n # ํ•™์Šต ์ž๋™ ์ค‘๋‹จ ์„ค์ •\r\n early_stopping_callback = EarlyStopping(monitor='val_loss', patience=100)\r\n\r\n # ๋ฐ์ดํ„ฐ ํ•™์Šต\r\n history = model.fit(X_train, y_train, validation_split=0.20, epochs=v_epoches, batch_size=v_batch_size,\r\n callbacks=[early_stopping_callback, checkpointer])\r\n\r\n y_vloss = history.history['val_loss']\r\n y_acc = history.history['acc']\r\n\r\n # x๊ฐ’์„ ์ง€์ •ํ•˜๊ณ  ์ •ํ™•๋„๋ฅผ ํŒŒ๋ž€์ƒ‰์œผ๋กœ, ์˜ค์ฐจ๋ฅผ ๋นจ๊ฐ„์ƒ‰์œผ๋กœ ํ‘œ์‹œ\r\n x_len = numpy.arange(len(y_acc))\r\n plt.plot(x_len, y_vloss, \"o\", c=\"red\", markersize=3)\r\n plt.plot(x_len, y_acc, \"o\", c=\"blue\", markersize=3)\r\n\r\n plt.show()\r\n\r\n return model\r\n\r\n def model_evaluate(model, model_name, X_test, y_test, features, v_epoches, v_batch_size):\r\n import datetime\r\n\r\n print('============================')\r\n print(\"ํŒŒ์ผ๋ช… \", model_name)\r\n print(\"์š”์†Œ๊ฐฏ์ˆ˜ \", features)\r\n print(\"================================== \")\r\n\r\n # ๊ฒฐ๊ณผ ์ถœ๋ ฅ\r\n # print(\"\\n epoches\", v_epoches, \"bat_size=\", v_batch_size)\r\n # print(\"\\n ํ•™์Šต์ค‘๋‹จ + ๋ชจ๋ธ ์„ฑ๋Šฅ๊ฐœ์„  : arly_stopping_callback:\")\r\n # print(\"\\n ์˜ˆ์ธก์ •ํ™•๋„: %.4f\" % (model.evaluate(X_test, y_test)[1]))\r\n\r\n # ํ…Œ์ŠคํŠธ ๋ฐ์ดํ„ฐ ๊ฒ€์ฆ\r\n loss, accuracy, recall, precision, f1_socre = model.evaluate(X_test, y_test)\r\n # accuracy = model.evaluate(X_test, Y_test)\r\n print('DNN_', datetime.datetime.now())\r\n print(\"/n #accuracy, precision, recall, f1_score\")\r\n print(\" # %.4f, %.4f, %4f, %.4f\" % (accuracy, precision, recall, f1_socre))\r\n return accuracy\r\n\r\n def predict(model, X_test, y_test):\r\n # ์˜ˆ์ธก ๊ฐ’๊ณผ ์‹ค์ œ ๊ฐ’์˜ ๋น„๊ต\r\n Y_prediction = model.predict(X_test)\r\n for i in range(10):\r\n label = y_test[i]\r\n prediction = Y_prediction[i]\r\n if prediction > 0.5:\r\n pre_label = 1\r\n else:\r\n pre_label = 0\r\n print(\"์‹ค์ œ: \", label, \"์˜ˆ์ƒ\", pre_label)\r\n\r\n\r\nfrom sklearn.metrics import roc_curve\r\n\r\nprobs = model.predict(X_test).ravel()\r\nfpr_keras, tpr_keras, thresholds_keras = roc_curve(y_test, probs)\r\n\r\nfrom sklearn.metrics import auc\r\nauc_keras = auc(fpr_keras, tpr_keras)\r\n\r\nplt.figure(1)\r\nplt.plot([0, 1], [0, 1], 'k--')\r\nplt.plot(fpr_keras, tpr_keras, label='(AUC = {:.3f})'.format(auc_keras))\r\nplt.xlabel('False positive rate')\r\nplt.ylabel('True positive rate')\r\nplt.title('ROC(Receiver Operating Characteristic) curve')\r\nplt.legend(loc='best')\r\nplt.show()\r\n\r\n\r\n#๋„คํŠธ์›Œํฌ ์ƒ์„ฑ\r\nmodel = network.make_network(features, v_layer)\r\n\r\n# ๋ชจ๋ธ ์ปดํŒŒ์ผ\r\nmodel.compile(loss=cost_f,\r\n optimizer='adam',\r\n # metrics=['accuracy'])\r\n metrics=['accuracy', lib.recall_m, lib.precision_m, lib.f1_m])\r\n\r\n\r\nfrom sklearn.preprocessing import LabelEncoder\r\n\r\n\r\n\r\n# ํ•™๊ณผ์ฝ”๋“œ๋ฅผ onhot encoding ํ•จ##########################\r\nle = 
LabelEncoder()\r\nle.fit(df_pre['dept'])\r\ndf_pre['dept'] = le.transform(df_pre['dept'])\r\n# ํ•™๊ณผ์ฝ”๋“œ๋ฅผ onhot encoding end ##########################\r\n" }, { "alpha_fraction": 0.5509862899780273, "alphanum_fraction": 0.5636910796165466, "avg_line_length": 25.9158878326416, "blob_id": "6bc049c39952f4c3425d5bbc7e515fafba007b42", "content_id": "0c70c497fa7c1049f8226f746873c506fa8c0cf0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3237, "license_type": "no_license", "max_line_length": 95, "num_lines": 107, "path": "/preprocessing.py", "repo_name": "hiran75/deeplearning", "src_encoding": "UTF-8", "text": "import numpy\r\nimport tensorflow as tf\r\nimport pandas as pd\r\nfrom sklearn.preprocessing import LabelEncoder\r\nfrom sklearn.model_selection import train_test_split\r\n\r\ndef pre_datamanager(filename, week):\r\n # seed ๊ฐ’ ์„ค์ •\r\n seed = 0\r\n numpy.random.seed(seed)\r\n tf.random.set_seed(seed)\r\n\r\n df_pre = pd.read_csv(filename, header=0) # CSVํŒŒ์ผ์„ ๋ถˆ๋Ÿฌ์˜ค๋Š” ํ•จ์ˆ˜๋ฅผ ์ด์šฉ\r\n\r\n # print(df_pre.info())\r\n features = len(df_pre.columns) - 1\r\n # ๋ฐ์ดํ„ฐ ๋‚ด๋ถ€์˜ ๊ธฐํ˜ธ๋ฅผ ์ˆซ์ž๋กœ ๋ณ€ํ™˜ํ•˜๊ธฐ--- (โ€ป2)\r\n\r\n df_pre = df_pre.sample(frac=1)\r\n\r\n # ํ•™๊ณผ์ฝ”๋“œ๋ฅผ onhot encoding ํ•จ##########################\r\n le = LabelEncoder()\r\n le.fit(df_pre['dept'])\r\n df_pre['dept'] = le.transform(df_pre['dept'])\r\n\r\n le.fit(df_pre['gender'])\r\n df_pre['gender'] = le.transform(df_pre['gender'])\r\n\r\n le.fit(df_pre['area'])\r\n df_pre['area'] = le.transform(df_pre['area'])\r\n\r\n # ์žฌํ•™์ƒ ๋ฐ์ดํ„ฐ๋งŒ ํ™œ์šฉ\r\n #df_pre = df_pre[df_pre['entYn'] == 0]\r\n\r\n # ์žฌํ•™์ƒ ๋ฐ์ดํ„ฐ๋งŒ ํ™œ์šฉ\r\n if week!=0:\r\n df_pre = df_pre[df_pre['weekseq'] == week]\r\n\r\n # data and label ๋ถ„๋ฆฌ\r\n dataset = df_pre.values\r\n X = dataset[:, 0:features]\r\n Y = dataset[:, features]\r\n\r\n # ํ…Œ์ŠคํŠธ, ๊ฒ€์ฆ๋ฐ์ดํ„ฐ ๋ถ„ํ•  7:3\r\n X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=0.3, random_state=seed)\r\n\r\n # ๋ฐ์ดํ„ฐ ์ •๊ทœํ™”\r\n X_train, X_test = X_train / 255, X_test / 255\r\n\r\n # over sampling\r\n from imblearn.over_sampling import SMOTE\r\n oversample_flage = True\r\n if oversample_flage:\r\n smote = SMOTE(random_state=0)\r\n X_train_over, y_train_over = smote.fit_sample(X_train, y_train)\r\n X_train = X_train_over\r\n y_train = y_train_over\r\n ##################over sampling end b##################\r\n\r\n return X_train, X_test, y_train, y_test, features\r\n\r\n\r\ndef pre_datamanager_noheader(filename, oversample_flage):\r\n # seed ๊ฐ’ ์„ค์ •\r\n seed = 0\r\n numpy.random.seed(seed)\r\n tf.random.set_seed(seed)\r\n\r\n df_pre = pd.read_csv(filename, header=None)\r\n\r\n #df_pre.dropna()\r\n # print(df_pre.info())\r\n features = len(df_pre.columns) - 1\r\n # ๋ฐ์ดํ„ฐ ๋‚ด๋ถ€์˜ ๊ธฐํ˜ธ๋ฅผ ์ˆซ์ž๋กœ ๋ณ€ํ™˜ํ•˜๊ธฐ--- (โ€ป2)\r\n\r\n df_pre = df_pre.sample(frac=1)\r\n\r\n\r\n # ํ•™๊ณผ์ฝ”๋“œ๋ฅผ onhot encoding ํ•จ##########################\r\n #le = LabelEncoder()\r\n #le.fit(df_pre[0])\r\n #df_pre[0] = le.transform(df_pre[0])\r\n # ํ•™๊ณผ์ฝ”๋“œ๋ฅผ onhot encoding end ##########################\r\n #df_pre\r\n\r\n # data and label ๋ถ„๋ฆฌ\r\n dataset = df_pre.values\r\n X = dataset[:, 0:features]\r\n Y = dataset[:, features]\r\n\r\n # ํ…Œ์ŠคํŠธ, ๊ฒ€์ฆ๋ฐ์ดํ„ฐ ๋ถ„ํ•  7:3\r\n X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=0.3, random_state=seed)\r\n\r\n # ๋ฐ์ดํ„ฐ ์ •๊ทœํ™”\r\n X_train, X_test = X_train / 255, X_test / 255\r\n\r\n # over sampling\r\n from 
imblearn.over_sampling import SMOTE\r\n\r\n oversample_flage= False\r\n if oversample_flage:\r\n smote = SMOTE(random_state=0)\r\n X_train_over, y_train_over = smote.fit_sample(X_train, y_train)\r\n X_train = X_train_over\r\n y_train = y_train_over\r\n\r\n return X_train, X_test, y_train, y_test, features\r\n\r\n\r\n" }, { "alpha_fraction": 0.5797579884529114, "alphanum_fraction": 0.603960394859314, "avg_line_length": 36.59574508666992, "blob_id": "3be1457cc51d00b28e15e0a89f7e4f5a8e2dd9c5", "content_id": "98c8d58b354eaa229f427b015d90b11659616c22", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1834, "license_type": "no_license", "max_line_length": 67, "num_lines": 47, "path": "/dnn_network.py", "repo_name": "hiran75/deeplearning", "src_encoding": "UTF-8", "text": "\r\nfrom keras.models import Sequential\r\nfrom keras.layers import Dense\r\n\r\ndef make_network(features, hiddenlayer):\r\n # ๋ชจ๋ธ ๋„คํŠธ์›Œํฌ ๊ตฌ์„ฑ\r\n if hiddenlayer == 2:\r\n model = Sequential()\r\n model.add(Dense(20, input_dim=features, activation='relu'))\r\n model.add(Dense(5, activation='relu'))\r\n model.add(Dense(1, activation='sigmoid'))\r\n\r\n\r\n if hiddenlayer == 3:\r\n model = Sequential()\r\n model.add(Dense(28, input_dim=features, activation='relu'))\r\n model.add(Dense(15, activation='relu'))\r\n model.add(Dense(5, activation='relu'))\r\n model.add(Dense(1, activation='sigmoid'))\r\n\r\n if hiddenlayer == 4:\r\n model = Sequential()\r\n model.add(Dense(28, input_dim=features, activation='relu'))\r\n model.add(Dense(20, activation='relu'))\r\n model.add(Dense(15, activation='relu'))\r\n model.add(Dense(5, activation='relu'))\r\n model.add(Dense(1, activation='sigmoid'))\r\n\r\n if hiddenlayer == 5:\r\n model = Sequential()\r\n model.add(Dense(28, input_dim=features, activation='relu'))\r\n model.add(Dense(22, activation='relu'))\r\n model.add(Dense(18, activation='relu'))\r\n model.add(Dense(12, activation='relu'))\r\n model.add(Dense(5, activation='relu'))\r\n model.add(Dense(1, activation='sigmoid'))\r\n\r\n if hiddenlayer == 6:\r\n model = Sequential()\r\n model.add(Dense(30, input_dim=features, activation='relu'))\r\n model.add(Dense(28, input_dim=features, activation='relu'))\r\n model.add(Dense(20, input_dim=features, activation='relu'))\r\n model.add(Dense(12, activation='relu'))\r\n model.add(Dense(8, activation='relu'))\r\n model.add(Dense(1, activation='sigmoid'))\r\n #model.add(Dense(1, activation='softmax'))\r\n\r\n return model\r\n\r\n" }, { "alpha_fraction": 0.6341219544410706, "alphanum_fraction": 0.6464511752128601, "avg_line_length": 23.85344886779785, "blob_id": "b4442aea74cc38034a3dec99422c27220be1f194", "content_id": "5af8eb3133cb4cda587638a31a3b1f50fb4994b2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3233, "license_type": "no_license", "max_line_length": 91, "num_lines": 116, "path": "/testSMV.py", "repo_name": "hiran75/deeplearning", "src_encoding": "UTF-8", "text": "\r\nimport matplotlib.pyplot as plt\r\nfrom keras import backend as K\r\nimport pandas as pd\r\nimport numpy\r\nimport tensorflow as tf\r\nfrom sklearn.model_selection import train_test_split\r\nfrom keras.callbacks import ModelCheckpoint, EarlyStopping\r\nfrom keras.layers import Dense\r\nimport matplotlib.pyplot as plt\r\nfrom sklearn import svm, metrics, model_selection\r\nfrom sklearn.preprocessing import LabelEncoder\r\nimport datetime\r\nimport os\r\n\r\nfrom keras.models import 
Sequential\r\nfrom keras.layers import Dense\r\n\r\ndef setting(model_name):\r\n # ๋ชจ๋ธ ์ €์žฅ ํด๋” ์„ค์ •\r\n MODEL_DIR = './model/' + model_name\r\n #print(MODEL_DIR)\r\n if not os.path.exists(MODEL_DIR):\r\n os.mkdir(MODEL_DIR)\r\n return 1\r\n\r\n\r\nv_data = '20201114_v1' # ์‚ฌ์šฉํ•  ๋ฐ์ดํ„ฐ ์…‹\r\nfilename = \"./dataset/\" + v_data + \".csv\"\r\nmodel_name = v_data\r\n\r\n# seed ๊ฐ’ ์„ค์ •\r\nseed = 0\r\nnumpy.random.seed(seed)\r\ntf.random.set_seed(seed)\r\n\r\ndf_pre = pd.read_csv(filename, header=0) # CSVํŒŒ์ผ์„ ๋ถˆ๋Ÿฌ์˜ค๋Š” ํ•จ์ˆ˜๋ฅผ ์ด์šฉ\r\n\r\n# print(df_pre.info())\r\nfeatures = len(df_pre.columns) - 1\r\n# ๋ฐ์ดํ„ฐ ๋‚ด๋ถ€์˜ ๊ธฐํ˜ธ๋ฅผ ์ˆซ์ž๋กœ ๋ณ€ํ™˜ํ•˜๊ธฐ--- (โ€ป2)\r\n\r\ndf_pre = df_pre.sample(frac=1)\r\n\r\n# ์žฌํ•™์ƒ ๋ฐ์ดํ„ฐ๋งŒ ํ™œ์šฉ\r\n#df_pre = df_pre[df_pre['entYn'] == 0]\r\n\r\n# ์žฌํ•™์ƒ ๋ฐ์ดํ„ฐ๋งŒ ํ™œ์šฉ\r\n#df_pre = df_pre[df_pre['weekseq'] == 15]\r\n\r\n\r\n# ํ•™๊ณผ์ฝ”๋“œ๋ฅผ onhot encoding ํ•จ##########################\r\nle = LabelEncoder()\r\nle.fit(df_pre['dept'])\r\ndf_pre['dept'] = le.transform(df_pre['dept'])\r\n\r\n# le.fit(df_pre['gender'])\r\n# df_pre['gender'] = le.transform(df_pre['gender'])\r\n\r\n# le.fit(df_pre['area'])\r\n# df_pre['area'] = le.transform(df_pre['area'])\r\n\r\n# ์žฌํ•™์ƒ ๋ฐ์ดํ„ฐ๋งŒ ํ™œ์šฉ\r\n#df_pre = df_pre[df_pre['entYn'] == 0]\r\n\r\n# ์žฌํ•™์ƒ ๋ฐ์ดํ„ฐ๋งŒ ํ™œ์šฉ\r\n#df_pre = df_pre[df_pre['weekseq'] == 15]\r\n#df_pre = df_pre[df_pre['yearterm'] == '20191']\r\n\r\n# data and label ๋ถ„๋ฆฌ\r\ndataset = df_pre.values\r\nX = dataset[:, 0:features]\r\nY = dataset[:, features]\r\n\r\n\r\n# ํ…Œ์ŠคํŠธ, ๊ฒ€์ฆ๋ฐ์ดํ„ฐ ๋ถ„ํ•  7:3\r\nX_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=0.3, random_state=seed)\r\n\r\nsvm_model = svm.SVC() # ํ•™์Šต์‹œํ‚ค๊ธฐ\r\nsvm_model.fit(X_train, y_train)\r\n\r\nfrom sklearn.metrics import classification_report, confusion_matrix\r\n\r\ny_prediction = svm_model.predict(X_test)\r\n\r\nac_score = metrics.accuracy_score(y_test, y_prediction)\r\nY_prediction = svm_model.predict(X_test)\r\nnow = datetime.datetime.now()\r\n\r\nprint('SVM', now)\r\nprint('============================')\r\nprint(\"ํŒŒ์ผ๋ช… \", filename)\r\nprint(\"์š”์†Œ๊ฐฏ์ˆ˜ \", features)\r\nprint(\"================================== \")\r\n\r\nprint('SVM:\\nr\\r', classification_report(y_test, y_prediction))\r\nprint(\"SVM:์ •๋‹ต์œจ\", ac_score)\r\n\r\nfrom sklearn.metrics import roc_curve\r\n\r\nprobs = svm_model.predict(X_test).ravel()\r\nfpr_keras, tpr_keras, thresholds_keras = roc_curve(y_test, probs)\r\n\r\nfrom sklearn.metrics import auc\r\n\r\nauc_keras = auc(fpr_keras, tpr_keras)\r\n\r\nimport matplotlib.pyplot as plt\r\n\r\nplt.figure(1)\r\nplt.plot([0, 1], [0, 1], 'k--')\r\nplt.plot(fpr_keras, tpr_keras, label='(AUC = {:.3f})'.format(auc_keras))\r\nplt.xlabel('False positive rate')\r\nplt.ylabel('True positive rate')\r\nplt.title('ROC(Receiver Operating Characteristic) curve')\r\nplt.legend(loc='best')\r\nplt.show()\r\n" }, { "alpha_fraction": 0.6012375354766846, "alphanum_fraction": 0.6173942685127258, "avg_line_length": 26.80198097229004, "blob_id": "a8a2b71439c3205c013c3ffca7d84703ca47a5db", "content_id": "5d6a2144f70385ed769a39f03fc1f84ed127430a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6254, "license_type": "no_license", "max_line_length": 103, "num_lines": 202, "path": "/testDNN.py", "repo_name": "hiran75/deeplearning", "src_encoding": "UTF-8", "text": "\r\nimport matplotlib.pyplot as plt\r\nfrom keras import backend as 
K\r\nimport pandas as pd\r\nimport numpy\r\nimport tensorflow as tf\r\nfrom sklearn.model_selection import train_test_split\r\nfrom keras.callbacks import ModelCheckpoint, EarlyStopping\r\nfrom keras.layers import Dense\r\nimport matplotlib.pyplot as plt\r\nfrom sklearn.preprocessing import LabelEncoder\r\n\r\nfrom keras.models import Sequential\r\nfrom keras.layers import Dense\r\n\r\n\r\nv_layer = 3\r\ncost_f = \"binary_crossentropy\"\r\nv_epoches = 500\r\nv_batch_size = 20\r\n\r\n##############################################################\r\n\r\nv_data = '20201114_v1' # ์‚ฌ์šฉํ•  ๋ฐ์ดํ„ฐ ์…‹\r\n\r\nfilename = \"./dataset/\" + v_data + \".csv\"\r\nmodel_name = v_data\r\n\r\nimport os\r\n\r\ndef setting(model_name):\r\n # ๋ชจ๋ธ ์ €์žฅ ํด๋” ์„ค์ •\r\n MODEL_DIR = './model/' + model_name\r\n #print(MODEL_DIR)\r\n if not os.path.exists(MODEL_DIR):\r\n os.mkdir(MODEL_DIR)\r\n return 1\r\n\r\n\r\nseed = 0\r\nnumpy.random.seed(seed)\r\ntf.random.set_seed(seed)\r\n\r\ndf_pre = pd.read_csv(filename, header=0) # CSVํŒŒ์ผ์„ ๋ถˆ๋Ÿฌ์˜ค๋Š” ํ•จ์ˆ˜๋ฅผ ์ด์šฉ\r\n\r\n# print(df_pre.info())\r\nfeatures = len(df_pre.columns) - 1\r\n# ๋ฐ์ดํ„ฐ ๋‚ด๋ถ€์˜ ๊ธฐํ˜ธ๋ฅผ ์ˆซ์ž๋กœ ๋ณ€ํ™˜ํ•˜๊ธฐ--- (โ€ป2)\r\n\r\ndf_pre = df_pre.sample(frac=1)\r\n\r\n# ์žฌํ•™์ƒ ๋ฐ์ดํ„ฐ๋งŒ ํ™œ์šฉ\r\n#df_pre = df_pre[df_pre['entYn'] == 0]\r\n\r\n# ์žฌํ•™์ƒ ๋ฐ์ดํ„ฐ๋งŒ ํ™œ์šฉ\r\ndf_pre = df_pre[df_pre['weekseq'] == 1]\r\n\r\n\r\n# ํ•™๊ณผ์ฝ”๋“œ๋ฅผ onhot encoding ํ•จ##########################\r\nle = LabelEncoder()\r\nle.fit(df_pre['dept'])\r\ndf_pre['dept'] = le.transform(df_pre['dept'])\r\n\r\n# le.fit(df_pre['gender'])\r\n# df_pre['gender'] = le.transform(df_pre['gender'])\r\n\r\n# le.fit(df_pre['area'])\r\n# df_pre['area'] = le.transform(df_pre['area'])\r\n\r\n# ์žฌํ•™์ƒ ๋ฐ์ดํ„ฐ๋งŒ ํ™œ์šฉ\r\n#df_pre = df_pre[df_pre['entYn'] == 0]\r\n\r\n# ์žฌํ•™์ƒ ๋ฐ์ดํ„ฐ๋งŒ ํ™œ์šฉ\r\n#df_pre = df_pre[df_pre['weekseq'] == 15]\r\n#df_pre = df_pre[df_pre['yearterm'] == '20191']\r\n\r\n# data and label ๋ถ„๋ฆฌ\r\ndataset = df_pre.values\r\nX = dataset[:, 0:features]\r\nY = dataset[:, features]\r\n\r\n# ํ…Œ์ŠคํŠธ, ๊ฒ€์ฆ๋ฐ์ดํ„ฐ ๋ถ„ํ•  7:3\r\nX_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=0.3, random_state=seed)\r\n\r\n# ๋ฐ์ดํ„ฐ ์ •๊ทœํ™”\r\nX_train, X_test = X_train / 255, X_test / 255\r\n\r\n# over sampling\r\nfrom imblearn.over_sampling import SMOTE\r\n\r\noversample_flage = True\r\nif oversample_flage:\r\n smote = SMOTE(random_state=0)\r\n X_train_over, y_train_over = smote.fit_sample(X_train, y_train)\r\n X_train = X_train_over\r\n y_train = y_train_over\r\n\r\n# ๋ฐ์ดํ„ฐ ํ•™์Šต์‹œํ‚ค๊ธฐ --- (โ€ป4)\r\n\r\n# X_train, X_test, y_train, y_test, features= lib.pre_datamanager_noheader(filename, False)\r\n# ๋„คํŠธ์›Œํฌ ์ƒ์„ฑ\r\nmodel = Sequential()\r\nmodel.add(Dense(50, input_dim=features, activation='relu'))\r\nmodel.add(Dense(25, activation='relu'))\r\nmodel.add(Dense(20, activation='relu'))\r\nmodel.add(Dense(15, activation='relu'))\r\nmodel.add(Dense(1, activation='sigmoid'))\r\n\r\ndef recall_m(y_true, y_pred):\r\n true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))\r\n possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))\r\n recall = true_positives / (possible_positives + K.epsilon())\r\n return recall\r\n\r\n\r\ndef precision_m(y_true, y_pred):\r\n true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))\r\n predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))\r\n precision = true_positives / (predicted_positives + K.epsilon())\r\n return precision\r\n\r\n\r\ndef f1_m(y_true, 
y_pred):\r\n precision = precision_m(y_true, y_pred)\r\n recall = recall_m(y_true, y_pred)\r\n return 2 * ((precision * recall) / (precision + recall + K.epsilon()))\r\n\r\n# ๋ชจ๋ธ ์ปดํŒŒ์ผ\r\nmodel.compile(loss=cost_f,\r\n optimizer='adam',\r\n # metrics=['accuracy'])\r\n metrics=['accuracy', recall_m, precision_m, f1_m])\r\n\r\nMODEL_DIR = './model/' + model_name\r\n# ๋ชจ๋ธ ์ €์žฅ ์กฐ๊ฑด ์„ค์ •\r\nmodelpath = MODEL_DIR + \"/{epoch:02d}-{val_loss:.4f}.hdf5\"\r\ncheckpointer = ModelCheckpoint(filepath=modelpath, monitor='val_loss', verbose=0, save_best_only=True)\r\n\r\n# ํ•™์Šต ์ž๋™ ์ค‘๋‹จ ์„ค์ •\r\nearly_stopping_callback = EarlyStopping(monitor='val_loss', patience=100)\r\n\r\n# ๋ฐ์ดํ„ฐ ํ•™์Šต\r\nhistory = model.fit(X_train, y_train, validation_split=0.20, epochs=v_epoches, batch_size=v_batch_size,\r\n callbacks=[early_stopping_callback, checkpointer])\r\n\r\n\r\n#y_vloss = history.history['val_loss']\r\n#y_acc = history.history['acc']\r\n\r\n# x๊ฐ’์„ ์ง€์ •ํ•˜๊ณ  ์ •ํ™•๋„๋ฅผ ํŒŒ๋ž€์ƒ‰์œผ๋กœ, ์˜ค์ฐจ๋ฅผ ๋นจ๊ฐ„์ƒ‰์œผ๋กœ ํ‘œ์‹œ\r\n#x_len = numpy.arange(len(y_acc))\r\n#plt.plot(x_len, y_vloss, \"o\", c=\"red\", markersize=3)\r\n#plt.plot(x_len, y_acc, \"o\", c=\"blue\", markersize=3)\r\n\r\n#plt.show()\r\n\r\n# ๊ฒฐ๊ณผ ์ถœ๋ ฅ\r\nprint(\"\\n Accuracy: %.4f\" % (model.evaluate(X, Y)[1]))\r\n\r\nimport datetime\r\n\r\nprint('============================')\r\nprint(\"ํŒŒ์ผ๋ช… \", model_name)\r\nprint(\"์š”์†Œ๊ฐฏ์ˆ˜ \", features)\r\nprint(\"================================== \")\r\n\r\n# ๊ฒฐ๊ณผ ์ถœ๋ ฅ\r\n# print(\"\\n epoches\", v_epoches, \"bat_size=\", v_batch_size)\r\n# print(\"\\n ํ•™์Šต์ค‘๋‹จ + ๋ชจ๋ธ ์„ฑ๋Šฅ๊ฐœ์„  : arly_stopping_callback:\")\r\n# print(\"\\n ์˜ˆ์ธก์ •ํ™•๋„: %.4f\" % (model.evaluate(X_test, y_test)[1]))\r\n\r\n# ํ…Œ์ŠคํŠธ ๋ฐ์ดํ„ฐ ๊ฒ€์ฆ\r\nloss, accuracy, recall, precision, f1_socre = model.evaluate(X_test, y_test)\r\n# accuracy = model.evaluate(X_test, Y_test)\r\nprint('DNN_', datetime.datetime.now())\r\nprint(\"================================== \")\r\nprint(\"ํŒŒ์ผ๋ช… \", filename)\r\nprint(\"์š”์†Œ๊ฐฏ์ˆ˜ \", features)\r\nprint(\"================================== \")\r\nprint(\"์ •๋‹ต๋ฅ  =\", accuracy)\r\n\r\nprint(\"/n #accuracy, precision, recall, f1_score\")\r\nprint(\" # %.4f, %.4f, %4f, %.4f\" % (accuracy, precision, recall, f1_socre))\r\n\r\nprint(\"#file:\", filename, \"\\n\\n model:\", model_name, \"\\n accuracy:\", accuracy)\r\n\r\nfrom sklearn.metrics import roc_curve\r\n\r\nprobs = model.predict(X_test).ravel()\r\nfpr_keras, tpr_keras, thresholds_keras = roc_curve(y_test, probs)\r\n\r\nfrom sklearn.metrics import auc\r\n\r\nauc_keras = auc(fpr_keras, tpr_keras)\r\n\r\nplt.figure(1)\r\nplt.plot([0, 1], [0, 1], 'k--')\r\nplt.plot(fpr_keras, tpr_keras, label='(AUC = {:.3f})'.format(auc_keras))\r\nplt.xlabel('False positive rate')\r\nplt.ylabel('True positive rate')\r\nplt.title('DNN Model ROC(Receiver Operating Characteristic) curve')\r\nplt.legend(loc='best')\r\nplt.show()" }, { "alpha_fraction": 0.7894737124443054, "alphanum_fraction": 0.7894737124443054, "avg_line_length": 8.5, "blob_id": "76f9f1bca4fe2aff545831d06496e5371e08ce91", "content_id": "66a304d3ad8627932b2edeb154cbdddce132f5ef", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 25, "license_type": "no_license", "max_line_length": 14, "num_lines": 2, "path": "/README.md", "repo_name": "hiran75/deeplearning", "src_encoding": "UTF-8", "text": "# deeplearning\n๋”ฅ๋Ÿฌ๋‹\n" } ]
6
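The preprocessing.py record above label-encodes the categorical columns, does a 7:3 train/test split, rescales, and then oversamples the training set with SMOTE. Below is a minimal, self-contained sketch of that pipeline on synthetic data, assuming a current imblearn release (which names the method fit_resample; the fit_sample spelling used in the file is the older alias):

```python
# Sketch of the preprocessing.py pipeline: label-encode -> 7:3 split -> SMOTE.
import numpy as np
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import train_test_split
from imblearn.over_sampling import SMOTE

rng = np.random.default_rng(0)
dept = rng.choice(["CS", "EE", "ME"], size=200)        # categorical column, like 'dept'
X_num = rng.random((200, 4))                           # numeric feature columns
y = (rng.random(200) < 0.2).astype(int)                # imbalanced binary labels

dept_codes = LabelEncoder().fit_transform(dept)        # label encoding, as in the file
X = np.column_stack([dept_codes, X_num])

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0)

# Oversample only the training split so the test set stays untouched.
X_res, y_res = SMOTE(random_state=0).fit_resample(X_train, y_train)
print(np.bincount(y_train), "->", np.bincount(y_res))  # class counts before/after
```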
yumetov/use-server
https://github.com/yumetov/use-server
c106bcad2d5fedad82e44c4ccda5a8c16f24040b
b8fa799448009d460253abf41dd671d767386c86
996ffb13ec6ed5639adc84c2376ad5d7888f1793
refs/heads/master
2020-07-25T04:27:22.316613
2019-09-12T23:53:18
2019-09-12T23:53:18
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6149584650993347, "alphanum_fraction": 0.623268723487854, "avg_line_length": 31.81818199157715, "blob_id": "7ddf977177c626c7da4fda4ceaa9e289a8fc5a58", "content_id": "d7dd5323225bc89b0339a9fca5fd5131440f8e7a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 361, "license_type": "no_license", "max_line_length": 89, "num_lines": 11, "path": "/encoders.py", "repo_name": "yumetov/use-server", "src_encoding": "UTF-8", "text": "encoders = {\n 'xling-many': {\n 'url': 'https://tfhub.dev/google/universal-sentence-encoder-xling-many/1'\n },\n 'multilingual': {\n 'url': 'https://tfhub.dev/google/universal-sentence-encoder-multilingual/1'\n },\n 'multilingual-large': {\n 'url': 'https://tfhub.dev/google/universal-sentence-encoder-multilingual-large/1'\n }\n}\n" }, { "alpha_fraction": 0.6918103694915771, "alphanum_fraction": 0.7068965435028076, "avg_line_length": 21.095237731933594, "blob_id": "6db4479ffd206b97b0e36a39837998c716ad9a66", "content_id": "72d7806fc9283788716a935deea4ba11ee240fa4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 502, "license_type": "no_license", "max_line_length": 69, "num_lines": 21, "path": "/get_sentence_vector.py", "repo_name": "yumetov/use-server", "src_encoding": "UTF-8", "text": "import json\nimport numpy\nimport urllib.request\n\n\nurl = 'http://tsubame.nagaokaut.ac.jp:55575/sentence_vector'\ndata = {\n 'model_name': 'xling-many',\n 'sentence': '่ค‡้›‘ใงใ‚ใ‚‹ใ‚ˆใ‚Šใฏๅนณๆ˜“ใงใ‚ใ‚‹ใปใ†ใŒใ„ใ„ใ€‚'\n}\nheaders = {\n 'Content-Type': 'application/json'\n}\n\nreq = urllib.request.Request(url, json.dumps(data).encode(), headers)\nwith urllib.request.urlopen(req) as res:\n vec = json.load(res)\n\nnumpy.set_printoptions(threshold=10)\nary = numpy.array(vec)\nprint(ary, ary.shape)\n" }, { "alpha_fraction": 0.6598591804504395, "alphanum_fraction": 0.6676056385040283, "avg_line_length": 27.979591369628906, "blob_id": "27c7ee730a7b260d0ec25a62f08cee373d413dfa", "content_id": "5af96b864260efcab8be679e0abdb9d7acc6766c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1420, "license_type": "no_license", "max_line_length": 79, "num_lines": 49, "path": "/server.py", "repo_name": "yumetov/use-server", "src_encoding": "UTF-8", "text": "import tensorflow.compat.v1 as tf\nimport tensorflow_hub as hub\nimport tf_sentencepiece\nfrom flask import Flask, request, jsonify\nfrom encoders import encoders\n\n\nfor model_name, model_detail in encoders.items():\n g = tf.Graph()\n with g.as_default():\n text_input = tf.placeholder(dtype=tf.string, shape=[None])\n embed = hub.Module(model_detail['url'])\n embedded_text = embed(text_input)\n init_options = tf.group(\n [tf.global_variables_initializer(), tf.tables_initializer()])\n g.finalize()\n session = tf.Session(graph=g)\n session.run(init_options)\n model_detail['text_input'] = text_input\n model_detail['embedded_text'] = embedded_text\n model_detail['session'] = session\n\n\napp = Flask(__name__)\n@app.route('/')\ndef itworks():\n return 'It works!'\n\n\n@app.route('/sentence_vector', methods=['POST'])\ndef get_sent_vecs():\n posted = request.get_json()\n sentence = posted['sentence']\n model_name = posted['model_name']\n\n model_detail = encoders[model_name]\n session = model_detail['session']\n embedded_text = model_detail['embedded_text']\n text_input = model_detail['text_input']\n\n sentence = [sentence]\n embed_result = 
session.run(embedded_text, feed_dict={text_input: sentence})\n    embed_result = embed_result[0]\n    sentence_vector = embed_result.tolist()\n\n    return jsonify(sentence_vector)\n\n\napp.run(host='0.0.0.0', port=55575)\n" }, { "alpha_fraction": 0.6242235898971558, "alphanum_fraction": 0.6583850979804993, "avg_line_length": 19.125, "blob_id": "dccaf87cf924ea11991bc374131d13742c2de9a5", "content_id": "feed7f721c6c5eb6c32dfadb701d641d1af94a53", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Dockerfile", "length_bytes": 322, "license_type": "no_license", "max_line_length": 42, "num_lines": 16, "path": "/Dockerfile", "repo_name": "yumetov/use-server", "src_encoding": "UTF-8", "text": "FROM tensorflow/tensorflow:latest-py3\n\nRUN pip install --quiet --upgrade pip && \\\n    pip install --quiet \\\n    tensorflow_hub==0.6.0 \\\n    tf_sentencepiece==0.1.83 \\\n    Flask==1.1.1\n\nWORKDIR /\n\nCOPY encoders.py .\nCOPY download_model.py .\nRUN [\"python\", \"download_model.py\"]\n\nCOPY server.py .\nCMD [\"python\", \"server.py\"]\n" }, { "alpha_fraction": 0.673394501209259, "alphanum_fraction": 0.7862385511398315, "avg_line_length": 24.952381134033203, "blob_id": "d692374b2ea39c198efb6dfed93aee32869a5013", "content_id": "9dbc355a06154abe8318010ecacb88098d3d9273", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1690, "license_type": "no_license", "max_line_length": 95, "num_lines": 42, "path": "/README.md", "repo_name": "yumetov/use-server", "src_encoding": "UTF-8", "text": "# USE Server\nUSE Server is a server that, given a sentence, returns the sentence vector produced by the Universal Sentence Encoder. \nIt accepts a JSON request via HTTP POST to /sentence_vector and returns a JSON response. \nhost='0.0.0.0', port=55575 are hard-coded in server.py. \nAn example request and response are shown below.\n\n## Request\n```javascript\n{\n    \"model_name\": \"xling-many\",\n    \"sentence\": \"複雑であるよりは平易であるほうがいい。\"\n}\n```\n\n## Response\n```javascript\n[\n    -0.004482856020331383,\n    -0.01604999415576458,\n    0.01441269088536501,\n…\n    -0.0068213301710784435,\n    -0.05681886151432991,\n    0.0367775559425354\n]\n```\n\n## Models\nSpecify the name of the Universal Sentence Encoder model to use in \"model_name\". \nThe following three models are currently supported.\n\n- xling-many (https://tfhub.dev/google/universal-sentence-encoder-xling-many/1)\n- multilingual (https://tfhub.dev/google/universal-sentence-encoder-multilingual/1)\n- multilingual-large (https://tfhub.dev/google/universal-sentence-encoder-multilingual-large/1)\n\n## Sample code\nget_sentence_vector.py shows the minimal program needed to POST from a Python script and obtain a numpy array.\n\n## Notes\n- The content-type header must be set to application/json\n- The dimension of the sentence vector differs per model, so the length of the response array differs as well\n- There is no exception handling for unexpected input such as specifying a model_name that does not exist\n" }, { "alpha_fraction": 0.745945930480957, "alphanum_fraction": 0.745945930480957, "avg_line_length": 22.125, "blob_id": "92fad27a636898b79e15085be8ff66a7377a710d", "content_id": "17b612846bcf4cd46c916b9899447d276d01ca66", "detected_licenses": [], 
"is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 185, "license_type": "no_license", "max_line_length": 49, "num_lines": 8, "path": "/download_model.py", "repo_name": "yumetov/use-server", "src_encoding": "UTF-8", "text": "import tensorflow_hub as hub\nimport tf_sentencepiece\nfrom encoders import encoders\n\n\nfor model_name, model_detail in encoders.items():\n url = model_detail['url']\n hub.Module(url)\n" } ]
6
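The use-server README above documents a single JSON-over-POST endpoint. As a complement to the repo's urllib-based get_sentence_vector.py, here is a sketch of the same call with the requests library; the localhost URL is an assumption (the repo's sample targets a specific remote host), and requests' json= parameter sets the required application/json Content-Type automatically:

```python
# Hypothetical client for the /sentence_vector endpoint; assumes the server
# from server.py is reachable on localhost at its hard-coded port 55575.
import requests

resp = requests.post(
    "http://localhost:55575/sentence_vector",
    json={"model_name": "xling-many", "sentence": "It works!"},
    timeout=60,
)
resp.raise_for_status()
vec = resp.json()            # a list of floats; its length depends on the model
print(len(vec), vec[:3])
```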
thom6242/dispersed_data_project
https://github.com/thom6242/dispersed_data_project
07a625aad536f4e225353acbf29a2dc6a5779a5d
8dbba117a6f7899e944f5d5dc0f56c07a6d421b4
27e1bafc6fd2e34ff55141bda3d5e5f5d1b3c686
refs/heads/master
2020-04-12T00:29:19.449970
2018-12-17T23:17:41
2018-12-17T23:17:41
162,201,544
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5870901346206665, "alphanum_fraction": 0.6342213153839111, "avg_line_length": 24.0256404876709, "blob_id": "283b6c584aa15041e8d92fda853547fcb11628ed", "content_id": "a48af67209567a3f788bbdfa0fc69df5546dd7ec", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 976, "license_type": "no_license", "max_line_length": 76, "num_lines": 39, "path": "/wan_aware_scheduler.py", "repo_name": "thom6242/dispersed_data_project", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\nimport itertools\nimport numpy as np\nimport pandas as pd\n\nzones = ['us-east1-b','asia-south1-c','europe-west2-c']\nzone_combs = []\n\nnum_of_zones_req = 2\n\ndata = np.array([['','us-east1-b','asia-south1-c','europe-west2-c'],\n ['us-east1-b',1000,50,70],\n ['asia-south1-c',50,1000,20],\n\t\t['europe-west2-c',70,20,1000]])\n\nzone_WAN_info = (pd.DataFrame(data=data[1:,1:],\n index=data[1:,0],\n columns=data[0,1:]))\n\ndef getTotalWAN(zones):\n\ttotal_sum = 0\n\tfor comb in itertools.combinations(zones,2):\n\t\ttotal_sum += int(zone_WAN_info[comb[0]][comb[1]])\n\n\treturn total_sum\n\ndef get_top_k_zones():\n\n\tzones_with_WAN_info = []\n\n\tfor zone_comb in itertools.combinations(zones,num_of_zones_req):\n\t\tzone_combs.append(list(zone_comb))\n\t\tzones_with_WAN_info.append((getTotalWAN(list(zone_comb)),list(zone_comb)))\n\n\tzones_with_WAN_info.sort(reverse=True)\n\n\treturn zones_with_WAN_info[0][1]\n\nprint get_top_k_zones()\n" }, { "alpha_fraction": 0.6321290135383606, "alphanum_fraction": 0.6470588445663452, "avg_line_length": 26.178861618041992, "blob_id": "dca207c65edee4fcf7abc1b33b0cee3976fcb7d0", "content_id": "cf43e63707f90f19b43b182a552053c3711fc2ad", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3349, "license_type": "no_license", "max_line_length": 111, "num_lines": 123, "path": "/hybrid_scheduler.py", "repo_name": "thom6242/dispersed_data_project", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\nimport time\nimport random\nimport json\nfrom kubernetes import client, config, watch\nfrom collections import defaultdict\nfrom sdcclient import SdcClient\nconfig.load_kube_config()\nv1 = client.CoreV1Api()\nsdclient = SdcClient(\"d27bca02-23c6-44f0-b1db-6b48fe177585\")\n#sysdig_metric = \"net.http.request.time\"\nsysdig_metric = \"cpu.used.percent\"\n#metrics = [{ \"id\": sysdig_metric, \"aggregations\": { \"time\": \"timeAvg\", \"group\": \"avg\" } }]\nmetrics = [{ \"id\": \"cpu.used.percent\", \"aggregations\": { \"time\": \"timeAvg\", \"group\": \"avg\" } }]\nscheduler_name = \"hybrid\"\n\nzone_node_pod = defaultdict()\nzone_pod = defaultdict()\n\n\ndef nodes_available():\n for n in v1.list_node().items:\n for status in n.status.conditions:\n if status.status == \"True\" and status.type == \"Ready\" and n.spec.taints is None:\n zone = n.metadata.labels[\"zone\"]\n\t\tnode = n.metadata.name\n\t\tif zone not in zone_node_pod.keys():\n\t\t# add to dictionary that stores zone and nodes info along with pod count on each node\n\t\t\tzone_node_pod[zone] = {node:0}\n\t\t\tzone_pod[zone] = 0\n\t\telse:\n\t\t\tzone_node_pod[zone].update({node:0})\n\t\n\ndef is_balanced(pods_map):\n\tprev = -1\n\tfor k in pods_map.keys():\n\t\tpods = pods_map[k]\n\t\tif prev != -1 and pods != prev:\n\t\t\treturn False\n\t\tprev = pods\n\treturn True\n\n\t\ndef get_next(pods_map):\n\tfor k in pods_map.keys():\n\t\tpods_map[k] += 1\n\t\treturn k\n\n\ndef get_imbalanced(pods_map):\n\tavg = 
0.0\n\tfor k in pods_map.keys():\n\t\tavg += pods_map[k]\n\tavg = avg/len(pods_map)\n\tfor k in pods_map.keys():\n\t\tpods = pods_map[k]\n\t\tif pods < avg:\n\t\t\tpods_map[k] += 1\n\t\t\treturn k\n\t\n\t\ndef get_optimal_node():\n\tif is_balanced(zone_pod):\n\t\tzone = get_next(zone_pod)\n\t\treturn best_request_time(zone_node_pod[zone].keys())\n\tzone = get_imbalanced(zone_pod)\n\treturn best_request_time(zone_node_pod[zone].keys())\n\n\ndef get_request_time(hostname):\n hostfilter = \"host.hostName = '%s'\" % hostname\n start = -60\n end = 0\n sampling = 60\n #import pdb\n #pdb.set_trace()\n metricdata = sdclient.get_data(metrics, start, end, sampling, filter=hostfilter)\n #import pdb\n #pdb.set_trace()\n request_time = float(metricdata[1].get('data')[0].get('d')[0])\n print hostname + \" (\" + sysdig_metric + \"): \" + str(request_time)\n return request_time\n\n\ndef best_request_time(nodes):\n #time.sleep(20)\n if not nodes:\n return []\n node_times = [get_request_time(hostname) for hostname in nodes]\n best_node = nodes[node_times.index(min(node_times))]\n print \"Best node: \" + best_node\n time.sleep(20)\n return best_node\n\n\n\ndef scheduler(name, node, namespace=\"default\"):\n body=client.V1Binding()\n target=client.V1ObjectReference()\n target.kind=\"Node\"\n target.apiVersion=\"v1\"\n target.name= node\n meta=client.V1ObjectMeta()\n meta.name=name\n body.target=target\n body.metadata=meta\n return v1.create_namespaced_binding_binding(name,namespace, body)\n\n\ndef main():\n w = watch.Watch()\n nodes_available()\n for event in w.stream(v1.list_namespaced_pod, \"default\"):\n if event['object'].status.phase == \"Pending\" and event['object'].spec.scheduler_name == scheduler_name:\n try:\n\t\tres = scheduler(event['object'].metadata.name, get_optimal_node())\n except client.rest.ApiException as e:\n pass\n\n\nif __name__ == '__main__':\n main() \n\n" } ]
2
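wan_aware_scheduler.py in the record above picks the zone combination that maximizes the summed pairwise WAN values. A condensed worked example of that selection rule, using a hypothetical three-zone matrix rather than the repo's pandas table:

```python
# Score every 2-zone combination by total pairwise WAN and keep the best.
import itertools

wan = {("a", "b"): 50, ("a", "c"): 70, ("b", "c"): 20}  # hypothetical values

def total_wan(zones):
    return sum(wan[tuple(sorted(pair))] for pair in itertools.combinations(zones, 2))

best = max(itertools.combinations(["a", "b", "c"], 2), key=total_wan)
print(best, total_wan(best))  # ('a', 'c') 70
```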
urticazoku/selenium_final_test
https://github.com/urticazoku/selenium_final_test
aefd0cb0a0907ad70963ac1acaaab2e899354b86
21148acd14488aa55914a2dd8d64afa15e0ac276
16648d5f950e92030e94c493576553c24146ff32
refs/heads/master
2021-06-25T21:28:20.698458
2019-12-12T05:07:49
2019-12-12T05:07:49
226,815,294
0
0
null
2019-12-09T07:53:54
2019-12-12T05:08:05
2021-04-20T19:03:29
Python
[ { "alpha_fraction": 0.6890594959259033, "alphanum_fraction": 0.691938579082489, "avg_line_length": 39.11538314819336, "blob_id": "fc9634dd15484f0dadba3e84e178d5d19b55fc04", "content_id": "629ea211a15850fe9ec3a9e592c65628b930a0fb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1042, "license_type": "no_license", "max_line_length": 63, "num_lines": 26, "path": "/pages/locators.py", "repo_name": "urticazoku/selenium_final_test", "src_encoding": "UTF-8", "text": "from selenium.webdriver.common.by import By\n\nclass BasePageLocators():\n LOGIN_LINK = (By.CSS_SELECTOR, \"#login_link\")\n LOGIN_LINK_INVALID = (By.CSS_SELECTOR, \"#login_link_inc\")\n CART = (By.CSS_SELECTOR,'.hidden-xs a')\n USER_ICON = (By.CSS_SELECTOR, \".icon-user\")\n\nclass LoginPageLocators():\n\tLOGIN_FORM = (By.CSS_SELECTOR, \"#login_form\")\t\n\tREG_FORM = (By.CSS_SELECTOR, \"#register_form\")\n\tPASSWORD = (By.CSS_SELECTOR, \"#id_registration-password1\")\n\tPASSWORD_REP = (By.CSS_SELECTOR, \"#id_registration-password2\")\n\tEMAIL = (By.CSS_SELECTOR, '#id_registration-email')\n\tREG_BTN = (By.CSS_SELECTOR, '#register_form .btn-primary')\n\nclass ProductPageLocators():\n\tBUTTON = (By.CSS_SELECTOR, '.btn-add-to-basket')\n\tTITLE = (By.TAG_NAME, 'h1')\n\tPRICE = (By.CSS_SELECTOR, '.product_main .price_color')\n\tMSG = (By.CSS_SELECTOR, '.alertinner>strong')\n\tMSG_PRICE = (By.CSS_SELECTOR, '.alertinner p>strong')\n\tSUCCESS_MESSAGE = (By.CSS_SELECTOR, '.alertinner>strong')\n\nclass BasketPageLocators():\n\tEMPTY_MSG = (By.CSS_SELECTOR, '#content_inner p')" }, { "alpha_fraction": 0.6962365508079529, "alphanum_fraction": 0.6962365508079529, "avg_line_length": 36.29999923706055, "blob_id": "688dad103b11b9bc0a4a9a7d2ec5d4c9d83c6af2", "content_id": "0632a143f9714454dd31093890c74505f0d2ac56", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 372, "license_type": "no_license", "max_line_length": 72, "num_lines": 10, "path": "/pages/basket_page.py", "repo_name": "urticazoku/selenium_final_test", "src_encoding": "UTF-8", "text": "from .base_page import BasePage\nfrom .locators import BasketPageLocators\n\nclass BasketPage(BasePage):\n def __init__(self, *args, **kwargs):\n super(BasketPage, self).__init__(*args, **kwargs)\n\n def cart_should_be_empty(self):\n \tmsg = self.browser.find_element(*BasketPageLocators.EMPTY_MSG).text\n \tassert \"empty\" in msg, \"Basket should be empty, but not\"" }, { "alpha_fraction": 0.5235849022865295, "alphanum_fraction": 0.7122641801834106, "avg_line_length": 16.66666603088379, "blob_id": "7319d15ae64427ca079cab57f491e258fb8fbc75", "content_id": "cb735c91e0c6af5c4ad7e696fd36caedf3dd91c4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 212, "license_type": "no_license", "max_line_length": 23, "num_lines": 12, "path": "/requirements.txt", "repo_name": "urticazoku/selenium_final_test", "src_encoding": "UTF-8", "text": "atomicwrites==1.3.0\nEasyProcess==0.2.3\nmore-itertools==8.0.1\npackaging==19.2\npluggy==0.13.1\npy==1.8.0\npytest==5.1.1\nPyVirtualDisplay==0.2.1\nselenium==3.14.0\nsingledispatch==3.4.0.3\nvirtualenv==16.4.1\nzipp==0.6.0\n" }, { "alpha_fraction": 0.7156398296356201, "alphanum_fraction": 0.7156398296356201, "avg_line_length": 44.25, "blob_id": "01846e54c5785a508fae8f9c4cb074f6f5a3fc49", "content_id": "7b21eb7c96eb89eb50bd6494ce809aad3f4c1dd9", "detected_licenses": [], "is_generated": false, "is_vendor": false, 
"language": "Python", "length_bytes": 1266, "license_type": "no_license", "max_line_length": 131, "num_lines": 28, "path": "/pages/product_page.py", "repo_name": "urticazoku/selenium_final_test", "src_encoding": "UTF-8", "text": "from .base_page import BasePage\nfrom .locators import ProductPageLocators\n\nclass ProductPage(BasePage):\n def add_to_cart(self):\n link = self.browser.find_element(*ProductPageLocators.BUTTON)\n link.click()\n\n def get_title(self):\n return self.browser.find_element(*ProductPageLocators.TITLE).text\n\n def get_price(self):\n return self.browser.find_element(*ProductPageLocators.PRICE).text\n\n def should_add_to_basket(self, title, price):\n msg_title = self.browser.find_element(*ProductPageLocators.MSG)\n msg_price = self.browser.find_element(*ProductPageLocators.MSG_PRICE)\n assert title==msg_title.text, \"{} not in {}\".format(title,msg.text)\n assert price==msg_price.text, \"{} not in {}\".format(price,msg.text)\n\n def should_not_be_success_message(self):\n assert self.is_not_element_present(*ProductPageLocators.SUCCESS_MESSAGE), \"Success message is presented, but should not be\"\n\n def should_disappear(self):\n assert self.is_disappeared(*ProductPageLocators.SUCCESS_MESSAGE), \"Not dissapeared, but should not be\"\n\n def should_be_success_message(self):\n assert self.is_element_present(*ProductPageLocators.SUCCESS_MESSAGE), \"Success message is not presented, but should be\"" }, { "alpha_fraction": 0.6986933350563049, "alphanum_fraction": 0.6986933350563049, "avg_line_length": 42.36666488647461, "blob_id": "b157ceb4f70df4db579d7c57262544820ee50aae", "content_id": "bde587ddd1c31ace594bcd072d585d9f5986bf5a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1423, "license_type": "no_license", "max_line_length": 101, "num_lines": 30, "path": "/pages/login_page.py", "repo_name": "urticazoku/selenium_final_test", "src_encoding": "UTF-8", "text": "from .base_page import BasePage\nfrom .locators import LoginPageLocators\n\nclass LoginPage(BasePage):\n def should_be_login_page(self):\n self.should_be_login_url()\n self.should_be_login_form()\n self.should_be_register_form()\n\n def should_be_login_url(self):\n # ั€ะตะฐะปะธะทัƒะนั‚ะต ะฟั€ะพะฒะตั€ะบัƒ ะฝะฐ ะบะพั€ั€ะตะบั‚ะฝั‹ะน url ะฐะดั€ะตั\n assert \"login\" in self.browser.current_url \n\n def should_be_login_form(self):\n # ั€ะตะฐะปะธะทัƒะนั‚ะต ะฟั€ะพะฒะตั€ะบัƒ, ั‡ั‚ะพ ะตัั‚ัŒ ั„ะพั€ะผะฐ ะปะพะณะธะฝะฐ\n assert self.is_element_present(*LoginPageLocators.LOGIN_FORM), \"Login form is not presented\"\n\n def should_be_register_form(self):\n # ั€ะตะฐะปะธะทัƒะนั‚ะต ะฟั€ะพะฒะตั€ะบัƒ, ั‡ั‚ะพ ะตัั‚ัŒ ั„ะพั€ะผะฐ ั€ะตะณะธัั‚ั€ะฐั†ะธะธ ะฝะฐ ัั‚ั€ะฐะฝะธั†ะต\n assert self.is_element_present(*LoginPageLocators.REG_FORM), \"Register form is not presented\"\n\n def register_new_user(self, email, password):\n password_first = self.browser.find_element(*LoginPageLocators.PASSWORD)\n password_rep = self.browser.find_element(*LoginPageLocators.PASSWORD_REP)\n email_reg = self.browser.find_element(*LoginPageLocators.EMAIL)\n button_reg = self.browser.find_element(*LoginPageLocators.REG_BTN)\n email_reg.send_keys(email)\n password_first.send_keys(password)\n password_rep.send_keys(password)\n button_reg.click()\n" } ]
5
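The selenium_final_test pages above follow the page-object pattern: selectors live in locators.py, while behaviors and should_* assertions live in the page classes. base_page.py is not included in this record, so the sketch below assumes its usual shape in this style of project, with a (browser, url) constructor and an open() method; it is a usage illustration with a placeholder URL, not code from the repo:

```python
# Hypothetical usage of the page objects; BasePage(browser, url) and open()
# are assumptions, since base_page.py is not part of this record.
from selenium import webdriver
from pages.login_page import LoginPage

browser = webdriver.Chrome()
try:
    page = LoginPage(browser, "https://example.com/accounts/login/")  # placeholder URL
    page.open()                  # navigate to the stored url (assumed BasePage method)
    page.should_be_login_page()  # url + login form + register form checks
    page.register_new_user("test@example.com", "Str0ngPass!")
finally:
    browser.quit()
```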
benjlis/bicycle
https://github.com/benjlis/bicycle
f6ee7a621208cc0a97db1afc435e18f6d31befa4
d56fbe242df6e341b89608a48b4eed2e88c671de
368ea9f34ba4187272e6c9d36562c61eb10350fa
refs/heads/master
2016-08-06T19:26:37.243853
2015-08-27T18:02:42
2015-08-27T18:02:42
41,503,089
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5929941534996033, "alphanum_fraction": 0.6113427877426147, "avg_line_length": 35.30303192138672, "blob_id": "c2a72af6c2ccbb2ddb261170d093ff2891e2b9ea", "content_id": "93efa8f2b096c35ed7d568994eae775e1ad95b87", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1199, "license_type": "no_license", "max_line_length": 97, "num_lines": 33, "path": "/main.py", "repo_name": "benjlis/bicycle", "src_encoding": "UTF-8", "text": "import bicycle\n\n# testing methods\ndef create_test_objects():\n bshop = bicycle.BikeShop(\"Ben's\", .2, [('Trek Hybrid', 1), ('Trek Road', 1), ('Trek Kid', 2),\n ('Canondale Ultra', 1), ('Giant Towner', 3)])\n customers = [bicycle.Customer('Tony', 200), \\\n bicycle.Customer('Kate', 500), \\\n bicycle.Customer('Lou', 1000)]\n return bshop, customers\n\n# created this so that I could continue processing after errors\n# Question: is this the simplest most pythonic way?\ndef buy_bike(customer, bike_shop, model_name):\n try:\n customer.buy_bike(bike_shop, model_name)\n except Exception, err:\n print err\n \nif __name__ == '__main__':\n bshop, customers = create_test_objects()\n bshop.print_inventory()\n for c in customers:\n c.print_affordable_bikes(bshop)\n buy_bike(customers[0], bshop, 'Trek Kid')\n buy_bike(customers[1], bshop, 'Giant Towner')\n buy_bike(customers[1], bshop, 'Trek Kid')\n buy_bike(customers[2], bshop, 'Canondale Ultra')\n buy_bike(customers[2], bshop, 'Trek Road')\n buy_bike(customers[0], bshop, 'Xxx')\n print\n bshop.print_inventory()\n bshop.print_profit()\n\n" }, { "alpha_fraction": 0.5681707859039307, "alphanum_fraction": 0.5773895978927612, "avg_line_length": 38.98058319091797, "blob_id": "3603a3931c04d1400a81a958764ae6390e485d32", "content_id": "0c41df4ba05280e6752851a2912a9b6b5941df22", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4122, "license_type": "no_license", "max_line_length": 94, "num_lines": 103, "path": "/bicycle.py", "repo_name": "benjlis/bicycle", "src_encoding": "UTF-8", "text": "class Bicycle(object):\n def __init__(self, model_name, weight, production_cost):\n self.model_name = model_name\n self.weight = weight\n self.production_cost = production_cost\n \n def __str__(self):\n return \"model: \" + self.model_name + \", weight: \" + \\\n str(self.weight) + \", prod cost:\" + \\\n str(self.production_cost)\n\n# Not strictly called for by exercise but thought it bike shop inventory and \n# customer simpler. The idea here is to create one univeral bike catalog of all \n# bikes that can be referenced by all bike shops and customers.\n#\n# The implementation here is a singleton object.\n# Question: Is this the pythonic way to do this? 
\nclass BikeCatalog(object):\n def __init__(self):\n self.listings = [Bicycle('Trek Hybrid', 20, 500), \\\n Bicycle('Trek Road', 15, 1000), \\\n Bicycle('Trek Kid', 15, 75), \\\n Bicycle('Canondale Fitness', 16, 700), \\\n Bicycle('Canondale Ultra', 14, 800), \\\n Bicycle('Giant Towner', 28, 300)]\n \n def get_production_cost(self, model_name): \n for b in self.listings:\n if b.model_name == model_name:\n return b.production_cost\n # if we get here, the lookup failed \n raise ValueError(model_name + \" not in bike catalog.\")\n \nbc = BikeCatalog() # create one instance that can be refenence by all BikeShops and Customers\n\nclass BikeShop(object):\n def __init__(self, name, margin = .2, inventory = []):\n self.name = name\n self.margin = margin\n self.inventory = {}\n for b in inventory:\n self.inventory[b[0]] = b[1]\n self.profit = 0\n \n def get_sale_price(self, bike):\n return (1.0 + self.margin) * bc.get_production_cost(bike)\n \n def under_budget(self, budget):\n \"\"\"Returns a list of bicycles in inventory that are within budget\"\"\"\n within_budget = []\n for bike in self.inventory:\n if budget >= self.get_sale_price(bike):\n within_budget.append(bike)\n return(within_budget)\n \n def print_inventory(self):\n print self.name + \" Inventory:\"\n for bike, qty in self.inventory.iteritems():\n print bike, qty\n print\n \n def print_profit(self):\n print self.name + \" profit to date: $\" + str(int(self.profit)) \n \n def purchase(self, model_name):\n self.inventory[model_name] -= 1 # decrement inventory\n if self.inventory[model_name] == 0: # no more in inventory remove\n del self.inventory[model_name] \n # update_profit\n self.profit += bc.get_production_cost(model_name) * self.margin\n return(model_name)\n\nclass Customer(object):\n def __init__(self, name, budget):\n self.name = name\n self.budget = budget\n self.bikes_owned = []\n \n def print_affordable_bikes(self, bike_shop):\n \"\"\"Print a list of bicycles in invetory at bike_shop that are within \n budget\n \"\"\"\n print \"Bikes at \" + bike_shop.name + \" that \" + self.name + \\\n \" can afford, given a budget of $\" + str(self.budget) + \":\"\n for b in bike_shop.under_budget(self.budget):\n print b\n print\n\n # Question: is the print_receipt flag the best way to have the ability\n # to buy a bike with/withoutthe receipt?\n def buy_bike(self, bike_shop, model_name, print_receipt=True):\n sale_price = bike_shop.get_sale_price(model_name)\n # make sure bike is in within budget\n if sale_price <= self.budget:\n self.bikes_owned.append(bike_shop.purchase(model_name))\n self.budget -= sale_price\n if print_receipt:\n print(self.name + \" purchased a \" + model_name + \\\n \" and has a remaining budget of $\" + str(int(self.budget))) \n else:\n # Question: is this the best way to handle this case.\n raise ValueError(model_name + \" not within \" + self.name + \"'s budget\")\n return\n " } ]
2
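bicycle.py in the record above shares a single BikeCatalog created at import time and asks whether that is the pythonic way. A module-level instance is in fact the common idiom, because Python initializes a module only once per process; an equivalent, slightly flatter variant makes the module itself the singleton. A minimal sketch with a hypothetical catalog module (not part of this repo):

```python
# catalog.py -- hypothetical module; module-level state acts as the singleton
_PRICES = {'Trek Kid': 75, 'Giant Towner': 300}

def get_production_cost(model_name):
    try:
        return _PRICES[model_name]
    except KeyError:
        raise ValueError(model_name + ' not in bike catalog.')

# Every `import catalog` sees the same _PRICES dict, so
# catalog.get_production_cost('Trek Kid') shares state with no class at all.
```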
sumeyye-agac/channel-distillation
https://github.com/sumeyye-agac/channel-distillation
0df2a56bc84e70038fe16560bada51c1ea002c64
2464d30ba01e11491e520e51be498c91f1e54b91
95c499e600fa235e9059615e08e5e4d7da76413f
refs/heads/master
2023-06-21T16:08:43.944285
2020-06-09T14:26:17
2020-06-09T14:26:17
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5603806376457214, "alphanum_fraction": 0.6230103969573975, "avg_line_length": 29.909090042114258, "blob_id": "2604200df6888825d40ce6460cf665bc19488c9b", "content_id": "50ea00fc8788857cc17cbb1822680d894e413abd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 5952, "license_type": "no_license", "max_line_length": 170, "num_lines": 187, "path": "/README.md", "repo_name": "sumeyye-agac/channel-distillation", "src_encoding": "UTF-8", "text": "# Channel Distillation\n\nPyTorch implement of [Channelย Distillation: Channel-Wiseย Attentionย forย Knowledge Distillation](https://arxiv.org/abs/2006.01683)\n\n## Innovation\n\n1. Channel Distillation (CD)\n2. Guided Knowledge Distillation (GKD)\n3. Early Decay Teacher (EDT)\n\nNote\n> In our code, kdv2 means GKD and lrd2 means EDT.\n\n## Structure of Repository\n```\nโ”œโ”€โ”€ cifar_config.py # Hyperparameters\nโ”œโ”€โ”€ cifar_train.py\nโ”œโ”€โ”€ data\nโ”‚ โ””โ”€โ”€ directory_of_data.md\nโ”œโ”€โ”€ imagenet_config.py # Hyperparameters\nโ”œโ”€โ”€ imagenet_train.py\nโ”œโ”€โ”€ losses\nโ”‚ โ”œโ”€โ”€ cd_loss.py # CD Loss\nโ”‚ โ”œโ”€โ”€ ce_loss.py\nโ”‚ โ”œโ”€โ”€ __init__.py\nโ”‚ โ””โ”€โ”€ kd_loss.py # GKD Loss\nโ”œโ”€โ”€ models\nโ”‚ โ”œโ”€โ”€ channel_distillation.py # Distillation Network\nโ”‚ โ”œโ”€โ”€ __init__.py\nโ”‚ โ””โ”€โ”€ resnet.py\nโ”œโ”€โ”€ pretrain\nโ”‚ โ””โ”€โ”€ path_of_teacher_checkpoint.md\nโ”œโ”€โ”€ README.md\nโ””โ”€โ”€ utils\n โ”œโ”€โ”€ average_meter.py\n โ”œโ”€โ”€ data_prefetcher.py\n โ”œโ”€โ”€ __init__.py\n โ”œโ”€โ”€ logutil.py\n โ”œโ”€โ”€ metric.py\n โ””โ”€โ”€ util.py # Early Decay Teacher\n```\n\n## Requirements\n\n> python >= 3.7 \n> torch >= 1.4.0 \n> torchvision >= 0.5.0\n\n## Experiments\n\n### ImageNet\n\n#### Prepare Dataset\n\n+ Download the ImageNet dataset from http://www.image-net.org/\n+ Then, move validation images to labeled subfolders, using [the following shell script](https://raw.githubusercontent.com/soumith/imagenetloader.torch/master/valprep.sh)\n\n```bash\nimages should be arranged in this way\n\n./data/ILSVRC2012/train/dog/xxx.png\n./data/ILSVRC2012/train/cat/xxy.png\n./data/ILSVRC2012/val/dog/xxx.png\n./data/ILSVRC2012/val/cat/xxy.png\n```\n\n#### Training\n\nNote \n> Teacher checkpoint will be downloaded automatically. 
\n\nRunning the following command and experiment will be launched.\n\n```bash\nCUDA_VISIBLE_DEVICES=0 python3 ./imagenet_train.py\n```\n\nIf you want to run other experiments, you just need modify following losses in `imagenet_config.py`\n\n+ s_resnet18.t_resnet34.cd.ce\n```python\nloss_list = [\n {\"loss_name\": \"CELoss\", \"loss_rate\": 1, \"factor\": 1, \"loss_type\": \"ce_family\", \"loss_rate_decay\": \"lrdv1\"},\n {\"loss_name\": \"CDLoss\", \"loss_rate\": 6, \"factor\": 1, \"loss_type\": \"fd_family\", \"loss_rate_decay\": \"lrdv1\"},\n]\n```\n\n+ s_resnet18.t_resnet34.cd.ce.kdv2\n```python\nloss_list = [\n {\"loss_name\": \"CELoss\", \"loss_rate\": 1, \"factor\": 1, \"loss_type\": \"ce_family\", \"loss_rate_decay\": \"lrdv1\"},\n {\"loss_name\": \"KDLossv2\", \"T\": 1, \"loss_rate\": 1, \"factor\": 1, \"loss_type\": \"kdv2_family\", \"loss_rate_decay\": \"lrdv1\"},\n {\"loss_name\": \"CDLoss\", \"loss_rate\": 6, \"factor\": 0.9, \"loss_type\": \"fd_family\", \"loss_rate_decay\": \"lrdv1\"},\n]\n```\n\n+ s_resnet18.t_resnet34.cd.kdv2.lrdv2\n```python\nloss_list = [\n {\"loss_name\": \"CELoss\", \"loss_rate\": 1, \"factor\": 1, \"loss_type\": \"ce_family\", \"loss_rate_decay\": \"lrdv2\"},\n {\"loss_name\": \"KDLossv2\", \"T\": 1, \"loss_rate\": 1, \"factor\": 1, \"loss_type\": \"kdv2_family\", \"loss_rate_decay\": \"lrdv2\"},\n {\"loss_name\": \"CDLoss\", \"loss_rate\": 6, \"factor\": 0.9, \"loss_type\": \"fd_family\", \"loss_rate_decay\": \"lrdv2\"},\n]\n```\n\n#### Result\n\n+ Ablation Study\n\n| Method | Model | Top-1 error(%) | Top-5 error(%) |\n| -- | -- | -- | -- |\n| teacher | ResNet34 | 26.73 | 8.74 |\n| student | ResNet18 | 30.43 | 10.76 |\n| KD | ResNet34-ResNet18 | 29.50 | 9.52 |\n| **CD(our)** | ResNet34-ResNet18 | 28.53 | 9.56 |\n| **CD+GKD(our)** | ResNet34-ResNet18 | 28.26 | 9.41 |\n| **CD+GKD+EDT(our)** | ResNet34-ResNet18 | 27.61 | 9.2 |\n\n+ Comparion result with other methods\n\n| Method | Model | Top-1 error(%) | Top-5 error(%) |\n| -- | -- | -- | -- |\n| teacher | ResNet34 | 26.73 | 8.74 |\n| student | ResNet18 | 30.43 | 10.76 |\n| KD | ResNet34-ResNet18 | 29.50 | 9.52 |\n| FitNets | ResNet34-ResNet18 | 29.34 | 10.77 |\n| AT | ResNet34-ResNet18 | 29.30 | 10.00 |\n| RKD | ResNet34-ResNet18 | 28.46 | 9.74 |\n| **CD+GKD+EDT(our)** | ResNet34-ResNet18 | 27.61 | 9.2 |\n\n### CIFAR100\n\n#### Prepare Dataset\nCIFAR100 dataset will be downloaded automatically.\n\n#### Training\n\nNote\n> Download the teacher checkpoint from [here](https://drive.google.com/file/d/1e3IW5pxH7W-aOipIY7cGQJ3dmRrHXZ51/view) \n> Then, put the checkpoint in the pretrain directory\n\nRunning the following command and experiment will be launched.\n\n```bash\nCUDA_VISIBLE_DEVICES=0 python3 ./cifar_train.py\n```\n\nIf you want to run other experiments, you just need modify following losses in `cifar_config.py`\n\n+ s_resnet18.t_resnet34.cd.ce\n```python\nloss_list = [\n {\"loss_name\": \"CELoss\", \"loss_rate\": 1, \"factor\": 1, \"loss_type\": \"ce_family\", \"loss_rate_decay\": \"lrdv1\"},\n {\"loss_name\": \"CDLoss\", \"loss_rate\": 6, \"factor\": 1, \"loss_type\": \"fd_family\", \"loss_rate_decay\": \"lrdv1\"},\n]\n```\n\n+ s_resnet18.t_resnet34.cd.ce.kdv2\n```python\nloss_list = [\n {\"loss_name\": \"CELoss\", \"loss_rate\": 1, \"factor\": 1, \"loss_type\": \"ce_family\", \"loss_rate_decay\": \"lrdv1\"},\n {\"loss_name\": \"KDLossv2\", \"T\": 1, \"loss_rate\": 0.1, \"factor\": 1, \"loss_type\": \"kdv2_family\", \"loss_rate_decay\": \"lrdv1\"},\n {\"loss_name\": \"CDLoss\", \"loss_rate\": 6, 
\"factor\": 0.9, \"loss_type\": \"fd_family\", \"loss_rate_decay\": \"lrdv1\"},\n]\n```\n\n+ s_resnet18.t_resnet34.cd.kdv2.lrdv2\n```python\nloss_list = [\n {\"loss_name\": \"CELoss\", \"loss_rate\": 1, \"factor\": 1, \"loss_type\": \"ce_family\", \"loss_rate_decay\": \"lrdv2\"},\n {\"loss_name\": \"KDLossv2\", \"T\": 1, \"loss_rate\": 0.1, \"factor\": 1, \"loss_type\": \"kdv2_family\",\"loss_rate_decay\": \"lrdv2\"},\n {\"loss_name\": \"CDLoss\", \"loss_rate\": 6, \"factor\": 0.9, \"loss_type\": \"fd_family\", \"loss_rate_decay\": \"lrdv2\"},\n]\n```\n\n#### Result\n\n+ Ablation Study\n\n| Method | Model | Top-1 error(%) | Top-5 error(%) |\n| -- | -- | -- | -- |\n| teacher | ResNet152| 19.09 | 4.45 |\n| student | ResNet50 | 22.02 | 5.74 |\n| KD | ResNet152-ResNet50 | 20.36 | 4.94 |\n| **CD(our)** | ResNet152-ResNet50 | 20.08 | 4.78 |\n| **CD+GKD(our)** | ResNet152-ResNet50 | 19.49 | 4.85 |\n| **CD+GKD+EDT(our)** | ResNet152-ResNet50 | 18.63 | 4.29 |\n" }, { "alpha_fraction": 0.45445239543914795, "alphanum_fraction": 0.48208802938461304, "avg_line_length": 38.08000183105469, "blob_id": "bbe67e768d2f5a6cdf181987f6caf11eb803e61d", "content_id": "b7fbedeecf423d4e2b9a100dc4bab217bed61b7d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 977, "license_type": "no_license", "max_line_length": 121, "num_lines": 25, "path": "/utils/util.py", "repo_name": "sumeyye-agac/channel-distillation", "src_encoding": "UTF-8", "text": "def adjust_loss_alpha(alpha, epoch, factor=0.9, loss_type=\"ce_family\", loss_rate_decay=\"lrdv1\", dataset_type=\"imagenet\"):\n \"\"\"Early Decay Teacher\"\"\"\n\n if dataset_type == \"imagenet\":\n if loss_rate_decay == \"lrdv1\":\n return alpha * (factor ** (epoch // 30))\n else: # lrdv2\n if \"ce\" in loss_type or \"kd\" in loss_type:\n return 0 if epoch <= 30 else alpha * (factor ** (epoch // 30))\n else:\n return alpha * (factor ** (epoch // 30))\n else: # cifar\n if loss_rate_decay == \"lrdv1\":\n return alpha\n else: # lrdv2\n if epoch >= 160:\n exponent = 2\n elif epoch >= 60:\n exponent = 1\n else:\n exponent = 0\n if \"ce\" in loss_type or \"kd\" in loss_type:\n return 0 if epoch <= 60 else alpha * (factor**exponent)\n else:\n return alpha * (factor**exponent)\n" }, { "alpha_fraction": 0.5361050367355347, "alphanum_fraction": 0.54923415184021, "avg_line_length": 25.882352828979492, "blob_id": "069f77f0358922a65357289d9486070672539dcb", "content_id": "101131008b0df655d8e3033000e156f287fdcbbb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 457, "license_type": "no_license", "max_line_length": 62, "num_lines": 17, "path": "/losses/cd_loss.py", "repo_name": "sumeyye-agac/channel-distillation", "src_encoding": "UTF-8", "text": "import torch\nimport torch.nn as nn\n\n\nclass CDLoss(nn.Module):\n \"\"\"Channel Distillation Loss\"\"\"\n\n def __init__(self):\n super().__init__()\n\n def forward(self, stu_features: list, tea_features: list):\n loss = 0.\n for s, t in zip(stu_features, tea_features):\n s = s.mean(dim=(2, 3), keepdim=False)\n t = t.mean(dim=(2, 3), keepdim=False)\n loss += torch.mean(torch.pow(s - t, 2))\n return loss\n" } ]
3
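losses/cd_loss.py in the record above reduces each feature map to its per-channel means and penalizes the squared student-teacher gap, summed over the paired feature maps. A minimal usage sketch with random tensors standing in for real feature maps; shapes are illustrative, and in the actual pipeline the two lists come from the distillation network:

```python
# Feed paired student/teacher feature maps (N, C, H, W) into CDLoss.
import torch
from losses.cd_loss import CDLoss

student_feats = [torch.randn(8, 64, 56, 56), torch.randn(8, 128, 28, 28)]
teacher_feats = [f + 0.1 * torch.randn_like(f) for f in student_feats]  # stand-in teacher

criterion = CDLoss()
loss = criterion(student_feats, teacher_feats)  # MSE of per-channel means, summed over pairs
print(loss.item())
```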
dylan2805/SCB_V1
https://github.com/dylan2805/SCB_V1
b4a8eee365cb9bf43906c7ee63290e86c7ce76ef
4999c4fc4ca3786de502d88009e9e91868767914
7566141a0d31e84f4744b147a5fd6b90da8b3590
refs/heads/master
2020-08-07T04:11:03.486868
2019-10-10T09:09:57
2019-10-10T09:09:57
213,291,404
0
1
null
null
null
null
null
[ { "alpha_fraction": 0.6184351444244385, "alphanum_fraction": 0.6205787658691406, "avg_line_length": 34.846153259277344, "blob_id": "efbc78fc3cec6fb0355a9438e08cef07824e2cf7", "content_id": "b748376011a6604b07794918dfbd3a2fc685da77", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 933, "license_type": "no_license", "max_line_length": 136, "num_lines": 26, "path": "/Get_img_and_issuer_excl.py", "repo_name": "dylan2805/SCB_V1", "src_encoding": "UTF-8", "text": "from os import listdir\nfrom os.path import isfile, join\nimport requests\nimport re\n\ndef get_issuer_exclusivity(terms):\n issuer_dict = {'visa':'visa', 'master':'mastercard|master card', 'mx':'americanexpress|american express', 'up':'unionpay|union pay'}\n issuer_count = {}\n for key in issuer_dict.keys():\n issuer_count[key] = len(re.findall(issuer_dict[key], terms.lower()))\n if max(issuer_count.values())==0:\n return 'all'\n else:\n return max(issuer_count, key=issuer_count.get)\n\ndef get_image(img_url, img_set, dir_path, img_name):\n try:\n img_path = dir_path+ img_name + re.findall(r'(\\.jpg|\\.png)', img_url)[0]\n except:\n img_path = dir_path+ img_name + '.jpg'\n if img_name not in img_set:\n# img_res = requests.get(img_url)\n# with open(img_path, 'wb') as handle:\n# handle.write(img_res.content)\n img_set.add(img_name)\n return img_path\n\n" }, { "alpha_fraction": 0.42073169350624084, "alphanum_fraction": 0.6829268336296082, "avg_line_length": 13.909090995788574, "blob_id": "3c83e7cf35706367c46e3c8de68449300eddaec1", "content_id": "f1ce5db542dfca8d602b9c8122cffa781b3116f5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 164, "license_type": "no_license", "max_line_length": 21, "num_lines": 11, "path": "/requirements.txt", "repo_name": "dylan2805/SCB_V1", "src_encoding": "UTF-8", "text": "tqdm==4.28.1\nnumpy==1.15.4\ngeopy==1.20.0\nrequests==2.20.1\npandas==0.25.1\nbeautifulsoup4==4.8.1\nscikit_learn==0.21.3\nurllib3==1.24.3\nlxml\nboto3==1.9.210\ns3fs==0.3.4\n" }, { "alpha_fraction": 0.6144366264343262, "alphanum_fraction": 0.6170774698257446, "avg_line_length": 43.55882263183594, "blob_id": "c1c58705eb767af21871f9a2f36b24545cb2d918", "content_id": "4539bd941b296c067156e5e73c96ed1487a1b582", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4544, "license_type": "no_license", "max_line_length": 141, "num_lines": 102, "path": "/completion.py", "repo_name": "dylan2805/SCB_V1", "src_encoding": "UTF-8", "text": "import re\nimport time\nimport pickle\nimport numpy as np\nimport pandas as pd\nfrom datetime import datetime\nfrom sklearn.feature_extraction.text import CountVectorizer\n \n\"\"\" Remove std category \"\"\"\ndef get_stdcat_from_tax(terms, taxonomy):\n count = []\n for key in list(taxonomy.keys()):\n count_vect = CountVectorizer(lowercase = True, vocabulary = taxonomy[key] )\n a = count_vect.fit_transform([terms])\n count += [np.sum(a)]\n return list(taxonomy.keys())[np.argmax(count)]\n\n\"\"\" Completion pipeline: CC cat\"\"\"\ndef completion_CCcat(df_card_name, df_cat, df_subcat, df_merchant, df_promo, df_term, cat_to_CCcat, CC_category_taxonomy):\n if re.findall('_', df_card_name)!=[]:\n bank_name = df_card_name.split('_')[0]\n else:\n bank_name = 'dbs'\n \n # separate case for citi and scb because we also look at subcategory\n if bank_name in ['citi', 'scb']:\n cat_key = str(df_cat).lower()+'#'+str(df_subcat).lower()\n else:\n cat_key = 
str(df_cat).lower()\n if cat_to_CCcat[bank_name][cat_key]['apply_tax']=='True':\n df_cat = get_stdcat_from_tax(' '.join([str(df_merchant), str(df_promo),str(df_term)]), CC_category_taxonomy)\n else :\n df_cat = cat_to_CCcat[bank_name][cat_key]['std_category']\n \n return df_cat\n \n\"\"\" Completion pipeline: stdcat\"\"\"\ndef completion_stdcat(df_card_name, df_cat, df_subcat, df_merchant, df_promo, df_term, cat_to_stdcat, std_category_taxonomy):\n if re.findall('_', df_card_name)!=[]:\n bank_name = df_card_name.split('_')[0]\n else:\n bank_name = 'dbs'\n # separate case for citi and scb because we also look at subcategory\n if bank_name in ['citi', 'scb']:\n cat_key = str(df_cat).lower()+'#'+str(df_subcat).lower()\n else:\n cat_key = str(df_cat).lower()\n if cat_to_stdcat[bank_name][cat_key]['apply_tax']=='True':\n df_cat = get_stdcat_from_tax(' '.join([str(df_merchant), str(df_promo),str(df_term)]), std_category_taxonomy)\n else :\n df_cat = cat_to_stdcat[bank_name][cat_key]['std_category']\n \n return (bank_name, df_cat)\n \n\"\"\" Completion pipeline: google type\n Do after the completion pipeline of standard_category or CC Buddy Category\n\"\"\"\ndef completion_google_type(standard_category, stdcategory_to_googletype):\n cat_key = str(standard_category).lower()\n df_cat = stdcategory_to_googletype[cat_key] \n return df_cat\n\n\"\"\" Completion pipeline: google_api \"\"\"\ndef completion_google_api(df_address, df_is_online):\n if str(df_address) == \"\":\n if str(df_is_online) == \"True\":\n return (False, None)\n else:\n if str(df_is_online) == \"False\":\n return (True, None)\n else:\n return (None, None)\n else:\n match = re.search(r'www|website|website.|participating outlets|http|https|Valid at all|Click here|View here', str(df_address))\n if match:\n return (True, str(df_address).lower())\n else:\n return (False, None) \n \n\"\"\" Completion pipeline: postal code \"\"\"\ndef completion_postal(is_online, postal_code, postal_code_map):\n if str(postal_code).isdigit():\n num_list = list(str(postal_code).zfill(6))\n sector_num = int(\"\".join(num_list[0:2])) # two-first numbers -> sector\n sector = sector_num\n if sector_num in list(postal_code_map.postal_sector): \n district = postal_code_map[postal_code_map.postal_sector == sector_num]['postal_district'].iloc[0]\n district_name = postal_code_map[postal_code_map.postal_sector == sector_num]['postal_district_name'].iloc[0]\n general_location = postal_code_map[postal_code_map.postal_sector == sector_num]['general_location'].iloc[0]\n area = postal_code_map[postal_code_map.postal_sector == sector_num]['suggested_area'].iloc[0]\n else:\n district, district_name, general_location, area = '', '', '', ''\n else:\n if str(is_online) == \"True\":\n postal_code, sector, district, district_name, general_location, area = '', '', '', 'Online', 'Online', 'Online' \n else:\n postal_code, sector, district, district_name, general_location, area = '', '', '', '', '', ''\n \n if str(sector).isdigit():\n return str(postal_code).zfill(6), str(sector).zfill(2) , str(district).zfill(2), str(district_name), str(general_location), str(area)\n else:\n return str(postal_code), str(sector), str(district), str(district_name), str(general_location), str(area)" }, { "alpha_fraction": 0.5192673802375793, "alphanum_fraction": 0.5240722894668579, "avg_line_length": 51.525001525878906, "blob_id": "33375e29933aebb10bd775aece1f8cbadcdaeeda", "content_id": "f5de9f7f4e84eaa9c78c1c753639e48468c741f7", "detected_licenses": [], "is_generated": false, "is_vendor": false, 
"language": "Python", "length_bytes": 21023, "license_type": "no_license", "max_line_length": 316, "num_lines": 400, "path": "/SCB_Extract.py", "repo_name": "dylan2805/SCB_V1", "src_encoding": "UTF-8", "text": "## Load packages\n# Load packages\nimport re\nimport json\nimport pickle\nimport string\nimport requests\nimport numpy as np\nimport pandas as pd\nfrom os import listdir, makedirs\nfrom bs4 import BeautifulSoup\nfrom datetime import datetime, date\nfrom os.path import isfile, join,exists\nfrom urllib.request import Request,urlopen\nfrom helper_func import promo_caption_analysis\nfrom completion import *\nfrom google_api_caller import *\npd.set_option('display.max_columns', 999)\nimport time\n\nstart_time = time.time()\nimport boto3\nimport s3fs\n\ns3 = s3fs.S3FileSystem(anon=False)\n\nclass Extract:\n def get_cards(self):\n pass\n def get_promotions(self):\n pass\n def get_card_promotions(self):\n pass\n def get_url_content(self,url):\n req = Request(url, headers={'User-Agent': 'Mozilla/5.0'})\n webpage = urlopen(req).read()\n return webpage\n \nclass ScbExtract(Extract):\n \"\"\" Get the card name \"\"\"\n def get_cards(self, url):\n webpage = self.get_url_content(url)\n souptxt = BeautifulSoup(webpage,'lxml')\n cards = pd.DataFrame()\n productcontents = souptxt.find_all(\"li\",attrs={\"class\":\"item card has-desc\"})\n for productcontent in productcontents:\n productname = (productcontent.find(\"h3\", attrs={\"class\":\"title\"}).get_text()).strip()\n productdesc = (productcontent.find(\"p\", attrs={\"class\":\"description\"}).get_text()).strip() \n for EachPart in productcontent.select('a[class*=\"front is-link\"]'):\n producthref = EachPart.get('href') \n row = pd.DataFrame([productname, productdesc, producthref]).T\n cards = pd.concat([cards,row])\n return cards\n \"\"\" Get the promotion code \"\"\"\n def GetPromoCode(self, text, patternlist):\n patterns = \"|\".join(patternlist)\n srcresult = [ i for i in text.split(\"\\n\") if not re.findall(patterns,i.upper())==[]]\n pattern=[re.findall(patterns,k.upper()) for k in srcresult]\n pattern = list(set([y for x in pattern for y in x]))\n if len(pattern)>0:\n pattern = pattern[0]\n srcresult = [(s.upper().split(pattern)[-1]).strip().split()[0] for s in srcresult]\n for puncs in [\"รขโ‚ฌ.\",\":\",\";\",'\"',\"'\",\",\",\")\",\"]\",\"}\",\"(\",\"[\",\"{\"]:\n# print('bef = ',srcresult)\n srcresult = [i.replace(puncs, '') for i in srcresult if len(i)>2]\n# print('aft = ',srcresult)\n #srcresult = srcresult[0]\n return srcresult\n \"\"\" Get the image \"\"\"\n def get_image(self, img_url, img_set, dir_path, img_name):\n try:\n img_path = dir_path+ img_name + re.findall(r'(\\.jpg|\\.jpeg|\\.png)', img_url)[0]\n except:\n img_path = dir_path+ img_name + '.jpg'\n if img_name not in img_set:\n# img_res = requests.get(img_url)\n# with open(img_path, 'wb') as handle:\n# handle.write(img_res.content)\n img_set.add(img_name)\n return img_path\n \"\"\" Get the compressed merchant name \"\"\"\n def compress(self, name):\n name = name.translate(str.maketrans('', '', string.punctuation))\n name = ''.join([i if ord(i) < 128 else ' ' for i in name])\n name=\"\".join(name.lower().split())\n return name\n\n \"\"\" Get the minimum pax \"\"\"\n def GetMinPax(self, text, patternlist):\n patterns = \"|\".join(patternlist)\n ret = re.findall(patterns,text)\n if len(ret):\n ret = [int(s) for s in ret[0] if s.isdigit()][0]\n else:\n ret = np.NaN\n return ret\n \"\"\" Get the maximum pax \"\"\"\n def GetMaxPax(self, text, patternlist):\n patterns = 
\"|\".join(patternlist)\n ret = re.findall(patterns,text)\n if len(ret):\n ret = [int(s) for s in ret[0] if s.isdigit()][0]\n else:\n ret = np.NaN\n return ret\n \"\"\" Get the sentence \"\"\"\n def GetSentence(self,text):\n sentences=[]\n if len(re.findall(\"\\n\",text))>1:\n para = re.split(r'\\n', text.strip())\n for line in para:\n sublines = re.split(r' *[\\.\\?!][\\'\"\\)\\]]* *', line.strip())\n for subline in sublines:\n if len(subline)>0:\n sentences += [subline] \n return sentences\n \"\"\" Get the image deprecated \"\"\"\n def get_image_deprecated(self,img_url,img_set, dir_path, img_name):\n img_path=np.NaN\n if len(img_url)>0:\n img_name = img_url.split('/')[-1]\n if not re.search(r'\\.(jpg|jpeg|png)$', img_name):\n img_name = img_name.split(\"\\\\\")[0]\n if not re.search(r'\\.(jpg|jpeg|png)$', img_name):\n img_name = img_name.split('?')[0]\n img_path = dir_path+ img_name\n if img_name not in img_set:\n img_res = requests.get(img_url)\n with open(img_path, 'wb') as handle:\n handle.write(img_res.content)\n img_set.add(img_name) \n return img_path\n \"\"\" Get the set of image's name \"\"\"\n def set_imgurl_fname(self,url,setname):\n urlcontainer = \"/\".join(url.split(\"/\")[0:-1])\n ext = (url.split(\"/\")[-1]).split(\".\")[-1]\n fname = \"\".join(url.split(\"/\")[-1].split(\".\")[0:-1]).strip()\n return urlcontainer+\"/\"+setname+\".\"+ext\n \"\"\" Get the issuer exclusivity \"\"\"\n def get_issuer_exclusivity(self,terms):\n dictkeys = {'visa':'visa', 'master':'mastercard|master card', 'amex':'americanexpress|american express', 'unionpay':'unionpay|union pay'}\n issuer_count = {'visa':0, 'master':0,'amex':0,'unionpay':0}\n for key,value in dictkeys.items():\n issuer_count[key] = len(re.findall(value, terms.lower()))\n if max(issuer_count.values())==0:\n return 'all'\n else:\n return max(issuer_count, key=issuer_count.get)\n \"\"\" Get the postal code \"\"\"\n def GetPostalCode(self,address):\n IsSingapore = True if len(re.findall('Singapore', address)) else False\n pc = \"\"\n if IsSingapore:\n pc = address.split(\"Singapore\")[-1].strip()\n return pc\n \"\"\" Get the exclusion rule \"\"\"\n def get_exclusionrule(self,offr):\n visa = [offr[\"visa\"]] if (\"visa\" in offr) else [False]\n master = [offr[\"mas\"]] if (\"mas\" in offr) else [False]\n exclusion =\"scb_all\"\n if ((visa[0]) & (not master[0])):\n exclusion =\"scb_visa\"\n if((not visa[0]) & (master[0])):\n exclusion =\"scb_master\"\n return exclusion\n \"\"\" Get the promotions\"\"\"\n def get_promotions(self, url):\n json_data = self.get_url_content(url).decode('utf-8')\n d = json.loads(json_data)\n rows = []\n count = 0\n columns=[\"promo_code\",\"minmaxlimit\",\"card_name\",\"id\", \"merchant_name\",\"promotion_caption\",\"promotion\",\"image_url\",\"start\", \"end\",\"terms\",\"category\",\"subcategory\",\"visa\",\"master\",'website',\"raw_input\",\"vcardlist\",\"mcardlist\",\"brcode\",\"qrcode\",'storename','address',\"latitude\",\"longitude\",\"ophr\",\"phone\",\"fax\"]\n for offr in d['offers']['offer']:\n vcardlist=offr['visa_card_list'] if ('visa_card_list' in offr) else [] \n mcardlist=offr['master_card_list'] if ('master_card_list' in offr) else [] \n vallen = len(mcardlist)\n row=[]\n vTerm=BeautifulSoup(offr[\"tnc\"],'lxml').getText().strip()\n promocaption = offr[\"otitle\"].strip()\n promotioncontainer =BeautifulSoup(offr[\"odesc\"],'lxml')\n promotion = promotioncontainer.getText().strip()\n ## Figuring out Exclusivity ## \n listitems = promotioncontainer.find(\"ul\")\n promocode = []\n minspend = 
[]\n\n if(len(promocode)==0):\n for sentence in self.GetSentence(promotion):\n if len(re.findall('promo code',sentence.lower()))>0 and len(re.findall('does not apply|is not applicable|terms and conditions',sentence.lower()))==0 :\n promocode+=[sentence]\n if(len(promocode)==0):\n for sentence in self.GetSentence(vTerm):\n if len(re.findall('promo code',sentence.lower()))>0 and len(re.findall('does not apply|is not applicable|terms and conditions',sentence.lower()))==0 :\n promocode+=[sentence]\n\n if(len(promocode)>0):\n promocode =list(np.unique(np.array(promocode)))\n prmcode =[]\n for prm in promocode:\n prm=re.sub(' +', ' ', prm.strip()) \n if len(re.findall('promo code',prm.lower()))>0 and len(re.findall('does not apply|is not applicable|terms and conditions',prm.lower()))==0 :\n prmcode.append(prm)\n promocode = \". \".join(prmcode)\n else:\n promocode = \"\"\n \n if(len(minspend)==0):\n for sentence in self.GetSentence(vTerm):\n if len(re.findall(\"limited to|limited of upto|minimum spend|maximum|min\\.\\s\\d*\",sentence.lower()))>0 and len(re.findall('goods and service|terms of service|reserves the right|not limited to|is not applicable|terms and conditions',sentence.lower()))==0 :\n minspend+=[sentence]\n\n if(len(minspend)>0):\n minspend =list(np.unique(np.array(minspend)))\n prmcode =[]\n for prm in minspend:\n prm=re.sub(' +', ' ', prm.strip())\n if len(re.findall(\"limited to|limited of upto|minimum spend|maximum|min\\.\\s\\d*\",prm.lower()))>0 and len(re.findall('goods and service|terms of service|reserves the right|not limited to|is not applicable|terms and conditions',prm.lower()))==0 :\n prmcode.append(prm)\n minspend = \". \".join(prmcode)\n else:\n minspend = \"\"\n \n if len(promocaption)==0:\n promocaption = re.split(r'\\n', promotion)[0]\n row+=[promocode]\n row+=[minspend]\n row+=[self.get_exclusionrule(offr)]\n row+=[offr[\"id\"]]\n row+=[offr[\"name\"].strip()]\n \n row+=[promocaption]\n row+=[promotion]\n row+=[offr[\"oimg\"].strip()]\n sd=datetime.strptime(offr[\"sd\"].strip(), \"%d-%m-%Y %H:%M:%S\").date() \n row+=[sd] \n ed=datetime.strptime(offr[\"ed\"].strip(), \"%d-%m-%Y %H:%M:%S\").date() \n row+=[ed] \n row+=[vTerm]\n row+= [offr[\"cat\"]] if (\"cat\" in offr) else [\"\"]\n row+=[offr[\"sbcat\"]] if (\"sbcat\" in offr) else [\"\"]\n row+=[offr[\"visa\"]] if (\"visa\" in offr) else [False]\n row+=[offr[\"mas\"]] if (\"mas\" in offr) else [False]\n row+=[offr[\"url\"]]\n row+=[offr]\n row+=[str(vcardlist)]\n row+=[str(mcardlist)]\n row+=[offr[\"brcode\"]]\n row+=[offr[\"qrcode\"]]\n\n if len(offr['venue'])>0:\n cnt=0\n OfferVenue = offr['venue'] \n for venue in OfferVenue:\n mrow=[]\n for k,v in venue.items():\n v=str(v).strip() if (k in venue) else \"\" \n v= \"\" if v == \"None\" else v\n v=re.sub(\"/\",\"|\",v)\n mrow.append(v)\n rows.append(row+mrow)\n cnt+=1\n else:\n mrow=['','','','','','','']\n rows.append(row+mrow) \n count+=1\n deals = pd.DataFrame(rows, columns=columns) \n return deals\n \"\"\" Get the card promotions \"\"\"\n def get_card_promotions(self, outfile, promotions, cards):\n pass \n# main\nif __name__ == '__main__' : \n deals_url = \"https://www.sc.com/sg/data/tgl/offers.json\"\n cc_link = \"https://www.sc.com/sg/credit-cards/\"\n scb_obj = ScbExtract()\n SCBDeals = scb_obj.get_promotions(deals_url)\n SCBDeals['flag']=\"\"\n SCBDeals['comments']=\"\"\n SCBDeals['postal_code'] = SCBDeals.address.apply(scb_obj.GetPostalCode)\n SCBDeals.phone=SCBDeals.phone.apply(lambda x:(''.join(x.strip().split(\" \"))).split(\"Ext:\")[0])\n cc = 
scb_obj.get_cards(cc_link)\n cc.columns = ['card_name','feature','linktofeaturedetails']\n cc.to_csv(\"scb_cards.csv\")\n SCBDeals['issuer_exclusivity'] = SCBDeals.terms.apply(lambda x:scb_obj.get_issuer_exclusivity(x))\n SCBDeals['merchant_compressed'] = SCBDeals.merchant_name.apply(scb_obj.compress)\n \n img_dir='images/scb/' + str(date.today()) + '/'\n if not exists(img_dir):\n makedirs(img_dir)\n img_set = set([f for f in listdir(img_dir) if isfile(join(img_dir, f))])\n SCBDeals[\"image_path\"] = SCBDeals[[\"image_url\",\"merchant_compressed\"]].apply(lambda x:scb_obj.get_image(x[0],img_set,img_dir,x[1]),axis=1)\n \n #-------------------------------------------------------------------------------------------\n # Derive is_online\n #-------------------------------------------------------------------------------------------\n SCBDeals['is_online'] = SCBDeals.category.apply(lambda x: x.lower()==\"online\")\n\n #-------------------------------------------------------------------------------------------\n # Handling of min_pax, max_pax\n #-------------------------------------------------------------------------------------------\n patternlist=[\"min\\. of \\d diners\",\"minimum of \\d diners\",\"min\\. of \\d pax\",\"minimum of \\d pax\",\"min\\. of \\d person\",\"minimum of \\d person\",\"min\\. of \\d people\",\"minimum of \\d people\"]\n SCBDeals['min_pax'] = SCBDeals.terms.apply(lambda x:scb_obj.GetMinPax(x,patternlist))\n patternlist=[\"max. of \\d pax\",\"maximum of \\d pax\",\"max. of \\d diners\",\"maximum of \\d diners\",\"max. of \\d person\",\"maximum of \\d person\",\"max. of \\d people\",\"maximum of \\d people\"]\n SCBDeals['max_pax'] = SCBDeals.terms.apply(lambda x:scb_obj.GetMaxPax(x,patternlist))\n\n #-------------------------------------------------------------------------------------------\n # Handling of promo codes\n #-------------------------------------------------------------------------------------------\n print(\"Handling promo codes\")\n patternlist = ['PROMO CODE:','PROMO CODE :','PROMO CODE']\n SCBDeals['promo_code'] = SCBDeals.promotion.apply(lambda x: scb_obj.GetPromoCode(x,patternlist))\n xpromorows = SCBDeals[SCBDeals.promo_code.apply(len)==0]\n xpromorows.promo_code = \"\"\n promorows = SCBDeals[SCBDeals.promo_code.apply(len)>0]\n\n promorow = pd.DataFrame()\n for index, row in promorows.iterrows():\n crow = row.copy()\n for i in row.promo_code:\n crow.promo_code=str(i)\n promorow = pd.concat([promorow,pd.DataFrame(crow).T]) \n SCBDeals = pd.concat([xpromorows,promorow])\n \n data_folder = \"data/\"\n\n \"\"\" Load merchant_dict \"\"\"\n with open(data_folder + 'merchant_dict_clean.pickle', 'rb') as handle:\n merchant_dict = pickle.load(handle)\n\n \"\"\" Load cat_to_stdcat for CC cat \"\"\"\n with open(data_folder + 'cat_to_CC_cat.pickle', 'rb') as handle:\n cat_to_CCcat = pickle.load(handle)\n \"\"\" Load stdcategory_to_googletype for CC cat \"\"\"\n with open(data_folder + 'CC_category_to_googletype.pickle', 'rb') as handle:\n CCcategory_to_googletype = pickle.load(handle)\n \"\"\" Load std_category_taxonomy for CC cat \"\"\"\n with open(data_folder + 'CC_category_taxonomy.pickle', 'rb') as handle:\n CC_category_taxonomy = pickle.load(handle)\n\n \"\"\" Load cat_to_stdcat \"\"\"\n with open(data_folder + 'cat_to_stdcat.pickle', 'rb') as handle:\n cat_to_stdcat = pickle.load(handle)\n \"\"\" Load stdcategory_to_googletype \"\"\"\n with open(data_folder + 'stdcategory_to_googletype.pickle', 'rb') as handle:\n stdcategory_to_googletype = 
pickle.load(handle)\n \"\"\" Load std_category_taxonomy \"\"\"\n with open(data_folder + 'std_category_taxonomy.pickle', 'rb') as handle:\n std_category_taxonomy = pickle.load(handle)\n\n postal_code_map = pd.read_csv(data_folder + 'RegionTable.csv')\n\n #-------------------------------------------------------------------------------------------\n # Handling of promotion analytic\n #-------------------------------------------------------------------------------------------\n \n SCBDeals['promotion_analytic'] = SCBDeals.promotion_caption.apply(lambda x: promo_caption_analysis(x))\n\n #-------------------------------------------------------------------------------------------\n # Handling of standarlization\n #-------------------------------------------------------------------------------------------\n SCBDeals.category = SCBDeals.category.apply(lambda x: np.nan if str(x)==\"\" else x)\n SCBDeals.subcategory = SCBDeals.subcategory.apply(lambda x: np.nan if str(x)==\"\" else x)\n SCBDeals['google_api'] = SCBDeals[['address', 'is_online']].apply(lambda x: completion_google_api(x.address, x.is_online)[0], axis=1)\n SCBDeals['listing_outlet'] = SCBDeals[['address', 'is_online']].apply(lambda x: completion_google_api(x.address, x.is_online)[1], axis=1)\n SCBDeals['std_category'] = SCBDeals[['card_name', 'category', 'subcategory', 'merchant_name', 'promotion', 'terms']].apply(lambda x: \n completion_stdcat(str(x.card_name), str(x.category), str(x.subcategory), str(x.merchant_name), \n str(x.promotion), str(x.terms), cat_to_stdcat, std_category_taxonomy)[1], axis=1)\n SCBDeals['cc_buddy_category'] = SCBDeals[['card_name', 'category', 'subcategory', 'merchant_name', 'promotion', 'terms']].apply(lambda x: \n completion_CCcat(str(x.card_name), str(x.category), str(x.subcategory), str(x.merchant_name), \n str(x.promotion), str(x.terms), cat_to_CCcat, CC_category_taxonomy), axis=1) \n SCBDeals['google_type'] = SCBDeals.std_category.apply(lambda x: completion_google_type(x, stdcategory_to_googletype)) \n\n #-------------------------------------------------------------------------------------------\n # Handling of postal code\n #-------------------------------------------------------------------------------------------\n SCBDeals['postal_code'] = SCBDeals[['is_online', 'postal_code']].apply(lambda x: str(completion_postal(x.is_online, x.postal_code, postal_code_map)[0]), axis=1)\n SCBDeals['country'] = SCBDeals['postal_code'].apply(lambda x: \"SGP\")\n SCBDeals['sector'] = SCBDeals[['is_online', 'postal_code']].apply(lambda x: str(completion_postal(x.is_online, x.postal_code, postal_code_map)[1]), axis=1)\n SCBDeals['district'] = SCBDeals[['is_online', 'postal_code']].apply(lambda x: str(completion_postal(x.is_online, x.postal_code, postal_code_map)[2]), axis=1)\n SCBDeals['district_name'] = SCBDeals[['is_online', 'postal_code']].apply(lambda x: str(completion_postal(x.is_online, x.postal_code, postal_code_map)[3]), axis=1)\n SCBDeals['general_location'] = SCBDeals[['is_online', 'postal_code']].apply(lambda x: str(completion_postal(x.is_online, x.postal_code, postal_code_map)[4]), axis=1) \n SCBDeals['area'] = SCBDeals[['is_online', 'postal_code']].apply(lambda x: str(completion_postal(x.is_online, x.postal_code, postal_code_map)[5]), axis=1) \n\n ############################################################################################\n print(\"Output data\")\n ColStandard = ['card_name', 'category', 'subcategory', 'cc_buddy_category', 'std_category', 'merchant_name', \n 'merchant_compressed', 
'google_type', 'promotion', 'promotion_caption','promotion_analytic', 'promo_code', 'address', \n                   'latitude', 'longitude', 'start', 'end', 'phone', 'website', 'image_url', 'image_path', 'issuer_exclusivity', \n                   'raw_input','min_pax','max_pax', 'is_online', 'listing_outlet', 'google_api', 'terms', 'postal_code', 'country',\n                   'sector', 'district', 'district_name', 'general_location', 'area', 'flag', 'comments'] \n    StandardDeals=SCBDeals[ColStandard]\n    \n    name = \"scb_\" + str(date.today()) +\".csv\"\n    directory = \"s3://data-pipeline-cardspal/\"+str(date.today())+\"/extracts/\"+ name\n    with s3.open(directory,'w') as f:\n        StandardDeals.to_csv(f,index=False)\n    print(\"SCB uploaded.\")\n    \n    print(\"$$$$$$$$$$$$$$$$$ OUTPUT SUCCESS $$$$$$$$$$$$$$$$$$$$\")\n    print(\"------------ %s minutes ------------\" %((time.time() - start_time)/60))\n    \n    \n" }, { "alpha_fraction": 0.5384615659713745, "alphanum_fraction": 0.5497413277626038, "avg_line_length": 47.92946243286133, "blob_id": "292d351d46819aa6b3c7e1490c8d47417b964bfa", "content_id": "220e3c92e813027c9f7f072a81e9b87dd621f0ba", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 11791, "license_type": "no_license", "max_line_length": 156, "num_lines": 241, "path": "/helper_func.py", "repo_name": "dylan2805/SCB_V1", "src_encoding": "UTF-8", "text": "import re\nimport requests\nimport numpy as np\nfrom datetime import datetime\n\n\"\"\" Remove all html tag \"\"\"\ndef remove_html_tags(text):\n    \"\"\"Remove html tags from a string\n    :param text: input text\n    :return clean text\n    \"\"\"\n    import re\n    clean = re.compile('<.*?>')\n    return re.sub(clean, ' ', text)\n\n\"\"\" Remove special characters \"\"\"\ndef remove_special_characters(text):\n    \"\"\"Remove special characters\n    :param text: input text\n    :return clean text\n    \"\"\"\n    replace_list_wo_sp = ['<p>', '</p>','<span style=\"background: white; color: black;\">',\n                          '<p style=\"margin: 0in 0in 0pt;\">', '<p style=\"margin: 0in 0in 0pt;\">',\n                          '</span>','<span>', '<span style=\"color: black;\">', '</li><div>', '</div>',\n                          '&ldquo;','&rdquo;','<ul style=\"list-style-type: circle;\">', '</ul>', '<div>', \n                          '\\r', '<p style=\"margin: 0in 0in 8pt;\">', '&amp;', '&reg;','^', '*' , ' *','&rsquo;s']\n    for replace_word in replace_list_wo_sp:\n        text = str(text).replace(replace_word, ' ')\n    \n    replace_list_w_sp = ['<br>', '&nbsp;', '*#']\n    for replace_word in replace_list_w_sp:\n        text = str(text).replace(replace_word, ' ')\n    \n    text = str(text).replace('&lsaquo;', '<')\n    text = str(text).replace('&rsaquo;', '>')\n    text = str(text).replace('&percnt;', '%')\n    text = str(text).replace('&lt;', '<')\n    text = str(text).replace('&gt;', '>')\n    text = str(text).replace('S&dollar;', 'S$')\n    text = str(text).replace('SGD', 'S$')\n    return text.strip()\n\n# \"\"\" Get sentence from a text \"\"\"\n# def GetSentence(text):\n#     \"\"\"Get sentence from text\n#     :param text: input text\n#     :return: sentences\n#     \"\"\"\n#     sentences=[]\n#     if len(re.findall(\"\\n\",text))>1:# & (method & 1) == 1:\n#         para = re.split(r'\\n', text.strip())\n#         for line in para:\n#             sublines = re.split(r' *[\\.\\?!][\\'\"\\)\\]]* *', line.strip())\n#             for subline in sublines:\n#                 if len(subline)>0:\n#                     sentences += [subline] \n#     return sentences\n\n# \"\"\" Get promotion code from the text \"\"\"\n# def GetPromoCode(text, patternlist):\n#     \"\"\"Get promotion code from text\n#     :param text: input text\n#     :param patternlist: pattern of promotion code\n#     :return: promotion code\n#     \"\"\"\n#     patterns = 
\"|\".join(patternlist)\n# srcresult = [ i for i in text.split(\"\\n\") if not re.findall(patterns,i.upper())==[]]\n# pattern=[re.findall(patterns,k.upper()) for k in srcresult]\n# pattern = list(set([y for x in pattern for y in x]))\n# if len(pattern)>0:\n# pattern = pattern[0]\n# srcresult = [(s.upper().split(pattern)[-1]).strip().split()[0] for s in srcresult]\n# for puncs in [\":\",\";\",'\"',\"'\",\",\",\")\",\"]\",\"}\",\"(\",\"[\",\"{\"]:\n# srcresult = [i.replace(puncs, '') for i in srcresult if len(i)>2]\n# return ' '.join(srcresult)\n\n\"\"\" Get Issuer Exclusivity \"\"\"\ndef get_issuer_exclusivity(terms):\n \"\"\"Get issuer exclusivity from terms\n :param terms: text\n :return: issuer\n \"\"\"\n dictkeys = {'visa':'visa', 'master':'mastercard|master card', 'amex':'americanexpress|american express', 'unionpay':'unionpay|union pay'}\n issuer_count = {'visa':0, 'master':0,'amex':0,'unionpay':0}\n for key,value in dictkeys.items():\n issuer_count[key] = len(re.findall(value, terms.lower()))\n if max(issuer_count.values())==0:\n return 'all'\n else:\n return max(issuer_count, key=issuer_count.get)\n\ndef get_image(img_url, img_set, dir_path):\n \"\"\"Get image from url and save to a folder\n :param img_url: url\n :param img_set: set of images\n :param dir_path: path to folder\n :return: part to image\n \"\"\"\n if img_url != 'https://www.ocbc.com/assets/images/Cards_Promotions_Visuals/':\n img_name = img_url.split('/')[-1]\n if not re.search(r'\\.(jpg|jpeg|png)$', img_name):\n img_name = img_name.split(\"\\\\\")[0]\n if not re.search(r'\\.(jpg|jpeg|png)$', img_name):\n img_name = img_name.split('?')[0]\n img_path = dir_path+ img_name\n if img_name not in img_set:\n# img_res = requests.get(img_url)\n# with open(img_path, 'wb') as handle:\n# handle.write(img_res.content)\n \n img_set.add(img_name) \n return img_path\n else:\n return None\n \n\"\"\" Get std date time format \"\"\"\ndef GetStdDateTime(time):\n \"\"\"Get std date time format fromraw data\n :param time: input date time of raw data\n :param bank_name: name of the bank, since each bank has a different format\n :return: std_date format\n \"\"\"\n try:\n time = datetime.strptime(str(time),'%Y-%m-%d %H:%M:%S').date()\n except:\n try:\n time = datetime.strptime(str(time),'%m/%d/%Y').date()\n except:\n try:\n time = datetime.strptime(str(time),'%d-%B-%y').date()\n except:\n try:\n time = datetime.strptime(str(time),'%d-%b-%y').date()\n except:\n try:\n time = datetime.strptime(str(time),'%d/%m/%y').date()\n except:\n try:\n time = datetime.strptime(str(time),'%d/%m/%y').date()\n except:\n try:\n time = datetime.strptime(str(time).replace(',',''), '%B %d %Y').date()\n except:\n try:\n time = datetime.strptime(str(time).replace(',',''), '%d %b %Y').date()\n except:\n try:\n time = datetime.strptime(str(time).replace(',',''), '%d %b %y').date()\n except:\n try:\n time = datetime.strptime(str(time).replace(',',''), '%d %B %Y').date()\n except:\n time = '' \n return str(time)\n\n\"\"\" Make promotion_analytic column \"\"\"\ndef promo_caption_analysis(promotion_caption):\n promo_caption_analysis = str(promotion_caption).replace(r\"1 for 1\", \"1-for-1\")\n promo_caption_analysis = promo_caption_analysis.replace(r\"1-For-1\", \"1-for-1\")\n promo_caption_analysis = promo_caption_analysis.replace(r\"Buy 1 get 1 free\", \"1-for-1\")\n \n # Deal with SGD\n match = re.search('(\\d+)SGD', promo_caption_analysis)\n if match:\n promo_caption_analysis = re.sub(str(match.group(1)) + \"SGD\", \"S$\" + str(match.group(1)), promo_caption_analysis) 
\n promo_caption_analysis = promo_caption_analysis.replace(\"SGD\", \"S$\")\n \n # Deal with USD to SGD\n# USD_to_SGD = 1.37\n# match = re.search('USD(\\d+)|USD(\\d+) ', promo_caption_analysis)\n# if match:\n# promo_caption_analysis = re.sub(r\"USD \" + str(match.group(1)), \"S$\" + str(int(int(match.group(1)) * USD_to_SGD)), promo_caption_analysis)\n# promo_caption_analysis = re.sub(r\"USD\" + str(match.group(1)), \"S$\" + str(int(int(match.group(1)) * USD_to_SGD)), promo_caption_analysis)\n \n# # Deal with AED to SGD\n# AED_to_SGD = 0.31\n# match = re.search('AED(\\d+)|AED(\\d+) ', promo_caption_analysis)\n# if match:\n# promo_caption_analysis = re.sub(r\"AED \" + str(match.group(1)), \"S$\" + str(int(match.group(1)) * AED_to_SGD), promo_caption_analysis)\n# promo_caption_analysis = re.sub(r\"AED\" + str(match.group(1)), \"S$\" + str(int(match.group(1)) * AED_to_SGD), promo_caption_analysis)\n \n# # Deal with HKD to SGD\n# HKD_to_SGD = 0.18\n# match = re.search('HKD(\\d+)|HKD(\\d+) ', promo_caption_analysis)\n# if match:\n# promo_caption_analysis = re.sub(r\"HKD \" + str(match.group(1)), \"S$\" + str(int(match.group(1)) * HKD_to_SGD), promo_caption_analysis)\n# promo_caption_analysis = re.sub(r\"HKD\" + str(match.group(1)), \"S$\" + str(int(match.group(1)) * HKD_to_SGD), promo_caption_analysis)\n \n # Deal with JPY to SGD\n # Note: some special: JPY X|Y? What does this mean? XY?\n # e.g., ocbc_v4.csv 44 Valid from now till 31 October 2019 Enjoy 5% off + 8% Tax Free with minimum purchase of JPY 5|264 (tax exclusive)\n # Note: Valid for OCBC Visa Cardholders. Shop now:https://www.biccamera.co.jp.e.lj.hp.transer.com/bicgroup/index.html\n# JPY_to_SGD = 0.013\n# match = re.search('JPY(\\d+)|JPY(\\d+) | JPY (\\d+)', promo_caption_analysis)\n# if match:\n# print(match)\n# promo_caption_analysis = re.sub(r\"JPY \" + str(match.group(1)), \"S$\" + str(int(match.group(1)) * JPY_to_SGD), promo_caption_analysis)\n# promo_caption_analysis = re.sub(r\"JPY\" + str(match.group(1)), \"S$\" + str(int(match.group(1)) * JPY_to_SGD), promo_caption_analysis)\n# promo_caption_analysis.replace(\"1,000 JPY\", \"S$\" + str(int(1000 * JPY_to_SGD)))\n \n # Deal with dines\n ## pattern 1: 1 dines free with every x paying adults\n match_1 = re.search('(\\d+) dines free with', promo_caption_analysis)\n match_2 = re.search('(\\d+) paying guests', promo_caption_analysis) \n if match_1 and match_2:\n promo_caption_analysis = re.sub(str(match_1.group(1)) + ' dines free with ' + str(match_2.group(1)) \n + ' paying guests', \" pay \" + str(match_2.group(1)) + \" dine \" \n + str(int(match_1.group(1)) + int(match_2.group(1))), promo_caption_analysis)\n \n ## patern 2: 1 dines free with x paying guests\n match_1 = re.search('(\\d+) dines free with ', promo_caption_analysis)\n match_2 = re.search('(\\d+) paying adults ', promo_caption_analysis)\n \n if match_1 and match_2: \n promo_caption_analysis = re.sub(str(match_1.group(1)) + ' dines free with every ' + str(match_2.group(1))\n + ' paying adults', \" pay \" + str(match_2.group(1)) + \" dine \" \n + str(int(match_1.group(1)) + int(match_2.group(1))), promo_caption_analysis)\n \n # Deal with with no min. spend / min. to minimum / regular-priced to regular price/ minimum\n promo_caption_analysis = promo_caption_analysis.replace(\"with no min. 
spend\", \"\")\n    promo_caption_analysis = promo_caption_analysis.replace(\"min.\", \"minimum\")\n    promo_caption_analysis = promo_caption_analysis.replace(\"with min $\", \"with minimum S$\")\n    promo_caption_analysis = promo_caption_analysis.replace(\"with a min spend of \", \"with a minimum spend of \")\n    promo_caption_analysis = promo_caption_analysis.replace(\"regular-priced\", \"regular price\")\n    promo_caption_analysis = promo_caption_analysis.replace(\"late check-...\", \"late check-out\")\n    promo_caption_analysis = promo_caption_analysis.replace(\"...\", \"\")\n    \n    # Deal with every spend of S$X to every S$X spend\n    # note: the dollar sign must not be escaped in the replacement template, or a literal backslash leaks into the output\n    match = re.search('every spend of S\\$(\\d+)', promo_caption_analysis)\n    if match:\n        promo_caption_analysis = re.sub(r' every spend of S\\$' + str(match.group(1)), ' every S$' + str(match.group(1)) + ' spend', promo_caption_analysis)\n    # Deal with above S$X to minimum S$X\n    match = re.search('above S\\$(\\d+)', promo_caption_analysis)\n    if match:\n        promo_caption_analysis = re.sub(r'above S\\$' + str(match.group(1)), 'minimum S$' + str(match.group(1)), promo_caption_analysis)\n#     # Deal with with min $500 spend\n#     match = re.search('with min \\$(\\d+) spend', promo_caption_analysis)\n#     if match:\n#         promo_caption_analysis = re.sub(r'with min \\$' + str(match.group(1)) + ' spend', 'minimum S$' + str(match.group(1)), promo_caption_analysis)\n    \n    return promo_caption_analysis" }, { "alpha_fraction": 0.5622613430023193, "alphanum_fraction": 0.5701718330383301, "avg_line_length": 44.26234436035156, "blob_id": "595c48090536b33dd9c0383796b01bce36bb8019", "content_id": "7de33fe5eebac09575c4d19436d6567c564013fa", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 14664, "license_type": "no_license", "max_line_length": 208, "num_lines": 324, "path": "/google_api_caller.py", "repo_name": "dylan2805/SCB_V1", "src_encoding": "UTF-8", "text": "import re\nimport os\nimport time\nimport html\nimport pickle\nimport requests\nimport numpy as np\nimport pandas as pd\nfrom tqdm import tqdm\nfrom difflib import SequenceMatcher\nfrom geopy.geocoders import Nominatim\nfrom sklearn.feature_extraction.text import CountVectorizer\n\ndef pc_to_region(pc, df_region):\n    \"\"\" Map the postal code to the corresponding region by using the region table\n    :param pc: postal code\n    :param df_region: dataframe of region\n    :return: df with columns being ['PostalSector', 'PostalDistrict', 'PostalDistrictName', 'GeneralLocation']\n             if no postal given or no match, the function returns the same df but filled with '' \n    \"\"\"\n    if (pc == '') or (len(pc) != 6):\n        return pd.DataFrame([['']*df_region.shape[1]], columns = df_region.columns).to_dict('records')[0]\n    else:\n        try:\n            return df_region.loc[df_region['postal_sector'] == int(pc[:2])].to_dict('records')[0]\n        except:\n            return pd.DataFrame([['']*df_region.shape[1]], columns = df_region.columns).to_dict('records')[0]\n\ndef similar(a, b):\n    \"\"\" Check the similarity of two inputs\n    :param a: first input\n    :param b: second input\n    :return: similarity of 2 inputs \n    \"\"\"\n    return SequenceMatcher(None, a, b).ratio()\n\ndef get_address(phone, name, gtypes_list, extract_status, df_region):\n    \"\"\" Get the address by using Google API\n        Note: Need to store the api key in a different path/folder/files\n    :param phone: phone number [not using here!]\n    :param name: name of merchant\n    :param gtypes_list: list of google type [to check the correct of results]\n    :param extract_status: status of extraction\n    :param df_region: 
region table in dataframe\n :return: \n \"\"\"\n api_key = os.environ['gkey']\n flag = 0\n res_phone = []\n res_name = []\n tags = re.compile('<.*?>')\n \n ### Retrieving all stores across island ###\n if isinstance(name, str) and extract_status == 'all':\n formated_name = name.replace(' ','%20')\n formated_name = re.sub(tags, '', formated_name)\n formated_name = html.unescape(formated_name)\n formated_name = formated_name.lower()\n try:\n res= requests.get('https://maps.googleapis.com/maps/api/place/nearbysearch/json?location=1.290849,103.844673&radius=20067.09&keyword='+ formated_name +'&key=' + api_key).json()\n except:\n try:\n res= requests.get('https://maps.googleapis.com/maps/api/place/nearbysearch/json?location=1.290849,103.844673&radius=20067.09&keyword='+ formated_name +'%20singapore' +'&key=' + api_key).json()\n except:\n res = {'results':[]}\n pass\n res_name = res['results']\n \n # Looping on all pages\n while 'next_page_token' in res.keys():\n res = requests.get('https://maps.googleapis.com/maps/api/place/nearbysearch/json?pagetoken=' + res['next_page_token'] + \"&key=\"+api_key).json()\n res_name += res['results']\n \n # I kept the res_phone, but here it's empty so the following line doesn't do anything\n combined_res = res_phone + res_name\n # I get rid of duplicates\n combined_res = [i for n, i in enumerate(combined_res) if i not in combined_res[n + 1:]]\n\n ### Scoring loop to know if we retrieved the right stores\n if isinstance(name, str):\n# del_list = []\n for i in range(len(combined_res)):\n combined_res[i]['true_store'] = 0\n combined_res[i]['hamming_score'] = similar(name, combined_res[i]['name'])\n if combined_res[i]['hamming_score'] < 0.5:\n combined_res[i]['flag'] = 1\n elif combined_res[i]['hamming_score'] <0.1:\n# del_list += [i]\n combined_res[i]['true_store'] = 1\n else:\n combined_res[i]['flag'] = 0\n \n if combined_res[i]['types']==[]:\n combined_res[i]['types'] = 1\n elif combined_res[i]['types'][0] not in gtypes_list:\n combined_res[i]['true_store'] = 2\n combined_res[i]['flag'] = max(combined_res[i]['flag'], combined_res[i]['true_store'])\n ### Postcode ###\n L = []\n flag_pc = 0\n for el in combined_res:\n postcode = get_postcode(el['geometry']['location']['lat'], el['geometry']['location']['lng'])\n if postcode == '':\n flag_pc = 2\n region = {\n 'postal_sector' : '',\n 'postal_district': '',\n 'postal_district_name': '',\n 'general_location': ''\n }\n else:\n region = pc_to_region(postcode, df_region)\n el['postal_code']=postcode\n el['postal_sector'] = region['postal_sector']\n el['postal_district'] = region['postal_district']\n el['postal_district_name'] = region['postal_district_name']\n el['general_location'] = region['general_location']\n el['flag'] = max(flag_pc, el['flag'])\n \n return combined_res\n\ndef get_postcode(lat, lng, postcode = ''):\n \"\"\" Get the postal code\n Note: It seems geopy blocks the connection after a certain use, \n in case it does then use the following code to change the user_agent name.\n :param lat: latitude\n :param lng: longtiture\n :param postcode : None\n :return: postal code\n \"\"\"\n if isinstance(postcode, str) and len(postcode) == 6:\n return postcode\n elif lat == np.nan or lng == np.nan or lat == '' or lng == '':\n return ''\n else:\n pass\n i = 0\n flag_ok = True\n while flag_ok:\n try:\n geolocator = Nominatim(user_agent=\"my-application\" + str(i))\n location = geolocator.reverse(str(lat) + ',' + str(lng))\n flag_ok = False\n except:\n if i > 500:\n flag_ok = False\n else:\n pass\n try:\n if 
len(str(location.raw['address']['postcode']))==6:\n return location.raw['address']['postcode']\n elif len(str(location.raw['address']['postcode'])) == 5:\n return '0' + str(location.raw['address']['postcode'])\n else:\n return 'Online'\n except:\n return 'Online'\n\n# def google_completion(name, extracted_address = '', gtypes_list=[], merchant_dict):\ndef google_completion(name, extracted_address, gtypes_list, merchant_dict, df_region):\n '''\n This fonction retrieves the data to complete the extracted deal. \n /!\\ For now merchant_dict and df_region are dealt as global parameters\n Input: - name, which will be used for the query, str expected\n - address, as optional parameter which will then be matched to the corresponding address from the google api\n \n Output: DataFrame with the columns which are to be matched with the deal\n '''\n flag = 0\n \n ### Output dataframe's columns ###\n columns = ['latitude', 'longitude', 'merchant_id', 'store_id', 'google_name',\n 'corr_name', 'corr_address', 'google_address', 'google_flag']\n \n if name not in merchant_dict.keys():\n address = get_address(None, name, gtypes_list, 'all', df_region)\n merchant_dict[name] = address\n for i,el in enumerate(merchant_dict[name]):\n el['merchant_id'] = len(merchant_dict.keys())\n el['store_id'] = i+1\n pickle.dump(merchant_dict, open('merchant_dict_clean.pickle', 'wb'))\n\n address = merchant_dict[name]\n ### Getting rid of addresss with flag >1\n address_tmp = []\n for el in address:\n if el['flag'] <=1:\n address_tmp += [el]\n address = address_tmp\n\n if address == []: \n df_empty = pd.DataFrame([['']*len(columns)], columns = columns)\n df_empty.loc[0,'google_flag'] = 2\n return df_empty \n else:\n df_address_tmp = pd.DataFrame(columns = columns) # + ['address_score'])\n for el in address:\n df_tmp = {}\n \n ### Filling google API results ###\n df_tmp['latitude'] = el['geometry']['location']['lat']\n df_tmp['longitude'] = el['geometry']['location']['lng']\n df_tmp['merchant_id'] = el['merchant_id']\n df_tmp['store_id'] = el['store_id']\n df_tmp['google_name'] = el['name']\n df_tmp['corr_name'] = el['hamming_score']\n df_tmp['google_address'] = el['vicinity']\n \n ### Filling region details ###\n df_tmp['postal_code'] = el['postal_code']\n df_tmp['postal_sector'] = el['postal_sector']\n df_tmp['postal_district'] = el['postal_district']\n df_tmp['postal_district_name'] = el['postal_district_name']\n df_tmp['general_location'] = el['general_location']\n df_tmp['google_flag'] = max(flag, el['flag'])\n \n ### Appending the current store to the list of stores\n df_address_tmp = df_address_tmp.append(pd.DataFrame(df_tmp, index=[0]), sort = True)\n \n df_address_tmp.reset_index(inplace = True, drop = True)\n \n ### Checking correlation with given address ###\n if isinstance(extracted_address, str) and extracted_address != '':\n df_address_tmp['corr_address'] = df_address_tmp['google_address'].apply(lambda x: similar(x, extracted_address))\n max_corr = max(df_address_tmp['corr_address'])\n df_address_tmp['true_address'] = np.where(df_address_tmp['corr_address']==max_corr, True,False)\n \n ### Flaging if low score ###\n if max_corr < 0.5:\n if len(df_address_tmp.loc[df_address_tmp['true_address']==True, 'google_flag'].values)==1:\n df_address_tmp.loc[df_address_tmp['true_address']==True, 'google_flag'] = max(df_address_tmp.loc[df_address_tmp['true_address']==True, 'google_flag'].values, 1)\n else:\n df_address_tmp.loc[df_address_tmp['true_address']==True, 'google_flag'] = 
max(df_address_tmp.loc[df_address_tmp['true_address']==True, 'google_flag'].values[0], 1)\n else:\n df_address_tmp['corr_address'] = [0]*df_address_tmp.shape[0]\n df_address_tmp['true_address'] = [False]*df_address_tmp.shape[0]\n df_address_tmp['google_flag'] = np.where(df_address_tmp['google_flag'], np.nan, 0)\n return df_address_tmp\n\ndef completion_pipeline(df_raw, bank_name, filename, merchant_dict, df_region):\n df_raw.reset_index(inplace = True, drop = True) #reset index in case since we'll be looping using iloc\n \n \n columns = ['latitude', 'longitude', 'merchant_id', 'store_id', 'google_name',\n 'corr_name', 'corr_address', 'true_address', 'postal_sector', 'postal_district', 'postal_district_name',\n 'general_location', 'google_address']\n df_final = pd.DataFrame(columns = set(df_raw.columns.tolist() + columns))\n \n ### Creating deal_id ###\n country = 'sg'\n time.sleep(1)\n timestamp = int(time.time())\n seq = 0\n \n for i in tqdm(range(df_raw.shape[0])):\n deal = df_raw.iloc[i].to_frame().T\n \n ### Mapping bank categories to standard categories ###\n # start by retrieving bank's name\n if re.findall('_', deal['card_name'].item())!=[]:\n bank_name = deal['card_name'].item().split('_')[0]\n else:\n bank_name = 'dbs'\n \n ### Case if deal is an online deal ### \n if deal.is_online.item() == True:\n deal['latitude'] = ''\n deal['longitude'] = ''\n deal['store_id'] = ''\n deal['merchant_id'] = ''\n deal['google_name'] = deal['merchant_name'].item()\n deal['corr_name'] = ''\n deal['corr_address'] = ''\n deal['google_address'] = ''\n deal['true_address'] = True\n deal['postal_code'] = ''\n deal = deal.merge(deal.postal_code.apply(lambda s: pd.Series(pc_to_region(s,df_region))), left_index=True, right_index=True)\n\n ### Case where we already have the store's location ###\n elif ('latitude' in deal.columns and deal.latitude.isnull().values.any() == False):\n deal['store_id'] = ''\n deal['merchant_id'] = ''\n deal['google_name'] = deal['merchant_name'].item()\n deal['corr_name'] = ''\n deal['corr_address'] = ''\n deal['google_address'] = deal['address'].item()\n deal['true_address'] = True\n \n ### Adding postcode ###\n deal['postal_code'] = deal[['latitude', 'longitude', 'postal_code']].apply(lambda x: get_postcode(x['latitude'], x['longitude'], x['postal_code']), axis = 1)\n \n ### Adding region information ###\n deal = deal.merge(deal.postal_code.apply(lambda s: pd.Series(pc_to_region(s,df_region))), left_index=True, right_index=True)\n \n ### Otherwise we use google API to complete the deal's information ###\n else:\n if 'latitude' in deal.columns:\n deal.drop('latitude', axis = 1, inplace = True)\n deal.drop('longitude', axis = 1, inplace = True)\n if 'postal_code' in deal.columns:\n deal.drop('postal_code', axis = 1, inplace = True)\n df_completion = google_completion(deal.iloc[0]['merchant_name'], deal.iloc[0]['address'], deal.iloc[0]['google_types'], merchant_dict, df_region)\n deal = pd.concat([pd.DataFrame([deal.values[0] for j in range(df_completion.shape[0])], columns = deal.columns),df_completion], axis = 1)\n deal['flag'] = deal[['flag', 'google_flag']].max(axis=1) # merging flags\n deal.drop('google_flag', axis = 1, inplace = True)\n deal.reset_index(drop = True, inplace = True)\n\n ### Appending to create the final dataframe ###\n df_final = df_final.append(deal, sort = True)\n df_final.reset_index(drop = True, inplace = True)\n ### Joining on card key\n card_table = pd.read_csv('card_table.csv')\n card_table = card_table.set_index('deal_handler')\n \n if 'card_name' 
in df_final.columns:\n df_final = df_final.rename({'card_name':'deal_handler'}, axis = 1)\n \n df_final = df_final.join(card_table, on ='deal_handler')\n \n ### Adding deal_id, uniaue per rows ###\n ### Idea: combine (merchant name, franchaise_ID, promotion, valid date, promo code -> unique ID)?\n df_final['deal_id'] = ['_'.join([country,bank_name,str(timestamp),str(i)]) for i in range(df_final.shape[0])]\n \n df_final.reset_index(inplace = True, drop = True)\n return df_final" } ]
6
elmaus/Memory_Game
https://github.com/elmaus/Memory_Game
188898830ac2b63c07d2130f851e28c74c5222cc
ce519d94408475ba025b2b63c8aa25551f7437e9
6d6a619c79996ce00d5ec45d2007bc045cc0e0e0
refs/heads/main
2023-01-09T21:14:55.499288
2020-11-02T13:09:02
2020-11-02T13:09:02
309,372,997
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5850732922554016, "alphanum_fraction": 0.6063971519470215, "avg_line_length": 15.783581733703613, "blob_id": "895600f5c84870c0da57d273a5a3bb90805597e8", "content_id": "54403e4aa25945fb0c8316e5a2cc63b468e525e9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2251, "license_type": "no_license", "max_line_length": 68, "num_lines": 134, "path": "/main.py", "repo_name": "elmaus/Memory_Game", "src_encoding": "UTF-8", "text": "\n# environment: pygameenv\n\nimport pygame as pg\nimport time\nimport random\n\nWIDTH = 600\nHEIGHT = 600\n\nBLACK = (0, 0, 0)\nRED = (255, 0, 0)\n\nFPS = 30\n\npg.init()\npg.mixer.init()\n\nscreen = pg.display.set_mode((WIDTH, HEIGHT))\t\npg.display.set_caption('Memory Game')\nclock = pg.time.Clock()\npg.font.init()\n\nBLANK = pg.image.load('images/blank.png').convert_alpha()\nBG = pg.image.load('images/bg.png').convert_alpha()\n \nimages = []\t\npair = []\n\n\n\nclass Boxes(pg.sprite.Sprite):\n\tdef __init__(self, **kwargs):\n\t\tpg.sprite.Sprite.__init__(self)\n\n\t\tself.img = kwargs['img']\n\t\tself.name = kwargs['name']\n\t\tself.state = 'close'\n\t\tself.image = BLANK\n\t\tself.rect = self.image.get_rect()\n\t\tself.rect.center = (kwargs['cx'], kwargs['cy'])\n\n\tdef show(self):\n\t\tglobal compare, piece_one\n\n\t\tself.image = self.img\n\t\tif len(pair) < 2:\n\t\t\tpair.append(self)\n\t\t\n\n\tdef check(self):\n\t\tglobal pair\n\n\t\tif len(pair) == 2:\n\t\t\tif pair[0].name != pair[1].name:\n\t\t\t\ttime.sleep(.50)\n\t\t\t\tpair[0].image = BLANK\n\t\t\t\tpair[1].image = BLANK\n\t\t\telse:\n\t\t\t\tpair[0].state = 'open'\n\t\t\t\tpair[1].state = 'open'\n\n\t\t\tpair = []\n\t\t\n\t\t\n\t\t\n\t\t\nfruit = None\nfruits = None\n\n\ndef init_tiles():\n\tglobal fruit, fruits\n\tfruit = pg.sprite.Group()\n\tfruits = []\n\n\tfor i in range(2):\n\t\tfor j in range(10):\n\t\t\tindex = j + 1\n\t\t\timages.append(\n\t\t\t\t{'name': 'm{}'.format(index),\n\t\t\t\t'image': pg.image.load('images/m{}.png'.format(index))\n\t\t\t\t}\n\t\t\t)\n\n\tcx = 92\n\tfor i in range(5):\n\t\tcy = 100\n\t\tfor j in range(4):\n\t\t\titem = random.choice(images)\n\t\t\tlemon = Boxes(cx=cx, cy=cy, img=item['image'], name=item['name'])\n\t\t\timages.remove(item)\n\t\t\tfruit.add(lemon)\n\t\t\tfruits.append(lemon)\n\t\t\tcy += 104\n\t\tcx += 104\n\ninit_tiles()\n\nscreen.blit(BG, (0, 0))\n\nrunning = True\n\nwhile running:\n\tclock.tick(FPS)\n\t# screen.fill(BLACK)\n\tpg.display.flip()\n\tfruit.draw(screen)\n\n\tm = pg.mouse.get_pos()\n\n\tfor event in pg.event.get():\n\t\tif event.type == pg.QUIT:\n\t\t\trunning = False\n\n\t\tif event.type == pg.MOUSEBUTTONDOWN:\n\t\t\tfor i in fruits:\n\t\t\t\tif i.rect.collidepoint(m):\n\t\t\t\t\ti.show()\n\t\tif event.type == pg.MOUSEBUTTONUP:\n\t\t\tfor i in fruits:\n\t\t\t\tif i.rect.collidepoint(m):\n\t\t\t\t\ti.check()\n\n\tis_all_open\t= 0\t\t\n\tfor f in fruits:\n\t\tif f.state == 'close':\n\t\t\tis_all_open += 1\n\n\tif is_all_open == 0:\n\t\tfruit = pg.sprite.Group()\n\n\t\t\t\t\t\n\t\t\t\t\t\npg.quit()\n\n" } ]
1
bneal7/Temperature-Converter
https://github.com/bneal7/Temperature-Converter
155bc9ab929a4f8f1666ce701fd0f1ed67788a1a
bf28f7c75201cc8878ebbb6f3feed7db4ce9f015
c783b93ca19205e7dbd8c46531493ce7e9f57a6b
refs/heads/master
2021-06-13T21:14:52.556301
2020-04-10T01:17:00
2020-04-10T01:17:00
254,444,815
0
0
null
2020-04-09T18:09:11
2020-04-09T18:09:13
2020-04-10T01:17:01
null
[ { "alpha_fraction": 0.5482573509216309, "alphanum_fraction": 0.5509383082389832, "avg_line_length": 35.09677505493164, "blob_id": "06c5cffdf583494406f19d924875ffc856cc22cb", "content_id": "79309da93c877cf57cae49c6284e52c9df118160", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2238, "license_type": "no_license", "max_line_length": 86, "num_lines": 62, "path": "/main.py", "repo_name": "bneal7/Temperature-Converter", "src_encoding": "UTF-8", "text": "##BROOKLYN NEAL\n\n##FAHRENHEIT TO CELSIUS CONVERTER\n\n#Your code goes here\nimport tkinter\nimport tkinter.messagebox as box \n\nclass FahrenConverterGUI:\n def __init__(self):\n # Create the main window\n self.main_window = tkinter.Tk()\n\n # Create two frames to group widgets\n self.top_frame = tkinter.Frame(self.main_window)\n self.bottom_frame = tkinter.Frame(self.main_window)\n\n #Create widgets for the top frame\n self.prompt_label = tkinter.Label(self.top_frame,\n text = 'Enter a temperature in fahrenheit:')\n self.ftemp_entry = tkinter.Entry(self.top_frame,\n width = 10)\n #Pack the top frame's widgets\n self.prompt_label.pack(side = 'left')\n self.ftemp_entry.pack(side = 'left')\n \n #Create widgets for the bottom frame\n self.calc_button = tkinter.Button(self.bottom_frame,\n text = 'Calculate',\n command = self.convert)\n self.quit_button = tkinter.Button(self.bottom_frame,\n text='Quit',\n command=self.main_window.destroy)\n \n #Pack the buttons\n self.calc_button.pack(side = 'left')\n self.quit_button.pack(side = 'left')\n \n # Pack the frames\n self.top_frame.pack()\n self.bottom_frame.pack()\n\n #Enter the tkinter main loop\n tkinter. mainloop()\n\n #The convert method is a callback function for the Calculate button\n def convert(self):\n #Get the value entered by the user into the ftemp_entry widget\n ftemp = float(self.ftemp_entry.get())\n\n #Convert fahrenheit to celsius\n celsius = (int(ftemp) - 32) * (5/9)\n\n #Display the results in an info dialog box\n tkinter.messagebox.showinfo('Results',\n str(ftemp) +\n ' temperature in fahrenhet is equal to ' +\n str(celsius) + ' temperature in celsius.')\n\n\n#Create an instance of the TemperatureConverterGUI class\ntemp_converter = FahrenConverterGUI()\n" } ]
1
fansi-sifan/LPTHW
https://github.com/fansi-sifan/LPTHW
8f281f7ee95bf1a3e6d246d8f02a8b6fe0b673f7
425c7643202da68433f654f26437bd7c4619399a
bfa8e8775a91a725a1f059dde5b23058f0358745
refs/heads/master
2021-01-01T19:08:17.466780
2017-09-18T05:58:54
2017-09-18T05:58:54
98,514,835
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.4680170714855194, "alphanum_fraction": 0.5213219523429871, "avg_line_length": 23.66666603088379, "blob_id": "667ea4b7a7f2552ecfcdcccee9a5db99d607453c", "content_id": "6aa35fa6915eff4467dc348fa4af209f9d21c567", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1054, "license_type": "no_license", "max_line_length": 62, "num_lines": 36, "path": "/St.Petersburg Paradox/St.Petersburg Paradox.py", "repo_name": "fansi-sifan/LPTHW", "src_encoding": "GB18030", "text": "from graphics import *\r\nfrom random import random\r\nimport math\r\n\r\ndef main():\r\n times=input(\"How many times to play? \")\r\n win=GraphWin(\"St. Petersburg paradox\",1400,500)\r\n win.setCoords(0.0, 0.0, times, 40.0)\r\n Text(Point(20.0/1500*times,20),\"20\").draw(win)#ๆ”ถ็›ŠๆœŸๆœ›็š„ๅ‚็…ง้‡\r\n Text(Point(times*0.95,1),times).draw(win)\r\n win.setBackground(\"white\")\r\n \r\n total=0\r\n for i in range(times+1):\r\n n=0\r\n money=0\r\n e=0\r\n \r\n while random()>0.5:\r\n n=n+1\r\n money=money+2**(n)#ๅ‚จๅญ˜ไธ€ๆฌก่ฏ•้ชŒ็š„ๆ€ปๆ”ถ็›Š\r\n \r\n total=total+money#ๅ‚จๅญ˜ๆ‰€ๆœ‰ๆจกๆ‹Ÿ่ฏ•้ชŒ็š„ๆ€ปๆ”ถ็›Š\r\n e=float(total)/float(i+1)#่ฎก็ฎ—ๅ‡บๆ€ปไฝ“ๅนณๅ‡ๆ”ถ็›Š็š„ๆ•ฐๅญฆๆœŸๆœ›\r\n\r\n P1=Point(i+1,math.log(i+1)/math.log(2))#ๆ็‚น็”ปๅ‚็…งๆ›ฒ็บฟlnx/ln2\r\n P1.setFill(\"red\")\r\n P1.draw(win)\r\n \r\n P2=Point(i+1,e)#ๆ็‚น็”ปๆจกๆ‹Ÿๆ›ฒ็บฟ\r\n P2.setFill(\"blue\")\r\n P2.draw(win)\r\n \r\n win.getMouse()\r\n win.close()\r\nmain()\r\n\r\n \r\n \r\n" }, { "alpha_fraction": 0.4846729040145874, "alphanum_fraction": 0.5237383246421814, "avg_line_length": 27.888267517089844, "blob_id": "aeee380996f51c81555f4432c871fcb127db8040", "content_id": "b1998da06febc272de638f09c1b1e951819649f2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5652, "license_type": "no_license", "max_line_length": 81, "num_lines": 179, "path": "/Super Memory/Super Memory.py", "repo_name": "fansi-sifan/LPTHW", "src_encoding": "GB18030", "text": "from graphics import*\r\nfrom button import*\r\nfrom string import*\r\nfrom random import*\r\nfrom operator import itemgetter\r\nimport re\r\n\r\ndef addNewWords():\r\n#็”จๆˆทๆ‰‹ๅŠจๆทปๅŠ ๅ•่ฏๅ’Œๅฏนๅบ”ไธญๆ–‡่งฃ้‡Š\r\n print\r\n \r\n wordlist=open(\"wordlist.txt\",'a')\r\n\r\n x=raw_input(\"Enter the new word: \")\r\n y=raw_input(\"Enter the Chinese meaning of the word: \")\r\n \r\n while x!=\"\":\r\n english='%-20s' %x\r\n chinese='%-20s' %y\r\n wordlist.write(english+ ' ' +chinese+'0'+'\\n')\r\n \r\n x=raw_input(\"Enter another new word: \")\r\n if x!=\"\":\r\n y=raw_input(\"Enter the Chinese meaning of the word: \")\r\n wordlist.close\r\n print\"the result has been saved to wordlist.txt\"\r\n \r\ndef Test():\r\n#็จ‹ๅบ็ป™ๅ‡บไธญๆ–‡๏ผŒ็”จๆˆท่พ“ๅ…ฅ่‹ฑๆ–‡ใ€‚ๆญฃ็กฎไฟฎๆ”นfaultๅ€ผ๏ผŒ้‡ๆ–ฐไฟๅญ˜ไธบๆ–‡ไปถwordlist\r\n print\r\n \r\n wordlist=open(\"wordlist.txt\",'r')\r\n lines=wordlist.readlines()\r\n testNum=input(\"How many words do you want to test? 
\")\r\n while testNum>len(lines):\r\n testNum=input(\"Please enter a number smaller than \"+str(len(lines))+\": \")\r\n \r\n#ๆฃ€ๆŸฅ็”จๆˆท่พ“ๅ…ฅๆ˜ฏๅฆๅˆๆณ•๏ผŒ้˜ฒๆญขๆŠฅ้”™\r\n \r\n for i in range(testNum):\r\n line= lines[randrange(0,testNum)]\r\n#้šๆœบๆŠฝๅ–ๅ•่ฏ\r\n \r\n fault=eval(line[41])\r\n question=line[21:40]\r\n question=str.replace(question,' ','')\r\n \r\n \r\n keys=line[0:20]\r\n keys=str.replace(keys,' ','')\r\n keys=str.lower(keys)\r\n \r\n ans=raw_input(\"Please enter the translation of \"+question+\": \" )\r\n ans=str.lower(ans)\r\n \r\n if ans==keys:\r\n print \"correct!\\n\"\r\n if fault!=0:\r\n fault=fault-1\r\n else:\r\n print \"Wrong! The correct answer is: \",keys,\"\\n\"\r\n fault=fault+1\r\n \r\n line2=line.replace(line[41],str(fault))\r\n wordlist=open(\"wordlist.txt\",'r')\r\n final=re.sub(line,line2,wordlist.read())\r\n#ไฝฟ็”จๆญฃๅˆ™่ฟ็ฎ—๏ผŒ็›ดๆŽฅๅœจๆบๆ–‡ไปถไธญไฟฎๆ”น๏ผŒๅพ—ๅˆฐๆ›ดๆ–ฐfaultๅ€ผๅŽ็š„ๆœ€็ปˆๆ–‡ไปถ\r\n \r\n wordlist=open('wordlist.txt','w')\r\n wordlist.write(final)\r\n wordlist.close\r\n \r\n rep=raw_input(\"You've finished all the tests!\")\r\n\r\ndef Review():\r\n#็จ‹ๅบๆŒ‰็…งtestไธญ้”™่ฏฏๆฌกๆ•ฐ้™ๅบๆŽ’ๅˆ—๏ผŒไพ›็”จๆˆทๅคไน \r\n def compare(x,y):\r\n if x[41]<y[41]: \r\n return 1\r\n elif x[41]>y[41]:\r\n return -1\r\n else:\r\n return 0\r\n print\r\n \r\n wordlist=open(\"wordlist.txt\",'r')\r\n lines=wordlist.readlines()\r\n for line in lines:\r\n lines.sort(cmp=compare)\r\n print line,\r\n \r\n reply=raw_input(\"\\nDo you want to save the result?\" )\r\n if reply[0]=='y' or reply[0]=='Y':\r\n wordReview=open(\"wordReview.txt\",'w')\r\n print\"the result has been saved to wordReview.txt!\"\r\n for line in lines:\r\n wordReview.write(line)\r\n else:\r\n print\"the result has not been saved!\"\r\n \r\n wordReview.close\r\n\r\ndef FindWords():\r\n#ๅœจ็ป™ๅฎš็š„ๅ››็บง้ข˜ๅบ“ไธญๆ‰พๅ‡บ่ฏๆฑ‡้ƒจๅˆ†็š„้ซ˜้ข‘่ฏ๏ผŒไฟๅญ˜ไธบๆ–ฐๆ–‡ไปถ\r\n infile = open(\"2000-2011.txt\")\r\n lines = infile.readlines()\r\n infile.close()\r\n \r\n dictionary = {}\r\n wordCount = 0\r\n startSearch = False\r\n for line in lines:\r\n if str.find(line, \"Part III Vocabulary (\") >= 0:\r\n startSearch = True\r\n elif str.find(line, \"Part IV\") >= 0:\r\n startSearch = False\r\n #ๅช้œ€่ฆ่ฏๆฑ‡้ƒจๅˆ†\r\n \r\n if startSearch and line != \"\":\r\n if line[0] == 'A' or line[0] == 'B'\\\r\n or line[0] == 'C' or line[0] == 'D'\\\r\n and line[1] == ')':\r\n wordCount += 1\r\n newWord = line[3:-1]\r\n dictionary[newWord] = 1 + dictionary.get(newWord, 0)\r\n\r\n dictionary = sorted(dictionary.iteritems(),\\\r\n key = itemgetter(1), \\\r\n reverse = True)\r\n\r\n outfile = open(\"result_2000_2010.txt\", \"w\")\r\n for key, value in dictionary:\r\n outfile.write('%-20s'%(str(key)) + '%-20s'%(str(value))+ \"\\n\")\r\n outfile.write(\"word count: \" + str(wordCount) + \"\\n\")\r\n outfile.write(\"dictionary length: \" + str(len(dictionary)) + \"\\n\")\r\n outfile.close()\r\n print\"the result has been saved to 'result_2000_2010.txt'!\"\r\n \r\ndef memoryWords():\r\n win=GraphWin(\"Super-Memory\",600,200)\r\n win.setBackground(\"White\")\r\n \r\n banner=Text(Point(300,30),\"Super Memory\")\r\n banner.setSize(20)\r\n banner.setStyle(\"bold\")\r\n banner.draw(win)\r\n \r\n msg=Text(Point(300,80),\"Welcome to Super Memory!\")\r\n msg.setSize(16)\r\n msg.setFill(\"black\")\r\n msg.draw(win)\r\n \r\n bA=Button(win, Point(150,180),120,40,\"Add new words\")\r\n bT=Button(win, Point(300,180),120,40,\"Test\")\r\n bR=Button(win, Point(450,180),120,40,\"Review\")\r\n bF=Button(win, 
Point(300,130),300,40,\"Find most frequent words in CET4\")\r\n bE=Button(win, Point(580,20),30,16,\"Exit\")\r\n bA.activate()\r\n bT.activate()\r\n bR.activate()\r\n bF.activate()\r\n bE.activate()\r\n\r\n while 1:\r\n p=win.getMouse()\r\n #็ญ‰ๅพ…็”จๆˆท้€‰ๆ‹ฉๆ‰ง่กŒ\r\n if 90 <= p.getX() <=210 and 160 <= p.getY() <=200:\r\n addNewWords() \r\n if 240 <= p.getX() <=360 and 160 <= p.getY() <=200:\r\n Test()\r\n if 390 <= p.getX() <=510 and 160 <= p.getY() <=200:\r\n Review()\r\n if 150 <= p.getX() <=450 and 110 <= p.getY() <=150:\r\n FindWords()\r\n if 565 <= p.getX() <=595 and 12 <= p.getY() <=28:\r\n break\r\n \r\n \r\nmemoryWords()\r\n" }, { "alpha_fraction": 0.7368420958518982, "alphanum_fraction": 0.7368420958518982, "avg_line_length": 19, "blob_id": "caf35a575b8dff00f88407739b6f10656fc25083", "content_id": "59d2e49aa868b077323a5bd8df22e24705119164", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 26, "license_type": "no_license", "max_line_length": 19, "num_lines": 1, "path": "/Super Memory/readme.txt", "repo_name": "fansi-sifan/LPTHW", "src_encoding": "MacCentralEurope", "text": "โ€˜ลˆโ€“โ€“Super Memory.py" }, { "alpha_fraction": 0.6131687164306641, "alphanum_fraction": 0.6419752836227417, "avg_line_length": 33.71428680419922, "blob_id": "73c6cd027bcaea3b0a1d74a046d0922a9da94c11", "content_id": "8e1ddb2f9cf1dbea22b1e9bc6297fe84b8f1684f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 243, "license_type": "no_license", "max_line_length": 47, "num_lines": 7, "path": "/lpthw/ex34.py", "repo_name": "fansi-sifan/LPTHW", "src_encoding": "UTF-8", "text": "animals = ['bear', 'tiger', 'penguin', 'zebra']\nbear = animals [0]\n\nprint \"The animal at 1. 
is %s\" % animals[1]\nprint \"The thrid animal is %s\" % animals[2]\nprint \"The first anumal is %s\" % animals[0]\nprint \"The anumal at 3 is %s\" % animals[3]\n" }, { "alpha_fraction": 0.7142857313156128, "alphanum_fraction": 0.7142857313156128, "avg_line_length": 18, "blob_id": "0a789dd8600ac469fea898fb27eaaa463b45d680", "content_id": "752b7d26d1451539cf96b15e9973e6e841bf56a7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 133, "license_type": "no_license", "max_line_length": 47, "num_lines": 7, "path": "/lpthw/ex17b.py", "repo_name": "fansi-sifan/LPTHW", "src_encoding": "UTF-8", "text": "from sys import argv\nfrom os.path import exists\n\nscript, from_file, to_file = argv\n\n\nopen (to_file, 'w').write(open(from_file).read)\n" }, { "alpha_fraction": 0.522799551486969, "alphanum_fraction": 0.5471898317337036, "avg_line_length": 21.325000762939453, "blob_id": "a3834d5dc1efbc762be1929ae2b8d3e02de96ec3", "content_id": "7ef6a76c0f3a82dab0f31ba32150edbdc7f9f216", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1169, "license_type": "no_license", "max_line_length": 84, "num_lines": 40, "path": "/Stochastic Process/Stochastic Process.py", "repo_name": "fansi-sifan/LPTHW", "src_encoding": "GB18030", "text": "import numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\ndef SP():\r\n steps=input(\"่ฏท่พ“ๅ…ฅ้šๆœบ่ฟ‡็จ‹็š„ๆญฅๆ•ฐ: \")\r\n \r\n s=np.random.randn(1,steps) #ๅพ—ๅˆฐๆญฃๆ€ๅˆ†ๅธƒ็ฌฆๅˆN๏ผˆ0,1๏ผ‰็š„1็ปด้šๆœบๆ•ฐ็ป„\r\n p=np.cumsum(s) #ๅฏน้šๆœบๆ•ฐ่ฟญไปฃ\r\n\r\n plt.plot(p)\r\n plt.show()\r\n\r\ndef SDE():\r\n N=input (\"่ฏท่พ“ๅ…ฅ้šๆœบ่ฟ‡็จ‹็š„ๆญฅๆ•ฐ๏ผš\")\r\n s0=input (\"่ฏท่พ“ๅ…ฅ้šๆœบ่ฟ‡็จ‹่ตทๅง‹ไฝ็ฝฎ็š„ๅ€ผ๏ผš\")\r\n Alpha,Mu,Sigma=input (\"่ฏทไพๆฌก่พ“ๅ…ฅAlpha,Mu,Sigma็š„ๅ€ผ: \")\r\n \r\n S=np.zeros(N+1)\r\n S[0]=s0\r\n dt=0.02 #ๅ–่ถณๅคŸๅฐ็š„dt๏ผŒ่ฟ›่กŒ่ฟญไปฃ\r\n \r\n for i in range (0,N):\r\n dBt=np.sqrt(dt)*np.random.randn() #ๅพ—ๅˆฐ็ฌฆๅˆN(0,dt)็š„้šๆœบๆ•ฐ\r\n S[i+1]=S[i]+Alpha*(Mu-S[i])*dt+Sigma*S[i]*dBt #่ฟญไปฃ\r\n plt.plot(S,label='Alpha=%s \\nMu=%s \\nSigma=%s'%(Alpha,Mu,Sigma)) \r\n plt.text(N/10,np.min(S),'Mean=%0.2f \\nDev=%0.2f'%(np.mean(S),np.var(S))) #ๆ˜พ็คบๆœŸๆœ›ๅ’Œๆ–นๅทฎ\r\n\r\n plt.legend()\r\n plt.show()\r\n \r\nSDE()\r\ndef main():\r\n plt.title('้šๆœบ่ฟ‡็จ‹ๅคงไฝœไธš ๅˆ˜ๆ€ๅ‡ก')\r\n plt.xlabel('time')\r\n plt.ylabel('position')\r\n \r\n if input(\"่ฏท่พ“ๅ…ฅ่ฆ่ฟ›่กŒ็š„้ข˜ๅท๏ผˆ1ๆˆ–2๏ผ‰: \")==1:\r\n SP()\r\n else:\r\n SDE()\r\n\r\n\r\n\r\n\r\n\r\n" } ]
6
greenkarson/python
https://github.com/greenkarson/python
4531a7c89479747ee102acbd6a0a85074a0bda92
55c2bea0da21308a99dd59017ad4a42a841acf20
37c0acbfbae606f59a4b8f876ea6422a4a2a3475
refs/heads/master
2021-02-09T09:32:02.406287
2020-06-14T12:56:11
2020-06-14T12:56:11
244,267,405
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6934404373168945, "alphanum_fraction": 0.7121820449829102, "avg_line_length": 27.730770111083984, "blob_id": "43cf4c0cfefde797a58b0184596d0f73ba66d7bf", "content_id": "eb48eedaecb46fb8b5fcae7a9da1375ade694ed7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 747, "license_type": "no_license", "max_line_length": 123, "num_lines": 26, "path": "/Audio/specgram.py", "repo_name": "greenkarson/python", "src_encoding": "UTF-8", "text": "import torchaudio\nimport matplotlib.pyplot as plt\n\nwaveform, sample_rate = torchaudio.load(r\"E:\\dataset\\audio\\SpeechCommands\\speech_commands_v0.02\\bed\\00f0204f_nohash_0.wav\")\n\nprint(\"Shape of waveform: {}\".format(waveform.size()))\nprint(\"Sample rate of waveform: {}\".format(sample_rate))\n\nplt.figure()\nplt.plot(waveform.t().numpy())\n\n# specgram = torchaudio.transforms.Spectrogram()(waveform)\n#\n# print(\"Shape of spectrogram: {}\".format(specgram.size()))\n#\n# plt.figure()\n# plt.imshow(specgram.log2()[0, :, :].numpy(), cmap='gray')\n\nspecgram = torchaudio.transforms.MelSpectrogram()(waveform)\n\nprint(\"Shape of spectrogram: {}\".format(specgram.size()))\n\nplt.figure()\np = plt.imshow(specgram.log2()[0,:,:].detach().numpy(), cmap='gray')\n\nplt.show()\n" }, { "alpha_fraction": 0.4964176118373871, "alphanum_fraction": 0.5353121757507324, "avg_line_length": 26.91428565979004, "blob_id": "c244c7b2ca29e50b27eb6f319031fa45156bcf06", "content_id": "d26ae60c5c03e51ed173990b2f13c7679baa5fa7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1954, "license_type": "no_license", "max_line_length": 120, "num_lines": 70, "path": "/Audio/audio_understand.py", "repo_name": "greenkarson/python", "src_encoding": "UTF-8", "text": "import torchaudio\nfrom torch import nn\nfrom torch.utils.data import DataLoader\nimport torch\nfrom torch.nn import functional as F\nfrom torch import optim\n\n\ndef normalize(tensor):\n tensor_minusmean = tensor - tensor.mean()\n return tensor_minusmean / tensor_minusmean.max()\n\n\ntf = torchaudio.transforms.MFCC(sample_rate=8000)\n\n\nclass Net(nn.Module):\n def __init__(self):\n super().__init__()\n self.seq = torch.nn.Sequential(\n torch.nn.Conv2d(1, 4, (1, 3), (1, 2), (0, 1)),\n torch.nn.BatchNorm2d(4),\n torch.nn.ReLU(),\n torch.nn.Conv2d(4, 4, (1, 3), (1, 2), (0, 1)),\n torch.nn.BatchNorm2d(4),\n torch.nn.ReLU(),\n torch.nn.Conv2d(4, 4, (1, 3), (1, 2), (0, 1)),\n torch.nn.BatchNorm2d(4),\n torch.nn.ReLU(),\n torch.nn.Conv2d(4, 8, 3, 2, 1),\n torch.nn.BatchNorm2d(8),\n torch.nn.ReLU(),\n torch.nn.Conv2d(8, 8, 3, 2, 1),\n torch.nn.BatchNorm2d(8),\n torch.nn.ReLU(),\n torch.nn.Conv2d(8, 1, (8, 1)),\n )\n\n def forward(self, x):\n h = self.seq(x)\n return h.reshape(-1, 8)\n\n\nif __name__ == '__main__':\n\n data_loader = torch.utils.data.DataLoader(torchaudio.datasets.YESNO('.', download=True), batch_size=1, shuffle=True)\n\n net = Net()\n opt = torch.optim.Adam(net.parameters())\n\n loss_fn = torch.nn.MSELoss()\n\n for epoch in range(100000):\n datas = []\n tags = []\n for data, _, tag in data_loader:\n tag = torch.stack(tag, dim=1).float()\n specgram = normalize(tf(data))\n datas.append(F.adaptive_avg_pool2d(specgram, (32, 256)))\n tags.append(tag)\n\n specgrams = torch.cat(datas, dim=0)\n tags = torch.cat(tags, dim=0)\n y = net(specgrams)\n loss = loss_fn(y, tags)\n\n opt.zero_grad()\n loss.backward()\n opt.step()\n print(loss)\n" }, { "alpha_fraction": 0.5593220591545105, 
"alphanum_fraction": 0.5932203531265259, "avg_line_length": 28.5, "blob_id": "c2751c4fadb111cfb9f552ff73a71ac340e7866a", "content_id": "458960ec584061c15ec1f87568ab8fa514715a41", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 59, "license_type": "no_license", "max_line_length": 35, "num_lines": 2, "path": "/MTCNN2/Note/pleace_hold.py", "repo_name": "greenkarson/python", "src_encoding": "UTF-8", "text": "\nfor face_size in [12]:\n print(\"gen %i img\" % face_size)" }, { "alpha_fraction": 0.5145395994186401, "alphanum_fraction": 0.5242326259613037, "avg_line_length": 33.375, "blob_id": "0df836def35b584ec19d288ba80e1edfd736dc53", "content_id": "650a8241ace9b6a6f20a78dd2562506501def568", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2476, "license_type": "no_license", "max_line_length": 98, "num_lines": 72, "path": "/FACE_MTCNN/gen_landmark.py", "repo_name": "greenkarson/python", "src_encoding": "UTF-8", "text": "import os\nfrom PIL import Image\nfrom celeba import Celeba\nimport numpy as np\nfrom tools import utils\n\n\nclass Genlandmark():\n def __init__(self, metadata, output_folder, crop_size, net_stage):\n self.net_data_folder = os.path.join(output_folder, net_stage)\n\n self.landmarks_dest = os.path.join(self.net_data_folder, 'landmarks')\n if not os.path.exists(self.landmarks_dest):\n os.makedirs( self.landmarks_dest)\n\n self.crop_size = crop_size\n self.metadata = metadata\n\n def run(self):\n landmarks_meta = open(os.path.join(self.net_data_folder, 'landmarks_meta.txt'), 'w')\n\n landmarks_count = 0\n\n for i, item in enumerate(self.metadata):\n\n img_path = item['file_name']\n boxes = item['meta_data']\n landmarks = item['landmarks']\n\n img = Image.open(img_path)\n width, height = img.size\n\n for bbox, landmark in zip(boxes, landmarks):\n left = bbox[0]\n top = bbox[1]\n w = bbox[2]\n h = bbox[3]\n\n # there is error data in datasets\n if w <= 0 or h <= 0:\n continue\n\n right = bbox[0] + w + 1\n bottom = bbox[1] + h + 1\n\n crop_box = np.array([left, top, right, bottom])\n crop_img = img.crop(crop_box)\n resize_img = crop_img.resize((self.crop_size, self.crop_size))\n\n landmark = np.array(landmark)\n landmark.resize(5, 2)\n\n # (( x - bbox.left)/ width of bounding box, (y - bbox.top)/ height of bounding box\n landmark_gtx = (landmark[:, 0] - left) / w\n landmark_gty = (landmark[:, 1] - top) / h\n landmark_gt = np.concatenate([landmark_gtx, landmark_gty]).tolist()\n if landmarks_count < 60000:\n landmarks_count += 1\n resize_img.save(f\"{self.landmarks_dest}/{landmarks_count}.jpg\")\n landmarks_meta.write(f\"{landmarks_count}.jpg {3} \")\n landmarks_meta.write(\" \".join([str(i) for i in landmark_gt]))\n landmarks_meta.write('\\n')\n landmarks_meta.flush()\n\n landmarks_meta.close()\n\n\nif __name__ == '__main__':\n celeba = Celeba(r\"E:\\dataset\")\n train, dev, test = celeba.split_data()\n data = Genlandmark(train, r'F:\\celeba', 48, 'onet')\n data.run()\n\n" }, { "alpha_fraction": 0.4552057981491089, "alphanum_fraction": 0.518159806728363, "avg_line_length": 14.884614944458008, "blob_id": "16389ea0a8facde36319396920a521680deb4f14", "content_id": "328fcaa450c3c2dd5c1823b38a4be7a9f8eab0fd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 431, "license_type": "no_license", "max_line_length": 48, "num_lines": 26, "path": "/PythonStudy/count_det.py", "repo_name": "greenkarson/python", "src_encoding": 
"UTF-8", "text": "import numpy as np\nimport torch\n\n# ่ฎก็ฎ—่กŒๅˆ—ๅผๅคงๅฐๅ‡ฝๆ•ฐ\n\na = np.array([[1, 2], [3, 4]])\nprint(np.linalg.det(a))\n\n\nb = torch.tensor([[1., 2.], [3., 4.]])\nc = torch.tensor([[5., 6.], [7., 8.]])\nm = torch.tensor([[1., 2.], [3., 4.], [5., 6.]])\nk = torch.randn(1, 4, 5, 3)\nprint(b.det())\n\nd = c + b\ne = c - b\nf = c * b\nprint(d)\nprint(e)\nprint(f)\nprint(m @ b)\nprint(m.t())\nprint(k.shape)\nj = k.permute(0, 3, 1, 2)\nprint(j.shape)\n" }, { "alpha_fraction": 0.6295731663703918, "alphanum_fraction": 0.7042682766914368, "avg_line_length": 27.565217971801758, "blob_id": "644f8e58d25bf5911c431df34e01575d163076d2", "content_id": "9239164233700db15fc8f6081ca1924b5c1c169a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 656, "license_type": "no_license", "max_line_length": 75, "num_lines": 23, "path": "/OpenCV_Practice/contour_box.py", "repo_name": "greenkarson/python", "src_encoding": "UTF-8", "text": "import cv2\nimport numpy as np\nimg = cv2.imread(\"15.jpg\")\ndst = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)\nret,thresh = cv2.threshold(dst,55,255,cv2.THRESH_BINARY)\n\ncontours,_ = cv2.findContours(thresh,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)\n\nx,y,w,h = cv2.boundingRect(contours[0])\nimg_contours = cv2.rectangle(img,(x,y),(x+w,y+h),(0,0,255))\n\nrect = cv2.minAreaRect(contours[0])\nbox = cv2.boxPoints(rect)\nbox = np.int0(box)\nimg_contours = cv2.drawContours(img,[box],-1,(0,255,0),2)\n\n(x,y),r = cv2.minEnclosingCircle(contours[0])\ncenter = (int(x),int(y))\nr = int(r)\nimg_contours = cv2.circle(img,center,r,(255,0,0),2)\n\ncv2.imshow(\"img\",img_contours)\ncv2.waitKey(0)" }, { "alpha_fraction": 0.6051779985427856, "alphanum_fraction": 0.7055016160011292, "avg_line_length": 22.846153259277344, "blob_id": "5ee0b10bae6e02c6516d837ad03d596bbffa85b9", "content_id": "69383f060fcc30151cb20e8fa9f0f2249efd5f40", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 309, "license_type": "no_license", "max_line_length": 69, "num_lines": 13, "path": "/OpenCV_Practice/corner_harris_1.py", "repo_name": "greenkarson/python", "src_encoding": "UTF-8", "text": "import cv2\nimport numpy as np\n\nimg = cv2.imread(\"32.jpg\")\ngray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)\ngray = np.float32(gray)\n\ndst = cv2.cornerHarris(gray,2,3,0.03)\ndst = cv2.dilate(dst,cv2.getStructuringElement(cv2.MORPH_RECT,(3,3)))\nimg[dst > 0.01*dst.max()] =[0,0,255]\n\ncv2.imshow(\"dst\",img)\ncv2.waitKey(0)" }, { "alpha_fraction": 0.5094655752182007, "alphanum_fraction": 0.5171720385551453, "avg_line_length": 41.63571548461914, "blob_id": "8844edf18293943794d96013f0b03ae98f051431", "content_id": "00b5c9a4bd8f20fdaa8d4c3495e176d461508063", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 11938, "license_type": "no_license", "max_line_length": 184, "num_lines": 280, "path": "/FACE_MTCNN/Train.py", "repo_name": "greenkarson/python", "src_encoding": "UTF-8", "text": "import torch, glob, os\nfrom torch.utils.data import DataLoader\nfrom torch import optim\nfrom torch import nn\nfrom torch.utils.tensorboard import SummaryWriter\nfrom Network import PNet, RNet, ONet\nfrom dataset import MtcnnDataset\n\n\nclass Trainer():\n\n def __init__(self, net_stage, resume=False):\n\n self.net_stage = net_stage\n self.train_dataset = MtcnnDataset(r'F:\\celeba', net_stage=self.net_stage)\n self.train_dataloader = DataLoader(self.train_dataset, 
batch_size=1000, shuffle=True, num_workers=2,\n                                           drop_last=True)\n\n        self.eval_dataset = MtcnnDataset(r"F:\\celeba", net_stage=f"{self.net_stage}_eval")\n        self.eval_dataloader = DataLoader(self.eval_dataset, batch_size=1000, shuffle=True, num_workers=2,\n                                          drop_last=True)\n\n        if self.net_stage == 'pnet':\n            self.net = PNet()\n        if self.net_stage == 'rnet':\n            self.net = RNet()\n        if self.net_stage == 'onet':\n            self.net = ONet()\n\n        if torch.cuda.is_available():\n            self.net.cuda()\n\n        # reduction='none' keeps the per-sample losses that the hard-example mining in cls_loss needs\n        self.loss_cls = nn.CrossEntropyLoss(reduction='none')\n        self.loss_box = nn.MSELoss()\n        self.loss_landmark = nn.MSELoss()\n\n        self.opt = optim.Adam(self.net.parameters())\n\n        self.epoch_num = 1\n        self.global_step = 1\n\n        if resume:\n            # torch.load("./param/pnet.pt")\n            self.load_state_dict()\n\n        self.summarywrite = SummaryWriter("./runs", purge_step=self.epoch_num)\n\n    def __call__(self):\n        for epoch in range(10000):\n            total_loss = 0\n            for i, (img_data, label, box, landmarks) in enumerate(self.train_dataloader):\n                if torch.cuda.is_available():\n                    img_data = img_data.cuda()\n                    gt_label = label.cuda()\n                    gt_boxes = box.cuda()\n                    gt_landmarks = landmarks.cuda()\n                else:\n                    gt_label = label\n                    gt_boxes = box\n                    gt_landmarks = landmarks\n\n                pred_label, pred_offset, pred_landmarks = self.net(img_data)\n\n                pred_label = pred_label.view(-1, 2)\n                pred_offset = pred_offset.view(-1, 4)\n                pred_landmarks = pred_landmarks.view(-1, 10)\n\n                # print(pred_label.shape, pred_offset.shape, pred_landmarks.shape)\n                # print(label.shape, box.shape, landmarks.shape)\n\n                self.opt.zero_grad()\n                cls_loss = self.cls_loss(gt_label, pred_label)\n                box_loss = self.box_loss(gt_label, gt_boxes, pred_offset)\n                landmark_loss = self.landmark_loss(gt_label, gt_landmarks, pred_landmarks)\n                loss = cls_loss + box_loss + landmark_loss\n                loss.backward()\n                self.opt.step()\n                total_loss += loss.cpu().detach()\n                # self.summarywrite.add_scalars('train/loss', loss.cpu().detach().item(), global_step=self.epoch_num)\n                self.summarywrite.add_scalars("train/loss", {i: j for i, j in\n                                                             zip(["loss", "cls_loss", "box_loss", "landmark_loss"],\n                                                                 [loss.cpu().detach().item(), cls_loss.cpu().item(),\n                                                                  box_loss.cpu().item(), landmark_loss.cpu().item()])\n                                                             }, global_step=self.global_step)\n                self.global_step += 1\n\n            print(\n                f"epoch:{self.epoch_num}---loss:{loss.cpu().item()}---cls_loss:{cls_loss.cpu().item()}---box_loss:{box_loss.cpu().item()}---landmark_loss:{landmark_loss.cpu().item()}")\n            self.save_state_dict()\n            self.export_model(f"./param/{self.net_stage}.pt")\n\n            if self.epoch_num % 10 == 0:\n                with torch.no_grad():\n                    for name, parameter in self.net.named_parameters():\n                        if parameter.grad is not None:\n                            avg_grad = torch.mean(parameter.grad)\n                            print(f"{name}----grad_avg:{avg_grad}")\n                            self.summarywrite.add_scalar(f"grad_avg/{name}", avg_grad.item(), self.epoch_num)\n                            self.summarywrite.add_histogram(f"grad/{name}", parameter.grad.cpu().numpy(), self.epoch_num)\n                        if parameter.data is not None:\n                            avg_weight = torch.mean(parameter.data)\n                            print(f"{name}----weight_avg:{avg_weight}")\n                            self.summarywrite.add_scalar(f"weight_avg/{name}", avg_weight.item(), self.epoch_num)\n                            self.summarywrite.add_histogram(f"weight/{name}", parameter.detach().cpu().numpy(), self.epoch_num)\n\n            total = 0\n            right = 0\n            tp = 0\n            fp = 0\n            fn = 0\n            tn = 0\n\n            total_cls_loss = 0\n            total_box_loss = 0\n            total_landmark_loss = 0\n\n            for i, (img_data, label, box, landmarks) in enumerate(self.eval_dataloader):\n                if torch.cuda.is_available():\n                    img_data = img_data.cuda()\n                    gt_label = label.cuda()\n                    gt_boxes 
= box.cuda()\n                    gt_landmarks = landmarks.cuda()\n                else:\n                    gt_label = label\n                    gt_boxes = box\n                    gt_landmarks = landmarks\n\n                with torch.no_grad():\n                    pred_label, pred_offset, pred_landmarks = self.net(img_data)\n                    print(pred_label, pred_offset, pred_landmarks)\n\n\n                pred_label = pred_label.view(-1, 2)\n                pred_offset = pred_offset.view(-1, 4)\n                pred_landmarks = pred_landmarks.view(-1, 10)\n                total_cls_loss += self.cls_loss(gt_label, pred_label)\n                total_box_loss += self.box_loss(gt_label, gt_boxes, pred_offset)\n                total_landmark_loss += self.landmark_loss(gt_label, gt_landmarks, pred_landmarks)\n\n                pred_label = torch.argmax(pred_label, dim=1)\n\n                mask = gt_label <= 1\n\n                right += torch.sum(gt_label[mask] == pred_label[mask])\n                total += gt_label[mask].shape[0]\n\n                p_mask = gt_label == 1\n                tp += torch.sum(gt_label[p_mask] == pred_label[p_mask])\n                fn += torch.sum(gt_label[p_mask] != pred_label[p_mask])  # positives predicted negative are false negatives\n\n                n_mask = gt_label == 0\n                tn += torch.sum(gt_label[n_mask] == pred_label[n_mask])\n                fp += torch.sum(gt_label[n_mask] != pred_label[n_mask])  # negatives predicted positive are false positives\n\n            # acc = right.cpu().detach() / total\n            acc = torch.true_divide(right, total)\n            # precision = tp / (tp + fp)\n            precision = torch.true_divide(tp, (tp + fp))\n            # recall = tp / (tp + fn)\n            recall = torch.true_divide(tp, (tp + fn))\n            # f1 = 2 * precision * recall / (precision + recall)\n            f1 = torch.true_divide((2 * precision * recall), (precision + recall))\n\n            # i is the last batch index, so the number of batches is i + 1\n            avg_cls_loss = total_cls_loss / (i + 1)\n            avg_box_loss = total_box_loss / (i + 1)\n            avg_landmark_loss = total_landmark_loss / (i + 1)\n\n            self.summarywrite.add_scalars("eval/loss", {i: j for i, j in\n                                                        zip(["avg_cls_loss", "avg_box_loss", "avg_landmark_loss"],\n                                                            [avg_cls_loss.cpu().item(),\n                                                             avg_box_loss.cpu().item(), avg_landmark_loss.cpu().item()])\n                                                        }, global_step=self.epoch_num)\n\n            self.summarywrite.add_scalars("eval_set", {\n                i: j for i, j in\n                zip(["acc", "precision", "recall", "f1"], [acc, precision, recall, f1])\n            }, global_step=self.epoch_num)\n            print("Epoch %d, " % self.epoch_num,\n                  f"result on eval set: acc {acc.cpu().item()}",\n                  f"precision {precision.cpu().item()}",\n                  f"recall {recall.cpu().item()}",\n                  f"f1 {f1.cpu().item()}")\n            self.epoch_num += 1\n\n    def cls_loss(self, gt_label, pred_label):\n\n        pred_label = torch.squeeze(pred_label)\n        gt_label = torch.squeeze(gt_label)\n\n        # Online hard sample mining\n\n        mask = torch.eq(gt_label, 0) | torch.eq(gt_label, 1)\n        valid_gt_label = torch.masked_select(gt_label, mask)\n        mask = torch.stack([mask] * 2, dim=1)\n        valid_pred_label = torch.masked_select(pred_label, mask).reshape(-1, 2)\n\n        # compute log-softmax\n        # valid_pred_label = torch.log(valid_pred_label)\n\n        loss = self.loss_cls(valid_pred_label, valid_gt_label)\n\n        pos_mask = torch.eq(valid_gt_label, 1)\n        neg_mask = torch.eq(valid_gt_label, 0)\n\n        neg_loss = loss.masked_select(neg_mask)\n        pos_loss = loss.masked_select(pos_mask)\n\n        if neg_loss.shape[0] > pos_loss.shape[0]:\n            neg_loss, _ = neg_loss.topk(pos_loss.shape[0])\n        loss = torch.cat([pos_loss, neg_loss])\n        loss = torch.mean(loss)\n\n        return loss\n\n    def box_loss(self, gt_label, gt_offset, pred_offset):\n        pred_offset = torch.squeeze(pred_offset)\n        gt_offset = torch.squeeze(gt_offset)\n        gt_label = torch.squeeze(gt_label)\n\n        mask = torch.eq(gt_label, 1) | torch.eq(gt_label, 2)\n        # broadcast mask\n        mask = torch.stack([mask] * 4, dim=1)\n\n        # only valid elements can affect the loss\n        valid_gt_offset = torch.masked_select(gt_offset, mask).reshape(-1, 4)\n        valid_pred_offset = torch.masked_select(\n            pred_offset, mask).reshape(-1, 4)\n        return 
self.loss_box(valid_pred_offset, valid_gt_offset)\n\n    def landmark_loss(self, gt_label, gt_landmark, pred_landmark):\n        pred_landmark = torch.squeeze(pred_landmark)\n        gt_landmark = torch.squeeze(gt_landmark)\n        gt_label = torch.squeeze(gt_label)\n        mask = torch.eq(gt_label, 3)\n        # broadcast mask\n        mask = torch.stack([mask] * 10, dim=1)\n\n        valid_gt_landmark = torch.masked_select(\n            gt_landmark, mask).reshape(-1, 10)\n        valid_pred_landmark = torch.masked_select(\n            pred_landmark, mask).reshape(-1, 10)\n        return self.loss_landmark(valid_pred_landmark, valid_gt_landmark)\n\n    def save_state_dict(self):\n        checkpoint_name = "checkpoint_epoch_%d" % self.epoch_num\n        file_path = f"param/{self.net_stage}/{checkpoint_name}"\n        if not os.path.exists(f"param/{self.net_stage}"):\n            os.makedirs(f"param/{self.net_stage}")\n\n        state = {\n            'epoch_num': self.epoch_num,\n            'global_step': self.global_step,\n            'state_dict': self.net.state_dict(),\n            'optimizer': self.opt.state_dict(),\n        }\n        torch.save(state, file_path)\n\n    def export_model(self, filename):\n        torch.save(self.net.state_dict(), filename)\n\n    def load_state_dict(self):\n\n        # Get the latest checkpoint in output_folder\n        all_checkpoints = glob.glob(os.path.join(f"./param/{self.net_stage}", 'checkpoint_epoch_*'))\n\n        # resume even when only a single checkpoint exists\n        if len(all_checkpoints) > 0:\n            epoch_nums = [int(i.split('_')[-1]) for i in all_checkpoints]\n            max_index = epoch_nums.index(max(epoch_nums))\n            latest_checkpoint = all_checkpoints[max_index]\n\n            state = torch.load(latest_checkpoint)\n            self.epoch_num = state['epoch_num'] + 1\n            # self.global_step = state['global_step'] + 1\n            self.net.load_state_dict(state['state_dict'])\n            self.opt.load_state_dict(state['optimizer'])\n\n\nif __name__ == '__main__':\n    train = Trainer("rnet", resume=True)\n    train()\n" }, { "alpha_fraction": 0.44106462597846985, "alphanum_fraction": 0.5475285053253174, "avg_line_length": 19.230770111083984, "blob_id": "da9ef39e74ba0fdf834e95ce19a20ca3a4886dcd", "content_id": "f065b55f530de7b2824af8982448c95090fc62c8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 263, "license_type": "no_license", "max_line_length": 65, "num_lines": 13, "path": "/MTCNN2/Note/test3.py", "repo_name": "greenkarson/python", "src_encoding": "UTF-8", "text": "import torch\nimport numpy as np\n\na = torch.randn(240, 137)\nb = a > 0.6\nc = torch.nonzero(torch.gt(a, 0.6))\nprint(a[a > 0.6].shape)\nprint(b.nonzero().shape)\n\no = np.array([[1, 2, 3, 4, 4], [1, 2, 3, 4, 2], [1, 2, 3, 4, 3]])\nd = o[:, 4]\ne = d.argsort()\nprint(d, e)\n" }, { "alpha_fraction": 0.6464646458625793, "alphanum_fraction": 0.7070707082748413, "avg_line_length": 24.83333396911621, "blob_id": "0160eadca85c4cf8c42548c524ef86de70d9fc47", "content_id": "f5456f1921d6af10beda24c9a9a3bb5b299a310b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 297, "license_type": "no_license", "max_line_length": 96, "num_lines": 12, "path": "/OpenCV_Practice/feature_orb_1.py", "repo_name": "greenkarson/python", "src_encoding": "UTF-8", "text": "import cv2\n\nimg = cv2.imread(\"33.jpg\")\ngray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\norb = cv2.ORB_create()\nkb = orb.detect(gray)\nkps,des = orb.compute(gray, kb)\n\ndst = cv2.drawKeypoints(img, kps, None, (0,0,255),cv2.DRAW_MATCHES_FLAGS_NOT_DRAW_SINGLE_POINTS)\ncv2.imshow(\"dst\", dst)\ncv2.waitKey(0)" }, { "alpha_fraction": 0.5219123363494873, "alphanum_fraction": 0.5245683789253235, "avg_line_length": 24.542373657226562, 
"blob_id": "6b73e3bf3f8ac5c0a9906ff514f119bcfaca30bf", "content_id": "08da37e32b221aeab65152933e84a23543398fe7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1530, "license_type": "no_license", "max_line_length": 61, "num_lines": 59, "path": "/PythonStudy/Data_structure/Binary_tree/binary_tree.py", "repo_name": "greenkarson/python", "src_encoding": "UTF-8", "text": "class Node:\n def __init__(self,elem = -1,lchild = None,rchild = None):\n self.elem = elem\n self.lchild = lchild\n self.rchild = rchild\n\n\nclass Tree:\n def __init__(self):\n self.root = Node\n self.queue = []\n\n def add(self,elem):\n node = Node(elem)\n if self.root == None:\n self.root = node\n self.queue.append(self.root)\n else:\n tree_node = self.queue[0]\n if tree_node == None:\n tree_node.lchild = node\n self.queue.append(tree_node.lchild)\n else:\n tree_node.rchild = node\n self.queue.append(tree_node.rchild)\n self.queue.pop(0)\n\n # ๅ‰ๅบ้ๅŽ†\n def pre_traverse(self,root):\n if root == None:\n return\n print(root.elem)\n self.pre_traverse(root.lchild)\n self.pre_traverse(root.rchild)\n # ไธญๅบ้ๅŽ†\n def mid_traverse(self,root):\n if root == None:\n return\n self.mid_traverse(root.lchild)\n print(root.elem)\n self.mid_traverse(root.rchild)\n # ๅŽๅบ้ๅŽ†\n def after_traverse(self,root):\n if root == None:\n return\n self.after_traverse(root.lchild)\n self.after_traverse(root.rchild)\n print(root.elem)\n\nitem = range(6)\ntree = Tree()\nfor i in item:\n tree.add(i)\n\ntree.pre_traverse(tree.root)\nprint(\"===================\")\ntree.mid_traverse(tree.root)\nprint(\"===================\")\ntree.after_traverse(tree.root)" }, { "alpha_fraction": 0.47766321897506714, "alphanum_fraction": 0.5395188927650452, "avg_line_length": 35.1875, "blob_id": "0af3d4a52211f869fb305057860039b64dd162d5", "content_id": "c80d50adacd0e00fb15a852a9aaa1c0c9fbcb540", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 582, "license_type": "no_license", "max_line_length": 87, "num_lines": 16, "path": "/CenterLoss/Drawing.py", "repo_name": "greenkarson/python", "src_encoding": "UTF-8", "text": "import matplotlib.pyplot as plt\nimport os\n\n\ndef DrawPics(features, labels, epoch):\n plt.clf()\n color = ['#F0F8FF', '#FAEBD7', '#00FFFF', '#7FFFD4', '#F0FFFF',\n '#F5F5DC', '#FFE4C4', '#000000', '#FFEBCD', '#0000FF',\n ]\n for i in range(10):\n plt.scatter(features[labels == i, 0], features[labels == i, 1], color=color[i])\n plt.legend([\"0\",\"1\",\"2\",\"3\",\"4\",\"5\",\"6\",\"7\",\"8\",\"9\"], loc = 'upper right')\n plt.title(f\"Epoch-{epoch}\")\n if os.path.exists(\"./Pics\") is False:\n os.mkdir(\"./Pics\")\n plt.savefig(f\"Pics/Epoch-{epoch}\")\n\n\n\n" }, { "alpha_fraction": 0.4352881610393524, "alphanum_fraction": 0.5202224254608154, "avg_line_length": 33.70175552368164, "blob_id": "da705d8ec50da9cded0db13033fd0ada154b0762", "content_id": "d3df4344b59e8081df2c2ef9d7cf2ef90214d441", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1990, "license_type": "no_license", "max_line_length": 82, "num_lines": 57, "path": "/AlexNet/AlexNet.py", "repo_name": "greenkarson/python", "src_encoding": "UTF-8", "text": "import torch\nfrom torch import nn\n\n\nclass AlexNet(nn.Module):\n def __init__(self):\n super(AlexNet, self).__init__()\n self.sequential = nn.Sequential(\n nn.Conv2d(3, 96, kernel_size=11, stride=4, padding=2), # (96, 55, 55)\n nn.ReLU(),\n 
nn.LocalResponseNorm(size=5, alpha=0.0001, beta=0.75, k=2),\n nn.MaxPool2d(kernel_size=3, stride=2), # 96, 27, 27\n nn.Conv2d(96, 256, kernel_size=5, stride=1, padding=2),\n nn.ReLU(),\n nn.LocalResponseNorm(size=5, alpha=0.0001, beta=0.75, k=2),\n nn.MaxPool2d(kernel_size=3, stride=2), # 256, 13, 13\n nn.Conv2d(256, 384, kernel_size=3, padding=1), # 384, 13, 13\n nn.ReLU(),\n nn.Conv2d(384, 384, kernel_size=3, padding=1),\n nn.ReLU(),\n nn.Conv2d(384, 256, kernel_size=3, padding=1), # 256, 13, 13\n nn.ReLU(),\n nn.MaxPool2d(kernel_size=3, stride=2) # 256, 6, 6\n )\n self.classfifier = nn.Sequential(\n # inplace ๆ˜ฏๅฆ่ฟ›่กŒ่ฆ†็›–\n nn.Dropout(p=0.5, inplace=True),\n nn.Linear(256*6*6, 4096),\n nn.ReLU(),\n nn.Dropout(p=0.5, inplace=True),\n nn.Linear(4096, 4096),\n nn.ReLU(),\n nn.Linear(4096, 1000)\n )\n self.init_bias()\n\n def forward(self, x):\n x = self.sequential(x)\n x = x.view(-1,256*6*6)\n x = self.classfifier(x)\n return x\n\n def init_bias(self):\n for layer in self.sequential:\n if isinstance(layer, nn.Conv2d):\n nn.init.normal_(layer.weight,mean=0,std=0.01)\n nn.init.constant_(layer.bias, 0)\n nn.init.constant_(self.sequential[4].bias, 1)\n nn.init.constant_(self.sequential[10].bias, 1)\n nn.init.constant_(self.sequential[12].bias, 1)\n\n\nif __name__ == '__main__':\n a = torch.randn(4, 3, 224, 224)\n net = AlexNet()\n b = net(a)\n print(b.shape)\n" }, { "alpha_fraction": 0.5409836173057556, "alphanum_fraction": 0.5591026544570923, "avg_line_length": 29.473684310913086, "blob_id": "e987f5ef44eb62f2cb19caa07e094341cf7fbb0d", "content_id": "530fe7542fd63588e8c071f1bb18ddec6e11e645", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1341, "license_type": "no_license", "max_line_length": 59, "num_lines": 38, "path": "/deep_learning/day01/data.py", "repo_name": "greenkarson/python", "src_encoding": "UTF-8", "text": "import torch,os,cv2\nimport numpy as np\nfrom torch.utils.data import Dataset\n\nclass MNISTDataset(Dataset):\n # ๅฐ†ๆ•ฐๆฎ่ฃ…ๅ…ฅDatasetไธญ๏ผŒๅ‡†็กฎ่ฏดๅฐ†ๅ›พๅƒ่ทฏๅพ„ๅ’Œๆ ‡็ญพ่ฃ…ๅ…ฅ\n def __init__(self, root, is_train=True):\n self.dataset = [] # ่ฎฐๅฝ•ๆ‰€ๆœ‰ๆ•ฐๆฎ\n sub_dir = \"TRAIN\" if is_train else \"TEST\"\n # ้ๅŽ†็›ฎๅฝ•ๅŠๅญ็›ฎๅฝ•ไธ‹ๆ ‡็ญพๆ•ฐๆฎๅŠๅ›พๅƒ่ทฏๅพ„่ฃ…ๅ…ฅๆ•ฐๆฎ้›†ไธญ\n for tag in os.listdir(f\"{root}/{sub_dir}\"):\n img_dir = f\"{root}/{sub_dir}/{tag}\"\n for img_filename in os.listdir(img_dir):\n img_path = f\"{img_dir}/{img_filename}\"\n self.dataset.append((img_path, tag))\n # ่Žทๅ–ๆ•ฐๆฎ้•ฟๅบฆ๏ผŒๆ€ปๅ…ฑๆœ‰ๅคšๅฐ‘ๆ•ฐๆฎ\n def __len__(self):\n return len(self.dataset)\n # ๅค„็†ๆฏๆกๆ•ฐๆฎ\n def __getitem__(self, index):\n data = self.dataset[index]\n # ่ฏปๅ…ฅๅ›พๅƒๆ•ฐๆฎ\n img_data = cv2.imread(data[0],0)\n # ๅ›พๅƒๆ•ฐๆฎๅฑ•ๅนณ\n img_data = img_data.reshape(-1)\n # ๅ›พๅƒๅฝ’ไธ€ๅŒ–\n img_data = img_data / 255\n\n # ๆ ‡็ญพone-hot ็ผ–็ \n tag_one_hot = np.zeros(10)\n tag_one_hot[int(data[1])] = 1\n\n return np.float32(img_data),np.float32(tag_one_hot)\n\n\n# if __name__ == '__main__':\n# dataset = MNISTDataset(\"../data/MNIST_IMG\")\n# print(dataset[30000])\n\n" }, { "alpha_fraction": 0.5586642622947693, "alphanum_fraction": 0.6092057824134827, "avg_line_length": 34.74193572998047, "blob_id": "5099cdc6fdd95307178c8324d868af05365c5c92", "content_id": "82cf8c86884e485c02dac3db733f4366958c009c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1108, "license_type": "no_license", "max_line_length": 73, "num_lines": 31, "path": 
"/MTCNN2/Note/Check.py", "repo_name": "greenkarson/python", "src_encoding": "UTF-8", "text": "import os, cv2\nimport numpy as np\nfrom PIL import Image, ImageDraw\n\nsrc_path = r\"/Users/karson/Downloads/CelebaA/img_celeba\"\nsrc_label = r\"/Users/karson/Downloads/CelebaA/Anno/list_bbox_celeba.txt\"\ntest_path = \"/Users/karson/Downloads/Test\"\n\npath = \"/Users/karson/Downloads/CelebaA/img_celeba.7z/000001.jpg\"\n\nfor i, line in enumerate(open(src_label, \"r\")):\n strs = line.split()\n if i < 2:\n continue\n img_path = f\"{src_path}/{strs[0]}\"\n img = Image.open(img_path)\n x1, y1, w, h = int(strs[1]), int(strs[2]), int(strs[3]), int(strs[4])\n x2, y2 = x1 + w, y1 + h\n print(x1, y1, x2, y2)\n img_draw = ImageDraw.Draw(img)\n img_draw.rectangle((x1, y1, x2, y2), outline=\"green\", width=2)\n # img.save(f\"/Users/karson/Downloads/Test/{i}.jpg\")\n\n # img_crop = img.crop([x1,y1,x2,y2])\n # img_crop.save(f\"/Users/karson/Downloads/Test/{i}.jpg\")\n _x1 = int(x1 + w * 0.12)\n _y1 = int(y1 + h * 0.1)\n _x2 = int(x1 + w * 0.9)\n _y2 = int(y1 + h * 0.85)\n img_draw.rectangle((_x1, _y1, _x2, _y2), outline=\"red\", width=2)\n img.save(f\"/Users/karson/Downloads/Test/{i}.jpg\")\n" }, { "alpha_fraction": 0.4149765968322754, "alphanum_fraction": 0.6037441492080688, "avg_line_length": 32.73684310913086, "blob_id": "859e5d1152f3867c21fd9cc4ba7786d166c9a3ca", "content_id": "42cb19c5fe438fc92cb55d0dbff384f98d9997be", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 661, "license_type": "no_license", "max_line_length": 99, "num_lines": 19, "path": "/OpenCV_Practice/draw_shape.py", "repo_name": "greenkarson/python", "src_encoding": "UTF-8", "text": "import numpy as np\nimport cv2\n\nimg = cv2.imread(\"1.jpg\")\ncv2.line(img, (20, 100), (100, 100), [0, 0, 255], thickness=1)\ncv2.circle(img, (50, 50), 10, [0, 0, 255], thickness=2)\n# ็”ป็Ÿฉๅฝข\ncv2.rectangle(img, (100, 100), (200, 200), [0, 0, 255], thickness=1)\n# ็”ปๆคญๅœ†\ncv2.ellipse(img, (150, 150), (100, 50), 0, 0, 360, [0, 0, 255], thickness=1)\n# ็”ปๅคš่พนๅฝข\npts = np.array([[10, 5], [50, 10], [70, 20], [20, 30]], dtype=np.int32)\npts = pts.reshape((-1, 1, 2))\ncv2.polylines(img, [pts], True, (0, 0, 255), thickness=2)\n\ncv2.putText(img, \"girl\", (180, 100), cv2.FONT_HERSHEY_DUPLEX, 1, (0, 0, 255), lineType=cv2.LINE_AA)\n\ncv2.imshow(\"src\", img)\ncv2.waitKey(0)\n" }, { "alpha_fraction": 0.5922005772590637, "alphanum_fraction": 0.6022284030914307, "avg_line_length": 31.636363983154297, "blob_id": "dbc83e8958fe7b36daec412779f813e57eb51085", "content_id": "606c6f62e0ee0a43501478e8fe6302bea7be20b5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1795, "license_type": "no_license", "max_line_length": 113, "num_lines": 55, "path": "/CenterLoss/Train.py", "repo_name": "greenkarson/python", "src_encoding": "UTF-8", "text": "import torch\nfrom torch.utils.data import DataLoader\nfrom torchvision import datasets,transforms\nfrom torch import nn\nfrom LeNet_plus import LeNet_Plus\nfrom Centerloss import CenterLoss\nfrom torch import optim\nfrom Drawing import DrawPics\n\n\nclass Train():\n def __init__(self):\n\n self.transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize(mean=[0.5], std=[0.5])])\n self.dataset = datasets.MNIST(\"./MNIST\",train=True,download=True, transform=self.transform)\n self.Dataloader = DataLoader(self.dataset,batch_size=100,shuffle=True,num_workers=4)\n\n self.net = LeNet_Plus()\n\n 
self.classifiction_loss = nn.CrossEntropyLoss()\n self.centerloss = CenterLoss\n\n self.opt = optim.SGD(self.net.parameters(), lr=2e-3)\n\n def __call__(self, *args, **kwargs):\n\n for epoch in range(10000):\n Features = []\n Lables = []\n for i, (img, tag) in enumerate(self.Dataloader):\n\n features, output = self.net(img)\n\n Features.append(features)\n Lables.append(tag)\n\n # print(features.shape,tag.shape)\n\n center_loss = self.centerloss(features, tag, 2)\n class_loss = self.classifiction_loss(output, tag)\n loss = center_loss + class_loss\n\n self.opt.zero_grad()\n loss.backward()\n self.opt.step()\n\n print(f\"epoch:{epoch}---loss:{loss}---center_loss:{center_loss}---class_loss{class_loss}\")\n features_list = torch.cat(Features, dim=0)\n label_list = torch.cat(Lables, dim=0)\n DrawPics(features_list.cpu().detach().numpy(),label_list.cpu().detach().numpy(),epoch)\n\n\nif __name__ == '__main__':\n train = Train()\n train()\n" }, { "alpha_fraction": 0.4973653256893158, "alphanum_fraction": 0.5137587785720825, "avg_line_length": 39.66666793823242, "blob_id": "2e59f9cf4fd83bc3c633972f8b977e64eb4397f7", "content_id": "fb3817de298513bc6f24350dcfed9d1195c212da", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3416, "license_type": "no_license", "max_line_length": 115, "num_lines": 84, "path": "/AlexNet/Train.py", "repo_name": "greenkarson/python", "src_encoding": "UTF-8", "text": "import torch\nfrom torch import nn\nfrom torch.utils.tensorboard import SummaryWriter\nfrom torchvision import datasets, transforms\nfrom torch.utils.data import DataLoader\nfrom torch import optim\nfrom AlexNet import AlexNet\n\n\nclass Train():\n def __init__(self, root):\n self.summarywriter = SummaryWriter(log_dir=\"./runs\")\n print(\"Tensorboard summary writer created\")\n\n self.dataset = datasets.ImageFolder(root, transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),\n ]))\n print(\"Dataset created\")\n self.Dataloader = DataLoader(self.dataset,\n batch_size=128,\n shuffle=True,\n num_workers=4,\n pin_memory=False,\n drop_last=True,\n )\n print(\"Dataloader created\")\n self.net = AlexNet().cuda()\n print(\"AlexNet created\")\n self.loss_fn = nn.CrossEntropyLoss()\n print(\"loss_fn created\")\n self.opt = optim.SGD(params=self.net.parameters(),lr=0.01, momentum=0.9,weight_decay=0.0005)\n print(\"optim created\")\n\n def __call__(self, *args, **kwargs):\n total_step = 1\n lr_scheduler = optim.lr_scheduler.StepLR(self.opt, 30, gamma=0.1)\n print(\"lr_schedeler created\")\n for epoch in range(10000):\n lr_scheduler.step()\n for i, (imgs, classes) in enumerate(self.Dataloader):\n imgs = imgs.cuda()\n classes = classes.cuda()\n\n output = self.net(imgs)\n loss = self.loss_fn(output, classes)\n\n self.opt.zero_grad()\n loss.backward()\n self.opt.step()\n\n\n with torch.no_grad():\n _, preds = torch.max(output, dim=1)\n accuracy = torch.sum(preds == classes)\n\n print(f\"Epoch:{epoch + 1}----Step:{total_step}----Loss:{loss.item()}----Acc:{accuracy.item()}\")\n self.summarywriter.add_scalar(\"loss\", loss.item(), total_step)\n self.summarywriter.add_scalar(\"accurary\", accuracy.item(), total_step)\n\n\n\n with torch.no_grad():\n print(\"*\" * 50)\n for name, parameter in self.net.named_parameters():\n if parameter.grad is not None:\n avg_grad = torch.mean(parameter.grad)\n print(f\"{name} - grad_avg:{avg_grad}\")\n self.summarywriter.add_scalar(f\"grad_avg/{name}\", avg_grad.item(), 
total_step)\n self.summarywriter.add_histogram(f\"grad/{name}\", parameter.cpu().numpy(), total_step)\n\n if parameter.data is not None:\n avg_weight = torch.mean(parameter.data)\n print(f\"{name} - grad_avg:{avg_weight}\")\n self.summarywriter.add_scalar(f\"weight_avg/{name}\", avg_weight.item(), total_step)\n self.summarywriter.add_histogram(f\"weight/{name}\", parameter.cpu().numpy(), total_step)\n\n\n total_step += 1\n\n\nif __name__ == '__main__':\n train = Train(r'D:\\work\\tiny-imagenet-200')\n train()\n" }, { "alpha_fraction": 0.6405959129333496, "alphanum_fraction": 0.716946005821228, "avg_line_length": 23.454545974731445, "blob_id": "99bdbf9576f27e77d37b20b7c4fbeb8c4ad4b0da", "content_id": "a972269cd734e9241bdbcd2874847a5acb5619ef", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 537, "license_type": "no_license", "max_line_length": 72, "num_lines": 22, "path": "/OpenCV_Practice/match_bf_1.py", "repo_name": "greenkarson/python", "src_encoding": "UTF-8", "text": "import cv2\n\nimg1 = cv2.imread(\"33.jpg\")\nimg2 = cv2.imread(\"34.jpg\")\n\ngray_img1 = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)\ngray_img2 = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)\n\norb = cv2.ORB_create()\nkp1, des1 = orb.detectAndCompute(gray_img1, None)\nkp2, des2 = orb.detectAndCompute(gray_img2, None)\n\nbf = cv2.BFMatcher(cv2.NORM_HAMMING)\nmatches = bf.match(des1, des2)\n\n# lambda\nmatches = sorted(matches, key=lambda x: x.distance)\n\ndst = cv2.drawMatches(img1, kp1, img2, kp2, matches[:10], None, flags=2)\n\ncv2.imshow(\"img\",dst)\ncv2.waitKey(0)" }, { "alpha_fraction": 0.748251736164093, "alphanum_fraction": 0.7542457580566406, "avg_line_length": 28.47058868408203, "blob_id": "f315bd48bf3b0063cd185b95545a2aa93eedb59b", "content_id": "fdc9b0672a9a493551dbf53cbe23cbc96c466947", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1085, "license_type": "no_license", "max_line_length": 90, "num_lines": 34, "path": "/PythonStudy/Machine_Learning/Classifier.py", "repo_name": "greenkarson/python", "src_encoding": "UTF-8", "text": "import numpy as np\nfrom sklearn import datasets,linear_model,neighbors,svm,preprocessing\nfrom sklearn.model_selection import train_test_split,cross_val_score\nfrom sklearn.metrics import classification_report,accuracy_score,f1_score,confusion_matrix\n\n# ๆ•ฐๆฎๅŠ ่ฝฝ\niris = datasets.load_iris()\nx,y = iris.data,iris.target\n# ๅˆ’ๅˆ†่ฎญ็ปƒ้›†ไธŽๆต‹่ฏ•้›†\nx_test,x_train,y_test,y_train = train_test_split(x,y,test_size=0.33)\n# ๆ•ฐๆฎ้ข„ๅค„็†\nscaler = preprocessing.StandardScaler().fit(x_train)\nx_train = scaler.transform(x_train)\nx_test = scaler.transform(x_test)\n# ๅˆ›ๅปบๆจกๅž‹\nclr = linear_model.SGDClassifier()\n# clr = neighbors.KNeighborsClassifier()\n# clr = linear_model.LogisticRegression()\n# clr = svm.SVC()\n# ๆจกๅž‹ๆ‹Ÿๅˆ\nclr.fit(x_train,y_train)\n# ไบคๅ‰้ชŒ่ฏ\nscores = cross_val_score(clr,x_train,y_train)\nprint(scores)\n# ้ข„ๆต‹\ny_pred = clr.predict(x_test)\n# ่ฏ„ไผฐ\nprint(accuracy_score(y_test,y_pred))\n# f1_score\nprint(f1_score(y_test, y_pred, average='micro'))\n# ๅˆ†็ฑปๆŠฅๅ‘Š\nprint(classification_report(y_test, y_pred))\n# ๆททๆท†็Ÿฉ้˜ต\nprint(confusion_matrix(y_test, y_pred))" }, { "alpha_fraction": 0.6442307829856873, "alphanum_fraction": 0.6490384340286255, "avg_line_length": 21.11111068725586, "blob_id": "54baa17897ea768652dcaf5d9faaa8573e1d34f7", "content_id": "191009f0d7589fbf3bb536ffc0849b567f1b65df", "detected_licenses": [], "is_generated": false, 
"is_vendor": false, "language": "Python", "length_bytes": 208, "license_type": "no_license", "max_line_length": 36, "num_lines": 9, "path": "/Quantization/data.py", "repo_name": "greenkarson/python", "src_encoding": "UTF-8", "text": "from torch import nn\r\nfrom torchvision import datasets\r\nfrom torchvision import models\r\n\r\nclass DNet(nn.Module):\r\n def __init__(self):\r\n super(DNet, self).__init__()\r\n\r\n models.MobileNetV2\r\n" }, { "alpha_fraction": 0.4334975481033325, "alphanum_fraction": 0.4950738847255707, "avg_line_length": 22.882352828979492, "blob_id": "1d5d5ec55f7fc2abf70a4fac237ff452b5e45bcb", "content_id": "1372213acfd9fb45de10261b71b2419aa17398db", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 812, "license_type": "no_license", "max_line_length": 46, "num_lines": 34, "path": "/MTCNN/tool/PNet.py", "repo_name": "greenkarson/python", "src_encoding": "UTF-8", "text": "from torch import nn\nimport torch\nimport torch.nn.functional as F\n\n\nclass PNet(nn.Module):\n\n def __init__(self):\n super().__init__()\n self.pre_layer = nn.Sequential(\n nn.Conv2d(3, 10, 3, 1, padding=1),\n nn.PReLU(),\n nn.MaxPool2d(3, 2),\n nn.Conv2d(10, 16, 3, 1),\n nn.PReLU(),\n nn.Conv2d(16, 32, 3, 1),\n nn.PReLU()\n )\n self.conv4_1 = nn.Conv2d(32, 1, 1, 1)\n self.conv4_2 = nn.Conv2d(32, 4, 1, 1)\n\n def forward(self, x):\n h = self.pre_layer(x)\n cond = F.sigmoid(self.conv4_1(h))\n offset = self.conv4_2(h)\n return cond, offset\n\n\nif __name__ == '__main__':\n\n net = PNet()\n x = torch.randn(1, 3, 12, 12)\n cond, offset = net(x)\n print(cond.shape, offset.shape)\n" }, { "alpha_fraction": 0.5690747499465942, "alphanum_fraction": 0.6036121845245361, "avg_line_length": 32.5638313293457, "blob_id": "e127343bf284ea3598dd3d2c60fb4ea7a93f569a", "content_id": "1b7061d1cb0198a3815e7dff0dbbc1c31f3c25f6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3156, "license_type": "no_license", "max_line_length": 79, "num_lines": 94, "path": "/Prune/prune.py", "repo_name": "greenkarson/python", "src_encoding": "UTF-8", "text": "import torch\nfrom torch import nn\nimport torch.nn.utils.prune as prune\nimport torch.nn.functional as F\n\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\nclass LeNet(nn.Module):\n def __init__(self):\n super(LeNet, self).__init__()\n # 1 input image channel, 6 output channels, 3x3 square conv kernel\n self.conv1 = nn.Conv2d(1, 6, 3)\n self.conv2 = nn.Conv2d(6, 16, 3)\n self.fc1 = nn.Linear(16 * 5 * 5, 120) # 5x5 image dimension\n self.fc2 = nn.Linear(120, 84)\n self.fc3 = nn.Linear(84, 10)\n\n def forward(self, x):\n x = F.max_pool2d(F.relu(self.conv1(x)), (2, 2))\n x = F.max_pool2d(F.relu(self.conv2(x)), 2)\n x = x.view(-1, int(x.nelement() / x.shape[0]))\n x = F.relu(self.fc1(x))\n x = F.relu(self.fc2(x))\n x = self.fc3(x)\n return x\n\n\nif __name__ == '__main__':\n # Single Pruning a Module\n model = LeNet().to(device=device)\n module = model.conv1\n print(list(module.named_parameters()))\n print(\"=\" * 50)\n print(list(module.named_buffers()))\n prune.random_unstructured(module, name=\"weight\", amount=0.3)\n print(\"=\" * 50)\n print(list(module.named_parameters()))\n print(\"=\" * 50)\n print(list(module.named_buffers()))\n print(\"=\" * 50)\n print(module.weight)\n print(\"=\" * 50)\n print(module._forward_pre_hooks)\n\n prune.l1_unstructured(module, name=\"bias\", amount=3)\n print(\"=\" * 50)\n 
print(list(module.named_parameters()))\n    print(\"=\" * 50)\n    print(list(module.named_buffers()))\n    print(\"=\" * 50)\n    print(module.bias)\n    print(\"=\" * 50)\n    print(module._forward_pre_hooks)\n\n    # Iterative Pruning\n    # prune.ln_structured(module, name=\"weight\", amount=0.5, n=2, dim=0)\n\n    # As we can verify, this will zero out all the connections corresponding to\n    # 50% (3 out of 6) of the channels, while preserving the action of the\n    # previous mask.\n    print(\"=\" * 50)\n    print(module.weight)\n    for hook in module._forward_pre_hooks.values():\n        if hook._tensor_name == \"weight\":  # select out the correct hook\n            break\n    print(\"=\" * 50)\n    print(list(hook))  # pruning history in the container\n    print(\"=\" * 50)\n    print(model.state_dict().keys())\n\n    # Remove pruning re-parametrization\n    print(\"=\" * 50)\n    print(list(module.named_parameters()))\n    print(\"=\" * 50)\n    print(list(module.named_buffers()))\n    print(\"=\" * 50)\n    print(module.weight)\n    prune.remove(module, 'weight')\n    print(\"=\" * 50)\n    print(list(module.named_parameters()))\n    print(\"=\" * 50)\n    print(list(module.named_buffers()))\n\n    # Pruning multiple parameters in a model\n    new_model = LeNet()\n    for name, module in new_model.named_modules():\n        # prune 20% of connections in all 2D-conv layers\n        if isinstance(module, torch.nn.Conv2d):\n            prune.l1_unstructured(module, name='weight', amount=0.2)\n        # prune 40% of connections in all linear layers\n        elif isinstance(module, torch.nn.Linear):\n            prune.l1_unstructured(module, name='weight', amount=0.4)\n    print(\"=\" * 50)\n    print(dict(new_model.named_buffers()).keys())\n\n" }, { "alpha_fraction": 0.5586642622947693, "alphanum_fraction": 0.5853985548019409, "avg_line_length": 32.20000076293945, "blob_id": "88bb4e664fe04a686837db75a1edf7c81d36bcd8", "content_id": "bd30a92ec25c8a65b1e1039cf9a1186bc558f3f5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1691, "license_type": "no_license", "max_line_length": 95, "num_lines": 45, "path": "/MTCNN/Data.py", "repo_name": "greenkarson/python", "src_encoding": "UTF-8", "text": "import torch,os\nfrom torch.utils.data import Dataset\nfrom PIL import Image\nimport numpy as np\n\n\nclass FaceDataset(Dataset):\n\n    def __init__(self, path):\n        super().__init__()\n        self.path = path\n        self.dataset = []\n        # join the input path and add the positive, negative and part samples to the dataset\n        self.dataset.extend(open(os.path.join(path, \"positive.txt\")).readlines())\n        self.dataset.extend(open(os.path.join(path, \"negative.txt\")).readlines())\n        self.dataset.extend(open(os.path.join(path, \"part.txt\")).readlines())\n\n    def __len__(self):\n        return len(self.dataset)\n\n    def __getitem__(self, index):\n        # split() breaks the annotation line into whitespace-separated fields\n        strs = self.dataset[index].split()\n        # the brackets are required so torch.tensor receives a list, not a bare scalar\n        cond = torch.tensor([int(strs[1])])\n        offset = torch.tensor([float(strs[2]), float(strs[3]), float(strs[4]), float(strs[5])])\n\n        # read the image, normalize it, and convert the layout HWC -> CHW\n        # (the label field is a string, so it must be cast to int before comparing)\n        if int(strs[1]) == 1:\n            img_path = f\"{self.path}/positive/{strs[0]}\"\n        elif int(strs[1]) == 2:\n            img_path = f\"{self.path}/part/{strs[0]}\"\n        else:\n            img_path = f\"{self.path}/negative/{strs[0]}\"\n        # scale pixel values so they are centered around 0, roughly in [-0.5, 0.5]\n        img_data = torch.tensor(np.array(Image.open(img_path)) / 255.-0.5).float()\n        img_data = img_data.permute(2, 0, 1)\n        return img_data, cond, offset\n\n\nif __name__ == '__main__':\n    path = 
\"/Users/karson/Downloads/Dataset/12\"\n dataset = FaceDataset(path)\n print(dataset.__len__())\n print(dataset[0])" }, { "alpha_fraction": 0.416879802942276, "alphanum_fraction": 0.47229325771331787, "avg_line_length": 24.5, "blob_id": "cacac40ce5449e1f8a923b4f42e2182cf2a79c4a", "content_id": "da37aeca6894ae2183e9b4dc10aa0d0d18902e0c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1193, "license_type": "no_license", "max_line_length": 48, "num_lines": 46, "path": "/MTCNN/tool/Onet.py", "repo_name": "greenkarson/python", "src_encoding": "UTF-8", "text": "import torch\nimport torch.nn.functional as F\nfrom torch import nn\n\n\nclass ONet(nn.Module):\n\n def __init__(self):\n super().__init__()\n self.pre_layer = nn.Sequential(\n nn.Conv2d(3, 32, 3, 1, padding=1),\n nn.PReLU(),\n nn.MaxPool2d(3, 2),\n nn.Conv2d(32, 64, 3, 1, padding=0),\n nn.PReLU(),\n nn.MaxPool2d(3, 2),\n nn.Conv2d(64, 64, 1, 3, padding=0),\n nn.PReLU(),\n nn.MaxPool2d(2, 2),\n nn.Conv2d(64, 128, 2, 1, padding=0),\n nn.PReLU()\n )\n self.fc = nn.Linear(3 * 3 * 128, 256)\n self.prelu = nn.PReLU()\n # ็ฝฎไฟกๅบฆ่พ“ๅ‡บ\n self.detect = nn.Linear(256, 1)\n # ๅ็งป้‡่พ“ๅ‡บ\n self.offset = nn.Linear(256, 4)\n\n def forward(self, x):\n h = self.pre_layer(x)\n h = h.view(h.size(0), -1)\n h = self.fc(h)\n h = self.prelu(h)\n label = F.sigmoid(self.detect(h))\n offset = self.offset(h)\n return label, offset\n\n\nif __name__ == '__main__':\n net = ONet()\n x = torch.randn(1, 3, 48, 48)\n # y = net(x)\n # print(y.shape)\n a, b = net(x)\n print(a.shape, b.shape)\n" }, { "alpha_fraction": 0.7539936304092407, "alphanum_fraction": 0.7619808316230774, "avg_line_length": 26.217391967773438, "blob_id": "a6d05decd93ef3b1a625cb5cc628e1f6fd94cde4", "content_id": "617a643fd6628cbd1610d376a8afc5542148a2ea", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 690, "license_type": "no_license", "max_line_length": 82, "num_lines": 23, "path": "/PythonStudy/Machine_Learning/adaboosting.py", "repo_name": "greenkarson/python", "src_encoding": "UTF-8", "text": "import numpy as np\nfrom sklearn import datasets,preprocessing,ensemble\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import accuracy_score\n\n# ๆ•ฐๆฎๅŠ ่ฝฝ\nwine = datasets.load_wine()\nx,y = wine.data,wine.target\n# ๅˆ’ๅˆ†่ฎญ็ปƒ้›†ไธŽๆต‹่ฏ•้›†\nx_train,x_test,y_train,y_test = train_test_split(x,y,test_size=0.3,random_state=0)\n# ๆ•ฐๆฎ้ข„ๅค„็†\nscaler = preprocessing.StandardScaler().fit(x_train)\nx_train = scaler.transform(x_train)\nx_test = scaler.transform(x_test)\n# ๅˆ›ๅปบๆจกๅž‹\nada = ensemble.AdaBoostClassifier(n_estimators=25)\n# ๆจกๅž‹ๆ‹Ÿๅˆ\nada.fit(x_train,y_train)\n# ไบคๅ‰้ชŒ่ฏ\n# ้ข„ๆต‹\ny_pred = ada.predict(x_test)\n\nprint(accuracy_score(y_test,y_pred))\n" }, { "alpha_fraction": 0.4698464870452881, "alphanum_fraction": 0.5252193212509155, "avg_line_length": 29.399999618530273, "blob_id": "429c9c3b7d2434e351598602ce825f76f4fd7283", "content_id": "e82bb825509365714dbc9beecf316bd36f884014", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3648, "license_type": "no_license", "max_line_length": 67, "num_lines": 120, "path": "/FACE_MTCNN/Network.py", "repo_name": "greenkarson/python", "src_encoding": "UTF-8", "text": "from torch import nn\nimport torch\n\n\ndef init_weights(m):\n if isinstance(m, nn.Conv2d) or isinstance(m, nn.Linear):\n nn.init.xavier_uniform_(m.weight.data)\n 
nn.init.constant_(m.bias, 0.1)\n\n\nclass PNet(nn.Module):\n\n def __init__(self):\n super(PNet, self).__init__()\n self.sequential = nn.Sequential(\n nn.Conv2d(3, 10, kernel_size=3, stride=1, padding=1),\n nn.PReLU(10),\n nn.MaxPool2d(3, 2),\n nn.Conv2d(10, 16, kernel_size=3, stride=1),\n nn.PReLU(16),\n nn.Conv2d(16, 32, kernel_size=3, stride=1),\n nn.PReLU(32),\n )\n self.cls = nn.Sequential(\n nn.Conv2d(32, 2, kernel_size=1, stride=1)\n )\n self.box_offset = nn.Conv2d(32, 4, kernel_size=1, stride=1)\n self.landmarks = nn.Conv2d(32, 10, kernel_size=1, stride=1)\n self.apply(init_weights)\n\n def forward(self, x):\n feature_map = self.sequential(x)\n label = self.cls(feature_map)\n offset = self.box_offset(feature_map)\n landmarks = self.landmarks(feature_map)\n return label, offset, landmarks\n\n\nclass RNet(nn.Module):\n\n def __init__(self):\n super(RNet, self).__init__()\n self.sequential = nn.Sequential(\n nn.Conv2d(3, 28, kernel_size=3, stride=1, padding=1),\n nn.PReLU(28),\n nn.MaxPool2d(3, 2),\n nn.Conv2d(28, 48, kernel_size=3, stride=1),\n nn.PReLU(48),\n nn.MaxPool2d(3, 2),\n nn.Conv2d(48, 64, kernel_size=2, stride=1),\n nn.PReLU(64),\n )\n self.fc = nn.Linear(64 * 3 * 3, 128)\n self.prelu = nn.PReLU(128)\n self.cls = nn.Sequential(\n nn.Linear(128, 2)\n )\n self.box_offset = nn.Linear(128, 4)\n self.landmarks = nn.Linear(128, 10)\n self.apply(init_weights)\n\n def forward(self, x):\n x = self.sequential(x)\n x = x.view(-1, 64 * 3 * 3)\n x = self.fc(x)\n x = self.prelu(x)\n label = self.cls(x)\n offset = self.box_offset(x)\n landmarks = self.landmarks(x)\n return label, offset, landmarks\n\n\nclass ONet(nn.Module):\n\n def __init__(self):\n super(ONet, self).__init__()\n self.sequential = nn.Sequential(\n nn.Conv2d(3, 32, kernel_size=3, stride=1, padding=1),\n nn.PReLU(32),\n nn.MaxPool2d(3, 2),\n nn.Conv2d(32, 64, kernel_size=3, stride=1),\n nn.PReLU(64),\n nn.MaxPool2d(3, 2),\n nn.Conv2d(64, 64, kernel_size=2, stride=1),\n nn.PReLU(64),\n nn.MaxPool2d(2, 2),\n nn.Conv2d(64, 128, kernel_size=2, stride=1),\n nn.PReLU(128),\n )\n self.fc = nn.Linear(128 * 3 * 3, 256)\n self.prelu = nn.PReLU(256)\n self.cls = nn.Sequential(\n nn.Linear(256, 2),\n )\n self.box_offset = nn.Linear(256, 4)\n self.landmarks = nn.Linear(256, 10)\n self.apply(init_weights)\n\n def forward(self, x):\n x = self.sequential(x)\n x = x.view(-1, 128 * 3 * 3)\n x = self.fc(x)\n x = self.prelu(x)\n label = self.cls(x)\n offset = self.box_offset(x)\n landmarks = self.landmarks(x)\n return label, offset, landmarks\n\n\nif __name__ == '__main__':\n # net = ONet()\n # x = torch.randn(4, 3, 48, 48)\n # y = net(x)\n # print(y.shape)\n # label, offset, landmarks = net(x)\n # print(label.shape, offset.shape, landmarks.shape)\n net = RNet()\n x = torch.randn(4,3,24,24)\n label, offset, landmarks = net(x)\n print(label.shape, offset.shape, landmarks.shape)\n" }, { "alpha_fraction": 0.4801670014858246, "alphanum_fraction": 0.5107863545417786, "avg_line_length": 34.04878234863281, "blob_id": "fbdd2c02d49e1a721fc7403be51cb45364c55c14", "content_id": "81406871abefd1a7c66df701f4799bd47c5b1d80", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2874, "license_type": "no_license", "max_line_length": 98, "num_lines": 82, "path": "/MTCNN2/train.py", "repo_name": "greenkarson/python", "src_encoding": "UTF-8", "text": "from data import *\nfrom Net import *\nfrom torch.utils.data import DataLoader\nfrom torch import optim\nfrom torch.utils.tensorboard import 
SummaryWriter\nimport time\n\n\nclass Train:\n\n def __init__(self, root, img_size):\n self.summarywrite = SummaryWriter(\"./runs\")\n\n self.img_size = img_size\n\n self.mydataset = MyDataset(root, img_size)\n self.dataloader = DataLoader(self.mydataset, batch_size=1000, shuffle=True, num_workers=4)\n\n if img_size == 12:\n self.net = PNet().cuda()\n self.net.load_state_dict(torch.load(r\"param\\pnet2020-05-15-21-43-13.pt\"))\n elif img_size == 24:\n self.net = RNet().cuda()\n self.net.load_state_dict(torch.load(r\"param\\rnet2020-05-16-11-26-57.pt\"))\n elif img_size == 48:\n self.net = ONet().cuda()\n self.net.load_state_dict(torch.load(r\"param\\onet2020-05-15-21-52-22.pt\"))\n\n self.opt = optim.Adam(self.net.parameters(), lr=1e-4)\n\n def __call__(self, epochs):\n\n for epoch in range(epochs):\n total_loss = 0\n for i, (img, tag) in enumerate(self.dataloader):\n img = img.cuda()\n tag = tag.cuda()\n\n predict = self.net(img)\n\n if self.img_size == 12:\n predict = predict.reshape(-1, 15)\n\n torch.sigmoid_(predict[:, 0])\n\n c_mask = tag[:, 0] < 2\n # print(c_mask.shape)\n\n c_predict = predict[c_mask]\n c_tag = tag[c_mask]\n loss_c = torch.mean((c_predict[:, 0] - c_tag[:, 0]) ** 2)\n\n off_mask = tag[:, 0] > 0\n off_predict = predict[off_mask]\n off_tag = tag[off_mask]\n loss_off = torch.mean((off_predict[:, 1:5] - off_tag[:, 1:5]) ** 2)\n\n loss = loss_c + loss_off\n\n self.opt.zero_grad()\n loss.backward()\n self.opt.step()\n\n print(i, loss.cpu().data.numpy())\n total_loss += loss.cpu().detach().data\n avr_loss = total_loss / i\n self.summarywrite.add_scalars(\"train_loss\", {\"total_avr_loss\": avr_loss}, epoch)\n\n if self.img_size == 12:\n t_pnet = time.strftime(\"%Y-%m-%d-%H-%M-%S\", time.localtime())\n torch.save(self.net.state_dict(), f\"./param/pnet{t_pnet}.pt\")\n elif self.img_size == 24:\n t_rnet = time.strftime(\"%Y-%m-%d-%H-%M-%S\", time.localtime())\n torch.save(self.net.state_dict(), f\"./param/rnet{t_rnet}.pt\")\n elif self.img_size == 48:\n t_onet = time.strftime(\"%Y-%m-%d-%H-%M-%S\", time.localtime())\n torch.save(self.net.state_dict(), f\"./param/onet{t_onet}.pt\")\n\n\nif __name__ == '__main__':\n train = Train(\"D:\\work\\Dataset\", 24)\n train(100000)\n" }, { "alpha_fraction": 0.6797385811805725, "alphanum_fraction": 0.741830050945282, "avg_line_length": 24.58333396911621, "blob_id": "a0c4a1bfa4e1725466686e59150931fda3eb2a54", "content_id": "d89fc1dcd6cce69ce5e57453e0fd92c7916bc6f7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 306, "license_type": "no_license", "max_line_length": 102, "num_lines": 12, "path": "/OpenCV_Practice/feature_fast_1.py", "repo_name": "greenkarson/python", "src_encoding": "UTF-8", "text": "import cv2\n\nimg = cv2.imread(\"33.jpg\")\ngray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)\n\nfast = cv2.FastFeatureDetector_create(threshold=35)\nkeypiont = fast.detect(gray)\n\ndst = cv2.drawKeypoints(img, keypiont, None, (0,0,255), cv2.DRAW_MATCHES_FLAGS_NOT_DRAW_SINGLE_POINTS)\n\ncv2.imshow(\"dst\", dst)\ncv2.waitKey()" }, { "alpha_fraction": 0.5344827771186829, "alphanum_fraction": 0.5517241358757019, "avg_line_length": 16.399999618530273, "blob_id": "2b477954a825241a5977337c47141d8617aefbbd", "content_id": "eb82b996dcc6edbf1356a7e4283fae8354616e5e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 200, "license_type": "no_license", "max_line_length": 39, "num_lines": 10, "path": "/PythonStudy/Fun.py", "repo_name": 
"greenkarson/python", "src_encoding": "UTF-8", "text": "def factorial(num):\n result = 1\n for n in range(1, num + 1):\n result *= n\n return result\n\n\nm = int(input(\"่ฏท่พ“ๅ…ฅไธ€ไธชๆ•ฐ๏ผš\"))\n\nprint(\"%d่ฟ™ไธชๆ•ฐ้˜ถไน˜ๆ˜ฏ%d\" % (m, factorial(m)))\n" }, { "alpha_fraction": 0.46405228972435, "alphanum_fraction": 0.4771241843700409, "avg_line_length": 20.928571701049805, "blob_id": "38dfd48827c5d7fc32e84264c79f500669397fbb", "content_id": "519cd6c224ca310744d57304ce966bfc82967b31", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 396, "license_type": "no_license", "max_line_length": 48, "num_lines": 14, "path": "/PythonStudy/tri-angle.py", "repo_name": "greenkarson/python", "src_encoding": "UTF-8", "text": "\"\"\"\nๅˆคๆ–ญ่พ“ๅ…ฅ็š„่พน้•ฟ่ƒฝๅฆๆž„ๆˆไธ‰่ง’ๅฝข๏ผŒๅฆ‚ๆžœ่ƒฝๅˆ™่ฎก็ฎ—ๅ‡บไธ‰่ง’ๅฝข็š„ๅ‘จ้•ฟๅ’Œ้ข็งฏ\nVersion: 0.1\nAuthor: karson\n\"\"\"\na = float(input(\"่พ“ๅ…ฅa:\"))\nb = float(input(\"่พ“ๅ…ฅb:\"))\nc = float(input(\"่พ“ๅ…ฅc:\"))\n\nif a + b > c and a + c > b and b + c > a:\n print(\"ๅ‘จ้•ฟๆ˜ฏ๏ผš%f\" % (a+b+c))\n p = a + b + c\n s = (p * (p - a) * (p - b) * (p - c)) ** 0.5\n print(\"้ข็งฏๆ˜ฏ๏ผš%f\" % s)" }, { "alpha_fraction": 0.49032649397850037, "alphanum_fraction": 0.506045937538147, "avg_line_length": 38.69599914550781, "blob_id": "439adb2ff5a46b134b64aa97b736e227365214ee", "content_id": "b2be2734afcfe5450ab93eb73ffbd1164a7b1344", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4962, "license_type": "no_license", "max_line_length": 114, "num_lines": 125, "path": "/MTCNN2/train_self.py", "repo_name": "greenkarson/python", "src_encoding": "UTF-8", "text": "from data import *\nfrom Net import *\nfrom torch.utils.data import DataLoader\nfrom torch import optim\nfrom torch.utils.tensorboard import SummaryWriter\n\n\nclass Train:\n\n def __init__(self, root, img_size):\n\n self.summarywrite = SummaryWriter(\"./runs\")\n\n self.img_size = img_size\n # self.full_dataset = MyDataset(root, img_size)\n # train_size = int(0.8 * len(self.full_dataset))\n # test_size = len(self.full_dataset) - train_size\n # train_dataset, test_dataset = torch.utils.data.random_split(self.full_dataset, [train_size, test_size])\n self.train_dataset = MyDataset(root, img_size)\n self.train_loader = DataLoader(self.train_dataset, batch_size=1000, shuffle=True)\n # self.test_loader = DataLoader(test_dataset, batch_size=1000, shuffle=True)\n\n if img_size == 12:\n self.net = PNet()\n elif img_size == 24:\n self.net = RNet()\n elif img_size == 48:\n self.net = ONet()\n\n self.opt = optim.Adam(self.net.parameters())\n\n def __call__(self, epochs):\n\n for epoch in range(epochs):\n total_cls_loss = 0\n total_box_loss = 0\n total_landmark_loss = 0\n for i, (img, tag) in enumerate(self.train_loader):\n predict = self.net(img)\n\n if self.img_size == 12:\n predict = predict.reshape(-1, 15)\n torch.sigmoid_(predict[:, 0])\n\n c_mask = tag[:, 0] < 2\n c_predict = predict[c_mask]\n c_tag = tag[c_mask]\n loss_c = torch.mean((c_predict[:, 0] - c_tag[:, 0]) ** 2)\n\n off_mask = tag[:, 0] > 0\n off_predict = predict[off_mask]\n off_tag = tag[off_mask]\n loss_off = torch.mean((off_predict[:, 1:5] - off_tag[:, 1:5]) ** 2)\n\n landmark_mask = tag[:, 0] > 0\n landmark_predict = predict[landmark_mask]\n landmark_tag = tag[landmark_mask]\n loss_landmark = torch.mean((landmark_predict[:, 5:] - landmark_tag[:, 5:]) ** 2)\n\n loss = loss_c + loss_off + loss_landmark\n\n self.opt.zero_grad()\n loss.backward()\n 
self.opt.step()\n\n total_cls_loss += loss_c.cpu().data.numpy()\n total_box_loss += loss_off.cpu().data.numpy()\n total_landmark_loss += loss_landmark.cpu().data.numpy()\n avr_cls_loss = total_cls_loss / i\n avr_box_loss = total_box_loss / i\n avr_landmark_loss = total_landmark_loss / i\n total_loss = avr_cls_loss + avr_box_loss + avr_landmark_loss\n self.summarywrite.add_scalars(\"train_loss\", {\"cls_loss\": avr_cls_loss, \"offset_loss\": avr_box_loss,\n \"landmark_loss\": avr_landmark_loss}, epoch)\n print(epoch,total_loss,avr_cls_loss,avr_box_loss,avr_landmark_loss)\n # sum_loss = loss.cpu().data.numpy() + sum_loss\n # avr_train_loss = sum_loss / len(self.train_loader)\n # print(avr_train_loss)\n\n\n '''\n sum_test_loss = 0\n for i, (img, tag) in enumerate(self.test_loader):\n predict = self.net(img)\n\n if self.img_size == 12:\n predict = predict.reshape(-1, 15)\n\n c_mask = tag[:, 0] < 2\n c_predict = predict[c_mask]\n c_tag = tag[c_mask]\n loss_c = torch.mean((c_predict[:, 0] - c_tag[:, 0]) ** 2)\n\n off_mask = tag[:, 0] > 0\n off_predict = predict[off_mask]\n off_tag = tag[off_mask]\n loss_off = torch.mean((off_predict[:, 1:5] - off_tag[:, 1:5]) ** 2)\n\n landmark_mask = tag[:, 0] > 0\n landmark_predict = predict[landmark_mask]\n landmark_tag = tag[landmark_mask]\n loss_landmark = torch.mean((landmark_predict[:, 5:] - landmark_tag[:, 5:]) ** 2)\n\n test_loss = loss_c + loss_off + loss_landmark\n\n self.summarywrite.add_scalars(\"test_loss\", {\"test_cls_loss\": loss_c, \"test_offset_loss\": loss_off,\n \"test_landmark_loss\": loss_landmark}, epoch)\n\n # print(loss.cpu().data.numpy())\n # sum_test_loss = test_loss.cpu().data.numpy() + sum_test_loss\n # test_avr_loss = sum_test_loss / len(self.test_loader)\n # print(test_avr_loss)\n '''\n\n if self.img_size == 12:\n torch.save(self.net.state_dict(), \"pnet.pt\")\n elif self.img_size == 24:\n torch.save(self.net.state_dict(), \"rnet.pt\")\n elif self.img_size == 48:\n torch.save(self.net.state_dict(), \"onet.pt\")\n\n\nif __name__ == '__main__':\n train = Train(\"/Users/karson/Downloads/Dataset/\", 12)\n train(10000)\n" }, { "alpha_fraction": 0.472457617521286, "alphanum_fraction": 0.4779660999774933, "avg_line_length": 22.610000610351562, "blob_id": "a3f0bf31fe1187df9bf6f791d1320e764d7dfd2e", "content_id": "d98b14a55d028d2991f47c7328675854b7bf1a4d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2620, "license_type": "no_license", "max_line_length": 40, "num_lines": 100, "path": "/PythonStudy/Data_structure/Linked_list/Single_Linkedlist.py", "repo_name": "greenkarson/python", "src_encoding": "UTF-8", "text": "class Node:\n def __init__(self,data):\n self.data = data\n self.next = None\n\nclass Linkedlist:\n def __init__(self):\n self.head = None\n\n # is_empty() ้“พ่กจๆ˜ฏๅฆไธบ็ฉบ\n def is_empty(self):\n return self.head is None\n # length() ้“พ่กจ้•ฟๅบฆ\n def lenth(self):\n cur = self.head\n count = 0\n while cur is not None:\n count += 1\n cur = cur.next\n return count\n\n # items() ่Žทๅ–้“พ่กจๆ•ฐๆฎ่ฟญไปฃๅ™จ\n def items(self):\n cur = self.head\n while cur is not None:\n yield cur.data\n cur = cur.next\n\n def add(self, item):\n \"\"\"ๅ‘้“พ่กจๅคด้ƒจๆทปๅŠ ๅ…ƒ็ด \"\"\"\n node = Node(item)\n # ๆ–ฐ็ป“็‚นๆŒ‡้’ˆๆŒ‡ๅ‘ๅŽŸๅคด้ƒจ็ป“็‚น\n node.next = self.head\n # ๅคด้ƒจ็ป“็‚นๆŒ‡้’ˆไฟฎๆ”นไธบๆ–ฐ็ป“็‚น\n self.head = node\n\n # append(data) ้“พ่กจๅฐพ้ƒจๆทปๅŠ ๅ…ƒ็ด \n def append(self,data):\n new_node = Node(data)\n if self.is_empty():\n self.head = new_node\n else:\n cur = 
self.head\n while cur.next is not None:\n cur = cur.next\n cur.next = new_node\n\n # insert(pos, item) ๆŒ‡ๅฎšไฝ็ฝฎๆทปๅŠ ๅ…ƒ็ด \n def insert(self,index,data):\n if index <= 0:\n self.add(data)\n elif index > (self.lenth()-1):\n self.append(data)\n else:\n node = Node(data)\n cur = self.head\n for i in range(index-1):\n cur = cur.next\n node.next = cur.next\n cur.next =node\n\n # remove(item) ๅˆ ้™ค่Š‚็‚น\n def remove(self,data):\n cur = self.head\n pre = None\n while cur is not None:\n if cur.data == data:\n if not pre:\n self.head = cur.next\n else:\n pre.next = cur.next\n return True\n else:\n pre = cur\n cur = cur.next\n # find(item) ๆŸฅๆ‰พ่Š‚็‚นๆ˜ฏๅฆๅญ˜ๅœจ\n def find(self, item):\n \"\"\"ๆŸฅๆ‰พๅ…ƒ็ด ๆ˜ฏๅฆๅญ˜ๅœจ\"\"\"\n return item in self.items()\n\nif __name__ == '__main__':\n link_list = Linkedlist()\n # ๅ‘้“พ่กจๅฐพ้ƒจๆทปๅŠ ๆ•ฐๆฎ\n for i in range(10):\n link_list.append(i)\n # ้“พ่กจๆ•ฐๆฎๆ’ๅ…ฅๆ•ฐๆฎ\n link_list.insert(3, 9)\n\n # ๅ‘ๅคด้ƒจๆทปๅŠ ๆ•ฐๆฎ\n link_list.add(6)\n # ้ๅŽ†้“พ่กจๆ•ฐๆฎ\n for i in link_list.items():\n print(i, end='\\t')\n # ๅˆ ้™ค้“พ่กจๆ•ฐๆฎ\n link_list.remove(9)\n link_list.remove(6)\n print('\\n', list(link_list.items()))\n\n # ๆŸฅๆ‰พ้“พ่กจๆ•ฐๆฎ\n print(link_list.find(4))" }, { "alpha_fraction": 0.526652455329895, "alphanum_fraction": 0.5472636818885803, "avg_line_length": 38.63380432128906, "blob_id": "33a2fc1c9df6f85e4e52c6a6e1039dededd369d3", "content_id": "62930777f88944a3a2693a32352cad913931dd05", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2814, "license_type": "no_license", "max_line_length": 115, "num_lines": 71, "path": "/FACE_MTCNN/dataset.py", "repo_name": "greenkarson/python", "src_encoding": "UTF-8", "text": "import torch, os\nfrom torch.utils.data import Dataset\nfrom PIL import Image\nfrom torchvision import transforms\nimport numpy as np\n\n\nclass MtcnnDataset(Dataset):\n def __init__(self, dataset_root, net_stage='pnet'):\n super(MtcnnDataset, self).__init__()\n self.root = dataset_root\n self.net_data_path = os.path.join(self.root, net_stage)\n\n self.dataset = []\n with open(f\"{self.net_data_path}/positive_meta.txt\", 'r') as f:\n self.dataset.extend(f.readlines())\n with open(f\"{self.net_data_path}/negative_meta.txt\", 'r') as f:\n self.dataset.extend(f.readlines())\n with open(f\"{self.net_data_path}/part_meta.txt\", 'r') as f:\n self.dataset.extend(f.readlines())\n with open(f\"{self.net_data_path}/landmarks_meta.txt\", 'r') as f:\n self.dataset.extend(f.readlines())\n\n def __len__(self):\n return len(self.dataset)\n\n def __getitem__(self, item):\n data = self.dataset[item]\n strs = data.split(' ')\n\n if strs[1] == \"0\":\n img_path = f\"{self.net_data_path}/negative/{strs[0]}\"\n label = int(strs[1])\n box = np.array([float(strs[2]), float(strs[3]), float(strs[4]), float(strs[5])])\n box = torch.tensor(box).float()\n landmarks = torch.zeros(10)\n elif strs[1] == \"1\":\n img_path = f\"{self.net_data_path}/positive/{strs[0]}\"\n label = int(strs[1])\n box = np.array([float(strs[2]), float(strs[3]), float(strs[4]), float(strs[5])])\n box = torch.tensor(box).float()\n landmarks = torch.zeros(10)\n elif strs[1] == \"2\":\n img_path = f\"{self.net_data_path}/part/{strs[0]}\"\n label = int(strs[1])\n box = np.array([float(strs[2]), float(strs[3]), float(strs[4]), float(strs[5])])\n box = torch.tensor(box).float()\n landmarks = torch.zeros(10)\n else:\n img_path = f\"{self.net_data_path}/landmarks/{strs[0]}\"\n label = int(strs[1])\n box = torch.zeros(4).float()\n 
landmarks = np.array([float(strs[2]), float(strs[3]), float(strs[4]), float(strs[5]), float(strs[6]),\n float(strs[7]), float(strs[8]), float(strs[9]), float(strs[10]), float(strs[11]),\n ])\n landmarks = torch.tensor(landmarks).float()\n img_data = self.ToTensor(Image.open(img_path))\n\n return img_data, label, box, landmarks\n\n def ToTensor(self, data):\n tf = transforms.Compose([transforms.ToTensor()])\n norm_data = (tf(data) - 127.5) * 0.0078125\n\n return norm_data\n\n\nif __name__ == '__main__':\n dataset = MtcnnDataset(r'F:\\celeba', net_stage='rnet')\n print(dataset.net_data_path)\n print(dataset[0])\n" }, { "alpha_fraction": 0.5653061270713806, "alphanum_fraction": 0.6755102276802063, "avg_line_length": 27.705883026123047, "blob_id": "5027fffc74d809a62d3405317bb72c2663985894", "content_id": "018845c2260e88cfa88086a681cf31e66ab5c768", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 490, "license_type": "no_license", "max_line_length": 71, "num_lines": 17, "path": "/MTCNN2/Note/opencv.py", "repo_name": "greenkarson/python", "src_encoding": "UTF-8", "text": "import cv2\nfrom PIL import Image\npath = \"/Users/karson/Downloads/CelebaA/img_celeba.7z/000001.jpg\"\nsrc_label = \"/Users/karson/Downloads/CelebaA/Anno/list_bbox_celeba.txt\"\n\nimg = cv2.imread(path, 0)\nimg = cv2.convertScaleAbs(img, alpha=1, beta=0)\nimg = cv2.GaussianBlur(img, (3, 3), 1)\ndst = cv2.Canny(img, 50, 150)\n\nx1, y1, w, h = 95,71,226,313\nx2, y2 = x1 + w, y1 + h\ncv2.rectangle(dst,(x1,y1),(x2,y2),[0,0,255],thickness=3)\n\ncv2.imshow(\"...\", img)\ncv2.imshow(\"dst\", dst)\ncv2.waitKey(0)\n\n\n" }, { "alpha_fraction": 0.4624791443347931, "alphanum_fraction": 0.5464146733283997, "avg_line_length": 15.94339656829834, "blob_id": "003fcbe9abd8a065c6342fe70358c6f34a6a3ffc", "content_id": "5feaba25b17a848a109531c364d75b7ba834501c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2281, "license_type": "no_license", "max_line_length": 52, "num_lines": 106, "path": "/PythonStudy/Numpy/Numpy.py", "repo_name": "greenkarson/python", "src_encoding": "UTF-8", "text": "import numpy as np\n\n# ๅˆ›ๅปบๆ•ฐ็ป„\n# a = np.array([[1, 2], [3, 4]])\n# i = np.arange(0, 12)\n# ๅˆ›ๅปบ้ซ˜ๆ–ฏๅˆ†ๅธƒ้‡‡ๆ ท็š„้šๆœบๆ•ฐ็ป„\n# b = np.random.rand(3, 4)\n# ๅˆ›ๅปบๅ‡ๅŒ€ๅˆ†ๅธƒ้‡‡ๆ ท็š„้šๆœบๆ•ฐ็ป„\n# d = np.random.uniform(0, 1, (3, 4))\n# uninitialized output\n# c = np.empty((3, 3))\n# ๅˆ›ๅปบ่ฟž็ปญๆ•ฐ็ป„,ๅนถ่ฝฌๅž‹\n# e = np.arange(0, 12).reshape(3, 4)\n# ๅˆ›ๅปบๆ•ฐ็ป„่Œƒๅ›ด0-2ๅ‡ๅˆ†ไธบ12ไปฝ\n# f = np.linspace(0, 2, 12)\n\n# nidmๆ•ฐ็ป„่ฝด็ปดๅบฆไธชๆ•ฐ\n# shapeๆ•ฐ็ป„็š„ๅฝข็Šถ\n# sizeๆ•ฐ็ป„ๅคงๅฐๅ…ƒ็ด ไธชๆ•ฐ\n# dtypeๆ•ฐ็ป„็ฑปๅž‹๏ผŒๅฆ‚ๆ•ดๅž‹ๆตฎ็‚นๅž‹\n\n# print(a.ndim,a.shape,a.size,a.dtype)\n\n# ๆ•ฐๆฎ็ฑปๅž‹่ฝฌๅŒ–\n# a = np.array([[1., 2.], [3., 4.]],dtype=float32)\n\n# ๆ•ฐ็ป„ๆ•ฐๆฎ่ฟ็ฎ—\n# a = a ** 2\n# a = 10*np.sin(a)\n\n# ๆ•ฐ็ป„ๆˆชๅ–ๆ“ไฝœ>,ๅฏไปฅๆˆชๅ–ๅ…ƒ็ด \n# b = b > 2\n# print(i[i > 5])\n\n# ่šๅˆๆ“ไฝœ\n# i = np.arange(0, 12)\n# print(i.sum(), i.max(), i.mean(), i.min())\n\n# ๅคš็ปดๅบฆๆ“ไฝœๆŒ‰่ฝด็›ธๅŠ ๏ผŒๆŒ‰็…งๅฝข็Šถๆ„ไน‰ๆฅ่ฟ›่กŒๅˆ’ๅˆ†\n# e = np.arange(0, 12).reshape(3, 4, 5)\n# print(e.sum(axis=1))\n\n# ๆœ€ๅคงๅ€ผ็ดขๅผ•่ฟ”ๅ›ž\n# e = np.arange(0, 12).reshape(3, 4)\n# print(e.argmax(axis=1))\n# print(e[e>6],np.where(e>6))\n\n# ็ดฏๅŠ ไน‹ๅ‰ๅ…ƒ็ด \n# print(e.cumsum())\n\n# ้€šๅ‡ฝๆ•ฐ๏ผŒๆ‰€ๆœ‰ๅ…ƒ็ด ไฝœ็”จ\n\n# ็ดขๅผ•ใ€ๅˆ‡็‰‡ใ€่ฟญไปฃ\n# a = np.array([[1, 2], [3, 4]])\n# ๅ•ไธชๅ…ƒ็ด ็ดขๅผ•\n# print(a[1, 0])\n# ๅคšไธชๅ…ƒ็ด ็ดขๅผ•, 
:ๅ†’ๅท่กจ็คบ็ปดๅบฆๅ…จ้ƒจๅ–\n# print(a[:,1])\n\n# ็ดขๅผ•\n# k = np.array([[1,2,3,4], [5,6,7,8], [9,10,11,12]])\n\n# ่ตทๅง‹็ดขๅผ•:็ป“ๆŸ็ดขๅผ•:ๆญฅ้•ฟ ,[] ่กจ็คบ็บฌๅบฆ\n# print(k[0:2,3])\n# 0ๅฏ็œ็•ฅ,็บฌๅบฆๆœ€ๅคงๅ€ผๅฏ็œ็•ฅ\n# print(k[0:3:2,2])\n# print(k[:3:2,2])\n# print(k[::2,2])\n# print(k[[0,2],2])\n\n# print(k[[0,2],0:2])\n\n# ็ดขๅผ•็ฎ€ๅ†™\n# m = np.random.rand(2,3,4,5)\n# print(m[:,:,:,1])\n# print(m[...,1])\n\n\n\n# ๅขžๅŠ ็บฌๅบฆๆ–นๆณ•\n# o = np.arange(0, 12)\n# print(o.reshape(3, 4))\n# print(o.reshape(-1,4))\n# print(o[:,None])\n# print(o.squeeze())\n\n# ๅ‡็บฌๅบฆๆ–นๆณ•\n# p = np.arange(0,12).reshape(3,1,4)\n# print(p[:,0,:])\n\n# ๅคš็ปดๅบฆๆ•ฐๆฎๅฑ•ๅนณ\n# p = np.arange(0,12).reshape(3,1,4)\n# print(p.flatten())\n\n# ๆ‹ผๆŽฅ\n# a = np.array([[1,2,3],[2,3,4]])\n# b = np.array([[5,6,4],[7,8,9]])\n# ๆŒ‰็บฌๅบฆๆ‹ผๆŽฅ\n# c = np.stack([a,b],axis=1)\n# print(c.shape)\n\n#ๅ†…้ƒจๆ‹ผๆŽฅ\n# a = np.array([[1,2,3],[2,3,4]])\n# b = np.array([[5,6,4],[7,8,9]])\n# c = np.concatenate([a,b],axis=1)\n# print(c)\n\n\n\n" }, { "alpha_fraction": 0.45681512355804443, "alphanum_fraction": 0.5242915153503418, "avg_line_length": 29.89583396911621, "blob_id": "26331674b2e46d756bdd21cc779cb0d2f51235e1", "content_id": "c76def929943a885a882c47c0bc6825bc2230e7e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1482, "license_type": "no_license", "max_line_length": 68, "num_lines": 48, "path": "/CenterLoss/LeNet_plus.py", "repo_name": "greenkarson/python", "src_encoding": "UTF-8", "text": "import torch\nfrom torch import nn\n\n\nclass LeNet_Plus(nn.Module):\n def __init__(self):\n super(LeNet_Plus, self).__init__()\n self.sequential = nn.Sequential(\n nn.Conv2d(1,32,kernel_size=5, stride=1,padding=2),\n nn.BatchNorm2d(32),\n nn.PReLU(),\n nn.Conv2d(32, 32, kernel_size=5, stride=1,padding=2),\n nn.BatchNorm2d(32),\n nn.PReLU(),\n nn.MaxPool2d(kernel_size=2,stride=2),\n\n nn.Conv2d(32, 64, kernel_size=5, stride=1, padding=2),\n nn.BatchNorm2d(64),\n nn.PReLU(),\n nn.Conv2d(64, 64, kernel_size=5, stride=1, padding=2),\n nn.BatchNorm2d(64),\n nn.PReLU(),\n nn.MaxPool2d(kernel_size=2, stride=2),\n\n nn.Conv2d(64, 128, kernel_size=5, stride=1, padding=2),\n nn.BatchNorm2d(128),\n nn.PReLU(),\n nn.Conv2d(128, 128, kernel_size=5, stride=1, padding=2),\n nn.BatchNorm2d(128),\n nn.PReLU(),\n nn.MaxPool2d(kernel_size=2, stride=2),\n )\n self.feature = nn.Linear(128*3*3,2)\n self.output = nn.Linear(2,10)\n\n def forward(self, x):\n x = self.sequential(x)\n x = x.view(-1,128*3*3)\n features = self.feature(x)\n outputs = self.output(features)\n return features, outputs\n\n\nif __name__ == '__main__':\n net = LeNet_Plus()\n a = torch.randn(2,1,28,28)\n b, c = net(a)\n print(b.shape, c.shape)" }, { "alpha_fraction": 0.5639705657958984, "alphanum_fraction": 0.5938725471496582, "avg_line_length": 36.43119430541992, "blob_id": "cbdeb804eac399c5ff917968a80c17842b56be48", "content_id": "3b600e24d596b75a0eebd3bb86594f9841acc4e5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4352, "license_type": "no_license", "max_line_length": 116, "num_lines": 109, "path": "/deep_learning/day02/train.py", "repo_name": "greenkarson/python", "src_encoding": "UTF-8", "text": "import torch\nfrom torchvision.transforms import Compose, Resize, RandomAffine, RandomHorizontalFlip, ToTensor, Normalize\nfrom day02.net import *\nfrom torch.utils.data import DataLoader\nfrom torchvision import datasets,transforms,models\nfrom torch import 
optim\nfrom torch.utils.tensorboard import SummaryWriter\nfrom PIL.Image import BICUBIC\n\n\nDEVICE = \"cuda:0\"\n\nclass Train:\n\n def __init__(self):\n\n image_size = 224\n\n train_transform = Compose([\n Resize(image_size, BICUBIC),\n RandomAffine(degrees=2, translate=(0.02, 0.02), scale=(0.98, 1.02), shear=2, fillcolor=(124, 117, 104)),\n RandomHorizontalFlip(),\n ToTensor(),\n Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n ])\n\n test_transform = Compose([\n Resize(image_size, BICUBIC),\n ToTensor(),\n Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n ])\n\n self.summaryWriter = SummaryWriter(\"./logs\")\n # Dataloader่ฃ…่ฝฝ่ฎญ็ปƒๆ•ฐๆฎ้›†๏ผŒbatch_sizeๆฏ่ฝฎ100ไธชๆ•ฐๆฎ๏ผŒshuffleๅนถๆ‰“ไนฑ้กบๅบ\n self.train_dataset = datasets.CIFAR10(\"../data/CIFAR10/\", True,transform=train_transform, download=True)\n # Dataloader่ฃ…่ฝฝๆต‹่ฏ•ๆ•ฐๆฎ้›†๏ผŒbatch_sizeๆฏ่ฝฎ100ไธชๆ•ฐๆฎ๏ผŒshuffleๅนถๆ‰“ไนฑ้กบๅบ\n self.train_dataload = DataLoader(self.train_dataset,batch_size=100,shuffle=True)\n self.test_dataset = datasets.CIFAR10(\"../data/CIFAR10/\",False,transform=test_transform,download=True)\n self.test_dataload = DataLoader(self.test_dataset, batch_size=100, shuffle=True)\n\n # ๅˆ›ๅปบ็ฝ‘็ปœ\n self.net = NetV2()\n\n # ่ฃ…่ฝฝไน‹ๅ‰่ฎญ็ปƒ็Šถๆ€\n # self.net.load_state_dict(torch.load(\"./checkpoint/27.t\"))\n # ๅฐ†ๆ•ฐๆฎ็งปๅŠจ่‡ณGPU่ฟ็ฎ—\n # self.net.to(DEVICE)\n # ๅˆ›ๅปบไผ˜ๅŒ–ๅ™จ๏ผŒๅฐ†็ฝ‘็ปœไธญnet.parameters()ๅ‚ๆ•ฐๆ”พๅ…ฅไผ˜ๅŒ–ๅ™จ\n self.opt = optim.Adam(self.net.parameters())\n self.loss_fn = nn.CrossEntropyLoss()\n\n def __call__(self):\n\n global imgs\n for epoch in range(100000):\n sum_loss = 0\n for i,(imgs,tags) in enumerate(self.train_dataload):\n # imgs,tags = imgs.to(DEVICE),tags.to(DEVICE)\n self.net.train()\n\n y = self.net(imgs)\n loss = self.loss_fn(y,tags)\n\n self.opt.zero_grad()\n loss.backward()\n self.opt.step()\n # ๅฐ†ๆŸๅคฑๆ•ฐๆฎๆ”พๅ›žcpu๏ผŒdetachๅœๆญขๅๅ‘ไผ ๆ’ญ๏ผŒitemๆ”พๅ›žpythonๆ ‡้‡\n sum_loss += loss.cpu().detach().item()\n\n avg_loss = sum_loss / len(self.train_dataload)\n\n\n sum_score = 0\n test_sum_loss = 0\n for i,(imgs,tags) in enumerate(self.test_dataload):\n # imgs,tags = imgs.to(DEVICE),tags.to(DEVICE)\n self.net.eval()\n\n test_y = self.net(imgs)\n test_loss = self.loss_fn(test_y,tags)\n test_sum_loss += test_loss.cpu().detach().item()\n\n predict_tags = torch.argmax(test_y,dim=1)\n\n # ๅฐ†ๅพ—ๅˆ†ๆ•ฐๆฎๆ”พๅ›žcpu๏ผŒdetachๅœๆญขๅๅ‘ไผ ๆ’ญ๏ผŒitemๆ”พๅ›žpythonๆ ‡้‡\n sum_score += torch.sum(torch.eq(predict_tags,tags).float()).cpu().detach().item()\n\n test_avg_loss = test_sum_loss / len(self.test_dataload)\n score = sum_score / len(self.test_dataset)\n\n # ๆทปๅŠ ่ฎญ็ปƒๆŸๅคฑไธŽๆต‹่ฏ•ๆŸๅคฑๆ ‡้‡ๅœจtensorboardไธญ\n self.summaryWriter.add_scalars(\"loss\",{\"train_loss\":avg_loss,\"test_loss\":test_avg_loss},epoch)\n self.summaryWriter.add_scalar(\"score\",score,epoch)\n self.summaryWriter.add_graph(self.net,(imgs,))\n layer1_weight = self.net.sequential[0].weight\n layer2_weight = self.net.sequential[4].weight\n layer3_weight = self.net.sequential[8].weight\n self.summaryWriter.add_histogram(\"layer1_weight\", layer1_weight)\n self.summaryWriter.add_histogram(\"layer2_weight\", layer2_weight)\n self.summaryWriter.add_histogram(\"layer3_weight\", layer3_weight)\n\n print(epoch,avg_loss,test_avg_loss,score)\n # ไฟๅญ˜็ฝ‘็ปœ่ฎญ็ปƒ็Šถๆ€\n torch.save(self.net.state_dict(),f\"./checkpoint/{epoch}.t\")\n\n\nif __name__ == '__main__':\n train = Train()\n train()\n" }, { "alpha_fraction": 0.752964437007904, "alphanum_fraction": 
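The evaluation branch above scores the CIFAR10 test set by comparing argmax predictions against the labels; a tiny self-contained sketch of that bookkeeping, with random logits and hand-picked labels in place of real network output:

import torch

# Fake logits for a batch of four samples over ten classes, plus labels.
test_y = torch.randn(4, 10)
tags = torch.tensor([3, 1, 7, 7])

predict_tags = torch.argmax(test_y, dim=1)                # highest-scoring class
sum_score = torch.sum(torch.eq(predict_tags, tags).float()).item()
print(sum_score / tags.shape[0])                          # batch accuracy in [0, 1]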
0.790513813495636, "avg_line_length": 35.21428680419922, "blob_id": "f0c7c820b04a7370e82cd3ca9e8f6f44a95f2d5f", "content_id": "6a65692235f0bf9b33a56ca9f94f17cb2d4d4a1a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 506, "license_type": "no_license", "max_line_length": 58, "num_lines": 14, "path": "/deep_learning/day02/analysis.py", "repo_name": "greenkarson/python", "src_encoding": "UTF-8", "text": "from day02.net import *\nfrom torch.utils.tensorboard import SummaryWriter\nimport cv2\nnet = NetV2()\nnet.load_state_dict(torch.load(\"./checkpoint/2.t\"))\nsummaryWriter = SummaryWriter(\"./logs\")\nlayer1_weight = net.sequential[0].weight\nlayer2_weight = net.sequential[4].weight\nlayer3_weight = net.sequential[8].weight\n\nsummaryWriter.add_histogram(\"layer1_weight\",layer1_weight)\nsummaryWriter.add_histogram(\"layer2_weight\",layer2_weight)\nsummaryWriter.add_histogram(\"layer3_weight\",layer3_weight)\ncv2.waitKey(0)" }, { "alpha_fraction": 0.4881810247898102, "alphanum_fraction": 0.524206817150116, "avg_line_length": 34.787330627441406, "blob_id": "08532aab762ffad2eb4d2f93d60bc264a88525ae", "content_id": "e14bb863dfaf3c05383b6a9cdd7387c3f36a470c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 10117, "license_type": "no_license", "max_line_length": 100, "num_lines": 221, "path": "/MTCNN/Detect.py", "repo_name": "greenkarson/python", "src_encoding": "UTF-8", "text": "import torch, os, time\nfrom PIL import Image\nfrom PIL import ImageDraw\nimport numpy as np\nfrom tool import utils\nimport Nets\nfrom torchvision import transforms\n\n# Detection thresholds\n# P-net:\np_cls = 0.6  # originally 0.6\np_nms = 0.5  # originally 0.5\n# R-net:\nr_cls = 0.6  # originally 0.6\nr_nms = 0.5  # originally 0.5\n# O-net:\no_cls = 0.3  # originally 0.97\no_nms = 0.5  # originally 0.7\n\n\nclass Detect:\n\n    def __init__(self, pnet_param, rnet_param, onet_param, isCuda=False):\n\n        self.isCuda = isCuda\n        # instantiate the three networks\n        self.pnet = Nets.PNet()\n        self.rnet = Nets.RNet()\n        self.onet = Nets.ONet()\n        # move the networks onto the GPU (the modules themselves, not a forward call)\n        if self.isCuda:\n            self.pnet.cuda()\n            self.rnet.cuda()\n            self.onet.cuda()\n        # load the trained weights\n        self.pnet.load_state_dict(torch.load(pnet_param))\n        self.rnet.load_state_dict(torch.load(rnet_param))\n        self.onet.load_state_dict(torch.load(onet_param))\n\n        self.pnet.eval()\n        self.rnet.eval()\n        self.onet.eval()\n        # convert image data to CHW tensors; Compose expects a list of transforms\n        self.__image_transform = transforms.Compose([\n            transforms.ToTensor()\n        ])\n\n    def detect(self, image):\n        # P-net detection-----1st\n        start_time = time.time()  # start timing\n        pnet_boxes = self.__pnet_detect(image)\n        if pnet_boxes.shape[0] == 0:\n            return np.array([])\n\n        end_time = time.time()  # stop timing\n        t_pnet = end_time - start_time\n\n        # R-net detection-------2nd\n        start_time = time.time()\n        rnet_boxes = self.__rnet_detect(image, pnet_boxes)\n        if rnet_boxes.shape[0] == 0:\n            return np.array([])\n        end_time = time.time()\n        t_rnet = end_time - start_time\n\n        # O-net detection--------3rd\n        start_time = time.time()\n        onet_boxes = self.__onet_detect(image, rnet_boxes)\n        if onet_boxes.shape[0] == 0:\n            return np.array([])\n        end_time = time.time()\n        t_onet = end_time - start_time\n\n        total_time = t_pnet + t_rnet + t_onet\n        print(\"total:{0}pnet:{1}rnet:{2}onet:{3}\".format(total_time, t_pnet, t_rnet, t_onet))\n        return onet_boxes\n\n    def __pnet_detect(self, image):\n        boxes = []\n        img = image\n        w, h = img.size\n        min_side_len = min(w, h)\n\n        # 
initial pyramid scale (1 = no scaling): yields images at different resolutions\n        scale = 1\n        while min_side_len > 12:\n            img_data = self.__image_transform(img)\n            if self.isCuda:\n                img_data = img_data.cuda()\n\n            img_data = img_data.unsqueeze(0)  # add a batch dimension (unsqueeze is not in-place, so assign the result)\n\n            _cls, _offest = self.pnet(img_data)  # ★★returns confidences and offsets\n            cls = _cls[0][0].cpu().data  # [203, 245]: feature map size W,H\n            offest = _offest[0].cpu().data  # [4, 203, 245]: feature map channels and size C,W,H\n            # ★indices of boxes whose confidence exceeds 0.6; if the P-net frames no face at all, either the network is undertrained or the threshold is too high and should be lowered\n            idxs = torch.nonzero(torch.gt(cls, p_cls))\n            # add each qualifying box by its index; cls[idx[0], idx[1]] reads the confidence: idx[0] is the row index, idx[1] the column index\n            for idx in idxs:\n                # ★call the back-calculation _box (maps a feature-map box back onto the original image), keeping the boxes above 0.6\n                boxes.append(self.__box(idx, offest, cls[idx[0], idx[1]], scale))\n\n            scale *= 0.7  # shrink the image: this drives the loop condition\n            _w = int(w * scale)  # new width\n            _h = int(h * scale)\n\n            img = img.resize((_w, _h))  # resize the image to the scaled width and height\n            min_side_len = min(_w, _h)  # refresh the minimum side so the pyramid loop can terminate\n\n        # return the boxes; the default threshold p_nms=0.5 (IoU 0.5) keeps the boxes with IOU below 0.5; with a well-trained network the value can be lowered\n        return utils.nms(np.array(boxes), p_nms)\n\n    def __rnet_detect(self, image, pnet_boxes):\n\n        _img_dataset = []  # empty list that collects the crops\n        _pnet_boxes = utils.convert_to_square(pnet_boxes)  # ★expand each P-net box from its centre along the longer side into a square before cropping\n        for _box in _pnet_boxes:  # ★for each box: take its 4 coordinates, crop, resize, convert the type, append to the list\n            _x1 = int(_box[0])\n            _y1 = int(_box[1])\n            _x2 = int(_box[2])\n            _y2 = int(_box[3])\n\n            img = image.crop((_x1, _y1, _x2, _y2))  # crop with the 4 coordinate points\n            img = img.resize((24, 24))  # resize to the fixed input size\n            img_data = self.__image_transform(img)  # turn the image array into a tensor\n            _img_dataset.append(img_data)\n\n        img_dataset = torch.stack(_img_dataset)  # stack along dim 0; effectively a type conversion, see example 2★\n        if self.isCuda:\n            img_dataset = img_dataset.cuda()  # move the image batch to the GPU\n\n        _cls, _offset = self.rnet(img_dataset)  # ★★feed the 24*24 crops through the network for another round of filtering\n\n        cls = _cls.cpu().data.numpy()  # move the data from the GPU to the CPU, then to a numpy array\n        offset = _offset.cpu().data.numpy()\n        # print(\"r_cls:\",cls.shape)  # (11, 1): the P-net produced 11 boxes\n        # print(\"r_offset:\", offset.shape)  # (11, 4)\n\n        boxes = []  # the boxes the R-net keeps go into boxes\n        # the default confidence 0.6 is on the low side and many boxes are useless (print them to check); it can be raised a little;\n        # idxs: indices of the boxes above 0.6; ★np.where returns idxs (dim-0 indices [0,1]) and _ (dim-1 indices [0,0]), which together locate an element, see example 3\n        idxs, _ = np.where(cls > r_cls)\n        for idx in idxs:  # iterate the qualifying boxes by index; the dim-0 index is exactly the index of a qualifying confidence (the dim-1 index is unused here)\n            _box = _pnet_boxes[idx]\n            _x1 = int(_box[0])\n            _y1 = int(_box[1])\n            _x2 = int(_box[2])\n            _y2 = int(_box[3])\n\n            ow = _x2 - _x1  # width of the base box\n            oh = _y2 - _y1\n\n            x1 = _x1 + ow * offset[idx][0]  # coordinates of the refined box\n            y1 = _y1 + oh * 
offset[idx][1]\n x2 = _x2 + ow * offset[idx][2]\n y2 = _y2 + oh * offset[idx][3]\n\n boxes.append([x1, y1, x2, y2, cls[idx][0]]) # ่ฟ”ๅ›ž4ไธชๅๆ ‡็‚นๅ’Œ็ฝฎไฟกๅบฆ\n\n return utils.nms(np.array(boxes), r_nms) # ๅŽŸr_nmsไธบ0.5๏ผˆ0.5่ฆๅพ€ๅฐ่ฐƒ๏ผ‰๏ผŒไธŠ้ข็š„0.6่ฆๅพ€ๅคง่ฐƒ;ๅฐไบŽ0.5็š„ๆก†่ขซไฟ็•™ไธ‹ๆฅ\n\n def __onet_detect(self, image, rnet_boxes):\n _img_dataset = [] # ๅˆ›ๅปบๅˆ—่กจ๏ผŒๅญ˜ๆ”พๆŠ ๅ›พr\n _rnet_boxes = utils.convert_to_square(rnet_boxes) # ็ป™r็ฝ‘็ปœ่พ“ๅ‡บ็š„ๆก†๏ผŒๆ‰พๅ‡บไธญๅฟƒ็‚น๏ผŒๆฒฟ็€ๆœ€ๅคง่พน้•ฟ็š„ไธค่พนๆ‰ฉๅ……ๆˆโ€œๆญฃๆ–นๅฝขโ€\n for _box in _rnet_boxes: # ้ๅŽ†R็ฝ‘็ปœ็ญ›้€‰ๅ‡บๆฅ็š„ๆก†๏ผŒ่ฎก็ฎ—ๅๆ ‡๏ผŒๆŠ ๅ›พ๏ผŒ็ผฉๆ”พ๏ผŒๆ•ฐๆฎ็ฑปๅž‹่ฝฌๆข๏ผŒๆทปๅŠ ๅˆ—่กจ๏ผŒๅ †ๅ \n _x1 = int(_box[0])\n _y1 = int(_box[1])\n _x2 = int(_box[2])\n _y2 = int(_box[3])\n\n img = image.crop((_x1, _y1, _x2, _y2)) # ๆ นๆฎๅๆ ‡็‚นโ€œๆŠ ๅ›พโ€\n img = img.resize((48, 48))\n img_data = self.__image_transform(img) # ๅฐ†ๆŠ ๅ‡บ็š„ๅ›พ่ฝฌๆˆๅผ ้‡\n _img_dataset.append(img_data)\n\n img_dataset = torch.stack(_img_dataset) # ๅ †ๅ ๏ผŒๆญคๅค„็›ธๅฝ“ๆ•ฐๆฎๆ ผๅผ่ฝฌๆข๏ผŒ่งไพ‹ๅญ2\n if self.isCuda:\n img_dataset = img_dataset.cuda()\n\n _cls, _offset = self.onet(img_dataset)\n cls = _cls.cpu().data.numpy() # (1, 1)\n offset = _offset.cpu().data.numpy() # (1, 4)\n\n boxes = [] # ๅญ˜ๆ”พo็ฝ‘็ปœ็š„่ฎก็ฎ—็ป“ๆžœ\n # ๅŽŸo_clsไธบ0.97ๆ˜ฏๅไฝŽ็š„๏ผŒๆœ€ๅŽ่ฆ่พพๅˆฐๆ ‡ๅ‡†็ฝฎไฟกๅบฆ่ฆ่พพๅˆฐ0.99999๏ผŒ่ฟ™้‡Œๅฏไปฅๅ†™ๆˆ0.99998๏ผŒ\n # ่ฟ™ๆ ท็š„่ฏๅ‡บๆฅๅฐฑๅ…จๆ˜ฏไบบ่„ธ;็•™ไธ‹็ฝฎไฟกๅบฆๅคงไบŽ0.97็š„ๆก†๏ผ›โ˜…่ฟ”ๅ›židxs:0่ฝดไธŠ็ดขๅผ•[0]๏ผŒ_:1่ฝดไธŠ็ดขๅผ•[0]๏ผŒๅ…ฑๅŒๅ†ณๅฎšๅ…ƒ็ด ไฝ็ฝฎ๏ผŒ่งไพ‹ๅญ3\n idxs, _ = np.where(cls > o_cls)\n for idx in idxs: # ๆ นๆฎ็ดขๅผ•๏ผŒ้ๅŽ†็ฌฆๅˆๆกไปถ็š„ๆก†๏ผ›1่ฝดไธŠ็š„็ดขๅผ•๏ผŒๆฐไธบ็ฌฆๅˆๆกไปถ็š„็ฝฎไฟกๅบฆ็ดขๅผ•๏ผˆ0่ฝดไธŠ็ดขๅผ•ๆญคๅค„็”จไธๅˆฐ๏ผ‰\n _box = _rnet_boxes[idx] # ไปฅR็ฝ‘็ปœๅšไธบๅŸบๅ‡†ๆก†\n _x1 = int(_box[0])\n _y1 = int(_box[1])\n _x2 = int(_box[2])\n _y2 = int(_box[3])\n\n ow = _x2 - _x1 # ๆก†็š„ๅŸบๅ‡†ๅฎฝ๏ผŒๆก†ๆ˜ฏโ€œๆ–นโ€็š„๏ผŒow=oh\n oh = _y2 - _y1\n\n x1 = _x1 + ow * offset[idx][0] # O็ฝ‘็ปœๆœ€็ปˆ็”Ÿๆˆ็š„ๆก†็š„ๅๆ ‡๏ผ›็”Ÿๆ ท๏ผŒๅ็งป้‡โ–ณฮด=x1-_x1/w*side_len\n y1 = _y1 + oh * offset[idx][1]\n x2 = _x2 + ow * offset[idx][2]\n y2 = _y2 + oh * offset[idx][3]\n\n boxes.append([x1, y1, x2, y2, cls[idx][0]]) # ่ฟ”ๅ›ž4ไธชๅๆ ‡็‚นๅ’Œ1ไธช็ฝฎไฟกๅบฆ\n\n return utils.nms(np.array(boxes), o_nms, isMin=True) # ็”จๆœ€ๅฐ้ข็งฏ็š„IOU๏ผ›ๅŽŸo_nms(IOU)ไธบๅฐไบŽ0.7็š„ๆก†่ขซไฟ็•™ไธ‹ๆฅ\n\n def __box(self, start_index, offset, cls, scale, stride=2, side_len=12):\n _x1 = (start_index[1].float() * stride) / scale # ็ดขๅผ•ไน˜ไปฅๆญฅ้•ฟ๏ผŒ้™คไปฅ็ผฉๆ”พๆฏ”ไพ‹๏ผ›โ˜…็‰นๅพๅ็ฎ—ๆ—ถโ€œ่กŒ็ดขๅผ•๏ผŒ็ดขๅผ•ไบ’ๆขโ€๏ผŒๅŽŸไธบ[0]\n _y1 = (start_index[0].float() * stride) / scale\n _x2 = (start_index[1].float() * stride + side_len) / scale\n _y2 = (start_index[0].float() * stride + side_len) / scale\n\n ow = _x2 - _x1 # ไบบ่„ธๆ‰€ๅœจๅŒบๅŸŸๅปบ่ฎฎๆก†็š„ๅฎฝๅ’Œ้ซ˜\n oh = _y2 - _y1\n\n _offset = offset[:, start_index[0], start_index[1]] # ๆ นๆฎidxs่กŒ็ดขๅผ•ไธŽๅˆ—็ดขๅผ•๏ผŒๆ‰พๅˆฐๅฏนๅบ”ๅ็งป้‡โ–ณฮด:[x1,y1,x2,y2]\n x1 = _x1 + ow * _offset[0] # ๆ นๆฎๅ็งป้‡็ฎ—ๅฎž้™…ๆก†็š„ไฝ็ฝฎ๏ผŒx1=x1_+w*โ–ณฮด๏ผ›็”Ÿๆ ทๆ—ถไธบ:โ–ณฮด=x1-x1_/w\n y1 = _y1 + oh * _offset[1]\n x2 = _x2 + ow * _offset[2]\n y2 = _y2 + oh * _offset[3]\n return [x1, y1, x2, y2, cls] # ๆญฃๅผๆก†๏ผš่ฟ”ๅ›ž4ไธชๅๆ ‡็‚นๅ’Œ1ไธชๅ็งป้‡\n\n\n" }, { "alpha_fraction": 0.7339848279953003, "alphanum_fraction": 0.7665581107139587, "avg_line_length": 39.08695602416992, "blob_id": "5d57d8de4197b9f2cd595ea17d6ba0eff38d83e0", "content_id": 
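__box above maps an index on the P-net confidence map back to a 12x12 window on the original image; the same arithmetic as a standalone function, assuming the stride of 2 and window of 12 used in the code:

# row/col index a cell of the P-net confidence map; stride 2 and the 12-pixel
# window come from the network geometry, and dividing by the pyramid scale
# maps the window back onto the original image.
def feature_index_to_box(row, col, scale, stride=2, side_len=12):
    x1 = (col * stride) / scale
    y1 = (row * stride) / scale
    x2 = (col * stride + side_len) / scale
    y2 = (row * stride + side_len) / scale
    return x1, y1, x2, y2

print(feature_index_to_box(10, 20, scale=0.7))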
"a6a674f175e379c549be0d16e012d287b042dc90", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 921, "license_type": "no_license", "max_line_length": 95, "num_lines": 23, "path": "/PythonStudy/Machine_Learning/Clustering.py", "repo_name": "greenkarson/python", "src_encoding": "UTF-8", "text": "import matplotlib.pyplot as plt\nfrom sklearn.datasets import samples_generator\nfrom sklearn import metrics,cluster\nfrom sklearn.mixture import gaussian_mixture\n\n# x,y = samples_generator.make_blobs(n_samples=200,n_features=3,cluster_std=0.6,random_state=0)\nx,y = samples_generator.make_circles(n_samples=200,noise=.05,random_state=0,factor=0.4)\n# x,y = samples_generator.make_moons(n_samples=200,noise=.05,random_state=0)\n# print(x.shape,y.shape)\n\n# clu = cluster.KMeans(2)\n# clu = cluster.MeanShift()\n# clu = cluster.DBSCAN(eps=0.98,min_samples=4)\n# clu = cluster.SpectralClustering(2,affinity=\"nearest_neighbors\")\n# clu = cluster.AffinityPropagation()\nclu = gaussian_mixture.GaussianMixture(n_components=2)\n\nlabels = clu.fit_predict(x)\nprint(metrics.silhouette_score(x,labels))\nprint(metrics.calinski_harabasz_score(x,labels))\nprint(metrics.davies_bouldin_score(x,labels))\nplt.scatter(x[:,0],x[:,1],c=labels)\nplt.show()" }, { "alpha_fraction": 0.6847058534622192, "alphanum_fraction": 0.7576470375061035, "avg_line_length": 31.615385055541992, "blob_id": "be4e1874182854d4bf67de944d9ff3fcafb2511c", "content_id": "646a5c00f9a91540a857810cc9056174e2d08c66", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 425, "license_type": "no_license", "max_line_length": 80, "num_lines": 13, "path": "/OpenCV_Practice/contour.py", "repo_name": "greenkarson/python", "src_encoding": "UTF-8", "text": "import cv2\nimg = cv2.imread(\"14.jpg\")\ndst = cv2.cvtColor(img,cv2.COLOR_RGB2GRAY)\nretval, dst = cv2.threshold(dst,10,255,cv2.THRESH_BINARY)\n\n# contours,hierarchy = cv2.findContours(dst,cv2.RETR_TREE,cv2.CHAIN_APPROX_NONE)\ncontours,hierarchy = cv2.findContours(dst,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)\n\n\nimg_contour = cv2.drawContours(img,contours,-1,(0,0,255),thickness=2)\n\ncv2.imshow(\"contour\",img_contour)\ncv2.waitKey(0)\n\n" }, { "alpha_fraction": 0.49544817209243774, "alphanum_fraction": 0.5343137383460999, "avg_line_length": 28.75, "blob_id": "1adb5d6fd8023cc186fb6ec67a3c06a0c9e40f02", "content_id": "16c5bdf85ab32744de2ad40b2483d88dc09e2f4f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2856, "license_type": "no_license", "max_line_length": 110, "num_lines": 96, "path": "/RNN/RNN.py", "repo_name": "greenkarson/python", "src_encoding": "UTF-8", "text": "import torch\nfrom torch import nn\nfrom torch.utils.data import DataLoader\nfrom torchvision import datasets, transforms\nfrom torch import optim\nfrom torch.nn import functional as F\n\n\nclass LSTM(nn.Module):\n def __init__(self):\n super().__init__()\n self.rnn = nn.LSTM(28 * 1, 128, 2, batch_first=True, bidirectional=False)\n self.output = nn.Linear(128, 10)\n\n def forward(self, x):\n n, c, h, w = x.shape\n x = x.permute(0, 2, 3, 1)\n x = x.reshape(n, h, w * c)\n h0 = torch.zeros(2 * 1, n, 128)\n c0 = torch.zeros(2 * 1, n, 128)\n hsn, (hn, cn) = self.rnn(x, (h0, c0))\n out = self.output(hsn[:, -1, :])\n # print(hsn[:,-1,:].shape)\n return out\n\n\nclass MyGRU(nn.Module):\n def __init__(self):\n super().__init__()\n self.gru = nn.GRU(28, 128, 2, batch_first=True)\n 
self.output = nn.Linear(128, 10)\n\n    def forward(self, x):\n        n, c, h, w = x.shape\n        x = x.permute(0, 2, 3, 1)\n        x = x.reshape(n, h, w * c)\n        h0 = torch.zeros(2 * 1, n, 128)\n        hsn, hn = self.gru(x, h0)\n        out = self.output(hsn[:, -1, :])\n        return out\n\n\nclass GRU_Cell(nn.Module):\n    def __init__(self):\n        super().__init__()\n        self.gru_cell1 = nn.GRUCell(28, 128)\n        self.gru_cell2 = nn.GRUCell(128, 128)\n        self.output = nn.Linear(128, 10)\n\n    def forward(self, x):\n        n, c, h, w = x.shape\n        x = x.permute(0, 2, 3, 1)\n        x = x.reshape(n, h, w * c)\n\n        hx_1 = torch.zeros(n, 128)\n        hx_2 = torch.zeros(n, 128)\n\n        for i in range(h):\n            hx_1 = F.relu(self.gru_cell1(x[:, i, :], hx_1))\n            hx_2 = F.relu(self.gru_cell2(hx_1, hx_2))\n        out = self.output(hx_2)\n        return out\n\n\nif __name__ == '__main__':\n    # imgs = torch.randn(2, 1, 28, 28)\n    # rnn = GRU_Cell()\n    # y = rnn(imgs)\n    # print(y.shape)\n    # exit()\n    train_dataset = datasets.MNIST(root='../data', train=True, transform=transforms.ToTensor(), download=True)\n    test_dataset = datasets.MNIST(root='../data', train=False, transform=transforms.ToTensor(), download=True)\n\n    train_dataloader = DataLoader(train_dataset, batch_size=100, shuffle=True)\n    test_dataloader = DataLoader(test_dataset, batch_size=100, shuffle=True)\n\n    rnn = LSTM()\n    opt = optim.Adam(rnn.parameters())\n    loss_fn = nn.CrossEntropyLoss()\n\n    for epoch in range(100000):\n        for i, (img, tag) in enumerate(train_dataloader):\n            # print(img.shape)\n            y = rnn(img)\n            loss = loss_fn(y, tag)\n\n            opt.zero_grad()\n            loss.backward()\n            opt.step()\n            print(loss.cpu().detach().item())\n\n        # evaluate on the loader, not the raw dataset, so each batch keeps its batch dimension\n        for i, (img, tag) in enumerate(test_dataloader):\n            y = rnn(img)\n            test_loss = loss_fn(y, tag)\n\n            print(test_loss.cpu().detach().item())\n" }, { "alpha_fraction": 0.5701850056648254, "alphanum_fraction": 0.6071817278862, "avg_line_length": 40.772727966308594, "blob_id": "67e9c8e47ba903616afd9cb587f92e41af4051af", "content_id": "b7b61107fe2f10548aff80db0caccee70a578b7f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 989, "license_type": "no_license", "max_line_length": 117, "num_lines": 22, "path": "/CenterLoss/Centerloss.py", "repo_name": "greenkarson/python", "src_encoding": "UTF-8", "text": "import torch\nfrom torch import nn\n\n\ndef CenterLoss(feature, label, lamda):\n\n    a = torch.tensor([[0, 0], [-1, -3], [-2, -2], [-3, 0], [-2, 3], [0, 3], [2, 3], [3, 0], [2, -2], [1, 3]])\n    center = nn.Parameter(a.float(), requires_grad=True)\n    # use the labels to pick out each sample's class centre\n    center_class = center.index_select(0, index=label.long())\n\n    # count how many samples fall in each class\n    count = torch.histc(label.float(), bins=int(max(label).item() + 1), max=int(max(label).item()))\n\n    # the per-sample count for its class\n    count_class = count.index_select(0, index=label.long())\n\n    # loss = lamda/2 *((torch.mean((feature-center_class)**2).sum(dim=1)) / count_class)\n    loss = lamda / 2 * (torch.mean(torch.div(torch.sum(torch.pow((feature - center_class), 2), dim=1), count_class)))\n    # loss = ((feature-center_class)**2).sum(1) / center_class\n    return loss\n" }, { "alpha_fraction": 0.6103739738464355, "alphanum_fraction": 0.6875753998756409, "avg_line_length": 24.15151596069336, "blob_id": "69d3ceef5f44b2fb7be15d28b7dc5436bd6f72b9", "content_id": "d36f9272749536b474c2ba70b8939716d54f2d81", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", 
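CenterLoss above scales each sample's squared distance to its class centre by that class's sample count via index_select and histc; a small numeric check of the same indexing, with hand-picked features and centres rather than trained values:

import torch

features = torch.tensor([[1., 2.], [3., 4.], [5., 6.]])
labels = torch.tensor([0., 1., 0.])
centers = torch.tensor([[0., 0.], [10., 10.]])          # made-up class centres

center_class = centers.index_select(0, labels.long())   # each sample's centre
count = torch.histc(labels, bins=2, min=0, max=1)       # samples per class: [2., 1.]
count_class = count.index_select(0, labels.long())      # per sample: [2., 1., 2.]

loss = torch.mean(torch.sum((features - center_class) ** 2, dim=1) / count_class)
print(loss.item())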
"length_bytes": 867, "license_type": "no_license", "max_line_length": 77, "num_lines": 33, "path": "/OpenCV_Practice/bitwise.py", "repo_name": "greenkarson/python", "src_encoding": "UTF-8", "text": "import cv2\n\nimg1 = cv2.imread(\"1.jpg\")\nimg2 = cv2.imread(\"666.jpg\")\n\nrows,cols,chanels = img2.shape\nroi = img1[0:rows,0:cols]\n# ่ฝฌ็ฐๅบฆ\nimg2gray = cv2.cvtColor(img2,cv2.COLOR_RGB2GRAY)\n# ไบŒๅ€ผๅŒ–\nret, mask = cv2.threshold(img2gray,0,255,cv2.THRESH_BINARY | cv2.THRESH_OTSU)\n# ่ฎพ็ฝฎๆŽฉ็ ็™ฝๅบ•้ป‘ๅญ—\nmask_inv = cv2.bitwise_not(mask)\n\n# ๆŒ‰ไฝไธŽ่ฟ็ฎ—\n# img1_bg = cv2.bitwise_and(roi, roi, mask=mask_inv)\n# img2_fg = cv2.bitwise_and(img2, img2, mask=mask)\nimg1_bg = cv2.bitwise_and(roi, roi, mask=mask)\nimg2_fg = cv2.bitwise_and(img2, img2, mask=mask_inv)\n\ndst = cv2.add(img1_bg, img2_fg)\nimg1[0:rows, 0:cols] = dst\n\n# cv2.imshow(\"...\",roi)\n# cv2.imshow(\"img2\",img2)\n# cv2.imshow(\"img2gray\",img2gray)\n# cv2.imshow(\"mask\",mask)\n# cv2.imshow(\"maks_inv\",mask_inv)\n# cv2.imshow(\"img1_bg\",img1_bg)\n# cv2.imshow(\"img2_fg\",img2_fg)\ncv2.imshow(\"dst\",img1)\n\ncv2.waitKey(0)" }, { "alpha_fraction": 0.3894277513027191, "alphanum_fraction": 0.4643549919128418, "avg_line_length": 24.774999618530273, "blob_id": "14d5238d7ea0b90c1e326833cd23f7ac39526cd8", "content_id": "9bd2887f85bdf2b7dbcdd3818b5828281565355e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4124, "license_type": "no_license", "max_line_length": 49, "num_lines": 160, "path": "/deep_learning/day02/net.py", "repo_name": "greenkarson/python", "src_encoding": "UTF-8", "text": "import torch\nfrom torch import nn\n\n\nclass NetV1(nn.Module):\n\n def __init__(self):\n super().__init__()\n\n self.sequential = nn.Sequential(\n nn.Conv2d(3, 16, 3, 1, padding=1),\n nn.ReLU(),\n nn.Conv2d(16, 32, 3, 2, padding=1),\n nn.ReLU(),\n nn.Conv2d(32,64,3,2,padding=1)\n )\n self.outlayer = nn.Sequential(\n nn.Linear(64*8*8,10)\n )\n\n def forward(self, x):\n h = self.sequential(x)\n h = h.reshape(-1,64*8*8)\n h = self.outlayer(h)\n return h\n\n\ndef weight_init(m):\n if isinstance(m, nn.Conv2d):\n nn.init.kaiming_normal_(m.weight)\n if m.bias is not None:\n nn.init.zeros_(m.bias)\n elif isinstance(m, nn.Linear):\n nn.init.kaiming_normal_(m.weight)\n if m.bias is not None:\n nn.init.zeros_(m.bias)\n\n\nclass NetV2(nn.Module):\n\n def __init__(self):\n super().__init__()\n\n self.sequential = nn.Sequential(\n nn.Conv2d(3, 16, 3, 1, padding=1),\n nn.BatchNorm2d(16),\n nn.ReLU(),\n nn.MaxPool2d(kernel_size=2,stride=2),\n nn.Conv2d(16, 32, 3, 1, padding=1),\n nn.BatchNorm2d(32),\n nn.ReLU(),\n # nn.Dropout2d(0.2),\n nn.MaxPool2d(kernel_size=2,stride=2),\n nn.Conv2d(32,64,3,1,padding=1),\n nn.BatchNorm2d(64),\n nn.ReLU()\n )\n self.outlayer = nn.Sequential(\n # nn.Dropout(0.2),\n nn.Linear(64*8*8,10)\n )\n self.apply(weight_init)\n\n def forward(self, x):\n h = self.sequential(x)\n h = h.reshape(-1,64*8*8)\n h = self.outlayer(h)\n return h\n\nclass NetV3(nn.Module):\n\n def __init__(self):\n super().__init__()\n self.sequential = nn.Sequential(\n nn.Conv2d(3,32,3,1,padding=1),\n nn.BatchNorm2d(32),\n nn.ReLU(),\n\n nn.MaxPool2d(2,2),\n nn.Conv2d(32,64,3,1,padding=1),\n nn.BatchNorm2d(64),\n nn.ReLU(),\n\n nn.MaxPool2d(2,2),\n nn.Conv2d(64,64,3,1,padding=1),\n nn.ReLU(),\n nn.Conv2d(64, 128, 3, 1, padding=1),\n nn.ReLU(),\n nn.Conv2d(128, 256, 3, 1, padding=1),\n nn.ReLU(),\n nn.MaxPool2d(2, 2)\n )\n self.out_layer = nn.Sequential(\n nn.Linear(256*4*4,4096),\n 
nn.ReLU(),\n nn.Linear(4096,10)\n )\n\n def forward(self, x):\n h = self.sequential(x)\n h = h.reshape(-1,256*4*4)\n h = self.out_layer(h)\n return h\n\nclass NetV4(nn.Module):\n def __init__(self):\n super().__init__()\n self.sequential = nn.Sequential(\n nn.Conv2d(3,32,3,1,padding=1),\n nn.BatchNorm2d(32),\n nn.ReLU(),\n\n nn.Conv2d(32,32,3,1,padding=1),\n nn.BatchNorm2d(32),\n nn.ReLU(),\n\n nn.MaxPool2d(3,2,padding=1),\n nn.Conv2d(32,64,3,1,padding=1),\n nn.BatchNorm2d(64),\n nn.ReLU(),\n nn.Conv2d(64, 64, 3, 1, padding=1),\n nn.BatchNorm2d(64),\n nn.ReLU(),\n\n nn.MaxPool2d(3, 2, padding=1),\n nn.Conv2d(64, 128, 3, 1, padding=1),\n nn.BatchNorm2d(128),\n nn.ReLU(),\n nn.Conv2d(128, 128, 3, 1, padding=1),\n nn.BatchNorm2d(128),\n nn.ReLU(),\n\n nn.MaxPool2d(3, 2, padding=1),\n nn.Conv2d(128, 256, 3, 1, padding=1),\n nn.BatchNorm2d(256),\n nn.ReLU(),\n nn.Conv2d(256, 256, 3, 1, padding=1),\n nn.BatchNorm2d(256),\n nn.ReLU(),\n\n )\n self.out_layer = nn.Sequential(\n nn.Linear(256*4*4,4096),\n nn.ReLU(),\n nn.Linear(4096,10)\n )\n\n def forward(self, x):\n h = self.sequential(x)\n h = h.reshape(-1,256*4*4)\n h = self.out_layer(h)\n return h\n\n\nif __name__ == '__main__':\n net = NetV4()\n print(net)\n x = torch.randn(1, 3, 32, 32)\n y = net(x)\n print(y.shape)\n" }, { "alpha_fraction": 0.5587925314903259, "alphanum_fraction": 0.5703755617141724, "avg_line_length": 34.61249923706055, "blob_id": "3f4203b672abb32d60695d6f320959aa8f6c9239", "content_id": "47cb1d955f5b9a62a067e40eb57967ecfc7ba878", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3091, "license_type": "no_license", "max_line_length": 111, "num_lines": 80, "path": "/deep_learning/day01/train.py", "repo_name": "greenkarson/python", "src_encoding": "UTF-8", "text": "import torch\nfrom day01.net import *\nfrom day01.data import *\nfrom torch.utils.data import DataLoader\nfrom torch import optim\nfrom torch.utils.tensorboard import SummaryWriter\n\nDEVICE = \"cuda:0\"\n\n\nclass Train:\n\n def __init__(self, root):\n\n self.summaryWriter = SummaryWriter(\"./logs\")\n # Dataloader่ฃ…่ฝฝ่ฎญ็ปƒๆ•ฐๆฎ้›†๏ผŒbatch_sizeๆฏ่ฝฎ100ไธชๆ•ฐๆฎ๏ผŒshuffleๅนถๆ‰“ไนฑ้กบๅบ\n self.train_dataset = MNISTDataset(root, True)\n self.train_dataload = DataLoader(self.train_dataset, batch_size=100, shuffle=True)\n # Dataloader่ฃ…่ฝฝๆต‹่ฏ•ๆ•ฐๆฎ้›†๏ผŒbatch_sizeๆฏ่ฝฎ100ไธชๆ•ฐๆฎ๏ผŒshuffleๅนถๆ‰“ไนฑ้กบๅบ\n self.test_dataset = MNISTDataset(root, False)\n self.test_dataload = DataLoader(self.test_dataset, batch_size=100, shuffle=True)\n # ๅˆ›ๅปบ็ฝ‘็ปœ\n self.net = NetV1()\n\n # ่ฃ…่ฝฝไน‹ๅ‰่ฎญ็ปƒ็Šถๆ€\n # self.net.load_state_dict(torch.load(\"./checkpoint/27.t\"))\n # ๅฐ†ๆ•ฐๆฎ็งปๅŠจ่‡ณGPU่ฟ็ฎ—\n # self.net.to(DEVICE)\n # ๅˆ›ๅปบไผ˜ๅŒ–ๅ™จ๏ผŒๅฐ†็ฝ‘็ปœไธญnet.parameters()ๅ‚ๆ•ฐๆ”พๅ…ฅไผ˜ๅŒ–ๅ™จ\n self.opt = optim.Adam(self.net.parameters())\n\n def __call__(self):\n\n for epoch in range(100000):\n sum_loss = 0\n for i, (imgs, tags) in enumerate(self.train_dataload):\n # imgs,tags = imgs.to(DEVICE),tags.to(DEVICE)\n self.net.train()\n\n y = self.net(imgs)\n loss = torch.mean((tags - y) ** 2)\n\n self.opt.zero_grad()\n loss.backward()\n self.opt.step()\n # ๅฐ†ๆŸๅคฑๆ•ฐๆฎๆ”พๅ›žcpu๏ผŒdetachๅœๆญขๅๅ‘ไผ ๆ’ญ๏ผŒitemๆ”พๅ›žpythonๆ ‡้‡\n sum_loss += loss.cpu().detach().item()\n\n avg_loss = sum_loss / len(self.train_dataload)\n\n sum_score = 0\n test_sum_loss = 0\n for i, (imgs, tags) in enumerate(self.test_dataload):\n # imgs,tags = imgs.to(DEVICE),tags.to(DEVICE)\n self.net.eval()\n\n test_y = self.net(imgs)\n 
test_loss = torch.mean((tags - test_y) ** 2)\n test_sum_loss += test_loss.cpu().detach().item()\n\n predict_tags = torch.argmax(test_y, dim=1)\n label_tags = torch.argmax(tags, dim=1)\n # ๅฐ†ๅพ—ๅˆ†ๆ•ฐๆฎๆ”พๅ›žcpu๏ผŒdetachๅœๆญขๅๅ‘ไผ ๆ’ญ๏ผŒitemๆ”พๅ›žpythonๆ ‡้‡\n sum_score += torch.sum(torch.eq(predict_tags, label_tags).float()).cpu().detach().item()\n\n test_avg_loss = test_sum_loss / len(self.test_dataload)\n score = sum_score / len(self.test_dataset)\n\n self.summaryWriter.add_scalars(\"loss\", {\"train_loss\": avg_loss, \"test_loss\": test_avg_loss}, epoch)\n self.summaryWriter.add_scalar(\"score\", score, epoch)\n self.summaryWriter.add_graph(self.net, (imgs,))\n\n print(epoch, avg_loss, test_avg_loss, score)\n # ไฟๅญ˜็ฝ‘็ปœ่ฎญ็ปƒ็Šถๆ€\n torch.save(self.net.state_dict(), f\"./checkpoint/{epoch}.t\")\n\n\nif __name__ == '__main__':\n train = Train(\"../data/MNIST_IMG\")\n train()\n" }, { "alpha_fraction": 0.49558234214782715, "alphanum_fraction": 0.5638554096221924, "avg_line_length": 20.842105865478516, "blob_id": "eccad5b60584e1e5c0486d01b9094017f9745bb6", "content_id": "363d315269074e586a3963b4230fd9b07773031c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1289, "license_type": "no_license", "max_line_length": 71, "num_lines": 57, "path": "/SEQ2SEQ/gen_number.py", "repo_name": "greenkarson/python", "src_encoding": "UTF-8", "text": "from PIL import Image, ImageDraw, ImageFont, ImageFilter\nimport random\nimport os\n\n\n# ้šๆœบๆ•ฐๅญ—\ndef ranNum():\n a = str(random.randint(0, 9))\n a = chr(random.randint(48, 57))\n b = chr(random.randint(65, 90)) # ๅคงๅ†™ๅญ—ๆฏ\n c = chr(random.randint(97, 122)) # ๅฐๅ†™ๅญ—ๆฏ\n d = ord(a)\n return a\n\n\n# ้šๆœบ้ขœ่‰ฒ1\ndef ranColor1():\n return (random.randint(65, 255),\n random.randint(65, 255),\n random.randint(65, 255))\n\n\n# ้šๆœบ้ขœ่‰ฒ2\ndef ranColor2():\n return (random.randint(32, 127),\n random.randint(32, 127),\n random.randint(32, 127))\n\n\n# 240*60\nw = 240\nh = 60\n\nfont = ImageFont.truetype(\"Arial.ttf\", 40)\nfor i in range(1000):\n\n image = Image.new(\"RGB\", (w, h), (255, 255, 255))\n draw = ImageDraw.Draw(image)\n\n for x in range(w):\n for y in range(h):\n draw.point((x, y), fill=ranColor1())\n\n filename = \"\"\n for j in range(4):\n ch = ranNum()\n filename += ch\n draw.text((60 * j + 10, 10), (ch), font=font, fill=ranColor2())\n\n # ๆจก็ณŠ:\n image = image.filter(ImageFilter.BLUR)\n # image.show()\n if not os.path.exists(\"./code\"):\n os.makedirs(\"./code\")\n image_path = r\"./code\"\n image.save(\"{0}/{1}.jpg\".format(image_path, filename))\n print(i)\n" }, { "alpha_fraction": 0.6071428656578064, "alphanum_fraction": 0.6173469424247742, "avg_line_length": 18.600000381469727, "blob_id": "72d3db95ea4158a8d7975b2df8c792cf025d42c0", "content_id": "e24ba7b00e1a8bbc75c7e94bf69eaa23ed4f41f7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 240, "license_type": "no_license", "max_line_length": 86, "num_lines": 10, "path": "/MTCNN/train_pnet.py", "repo_name": "greenkarson/python", "src_encoding": "UTF-8", "text": "# ่ฎญ็ปƒP็ฝ‘็ปœ\n\nimport Nets\nimport Train\n\nif __name__ == '__main__':\n net = Nets.PNet()\n\n trainer = Train.Trainer(net, \"/Users/karson/Downloads/Dataset/12\") # ็ฝ‘็ปœใ€ไฟๅญ˜ๅ‚ๆ•ฐใ€่ฎญ็ปƒๆ•ฐๆฎ\n trainer() # ่ฐƒ็”จ่ฎญ็ปƒๆ–นๆณ•\n" }, { "alpha_fraction": 0.38966965675354004, "alphanum_fraction": 0.439496248960495, "avg_line_length": 27.82105255126953, "blob_id": 
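The MNISTDataset used by day01/train.py is not part of this dump, but the MSE loss against tags together with argmax over tags implies one-hot targets; a sketch of that encoding under this assumption, with a random softmax output standing in for the network:

import torch

labels = torch.tensor([2, 0, 9])                  # integer class labels
tags = torch.zeros(labels.shape[0], 10)
tags[torch.arange(labels.shape[0]), labels] = 1.  # one-hot targets

y = torch.softmax(torch.randn(3, 10), dim=1)      # stand-in network output
loss = torch.mean((tags - y) ** 2)
print(loss.item(), torch.argmax(tags, dim=1))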
"9e703751db80600750ddadeeba32c84492677c1b", "content_id": "0906742bfb0388c757d2268256474f348f91df6b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5575, "license_type": "no_license", "max_line_length": 105, "num_lines": 190, "path": "/MTCNN2/detect.py", "repo_name": "greenkarson/python", "src_encoding": "UTF-8", "text": "from Net import *\nfrom data import tf\nfrom PIL import Image, ImageDraw\nfrom tools import utils\nimport numpy as np\n\n\nclass Detector:\n\n def __init__(self):\n self.pnet = PNet()\n self.pnet.load_state_dict(torch.load(r\"param\\pnet2020-05-15-21-43-13.pt\"))\n self.pnet.eval()\n\n self.rnet = RNet()\n self.rnet.load_state_dict(torch.load(r\"param\\rnet2020-05-16-12-38-44.pt\"))\n self.rnet.eval()\n\n self.onet = ONet()\n self.onet.load_state_dict(torch.load(r\"param\\onet2020-05-15-21-52-22.pt\"))\n self.onet.eval()\n\n def __call__(self, img):\n boxes = self.detPnet(img)\n if boxes is None:\n return []\n\n print(boxes.shape)\n boxes = self.detRnet(img, boxes)\n if boxes is None:\n return []\n print(boxes.shape)\n\n boxes = self.detOnet(img, boxes)\n if boxes is None:\n return []\n print(boxes.shape)\n return boxes\n\n def detPnet(self, img):\n w, h = img.size\n scale = 1\n img_scale = img\n\n min_side = min(w, h)\n\n _boxes = []\n while min_side > 12:\n _img_scale = tf(img_scale)\n y = self.pnet(_img_scale[None, ...])\n y = y.cpu().detach()\n\n torch.sigmoid_(y[:, 0, ...])\n c = y[0, 0]\n c_mask = c > 0.48\n idxs = c_mask.nonzero()\n _x1, _y1 = idxs[:, 1] * 2, idxs[:, 0] * 2 # 2ไธบๆ•ดไธชP็ฝ‘็ปœไปฃ่กจ็š„ๆญฅ้•ฟ\n _x2, _y2 = _x1 + 12, _y1 + 12\n\n p = y[0, 1:, c_mask]\n x1 = (_x1 - p[0, :] * 12) / scale\n y1 = (_y1 - p[1, :] * 12) / scale\n x2 = (_x2 - p[2, :] * 12) / scale\n y2 = (_y2 - p[3, :] * 12) / scale\n\n cc = y[0, 0, c_mask]\n\n _boxes.append(torch.stack([x1, y1, x2, y2, cc], dim=1))\n\n # ๅ›พๅƒ้‡‘ๅญ—ๅก”\n scale *= 0.702\n _w, _h = int(w * scale), int(h * scale)\n img_scale = img_scale.resize((_w, _h))\n min_side = min(_w, _h)\n\n boxes = torch.cat(_boxes, dim=0)\n return utils.nms(boxes.cpu().detach().numpy(), 0.33)\n\n def detRnet(self, img, boxes):\n imgs = []\n for box in boxes:\n crop_img = img.crop(box[0:4])\n crop_img = crop_img.resize((24, 24))\n imgs.append(tf(crop_img))\n _imgs = torch.stack(imgs, dim=0)\n\n y = self.rnet(_imgs)\n\n y = y.cpu().detach()\n torch.sigmoid_(y[:, 0])\n y = y.numpy()\n # print(y[:,0])\n\n c_mask = y[:, 0] > 0.55\n _boxes = boxes[c_mask]\n print(_boxes.shape)\n\n _y = y[c_mask]\n\n _w, _h = _boxes[:, 2] - _boxes[:, 0], _boxes[:, 3] - _boxes[:, 1]\n x1 = _boxes[:, 0] - _y[:, 1] * _w\n y1 = _boxes[:, 1] - _y[:, 2] * _h\n x2 = _boxes[:, 2] - _y[:, 3] * _w\n y2 = _boxes[:, 3] - _y[:, 4] * _h\n cc = _y[:, 0]\n\n _boxes = np.stack([x1, y1, x2, y2, cc], axis=1)\n\n return utils.nms(_boxes, 0.3)\n\n def detOnet(self, img, boxes):\n imgs = []\n for box in boxes:\n crop_img = img.crop(box[0:4])\n crop_img = crop_img.resize((48, 48))\n imgs.append(tf(crop_img))\n _imgs = torch.stack(imgs, dim=0)\n\n y = self.onet(_imgs)\n y = y.cpu().detach()\n torch.sigmoid_(y[:, 0])\n y = y.numpy()\n\n c_mask = y[:, 0] > 0.7\n _boxes = boxes[c_mask]\n print(_boxes.shape)\n\n _y = y[c_mask]\n\n _w, _h = _boxes[:, 2] - _boxes[:, 0], _boxes[:, 3] - _boxes[:, 1]\n x1 = _boxes[:, 0] - _y[:, 1] * _w\n y1 = _boxes[:, 1] - _y[:, 2] * _h\n x2 = _boxes[:, 2] - _y[:, 3] * _w\n y2 = _boxes[:, 3] - _y[:, 4] * _h\n cc = _y[:, 0]\n\n # ็”Ÿๆˆๆ•ฐๆฎ้”™่ฏฏ\n landmarks = _y[:, 5:]\n px1 = landmarks[:, 0] * 
_w + _boxes[:, 0]\n py1 = landmarks[:, 1] * _h + _boxes[:, 1]\n px2 = landmarks[:, 2] * _w + _boxes[:, 0]\n py2 = landmarks[:, 3] * _h + _boxes[:, 1]\n px3 = landmarks[:, 4] * _w + _boxes[:, 0]\n py3 = landmarks[:, 5] * _h + _boxes[:, 1]\n px4 = landmarks[:, 6] * _w + _boxes[:, 0]\n py4 = landmarks[:, 7] * _h + _boxes[:, 1]\n px5 = landmarks[:, 8] * _w + _boxes[:, 0]\n py5 = landmarks[:, 9] * _h + _boxes[:, 1]\n\n _boxes = np.stack([x1, y1, x2, y2, cc, px1, py1, px2, py2, px3, py3, px4, py4, px5, py5], axis=1)\n\n _boxes = utils.nms(_boxes, 0.3)\n _boxes = utils.nms(_boxes, 0.3, is_min=True)\n return _boxes\n\n\nif __name__ == '__main__':\n test_img = Image.open(\"12.jpg\")\n img_draw = ImageDraw.Draw(test_img)\n detector = Detector()\n box = detector(test_img)\n\n for i in box: # ๅคšไธชๆก†๏ผŒๆฒกๅพช็Žฏไธ€ๆฌกๆก†ไธ€ไธชไบบ่„ธ\n x1 = int(i[0])\n y1 = int(i[1])\n x2 = int(i[2])\n y2 = int(i[3])\n\n # px1 = int(i[5])\n # py1 = int(i[6])\n # px2 = int(i[7])\n # py2 = int(i[8])\n # px3 = int(i[9])\n # py3 = int(i[10])\n # px4 = int(i[11])\n # py4 = int(i[12])\n # px5 = int(i[13])\n # py5 = int(i[14])\n\n # print((x1, y1, x2, y2))\n # print(\"conf:\", i[4]) # ็ฝฎไฟกๅบฆ\n img_draw.rectangle((x1, y1, x2, y2), outline='green', width=2)\n\n # img_draw.point((px1, py1),fill=\"green\")\n # img_draw.point((px2, py2), fill=\"green\")\n # img_draw.point((px3, py3), fill=\"green\")\n # img_draw.point((px4, py4), fill=\"green\")\n # img_draw.point((px5, py5), fill=\"green\")\n\n test_img.show() # ๆฏๅพช็Žฏไธ€ๆฌกๆก†ไธ€ไธชไบบ่„ธ\n\n\n\n" }, { "alpha_fraction": 0.5901162624359131, "alphanum_fraction": 0.6598837375640869, "avg_line_length": 30.363636016845703, "blob_id": "1f093b4df1b0348b0312c9e43a5ffdea19516791", "content_id": "c784236a5d62e80ad164813247d22b3f626ccacb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 344, "license_type": "no_license", "max_line_length": 82, "num_lines": 11, "path": "/Center_Loss/test.py", "repo_name": "greenkarson/python", "src_encoding": "UTF-8", "text": "import torch\n\ndata = torch.tensor([[3, 4], [5, 6], [7, 8], [9, 8], [6, 5]], dtype=torch.float32)\nlabel = torch.tensor([0, 1, 0, 3, 4, 0], dtype=torch.float32)\n\nc = torch.index_select(data,0,label.long())\ncount = torch.histc(label,bins=5,max=4)\ncount_class = count.index_select(dim=0, index=label.long())\nprint(c)\nprint(count)\nprint(count_class)" }, { "alpha_fraction": 0.6679462790489197, "alphanum_fraction": 0.7293666005134583, "avg_line_length": 28, "blob_id": "1edd44c7db3edd52139d427bff67dcc6357e4626", "content_id": "7ecd8287c59abf83500b6146183072c7d7fdcddc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 521, "license_type": "no_license", "max_line_length": 56, "num_lines": 18, "path": "/OpenCV_Practice/morphologyEX.py", "repo_name": "greenkarson/python", "src_encoding": "UTF-8", "text": "import cv2\n\nimg = cv2.imread(\"3.jpg\",0)\nimg2 = cv2.imread(\"4.jpg\",0)\n\nkernel = cv2.getStructuringElement(cv2.MORPH_RECT,(3,3))\n\ndst = cv2.dilate(img,kernel)\n# dst = cv2.erode(img,kernel)\n# dst = cv2.morphologyEx(img2,cv2.MORPH_OPEN,kernel)\n# dst = cv2.morphologyEx(img2,cv2.MORPH_CLOSE,kernel)\n# dst = cv2.morphologyEx(img,cv2.MORPH_GRADIENT,kernel)\n# dst = cv2.morphologyEx(img2,cv2.MORPH_TOPHAT,kernel)\n# dst = cv2.morphologyEx(img2,cv2.MORPH_BLACKHAT,kernel)\n\ncv2.imshow(\"src\",img)\ncv2.imshow(\"dst\",dst)\ncv2.waitKey(0)" }, { "alpha_fraction": 0.5410752892494202, "alphanum_fraction": 
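tools.utils, which provides the nms the detectors above call, is not included in this dump; below is a common IoU-based sketch compatible with the (x1, y1, x2, y2, confidence) boxes they pass it. Thresholds and the is_min variant of the real helper may differ.

import numpy as np

def iou(box, boxes):
    # intersection-over-union of one box against an array of boxes
    x1 = np.maximum(box[0], boxes[:, 0])
    y1 = np.maximum(box[1], boxes[:, 1])
    x2 = np.minimum(box[2], boxes[:, 2])
    y2 = np.minimum(box[3], boxes[:, 3])
    inter = np.maximum(0, x2 - x1) * np.maximum(0, y2 - y1)
    area = (box[2] - box[0]) * (box[3] - box[1])
    areas = (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])
    return inter / (area + areas - inter)

def nms(boxes, thresh=0.3):
    boxes = boxes[(-boxes[:, 4]).argsort()]        # most confident first
    keep = []
    while boxes.shape[0] > 0:
        keep.append(boxes[0])                      # keep the best remaining box
        rest = boxes[1:]
        # drop every remaining box that overlaps it too much
        boxes = rest[iou(keep[-1], rest) < thresh] if rest.shape[0] > 0 else rest
    return np.stack(keep)

demo = np.array([[0, 0, 10, 10, .9], [1, 1, 11, 11, .8], [20, 20, 30, 30, .7]])
print(nms(demo))   # the second box is suppressed by the first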
0.5449462532997131, "avg_line_length": 32.69565200805664, "blob_id": "238faa74175c56c28d5164cea42f06f3a4cf5690", "content_id": "2817a864ca827be258b8e6d95d0de62ee8863dd6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2325, "license_type": "no_license", "max_line_length": 106, "num_lines": 69, "path": "/FACE_MTCNN/celeba.py", "repo_name": "greenkarson/python", "src_encoding": "UTF-8", "text": "import os, torch\nimport numpy as np\nfrom PIL import Image\n\n\nclass Celeba():\n\n def __init__(self, dataset_root):\n self.dataset_folder = os.path.join(dataset_root, \"CelebA\")\n self.anno_folder = os.path.join(self.dataset_folder, \"Anno\")\n self.image_folder = os.path.join(self.dataset_folder, \"img_celeba\")\n\n self.box_anno = os.path.join(self.anno_folder, 'list_bbox_celeba.txt')\n self.landmarks_anno = os.path.join(self.anno_folder, 'list_landmarks_celeba.txt')\n # self.file_box_anno = open(self.box_anno)\n # self.file_landmarks_anno = open(self.landmarks_anno)\n\n def load(self):\n file_box_anno = open(self.box_anno)\n file_landmarks_anno = open(self.landmarks_anno)\n ret = []\n for i, (file_box_line, file_landmarks_line) in enumerate(zip(file_box_anno, file_landmarks_anno)):\n if i < 2:\n continue\n image_name = file_box_line.split()[0]\n\n boxes = file_box_line.split()[1:]\n boxes = list(filter(lambda x: x != '', boxes))\n boxes = np.array(boxes).astype(int)\n\n landmarks = file_landmarks_line.split()[1:]\n landmarks = list(filter(lambda x: x != '', landmarks))\n landmarks = np.array(landmarks).astype(int)\n\n img_path = os.path.join(self.image_folder, image_name)\n item = {\n 'file_name': img_path,\n 'num_bb': 1,\n 'meta_data': [boxes],\n 'landmarks': [landmarks]\n }\n ret.append(item)\n return ret\n\n def split_data(self):\n ret = self.load()\n partition_file = os.path.join(self.dataset_folder, 'Eval', 'list_eval_partition.txt')\n f_partition = open(partition_file)\n\n train = []\n dev = []\n test = []\n\n for line, item in zip(f_partition, ret):\n dtype = int(line.split()[1])\n if dtype == 0:\n train.append(item)\n if dtype == 1:\n dev.append(item)\n if dtype == 2:\n test.append(item)\n return train, dev, test\n\n\nif __name__ == '__main__':\n data = Celeba(\"/Users/karson/Downloads\")\n train, dev, test = data.split_data()\n # print(data.dataset_folder)\n print(len(train),len(dev),len(test))\n" }, { "alpha_fraction": 0.5022026300430298, "alphanum_fraction": 0.5462555289268494, "avg_line_length": 12.352941513061523, "blob_id": "9c12b966ccec64ad5c13d8863e230cb884e837b7", "content_id": "32207ec55ccb748300323b22463d021ed4555e97", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 253, "license_type": "no_license", "max_line_length": 30, "num_lines": 17, "path": "/PythonStudy/grade.py", "repo_name": "greenkarson/python", "src_encoding": "UTF-8", "text": "\"\"\"\n็™พๅˆ†ๅˆถๆˆ็ปฉ่ฝฌๆข\nversion: 0.1\nauthor: karson\n\"\"\"\ngrade = float(input(\"่ฏท่พ“ๅ…ฅๅˆ†ๆ•ฐ๏ผš\"))\n\nif grade >= 90:\n print(\"A\")\nelif grade >= 80:\n print(\"B\")\nelif grade >= 70:\n print(\"C\")\nelif grade >= 60:\n print(\"D\")\nelse:\n print(\"E\")\n" }, { "alpha_fraction": 0.6206896305084229, "alphanum_fraction": 0.6482758522033691, "avg_line_length": 19.714284896850586, "blob_id": "22997e32a9a7de61f0d5ab9a226e7eafe76ff4db", "content_id": "e815d22767adbea44e1bec35c2b373e38a2a8391", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 145, 
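Celeba.load above splits each annotation row on whitespace, filters out empty fields and casts to int; the same chain on a made-up row in the list_bbox_celeba format:

import numpy as np

# The bbox annotation rows are whitespace-separated with a variable number of
# spaces, hence the split + filter + cast chain used in Celeba.load.
line = "000001.jpg    95  71 226 313"
name, *fields = line.split()
boxes = np.array(list(filter(lambda x: x != '', fields))).astype(int)
print(name, boxes)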
"license_type": "no_license", "max_line_length": 47, "num_lines": 7, "path": "/MTCNN2/Note/total_line.py", "repo_name": "greenkarson/python", "src_encoding": "UTF-8", "text": "dst_path = \"/Users/karson/Downloads/Dataset/12\"\n\ncheck = open(f\"{dst_path}/part.txt\")\nc = 0\nfor i, line in enumerate(check):\n c += 1\nprint(i)\n" }, { "alpha_fraction": 0.46330276131629944, "alphanum_fraction": 0.5112593770027161, "avg_line_length": 25.065217971801758, "blob_id": "d14881a0ab9e690652c7de522788b3a9c7ee9174", "content_id": "d4650a0762141e27fb98850f68c7fa00f612669e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2398, "license_type": "no_license", "max_line_length": 117, "num_lines": 92, "path": "/deep_learning/day03/MobileNetV2.py", "repo_name": "greenkarson/python", "src_encoding": "UTF-8", "text": "from torch import nn\nimport torch\n\ninverted_residual_setting = [\n # t, c, n, s\n [1, 16, 1, 1],\n [6, 24, 2, 2],\n [6, 32, 3, 2],\n [6, 64, 4, 2],\n [6, 96, 3, 1],\n [6, 160, 3, 2],\n [6, 320, 1, 1],\n]\n\n\nclass InvertResidual(nn.Module):\n\n def __init__(self, input_channels, output_channels, stride, expend_ratio):\n super().__init__()\n self.stride = stride\n\n self.use_res_connect = self.stride == 1 and input_channels == output_channels\n\n hidden_dim = input_channels * expend_ratio\n layers = []\n if expend_ratio != 1:\n layers.extend([\n nn.Conv2d(input_channels, hidden_dim, 3, 1, padding=1, bias=False),\n nn.BatchNorm2d(hidden_dim),\n nn.ReLU6()\n ])\n\n layers.extend([\n nn.Conv2d(hidden_dim, hidden_dim, 1, stride, padding=0, groups=hidden_dim, bias=False),\n nn.BatchNorm2d(hidden_dim),\n nn.ReLU6(),\n\n nn.Conv2d(hidden_dim, output_channels, 1, 1, padding=0, bias=False),\n nn.BatchNorm2d(output_channels)\n ])\n\n self.conv = nn.Sequential(*layers)\n\n def forward(self, x):\n if self.use_res_connect:\n return x + self.conv(x)\n else:\n return self.conv(x)\n\n\nclass MobileNetV2(nn.Module):\n\n def __init__(self):\n super().__init__()\n self.input_layer = nn.Sequential(\n nn.Conv2d(3, 32, 3, 2, padding=1, bias=False),\n nn.BatchNorm2d(32),\n nn.ReLU6()\n )\n\n block = []\n inputc = 32\n for t, c, n, s in inverted_residual_setting:\n\n for i in range(n):\n stride = s if i == 0 else 1\n block.append(InvertResidual(input_channels=inputc, output_channels=c, stride=stride, expend_ratio=t))\n inputc = c\n\n block.extend([\n nn.Conv2d(320, 1280, 1, 1, bias=False),\n nn.BatchNorm2d(1280),\n nn.ReLU6(),\n nn.AvgPool2d(7, 1),\n nn.Conv2d(1280, 1000, 1, 1)\n ])\n\n self.residual_layer = nn.Sequential(*block)\n\n def forward(self, x):\n h = self.input_layer(x)\n h = self.residual_layer(h)\n # h.reshape(-1,1000)\n return h\n\n\nif __name__ == '__main__':\n net = MobileNetV2()\n x = torch.randn(1,3,224,224)\n y = net(x)\n print(y.shape)\n print(net)\n" }, { "alpha_fraction": 0.6450216174125671, "alphanum_fraction": 0.7142857313156128, "avg_line_length": 18.33333396911621, "blob_id": "615a7abbff0e5499e899905bf4d3cc425ba70953", "content_id": "af76b4a9e2be56450405b849ea27e684804b4ca4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 231, "license_type": "no_license", "max_line_length": 36, "num_lines": 12, "path": "/deep_learning/day02/test1.py", "repo_name": "greenkarson/python", "src_encoding": "UTF-8", "text": "import torch\nfrom torch import nn\n\nconv = nn.Conv2d(3,16,3,1,padding=1)\nx = torch.randn(1,3,16,16)\ny = 
conv(x)\nprint(conv.weight)\nnn.init.kaiming_normal_(conv.weight)\nnn.init.normal_(conv.weight, 0, 0.1)  # normal_ takes the tensor to initialise as its first argument\nnn.init.zeros_(conv.bias)\n\nprint(y.shape)" }, { "alpha_fraction": 0.6768292784690857, "alphanum_fraction": 0.745121955871582, "avg_line_length": 30.5, "blob_id": "266ce51d1106813b39081fc8e31e397866ddc3af", "content_id": "be4a86c1c285665ec5fbe0963ac970d65c3e2229", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 864, "license_type": "no_license", "max_line_length": 91, "num_lines": 26, "path": "/OpenCV_Practice/watershed.py", "repo_name": "greenkarson/python", "src_encoding": "UTF-8", "text": "import cv2\nimport numpy as np\nimg = cv2.imread(\"30.jpg\")\ngray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)\nret,thresh = cv2.threshold(gray,50,255,cv2.THRESH_BINARY_INV|cv2.THRESH_OTSU)\n\n# remove noise\nkernel = cv2.getStructuringElement(cv2.MORPH_RECT,(3,3))\nopening = cv2.morphologyEx(thresh,cv2.MORPH_OPEN,kernel,iterations=2)\n# dilate to obtain the sure background\nsurge_bg =cv2.dilate(opening,kernel,iterations=3)\n# the distance transform turns blobs into peaks\ndist_transform = cv2.distanceTransform(opening,1,5)\nret,surge_fg = cv2.threshold(dist_transform,0.7*dist_transform.max(),255,cv2.THRESH_BINARY)\n# find the unknown region\nsurge_fg = np.uint8(surge_fg)\nunknown = cv2.subtract(surge_bg,surge_fg)\n# find the marker centers\nret,marker1 = cv2.connectedComponents(surge_fg)\nmarkers = marker1 + 1\nmarkers[unknown == 255] =0\n\nmarkers3 = cv2.watershed(img,markers)\nimg[markers3 == -1] = [0,0,255]\ncv2.imshow(\"...\",unknown)\ncv2.waitKey(0)\n\n" }, { "alpha_fraction": 0.6581876277923584, "alphanum_fraction": 0.7305246591567993, "avg_line_length": 31.450000762939453, "blob_id": "14697a034d44ddb79144b4807fc6e48335b13c2b", "content_id": "8b504e808952426751929287bee384e37302916c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1326, "license_type": "no_license", "max_line_length": 106, "num_lines": 40, "path": "/OpenCV_Practice/match_flann_1.py", "repo_name": "greenkarson/python", "src_encoding": "UTF-8", "text": "import cv2\nimport numpy as np\n\n# require at least 10 matches; when there are more, keep only the 10 best\nMIN_MATCH_COUNT = 10\n\nimg1 = cv2.imread('34.jpg')\ngrayImg1 = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)\n# grayImg1 = np.float32(grayImg1)\nimg2 = cv2.imread('33.jpg')\ngrayImg2 = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)\n# grayImg2 = np.float32(grayImg2)\norb = cv2.ORB_create()\n\nkp1, des1 = orb.detectAndCompute(grayImg1, None)\nkp2, des2 = orb.detectAndCompute(grayImg2, None)\n\n# matcher = cv2.DescriptorMatcher_create(cv2.DescriptorMatcher_FLANNBASED)\n# matches = matcher.knnMatch(np.float32(des1), np.float32(des2), k=2)\n\n# FLANN_INDEX_KDTREE=0\n# indexParams=dict(algorithm=FLANN_INDEX_KDTREE,trees=5)\n# searchParams= dict(checks=50)\n# flann=cv2.FlannBasedMatcher(indexParams,searchParams)\nflann = cv2.FlannBasedMatcher()\n# descriptors passed to FLANN must be in numpy float32 format\nmatches = flann.knnMatch(np.float32(des1), np.float32(des2), k=2)\n\n\nmatchesMask = [[0, 0] for i in range(len(matches))]\n\nfor i, (m, n) in enumerate(matches):\n    if m.distance < 0.7 * n.distance:\n        matchesMask[i] = [1, 0]\n\ndraw_params = dict(matchColor=(0, 255, 0), singlePointColor=(255, 0, 0), matchesMask=matchesMask, flags=0)\nimg3 = cv2.drawMatchesKnn(img1, kp1, img2, kp2, matches, None, **draw_params)\n\ncv2.imshow(\"img\", img3)\ncv2.waitKey(0)\n" }, { "alpha_fraction": 0.45698925852775574, "alphanum_fraction": 0.5322580933570862, "avg_line_length": 
17.700000762939453, "blob_id": "009405f457648810a360b4425bf78349c5abae99", "content_id": "d6419414d7d6212ba6d22751d4998a42b6ac2510", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 186, "license_type": "no_license", "max_line_length": 38, "num_lines": 10, "path": "/PythonStudy/Numpy/Broadcast.py", "repo_name": "greenkarson/python", "src_encoding": "UTF-8", "text": "import numpy as np\na = np.array([[1,2],[3,4]])\nb = np.array([[3],[4]])\nprint(a+b)\n\nc = np.array([5])\nprint(np.tile(c,[3,3]))\n\nd = np.array([[3,4]])\nprint(np.tile(d,[3,1]),np.tile(d,[3]))" }, { "alpha_fraction": 0.5660377144813538, "alphanum_fraction": 0.6226415038108826, "avg_line_length": 12.25, "blob_id": "dc74c45d67ac9b7e5236ea2dd9269252e813971a", "content_id": "adb57439d72e09477c9738c05ce00f5aba32c260", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 248, "license_type": "no_license", "max_line_length": 28, "num_lines": 16, "path": "/PythonStudy/Check_Var.py", "repo_name": "greenkarson/python", "src_encoding": "UTF-8", "text": "\"\"\"\nUse type() to check the type of a variable\nVersion: 0.1\nAuthor: Karson\n\"\"\"\nprint(\"--checking variable types with type()--\")\na = 123\nb = 12.345\nc = 1+5j\nd = True\ne = \"hello world!\"\nprint(type(a))\nprint(type(b))\nprint(type(c))\nprint(type(d))\nprint(type(e))\n" }, { "alpha_fraction": 0.4084761142730713, "alphanum_fraction": 0.6149684190750122, "avg_line_length": 23.10869598388672, "blob_id": "7d3052e1767d3bb1fa6b77b5e7fc4a445f43b0cc", "content_id": "251c90cc8957bccd5015318d8abbc8a34583db09", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1257, "license_type": "no_license", "max_line_length": 96, "num_lines": 46, "path": "/MTCNN2/Note/test.py", "repo_name": "greenkarson/python", "src_encoding": "UTF-8", "text": "import numpy as np\nimport torch, os\n\nw = 226\nh = 313\nw_ = np.random.randint(-w * 0.2, w * 0.2)\nprint(w_)\n\nside_len = np.random.randint(int(min(w, h) * 0.8), np.ceil(1.25 * max(w, h)))\nprint(side_len)\n# test whether float strings can be converted directly to an integer array\n# a = ['0.25375939849624063','0.10150375939849623','0.016917293233082706','0.10150375939849623']\n# b = list(map(eval,a))\n# c = list(map(int,a))\n# print(c)\n\n\n# Notes:\n# Example 1 -- the difference between max and maximum\n\na = [-2, -1, 0, 1, 2]\nprint(np.max(a))  # takes one argument and returns the maximum value\nprint(np.maximum(0, a))  # takes two arguments and compares X and Y element-wise: values below 0 become 0, otherwise the larger value is kept\n\nb = torch.randn(3, 15, 15)\nc = b.unsqueeze(0)\nprint(c.shape)\n\n# torch.Size([1, 1, 1, 1])\ncls = torch.randn(4, 1, 1, 1)\noff = torch.randn(4, 4, 1, 1)\nblv = cls[0][0]\n_off = off[0]\np_cls = 0.6\np = torch.gt(cls, p_cls)\nidxs = torch.nonzero(p)\nprint(p.shape, idxs.shape)\nprint(blv.shape, _off.shape)\n\nprint(torch.randn(1, 1))\n\na = [[\"000008.jpg\", 212, 89, 218, 302], [\"000008.jpg\", 212, 89, 218, 302]]\nb = [[\"000008.jpg\", 279, 198, 343, 205, 298, 251, 275, 282, 334, 284],\n     [\"000008.jpg\", 279, 198, 343, 205, 298, 251, 275, 282, 334, 284]]\nfor c in zip(a, b):\n    print(list(c))\n" }, { "alpha_fraction": 0.6196615695953369, "alphanum_fraction": 0.6825141310691833, "avg_line_length": 27.227272033691406, "blob_id": "91dac671d6223516932dd4e03c7be17b8926e69e", "content_id": "60d4fe247fb412105b3f359258499e34c54332cf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": 
"Python", "length_bytes": 1269, "license_type": "no_license", "max_line_length": 77, "num_lines": 44, "path": "/OpenCV_Practice/car_num.py", "repo_name": "greenkarson/python", "src_encoding": "UTF-8", "text": "import cv2\n\nraw_img = cv2.imread(\"23.jpg\")\n\nimg = cv2.GaussianBlur(raw_img,(3,3),0)\nimg = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)\n\nSobel_x = cv2.Sobel(img, cv2.CV_16S, 1, 0)\nabs_x = cv2.convertScaleAbs(Sobel_x)\nret,thresh = cv2.threshold(abs_x,100,255,cv2.THRESH_BINARY|cv2.THRESH_OTSU)\nkernelx = cv2.getStructuringElement(cv2.MORPH_RECT,(17,5))\nimg = cv2.morphologyEx(thresh,cv2.MORPH_CLOSE,kernelx)\ncv2.imshow('image', img)\ncv2.waitKey(0)\nexit()\nkernelx = cv2.getStructuringElement(cv2.MORPH_RECT,(20,1))\nkernely = cv2.getStructuringElement(cv2.MORPH_RECT,(1,19))\n\nimg = cv2.dilate(img,kernelx)\nimg = cv2.erode(img,kernelx)\nimg = cv2.dilate(img,kernely)\nimg = cv2.erode(img,kernely)\n\nimage = cv2.medianBlur(img, 15)\ncv2.imshow(\"...2.\", image)\n# ๆŸฅๆ‰พ่ฝฎๅป“\ncontours, _ = cv2.findContours(image, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n\nfor item in contours:\n rect = cv2.boundingRect(item)\n x = rect[0]\n y = rect[1]\n weight = rect[2]\n height = rect[3]\n if weight > (height * 2):\n # ่ฃๅ‰ชๅŒบๅŸŸๅ›พ็‰‡\n chepai = raw_img[y:y + height, x:x + weight]\n cv2.imshow('chepai' + str(x), chepai)\n\n# ็ป˜ๅˆถ่ฝฎๅป“\nimage = cv2.drawContours(raw_img, contours, -1, (0, 0, 255), 3)\ncv2.imshow('image', image)\ncv2.waitKey(0)\ncv2.destroyAllWindows()" }, { "alpha_fraction": 0.4749999940395355, "alphanum_fraction": 0.5083333253860474, "avg_line_length": 11.100000381469727, "blob_id": "3c87f83b756ff923106791c0d3b297d6acdb3c39", "content_id": "6d4a63eca49b56115ef7130a6a26cf6c4b9bbdb0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 120, "license_type": "no_license", "max_line_length": 40, "num_lines": 10, "path": "/PythonStudy/linear_algebra/least_square_method.py", "repo_name": "greenkarson/python", "src_encoding": "UTF-8", "text": "import numpy as np\n\nx = np.matrix(np.array([[3], [1], [6]]))\ny = 4 * x\n\nprint(x)\n\nprint(y)\n\nprint((x.T @ x).I @ x.T @ y)" }, { "alpha_fraction": 0.4577464759349823, "alphanum_fraction": 0.5070422291755676, "avg_line_length": 12, "blob_id": "6b50f2372742e95cd2ee7bae255168a5d08a42bb", "content_id": "fa17459f310161942376e37a39543c16c187ce4e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 202, "license_type": "no_license", "max_line_length": 33, "num_lines": 11, "path": "/PythonStudy/Var1.py", "repo_name": "greenkarson/python", "src_encoding": "UTF-8", "text": "\"\"\"\n่ต‹ๅ€ผ่ฟ็ฎ—็ฌฆๅ’Œๅคๅˆ่ต‹ๅ€ผ่ฟ็ฎ—็ฌฆ\nVersion: 0.1\nAuthor: Karson\n\"\"\"\n\na = 10\nb = 3\na += b # ็›ธๅฝ“ไบŽ๏ผša = a + b\na *= a + 2 # ็›ธๅฝ“ไบŽ๏ผša = a * (a + 2)\nprint(a) #ๆƒณๆƒณ่ฟ™้‡Œไผš่พ“ๅ‡บไป€ไนˆ" }, { "alpha_fraction": 0.5306748747825623, "alphanum_fraction": 0.5368098020553589, "avg_line_length": 18.117647171020508, "blob_id": "5395e2e36ebbced9308c3f32e7166fe05c297498", "content_id": "9c450f9c65b3898b7df5eb1976d9a06690c08778", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 326, "license_type": "no_license", "max_line_length": 49, "num_lines": 17, "path": "/Loss/focal_loss.py", "repo_name": "greenkarson/python", "src_encoding": "UTF-8", "text": "import torch\nfrom torch import nn\n\n\nclass focal_loss(nn.Module):\n\n def __init__(self, gamma, alpha):\n super().__init__()\n self.gamma = gamma\n 
self.alpha = alpha\n\n def forward(self, x, tag):\n y = x[tag == 1]\n\n loss = self.alpha * (1 - y) ** self.gamma\n\n return torch.mean(loss)\n\n" }, { "alpha_fraction": 0.5451505184173584, "alphanum_fraction": 0.648829460144043, "avg_line_length": 15.61111068725586, "blob_id": "56308fd6ebd93bd753b2d394d458c281b2a6c272", "content_id": "39aefa30d0887eb9f82eb381af955483cef6f8dd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 329, "license_type": "no_license", "max_line_length": 23, "num_lines": 18, "path": "/PythonStudy/Var2.py", "repo_name": "greenkarson/python", "src_encoding": "UTF-8", "text": "\"\"\"\nๆฏ”่พƒใ€้€ป่พ‘ๅ’Œ็ฎ—่บซไปฝ่ฟ็ฎ—็ฌฆ็š„ไฝฟ็”จ\nVersion: 0.1\nAuthor: Karson\n\"\"\"\nflag0 = 1 == 1\nflag1 = 2 > 1\nflag2 = 3 < 2\nflag3 = flag0 and flag1\nflag4 = flag2 or flag3\nflag5 = not flag4\n\nprint(\"flag0:\", flag0)\nprint(\"flag1:\", flag1)\nprint(\"flag2:\", flag2)\nprint(\"flag3:\", flag3)\nprint(\"flag4:\", flag4)\nprint(\"flag5:\", flag5)\n" }, { "alpha_fraction": 0.513313889503479, "alphanum_fraction": 0.5789383053779602, "avg_line_length": 29.48167610168457, "blob_id": "32d8963a6a03f05ad0b76766b11e249a0c46b54f", "content_id": "c787e8e7506d91bba8931f49014e301532f5d524", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5821, "license_type": "no_license", "max_line_length": 89, "num_lines": 191, "path": "/yolo/daraknet53.net.py", "repo_name": "greenkarson/python", "src_encoding": "UTF-8", "text": "from torch import nn\nimport torch\n\n\nclass Converlutional(nn.Module):\n def __init__(self, in_channels, out_channels, kernel, stride, padding):\n super(Converlutional, self).__init__()\n self.converlutional_layer = nn.Sequential(\n nn.Conv2d(in_channels, out_channels, kernel, stride, padding, bias=False),\n nn.BatchNorm2d(out_channels),\n nn.LeakyReLU(0.1)\n )\n\n def forward(self, x):\n return self.converlutional_layer(x)\n\n\nclass Residual(nn.Module):\n def __init__(self, in_channels):\n super(Residual, self).__init__()\n self.residual_layer = nn.Sequential(\n Converlutional(in_channels, in_channels // 2, kernel=1, stride=1, padding=0),\n Converlutional(in_channels // 2, in_channels, kernel=3, stride=1, padding=1)\n )\n\n def forward(self, x):\n return x + self.residual_layer(x)\n\n\nclass Upsample(nn.Module):\n def __init__(self):\n super(Upsample, self).__init__()\n\n def forward(self, x):\n return torch.nn.functional.interpolate(x, scale_factor=2, mode='nearest')\n\n\nclass Downsample(nn.Module):\n def __init__(self, in_channels, out_channels):\n super(Downsample, self).__init__()\n self.downsample_layer = nn.Sequential(\n Converlutional(in_channels, out_channels, kernel=3, stride=2, padding=1)\n )\n\n def forward(self, x):\n return self.downsample_layer(x)\n\n\nclass ConverlutionalSet(nn.Module):\n def __init__(self, in_channels, out_channnels):\n super(ConverlutionalSet, self).__init__()\n self.converlutonal_set_layer = nn.Sequential(\n Converlutional(in_channels, out_channnels, kernel=1, stride=1, padding=0),\n Converlutional(out_channnels, in_channels, kernel=3, stride=1, padding=1),\n\n Converlutional(in_channels, out_channnels, kernel=1, stride=1, padding=0),\n Converlutional(out_channnels, in_channels, kernel=3, stride=1, padding=1),\n\n Converlutional(in_channels, out_channnels, kernel=1, stride=1, padding=0)\n )\n\n def forward(self, x):\n return self.converlutonal_set_layer(x)\n\n\nclass Darknet53(nn.Module):\n def __init__(self):\n super(Darknet53, 
self).__init__()\n self.predict_52 = nn.Sequential(\n Converlutional(3, 32, kernel=3, stride=1, padding=1),\n Downsample(32, 64),\n\n Residual(64),\n Downsample(64, 128),\n\n Residual(128),\n Residual(128),\n Downsample(128,256),\n\n Residual(256),\n Residual(256),\n Residual(256),\n Residual(256),\n Residual(256),\n Residual(256),\n Residual(256),\n Residual(256),\n )\n\n self.predict_26 = nn.Sequential(\n Downsample(256, 512),\n\n Residual(512),\n Residual(512),\n Residual(512),\n Residual(512),\n Residual(512),\n Residual(512),\n Residual(512),\n Residual(512),\n )\n\n self.predict_13 = nn.Sequential(\n Downsample(512, 1024),\n\n Residual(1024),\n Residual(1024),\n Residual(1024),\n Residual(1024)\n )\n\n self.convolutionalset_13 = nn.Sequential(\n ConverlutionalSet(1024, 512),\n )\n\n self.detection_13 = nn.Sequential(\n Converlutional(512, 1024, kernel=3, stride=1, padding=1),\n nn.Conv2d(1024, 45, kernel_size=1, stride=1, padding=0)\n )\n\n self.up_26 = nn.Sequential(\n Converlutional(512, 256, kernel=1, stride=1, padding=0),\n Upsample()\n )\n\n self.convolutionalset_26 = nn.Sequential(\n ConverlutionalSet(768,256)\n )\n\n self.detection_26 = nn.Sequential(\n Converlutional(256, 512, kernel=3, stride=1, padding=1),\n nn.Conv2d(512, 45, kernel_size=1, stride=1, padding=0)\n )\n\n self.up_52 = nn.Sequential(\n Converlutional(256, 128, kernel=1, stride=1, padding=0),\n Upsample()\n )\n\n self.convolutionalset_52 = nn.Sequential(\n ConverlutionalSet(384, 128)\n )\n\n self.detection_52 = nn.Sequential(\n Converlutional(128, 256, kernel=3, stride=1, padding=1),\n nn.Conv2d(256, 45, kernel_size=1, stride=1, padding=0)\n )\n\n def forward(self, x):\n predict_52 = self.predict_52(x)\n predict_26 = self.predict_26(predict_52)\n predict_13 = self.predict_13(predict_26)\n\n convolutionalset_13 = self.convolutionalset_13(predict_13)\n # print(convolutionalset_13.shape)\n\n detection_13 = self.detection_13(convolutionalset_13)\n # print(detection_13.shape)\n\n up_26_out = self.up_26(convolutionalset_13)\n # print(up_26_out.shape)\n\n route_26_out = torch.cat((up_26_out,predict_26), dim=1)\n # print(route_26_out.shape)\n\n convolutionalset_26 = self.convolutionalset_26(route_26_out)\n # print(convolutionalset_26.shape)\n\n detection_26 = self.detection_26(convolutionalset_26)\n # print(detection_26.shape)\n\n up_52_out = self.up_52(convolutionalset_26)\n # print(up_52_out.shape)\n route_52_out = torch.cat((up_52_out, predict_52), dim=1)\n # print(route_52_out.shape)\n\n convolutionalset_52 = self.convolutionalset_52(route_52_out)\n # print(convolutionalset_52.shape)\n detection_52 = self.detection_52(convolutionalset_52)\n # print(detection_52.shape)\n\n return detection_13, detection_26, detection_52\n\n\nif __name__ == '__main__':\n x = torch.randn(1, 3, 416, 416)\n net = Darknet53()\n y_13, y_26, y_52 = net(x)\n print(y_13.shape)\n print(y_26.shape)\n print(y_52.shape)" }, { "alpha_fraction": 0.5350624322891235, "alphanum_fraction": 0.6243996024131775, "avg_line_length": 21.17021369934082, "blob_id": "01a2f5986a49c6cfcf71dc744177db905937aa80", "content_id": "0bd4300be93e0454f4787f21fca4271be7ee2328", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1321, "license_type": "no_license", "max_line_length": 92, "num_lines": 47, "path": "/OpenCV_Practice/hough_line.py", "repo_name": "greenkarson/python", "src_encoding": "UTF-8", "text": "import cv2\nimport numpy as np\nimg = cv2.imread(\"27.jpg\")\n\nimg_gray = 
cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)\ncanny = cv2.Canny(img_gray,50,100)\n\n\"\"\"\nimage ่พ“ๅ…ฅๅ›พๅƒ \nrho ๆญฅ้•ฟไธบ1ๅƒ็ด \ntheta ่ง’ๅบฆๆญฅ้•ฟpi/180\nthreshold ็บฟๆฎต้˜€ๅ€ผ่ถ…่ฟ‡ๅคšๅฐ‘ๆ˜ฏไธบๆ–ฐ็š„็›ด็บฟ\nlines=None, \nminLineLength= ็บฟ็š„ๆœ€็Ÿญ้•ฟๅบฆ๏ผŒๆฏ”่ฟ™ไธช็Ÿญ็š„้ƒฝ่ขซๅฟฝ็•ฅ \nmaxLineGap= ไธคๆก็›ด็บฟไน‹้—ด็š„ๆœ€ๅคง้—ด้š”๏ผŒๅฐไบŽๆญคๅ€ผ๏ผŒ่ฎคไธบๆ˜ฏไธ€ๆก็›ด็บฟ\n่พ“ๅ‡บไธŠไนŸๅ˜ไบ†๏ผŒไธๅ†ๆ˜ฏ็›ด็บฟๅ‚ๆ•ฐ็š„๏ผŒ่ฟ™ไธชๅ‡ฝๆ•ฐ่พ“ๅ‡บ็š„็›ดๆŽฅๅฐฑๆ˜ฏ็›ด็บฟ็‚น็š„ๅๆ ‡ไฝ็ฝฎ\n\"\"\"\n\nlines = cv2.HoughLinesP(canny,1,np.pi/180,30,None,50,10)\n# ๆๅ–ไธบไบŒ็ปด\nline = lines[:,0,:]\nfor x1,y1,x2,y2 in line[:]:\n cv2.line(img,(x1,y1),(x2,y2),(0,0,255),2)\n\n\n\"\"\" \nimage, rho, theta, threshold, lines=None, srn=None, stn=None, min_theta=None, max_theta=None\n\n\nlines = cv2.HoughLines(canny, 1, np.pi / 180, 100)\n# ๆžๅๆ ‡่ฝฌๆข\nfor line in lines:\n rho, theta = line[0]\n a = np.cos(theta)\n b = np.sin(theta)\n x0 = a * rho\n y0 = b * rho\n x1 = int(x0 + 1000 * (-b)) # ็›ด็บฟ่ตท็‚นๆจชๅๆ ‡\n y1 = int(y0 + 1000 * (a)) # ็›ด็บฟ่ตท็‚น็บตๅๆ ‡\n x2 = int(x0 - 1000 * (-b)) # ็›ด็บฟ็ปˆ็‚นๆจชๅๆ ‡\n y2 = int(y0 - 1000 * (a)) # ็›ด็บฟ็ปˆ็‚น็บตๅๆ ‡\n cv2.line(img, (x1, y1), (x2, y2), (0, 0, 255), 2)\n\"\"\"\n\n\ncv2.imshow(\"...\",img)\ncv2.waitKey(0)" }, { "alpha_fraction": 0.5295429229736328, "alphanum_fraction": 0.5406911969184875, "avg_line_length": 23.2702693939209, "blob_id": "1b7123fdca40303d87635821746c53bd402ce51d", "content_id": "af4ed07a4faec9b7dd45cf96f3f7b8fc3ed3d062", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 897, "license_type": "no_license", "max_line_length": 68, "num_lines": 37, "path": "/SEQ2SEQ/backup/train.py", "repo_name": "greenkarson/python", "src_encoding": "UTF-8", "text": "from backup.net import *\nfrom backup.data import *\nfrom torch.utils.data import DataLoader\nfrom torch import optim\n\n\nclass Trainer:\n\n def __init__(self):\n train_dataset = MyDataset(\"../code\")\n self.train_dataloader = DataLoader(train_dataset, 100, True)\n\n self.net = Cnn2Seq()\n\n self.opt = optim.Adam(self.net.parameters())\n\n self.loss_fn = nn.CrossEntropyLoss()\n\n def __call__(self):\n for epoch in range(10):\n for i,(img,tag) in enumerate(self.train_dataloader):\n output = self.net(img)\n\n output = output.reshape(-1, 10)\n tag = tag.reshape(-1)\n\n loss = self.loss_fn(output,tag.long())\n\n self.opt.zero_grad()\n loss.backward()\n self.opt.step()\n\n print(loss)\n\nif __name__ == '__main__':\n train = Trainer()\n train()" }, { "alpha_fraction": 0.625, "alphanum_fraction": 0.6833333373069763, "avg_line_length": 20.909090042114258, "blob_id": "f4e0dbd280b98755b75d0899ab367bd095872652", "content_id": "b49c6017ad518e323ddc877bcd18675559edcb10", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 258, "license_type": "no_license", "max_line_length": 53, "num_lines": 11, "path": "/deep_learning/day02/count.py", "repo_name": "greenkarson/python", "src_encoding": "UTF-8", "text": "import torch,thop\nfrom torch import nn\n\nconv = nn.Conv2d(3,16,3,1,padding=1)\nx = torch.randn(1,3,16,16)\ny = conv(x)\n# ็ปŸ่ฎก่ฎก็ฎ—้‡\nflops, parm = thop.profile(conv,(x,))\n# ่พ“ๅ‡บๆ ผๅผ\nflops, parm = thop.clever_format((flops,parm),'%3.f')\nprint(flops,parm)" }, { "alpha_fraction": 0.43996745347976685, "alphanum_fraction": 0.5429385304450989, "avg_line_length": 18.34645652770996, "blob_id": "fdc6ab5be7a5f951047afa2faca0f9fe63d5a3be", "content_id": 
"cf49d779f762df8e2b2142b18404f59de93f3935", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3403, "license_type": "no_license", "max_line_length": 46, "num_lines": 127, "path": "/PythonStudy/Numpy/Numpy_Practice1.py", "repo_name": "greenkarson/python", "src_encoding": "UTF-8", "text": "import numpy as np\n\n# 11. ๅฆ‚ไฝ•่Žทๅ–ไธคไธชnumpyๆ•ฐ็ป„ไน‹้—ด็š„ๅ…ฌๅ…ฑ้กน๏ผŸ\n# **้šพๅบฆ็ญ‰็บง๏ผš**L2\n# **้—ฎ้ข˜๏ผš**่Žทๅ–ๆ•ฐ็ป„aๅ’Œๆ•ฐ็ป„bไน‹้—ด็š„ๅ…ฌๅ…ฑ้กนใ€‚\n# ็ป™ๅฎš๏ผš\n# a = np.array([1,2,3,2,3,4,3,4,5,6])\n# b = np.array([7,2,10,2,7,4,9,4,9,8])\n# ๆœŸๆœ›็š„่พ“ๅ‡บ๏ผš\n# array([2, 4])\n\n# a = np.array([1,2,3,2,3,4,3,4,5,6])\n# b = np.array([7,2,10,2,7,4,9,4,9,8])\n# print(np.intersect1d(a,b))\n\n# #12. ๅฆ‚ไฝ•ไปŽไธ€ไธชๆ•ฐ็ป„ไธญๅˆ ้™คๅญ˜ๅœจไบŽๅฆไธ€ไธชๆ•ฐ็ป„ไธญ็š„้กน๏ผŸ\n# **้šพๅบฆ็ญ‰็บง๏ผš**L2\n# **้—ฎ้ข˜๏ผš**ไปŽๆ•ฐ็ป„aไธญๅˆ ้™คๆ•ฐ็ป„bไธญ็š„ๆ‰€ๆœ‰้กนใ€‚\n# ็ป™ๅฎš๏ผš\n# a = np.array([1,2,3,4,5])\n# b = np.array([5,6,7,8,9])\n# ๆœŸๆœ›็š„่พ“ๅ‡บ๏ผš\n# array([1,2,3,4])\n\n# a = np.array([1,2,3,4,5])\n# b = np.array([5,6,7,8,9])\n# print(np.setdiff1d(a,b))\n#13. ๅฆ‚ไฝ•ๅพ—ๅˆฐไธคไธชๆ•ฐ็ป„ๅ…ƒ็ด ๅŒน้…็š„ไฝ็ฝฎ๏ผŸ\n# **้šพๅบฆ็ญ‰็บง๏ผš**L2\n# **้—ฎ้ข˜๏ผš**่Žทๅ–aๅ’Œbๅ…ƒ็ด ๅŒน้…็š„ไฝ็ฝฎใ€‚\n# ็ป™ๅฎš๏ผš\n# a = np.array([1,2,3,2,3,4,3,4,5,6])\n# b = np.array([7,2,10,2,7,4,9,4,9,8])\n# ๆœŸๆœ›็š„่พ“ๅ‡บ๏ผš\n# > (array([1, 3, 5, 7]),)\n\n# a = np.array([1,2,3,2,3,4,3,4,5,6])\n# b = np.array([7,2,10,2,7,4,9,4,9,8])\n# print(np.where(a == b))\n\n#14. ๅฆ‚ไฝ•ไปŽnumpyๆ•ฐ็ป„ไธญๆๅ–็ป™ๅฎš่Œƒๅ›ดๅ†…็š„ๆ‰€ๆœ‰ๆ•ฐๅญ—๏ผŸ\n# **้šพๅบฆ็ญ‰็บง๏ผš**L2\n# **้—ฎ้ข˜๏ผš**่Žทๅ–5ๅˆฐ10ไน‹้—ด็š„ๆ‰€ๆœ‰้กน็›ฎใ€‚\n# ็ป™ๅฎš๏ผš\n# a = np.array([2, 6, 1, 9, 10, 3, 27])\n# ๆœŸๆœ›็š„่พ“ๅ‡บ๏ผš\n# (array([6, 9, 10]),)\n\na = np.array([2, 6, 1, 9, 10, 3, 27])\nindex = np.where((a >= 5) & (a <= 10))\nprint(a[index])\n\n#15. ๅฆ‚ไฝ•ๅˆ›ๅปบไธ€ไธชpythonๅ‡ฝๆ•ฐๆฅๅค„็†scalarsๅนถๅœจnumpyๆ•ฐ็ป„ไธŠๅทฅไฝœ๏ผŸ\n# **้šพๅบฆ็ญ‰็บง๏ผš**L2\n# **้—ฎ้ข˜๏ผš**่ฝฌๆข้€‚็”จไบŽไธคไธชๆ ‡้‡็š„ๅ‡ฝๆ•ฐmaxx๏ผŒไปฅๅค„็†ไธคไธชๆ•ฐ็ป„ใ€‚\n# ็ป™ๅฎš๏ผš\n# def maxx(x, y):\n# \"\"\"Get the maximum of two items\"\"\"\n# if x >= y:\n# return x\n# else:\n# return y\n# maxx(1, 5)\n# > 5\n# ๆœŸๆœ›็š„่พ“ๅ‡บ๏ผš\n# a = np.array([5, 7, 9, 8, 6, 4, 5])\n# b = np.array([6, 3, 4, 8, 9, 7, 1])\n# pair_max(a, b)\n# > array([ 6., 7., 9., 8., 9., 7., 5.])\ndef maxx(x, y):\n \"\"\"Get the maximum of two items\"\"\"\n if x >= y:\n return x\n else:\n return y\na = np.array([5, 7, 9, 8, 6, 4, 5])\nb = np.array([6, 3, 4, 8, 9, 7, 1])\npair_max = np.vectorize(maxx,otypes=[float])\nprint(pair_max(a,b))\n\n#16. ๅฆ‚ไฝ•ไบคๆขไบŒ็ปดnumpyๆ•ฐ็ป„ไธญ็š„ไธคๅˆ—๏ผŸ\n# **้šพๅบฆ็ญ‰็บง๏ผš**L2\n# **้—ฎ้ข˜๏ผš**ๅœจๆ•ฐ็ป„arrไธญไบคๆขๅˆ—1ๅ’Œ2ใ€‚\n# ็ป™ๅฎš๏ผš\n# arr = np.arange(9).reshape(3,3)\n\narr = np.arange(9).reshape(3,3)\nprint(arr[[0,2,1],:])\n\n#17. ๅฆ‚ไฝ•ไบคๆขไบŒ็ปดnumpyๆ•ฐ็ป„ไธญ็š„ไธค่กŒ๏ผŸ\n# **้šพๅบฆ็ญ‰็บง๏ผš**L2\n# **้—ฎ้ข˜๏ผš**ไบคๆขๆ•ฐ็ป„arrไธญ็š„็ฌฌ1ๅ’Œ็ฌฌ2่กŒ๏ผš\n# ็ป™ๅฎš๏ผš\n# arr = np.arange(9).reshape(3,3)\n\nprint(arr[:,[0,2,1]])\n\n\n#18. ๅฆ‚ไฝ•ๅ่ฝฌไบŒ็ปดๆ•ฐ็ป„็š„่กŒ๏ผŸ\n# **้šพๅบฆ็ญ‰็บง๏ผš**L2\n# **้—ฎ้ข˜๏ผš**ๅ่ฝฌไบŒ็ปดๆ•ฐ็ป„arr็š„่กŒใ€‚\n# ็ป™ๅฎš๏ผš\n# Input\n# arr = np.arange(9).reshape(3,3)\n\nprint(arr[::-1])\n\n#19. ๅฆ‚ไฝ•ๅ่ฝฌไบŒ็ปดๆ•ฐ็ป„็š„ๅˆ—๏ผŸ\n# **้šพๅบฆ็ญ‰็บง๏ผš**L2\n# **้—ฎ้ข˜๏ผš**ๅ่ฝฌไบŒ็ปดๆ•ฐ็ป„arr็š„ๅˆ—ใ€‚\n# ็ป™ๅฎš๏ผš\n# Input\n# arr = np.arange(9).reshape(3,3)\n\nprint(arr[:,::-1])\n\n#20. 
ๅฆ‚ไฝ•ๅˆ›ๅปบๅŒ…ๅซ5ๅˆฐ10ไน‹้—ด้šๆœบๆตฎๅŠจ็š„ไบŒ็ปดๆ•ฐ็ป„๏ผŸ\n# **้šพๅบฆ็ญ‰็บง๏ผš**L2\n# **้—ฎ้ข˜๏ผš**ๅˆ›ๅปบไธ€ไธชๅฝข็Šถไธบ5x3็š„ไบŒ็ปดๆ•ฐ็ป„๏ผŒไปฅๅŒ…ๅซ5ๅˆฐ10ไน‹้—ด็š„้šๆœบๅ่ฟ›ๅˆถๆ•ฐใ€‚\n\n\n\n#21. ๅฆ‚ไฝ•ๅœจnumpyๆ•ฐ็ป„ไธญๅชๆ‰“ๅฐๅฐๆ•ฐ็‚นๅŽไธ‰ไฝ๏ผŸ\n# **้šพๅบฆ็ญ‰็บง๏ผš**L1\n# **้—ฎ้ข˜๏ผš**ๅชๆ‰“ๅฐๆˆ–ๆ˜พ็คบnumpyๆ•ฐ็ป„rand_arr็š„ๅฐๆ•ฐ็‚นๅŽ3ไฝใ€‚\n# ็ป™ๅฎš๏ผš\n# rand_arr = np.random.random((5,3))\n" }, { "alpha_fraction": 0.47204968333244324, "alphanum_fraction": 0.5231193900108337, "avg_line_length": 26.339622497558594, "blob_id": "19929ec731c34d7e705382a3d08298e4de32b484", "content_id": "606726e174637b1dddbec38b8860bbcddccc8f32", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1673, "license_type": "no_license", "max_line_length": 74, "num_lines": 53, "path": "/MTCNN2/tools/utils.py", "repo_name": "greenkarson/python", "src_encoding": "UTF-8", "text": "import numpy as np\n\n\ndef iou(box, boxes, is_min=False):\n # box = x1,y1,x2,y2\n box_area = (box[2] - box[0]) * (box[3] - box[1])\n boxes_area = (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])\n\n xx1 = np.maximum(box[0], boxes[:, 0])\n yy1 = np.maximum(box[1], boxes[:, 1])\n xx2 = np.minimum(box[2], boxes[:, 2])\n yy2 = np.minimum(box[3], boxes[:, 3])\n\n w = np.maximum(0, xx2 - xx1)\n h = np.maximum(0, yy2 - yy1)\n if is_min:\n ovr = np.true_divide((w * h), np.minimum(box_area, boxes_area))\n return ovr\n else:\n ovr = np.true_divide((w * h), (box_area + boxes_area - (w * h)))\n return ovr\n\n\ndef nms(boxes, threshold, is_min=False):\n if boxes.shape[0] == 0:\n return np.array([])\n _boxes = boxes[(-boxes[:, 4]).argsort()]\n r_boxes = []\n\n while _boxes.shape[0] > 1:\n # ๅ–ๅ‡บ็ฌฌ1ไธชๆก†\n a_box = _boxes[0]\n # ๅ–ๅ‡บๅ‰ฉไฝ™็š„ๆก†\n b_boxes = _boxes[1:]\n\n # ๅฐ†1stไธชๆก†ๅŠ ๅ…ฅๅˆ—่กจ\n r_boxes.append(a_box) ##ๆฏๅพช็Žฏไธ€ๆฌกๅพ€๏ผŒๆทปๅŠ ไธ€ไธชๆก†\n _boxes = b_boxes[iou(a_box, b_boxes, is_min) < threshold]\n\n if _boxes.shape[0] > 0:\n ##ๆœ€ๅŽไธ€ๆฌก๏ผŒ็ป“ๆžœๅช็”จ1stไธช็ฌฆๅˆๆˆ–ๅชๆœ‰ไธ€ไธช็ฌฆๅˆ๏ผŒ่‹ฅๆก†็š„ไธชๆ•ฐๅคงไบŽ1๏ผ›\n # โ˜…ๆญคๅค„_boxes่ฐƒ็”จ็š„ๆ˜ฏwhilexๅพช็Žฏ้‡Œ็š„๏ผŒๆญคๅˆคๆ–ญๆกไปถๆ”พๅœจๅพช็Žฏ้‡Œๅ’Œๅค–้ƒฝๅฏไปฅ๏ผˆๅชๆœ‰ๅœจๅ‡ฝๆ•ฐ็ฑปๅค–ๆ‰ๅฏไบง็”Ÿๅฑ€้ƒจไฝœ็”จไบŽ๏ผ‰\n r_boxes.append(_boxes[0]) # ๅฐ†ๆญคๆก†ๆทปๅŠ ๅˆฐๅˆ—่กจไธญ\n\n return np.array(r_boxes)\n\n\nif __name__ == '__main__':\n b = [38, 50, 120, 180]\n bs = [[38, 50, 120, 180], [45, 56, 110, 200]]\n bs = np.array(bs)\n res = iou(b, bs)\n print(res)\n" }, { "alpha_fraction": 0.5127118825912476, "alphanum_fraction": 0.537530243396759, "avg_line_length": 30.769229888916016, "blob_id": "9aba46629c5b183f5bc0b1beb1e7fba942c16fd8", "content_id": "cd1ffa986186840f2e7b6bf8a649fb52ce74fef4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1652, "license_type": "no_license", "max_line_length": 120, "num_lines": 52, "path": "/MTCNN2/data.py", "repo_name": "greenkarson/python", "src_encoding": "UTF-8", "text": "from torch.utils.data import Dataset\nfrom PIL import Image\nimport numpy as np\nfrom torchvision import transforms\n\ntf = transforms.Compose([\n transforms.ToTensor()\n])\n\n\nclass MyDataset(Dataset):\n\n def __init__(self, root, img_size):\n self.dataset = []\n self.root_dir = f\"{root}/{img_size}\"\n\n with open(f\"{self.root_dir}/positive.txt\", \"r\") as f:\n self.dataset.extend(f.readlines())\n\n with open(f\"{self.root_dir}/negative.txt\", \"r\") as f:\n self.dataset.extend(f.readlines())\n\n with open(f\"{self.root_dir}/part.txt\", 
\"r\") as f:\n self.dataset.extend(f.readlines())\n\n def __len__(self):\n return len(self.dataset)\n\n def __getitem__(self, index):\n data = self.dataset[index]\n strs = data.split()\n\n if strs[1] == \"1\":\n img_path = f\"{self.root_dir}/positive/{strs[0]}\"\n elif strs[1] == \"2\":\n img_path = f\"{self.root_dir}/part/{strs[0]}\"\n else:\n img_path = f\"{self.root_dir}/negative/{strs[0]}\"\n\n img_data = tf(Image.open(img_path))\n\n c, x1, y1, x2, y2 = float(strs[1]), float(strs[2]), float(strs[3]), float(strs[4]), float(strs[5])\n\n return img_data, np.array([c, x1, y1, x2, y2,\n float(strs[6]), float(strs[7]), float(strs[8]), float(strs[9]), float(strs[10]),\n float(strs[11]), float(strs[12]), float(strs[13]), float(strs[14]), float(strs[15])],\n dtype=np.float32)\n\n\nif __name__ == '__main__':\n dataset = MyDataset(\"/Users/karson/Downloads/Dataset/\", 12)\n print(dataset[0])\n" }, { "alpha_fraction": 0.6220472455024719, "alphanum_fraction": 0.7139107584953308, "avg_line_length": 22.875, "blob_id": "97f6fbb03b93675c17c8e2389df380e036bf40d7", "content_id": "6e4b94b7d1d74e54de0a5908ccbda4263e5ce87a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 407, "license_type": "no_license", "max_line_length": 56, "num_lines": 16, "path": "/OpenCV_Practice/canny_ct.py", "repo_name": "greenkarson/python", "src_encoding": "UTF-8", "text": "import cv2\n\nimg = cv2.imread(\"25.jpg\")\n# ๅฏนๆฏ”ๅบฆๅขžๅผบๅŽปๅ™ช\nimg = cv2.convertScaleAbs(img,alpha=6,beta=0)\nimg = cv2.GaussianBlur(img,(3,3),1)\n# ่ฝฎๅป“ๆๅ–่กฅๆดž\ndst = cv2.Canny(img,50,150)\nkernel = cv2.getStructuringElement(cv2.MORPH_RECT,(2,2))\ndst2 = cv2.morphologyEx(dst,cv2.MORPH_CLOSE,kernel)\n# dst = cv2.resize(dst,(500,500))\n\ncv2.imshow(\"src show\",img)\ncv2.imshow(\"dst show\",dst)\n\ncv2.waitKey(0)" }, { "alpha_fraction": 0.5233644843101501, "alphanum_fraction": 0.5626168251037598, "avg_line_length": 18.14285659790039, "blob_id": "7f1272fed9221eb10d4067f3090556734caa4038", "content_id": "54a99ccf7fd3c6c4adbc97567c0571a14794a4a4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 715, "license_type": "no_license", "max_line_length": 59, "num_lines": 28, "path": "/PythonStudy/For_in.py", "repo_name": "greenkarson/python", "src_encoding": "UTF-8", "text": "\"\"\"\n็”จforๅพช็Žฏๅฎž็Žฐ้ๅŽ†ๅˆ—่กจใ€ๅ…ƒ็ฅ–ใ€set้›†ๅˆใ€ๅญ—ๅ…ธ\n\nVersion: 0.1\nAuthor: karson\n\"\"\"\na = [1, 2, 3, 4, 5, 6]\nfor i in a:\n print(\"i=\", i)\n\nb = (6, 7, 8, 9, 10)\nfor x in b:\n print(\"x=\", x)\n\nc = {11, 12, 13}\nfor y in c:\n print(\"y=\", y)\n\nmy_dic = {'pythonๆ•™็จ‹': \"http://c.biancheng.net/python/\",\n 'shellๆ•™็จ‹': \"http://c.biancheng.net/shell/\",\n 'javaๆ•™็จ‹': \"http://c.biancheng.net/java/\"}\n\nfor d in my_dic.items():\n print(\"d\", d)\n\"\"\"\nๅœจไฝฟ็”จ for ๅพช็Žฏ้ๅŽ†ๅญ—ๅ…ธๆ—ถ๏ผŒ็ปๅธธไผš็”จๅˆฐๅ’Œๅญ—ๅ…ธ็›ธๅ…ณ็š„ 3 ไธชๆ–นๆณ•๏ผŒๅณ items()ใ€keys()ใ€values()\nๅฝ“็„ถ๏ผŒๅฆ‚ๆžœไฝฟ็”จ for ๅพช็Žฏ็›ดๆŽฅ้ๅŽ†ๅญ—ๅ…ธ๏ผŒๅˆ™่ฟญไปฃๅ˜้‡ไผš่ขซๅ…ˆๅŽ่ต‹ๅ€ผไธบๆฏไธช้”ฎๅ€ผๅฏนไธญ็š„้”ฎใ€‚\n\"\"\"" }, { "alpha_fraction": 0.530927836894989, "alphanum_fraction": 0.5463917255401611, "avg_line_length": 12.857142448425293, "blob_id": "3758e4e90f2361305b4c25b210c9bc35154f1345", "content_id": "423a9bd29914719ab7755c126ebc076a01a8bae8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 272, "license_type": "no_license", "max_line_length": 27, "num_lines": 14, "path": 
"/PythonStudy/check_num3.py", "repo_name": "greenkarson/python", "src_encoding": "UTF-8", "text": "\"\"\"\n่พ“ๅ…ฅไธคไธชๆญฃๆ•ดๆ•ฐ่ฎก็ฎ—ๅฎƒไปฌ็š„ๆœ€ๅคงๅ…ฌ็บฆๆ•ฐๅ’Œๆœ€ๅฐๅ…ฌๅ€ๆ•ฐ\nVersion: 0.1\nAuthor: karson\n\"\"\"\n\na = int(input(\"่ฏท่พ“ๅ…ฅไธ€ไธชๆ•ฐ๏ผš\"))\nb = int(input(\"่ฏท่พ“ๅ…ฅๅฆๅค–ไธ€ไธชๆ•ฐ๏ผš\"))\nwhile a % b != 0:\n MOD = a % b\n a = b\n b = MOD\n\nprint(\"gcd(a,b)=\", b)\n" }, { "alpha_fraction": 0.6230529546737671, "alphanum_fraction": 0.7196261882781982, "avg_line_length": 25.83333396911621, "blob_id": "be367f83fe6fd4c2ad4ec25459df86f1cd101f50", "content_id": "5e8b4a2bd33cf133b10753fffa9d4fbba20fffa0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 321, "license_type": "no_license", "max_line_length": 45, "num_lines": 12, "path": "/OpenCV_Practice/hsv.py", "repo_name": "greenkarson/python", "src_encoding": "UTF-8", "text": "import numpy as np\nimport cv2\n\nimg = cv2.imread(\"11.jpg\")\nhsv = cv2.cvtColor(img,cv2.COLOR_RGB2HSV)\nlower_blue = np.array([100,200,100])\nupper_blue = np.array([200,255,200])\nmaks = cv2.inRange(hsv,lower_blue,upper_blue)\nres = cv2.bitwise_and(img,img,mask=maks)\ncv2.imshow(\"src\",img)\ncv2.imshow(\"maks\",maks)\ncv2.waitKey(0)" }, { "alpha_fraction": 0.4606741666793823, "alphanum_fraction": 0.567415714263916, "avg_line_length": 21.25, "blob_id": "4965643e8f0271c694ceca25407d68904d388188", "content_id": "78f3dcfce4fe1bb91ed3634fac3e71f5682aa676", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 178, "license_type": "no_license", "max_line_length": 36, "num_lines": 8, "path": "/OpenCV_Practice/write_image.py", "repo_name": "greenkarson/python", "src_encoding": "UTF-8", "text": "import numpy as np\nimport cv2\nimg = np.empty((200,200,3),np.uint8)\nimg[...,0] = 255\nimg[...,1] = 0\nimg[...,2] = 0\nimg = img[...,::-1] #bgr - rgb\ncv2.imwrite(\"img_save.jpg\", img)\n" }, { "alpha_fraction": 0.5154419541358948, "alphanum_fraction": 0.5282214879989624, "avg_line_length": 25.08333396911621, "blob_id": "9f35417656292efb68f403bfe7ddb83cf2ab16ad", "content_id": "ada3b4657d9dce4c0df0407023ca1c7bac75bb4b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 939, "license_type": "no_license", "max_line_length": 92, "num_lines": 36, "path": "/SEQ2SEQ/train.py", "repo_name": "greenkarson/python", "src_encoding": "UTF-8", "text": "from data import *\nfrom net import *\nfrom torch.utils.data import DataLoader\nfrom torch import optim, nn\n\n\nclass Trainer:\n def __init__(self):\n\n self.train_dataset = Mydataset(\"./code\")\n self.train_dataloader = DataLoader(self.train_dataset, batch_size=100, shuffle=True)\n\n self.net = CNN2SEQ()\n\n self.opt = optim.Adam(self.net.parameters())\n\n self.loss_fn = nn.CrossEntropyLoss()\n\n def __call__(self):\n for epoch in range(1000):\n for i, (img, tag) in enumerate(self.train_dataloader):\n y = self.net(img)\n y = y.reshape(-1, 10)\n tag = tag.reshape(-1)\n # print(y.shape, img.shape, tag.shape)\n loss = self.loss_fn(y, tag)\n\n self.opt.zero_grad()\n loss.backward()\n self.opt.step()\n print(loss)\n\n\nif __name__ == '__main__':\n t = Trainer()\n t()\n" }, { "alpha_fraction": 0.45868945121765137, "alphanum_fraction": 0.4622507095336914, "avg_line_length": 22, "blob_id": "75751e156fdeca79229639415fbb0348eece7ddf", "content_id": "18793014f07ab6bbf10857a362b19410f9634b7c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": 
"Python", "length_bytes": 1404, "license_type": "no_license", "max_line_length": 38, "num_lines": 61, "path": "/PythonStudy/Data_structure/Linked_list/practice.py", "repo_name": "greenkarson/python", "src_encoding": "UTF-8", "text": "class Node:\n def __init__(self,data):\n self.data = data\n self.next = None\n\nclass Linkedlist:\n def __init__(self):\n self.head = None\n\n def is_empty(self):\n if self.head == Node:\n return self.head is None\n\n def lenth(self):\n cur = self.head\n count = 0\n while cur is not None:\n count = +1\n cur = self.next\n return count\n\n def item(self):\n cur = self.head\n while cur is not None:\n yield cur.data\n cur = cur.next\n\n def add(self,data):\n node = Node(data)\n if self.head == None:\n self.head = node\n else:\n node.next = self.head\n self.head = node\n\n def append(self,data):\n node = Node(data)\n if self.is_empty():\n self.head = node\n else:\n cur = self.head\n while cur is not None:\n cur = cur.next\n cur.next = node\n\n def insert(self,index,data):\n node = Node(data)\n if index <= 0 :\n self.add(data)\n elif index > (self.lenth()-1):\n self.append(data)\n else:\n cur = self.head\n for i in range(index-1):\n cur =cur.next\n node.next = cur.next\n cur.next = node\n\n def delete(self,data):\n cur = self.head\n pre = None\n\n" }, { "alpha_fraction": 0.6543062329292297, "alphanum_fraction": 0.7332535982131958, "avg_line_length": 25.15625, "blob_id": "e2842e52d0e70e67fe8a6f060c373f1fec235d96", "content_id": "84566a390afe33d981dd5240ffb92d0d1eed25cc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1024, "license_type": "no_license", "max_line_length": 70, "num_lines": 32, "path": "/OpenCV_Practice/hist_pro.py", "repo_name": "greenkarson/python", "src_encoding": "UTF-8", "text": "import cv2\nimport numpy as np\n# ้€‰ๅ–้œ€่ฆ็š„ๅŒบๅŸŸๅ›พ็‰‡\nroi = cv2.imread(\"10.jpg\")\n# ่ฏปๅ–ๅŽŸๅ›พ\ntarget = cv2.imread(\"9.jpg\")\n# ๅฐ†ๅ›พ็‰‡BGR่ฝฌไธบHSV\nroi_hsv = cv2.cvtColor(roi,cv2.COLOR_BGR2HSV)\ntarget_hsv = cv2.cvtColor(target,cv2.COLOR_BGR2HSV)\n\n# ่ฎก็ฎ—roi็›ดๆ–นๅ›พๅนถ่ฟ›่กŒๅฝ’ไธ€ๅŒ–\nroi_hist = cv2.calcHist([roi_hsv],[0,1],None,[180,256],[0,180,0,256])\n\ncv2.normalize(roi_hist,roi_hist,0,255,cv2.NORM_MINMAX)\n# ๆŠŠroi็›ดๆ–นๅ›พๅๅ‘ๆŠ•ๅฝฑๅˆฐๅŽŸๅ›พไธŠ\ndst = cv2.calcBackProject([target_hsv],[0,1],roi_hist,[0,180,0,256],1)\n\n# ๆŠŠ้›ถๆ•ฃ็š„็‚น่ฟžๆˆไธ€็‰‡\ndst_kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(5,5))\ndst = cv2.filter2D(dst,-1,dst_kernel)\n\n# ๆŠŠdst่ฝฌไธบไบŒๅ€ผๅŒ–ๅ›พ็‰‡\nret,thresh = cv2.threshold(dst,50,255,cv2.THRESH_BINARY)\n\n# ๆŠŠๅ›พ็‰‡่ฝฌๆขๆˆไธ‰้€š้“\nthresh = cv2.merge((thresh,thresh,thresh))\n# ๆŠŠๅŽŸๅ›พไธŽ้ฎๆŒกๆŒ‰ไฝไธŽ่ฟ็ฎ—ๅˆๆˆๅ‡บๆๅ–็š„้ขœ่‰ฒ่ฝฎๅป“\nres = cv2.bitwise_and(target,thresh)\n# ็ป“ๆžœ่ฟ›่กŒๆ‹ผๆŽฅ\nres = np.hstack((target,thresh,res))\ncv2.imshow(\"res\",res)\ncv2.waitKey(0)" }, { "alpha_fraction": 0.459447979927063, "alphanum_fraction": 0.4915074408054352, "avg_line_length": 37.925621032714844, "blob_id": "ccfe99906d63c8cf1ac63d2fc60e0e96ccaa0868", "content_id": "e9f015f6d12b218a259c67fab605aaf2d97ccedb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4710, "license_type": "no_license", "max_line_length": 119, "num_lines": 121, "path": "/FACE_MTCNN/gen_data.py", "repo_name": "greenkarson/python", "src_encoding": "UTF-8", "text": "import os\nfrom PIL import Image\nfrom celeba import Celeba\nimport numpy as np\nfrom tools import utils\n\n\nclass Gendata():\n def __init__(self, metadata, 
output_folder, crop_size, net_stage):\n        self.net_data_folder = os.path.join(output_folder, net_stage)\n\n        self.positive_dest = os.path.join(self.net_data_folder, 'positive')\n        self.negative_dest = os.path.join(self.net_data_folder, 'negative')\n        self.part_dest = os.path.join(self.net_data_folder, 'part')\n\n        [os.makedirs(x) for x in (self.positive_dest, self.negative_dest, self.part_dest) if not os.path.exists(x)]\n\n        self.crop_size = crop_size\n        self.metadata = metadata\n\n    def run(self):\n        positive_meta = open(os.path.join(self.net_data_folder, 'positive_meta.txt'), 'w')\n        negative_meta = open(os.path.join(self.net_data_folder, 'negative_meta.txt'), \"w\")\n        part_meta = open(os.path.join(self.net_data_folder, 'part_meta.txt'), 'w')\n\n        positive_count = 0\n        negative_count = 0\n        part_count = 0\n\n        for i, item in enumerate(self.metadata):\n\n            img_path = item['file_name']\n            img = Image.open(img_path)\n\n            boxes = np.array(item['meta_data'])[:, :4]\n            boxes = boxes[boxes[:, 2] >= 0]\n            boxes = boxes[boxes[:, 3] >= 0]\n\n            boxes[:, 2] += boxes[:, 0]\n            boxes[:, 3] += boxes[:, 1]\n\n            width, height = img.size\n            # print(img.size,boxes.shape)\n\n            for box in boxes:\n                x1, y1, x2, y2 = box\n                w = x2 - x1 + 1\n                h = y2 - y1 + 1\n                if max(w, h) < 40 or x1 < 0 or y1 < 0:\n                    continue\n\n                size = np.random.randint(int(min(w, h) * 0.8), np.ceil(1.25 * max(w, h)))\n                delta_x = np.random.randint(- w * 0.2, w * 0.2)\n                delta_y = np.random.randint(- h * 0.2, h * 0.2)\n\n                nx1 = int(max(x1 + w / 2 + delta_x - size / 2, 0))\n                ny1 = int(max(y1 + h / 2 + delta_y - size / 2, 0))\n                nx2 = nx1 + size\n                ny2 = ny1 + size\n\n                if nx2 > width or ny2 > height:\n                    continue\n\n                crop_box = np.array([nx1, ny1, nx2, ny2])\n\n                offset_x1 = (x1 - nx1) / float(size)\n                offset_y1 = (y1 - ny1) / float(size)\n                offset_x2 = (x2 - nx2) / float(size)\n                offset_y2 = (y2 - ny2) / float(size)\n\n                crop_img = img.crop(crop_box)\n                resize_img = crop_img.resize((self.crop_size, self.crop_size))\n                _box = np.array([x1, y1, x2, y2])\n                iou = utils.iou(_box, np.array([[nx1, ny1, nx2, ny2]]))\n                if iou >= 0.65 and positive_count < 30000:\n                    positive_count += 1\n                    positive_meta.write(f\"{positive_count}.jpg {1} {offset_x1} {offset_y1} {offset_x2} {offset_y2} \\n\")\n                    resize_img.save(f\"{self.positive_dest}/{positive_count}.jpg\")\n                    positive_meta.flush()\n\n                if iou > 0.4 and part_count < 30000:\n                    part_count += 1\n                    part_meta.write(f\"{part_count}.jpg {2} {offset_x1} {offset_y1} {offset_x2} {offset_y2} \\n\")\n                    resize_img.save(f\"{self.part_dest}/{part_count}.jpg\")\n                    part_meta.flush()\n\n                size = np.random.randint(self.crop_size, (min(width, height) + 1) // 2)  # parentheses fix the precedence bug in \"+1 / 2\"\n                delta_x = np.random.randint(max(-size, -x1), w)\n                delta_y = np.random.randint(max(-size, -y1), h)\n\n                nx1 = max(x1 + delta_x, 0)\n                ny1 = max(y1 + delta_y, 0)\n                nx2 = nx1 + size\n                ny2 = ny1 + size\n\n                if nx2 > width or ny2 > height:\n                    continue\n\n                crop_box = np.array([nx1, ny1, nx2, ny2])\n                crop_img = img.crop(crop_box)\n                resize_img = crop_img.resize((self.crop_size, self.crop_size))\n                _box = np.array([x1, y1, x2, y2])\n                iou = utils.iou(_box, np.array([[nx1, ny1, nx2, ny2]]))\n                if iou < 0.3 and negative_count < 90000:\n                    negative_count += 1\n                    negative_meta.write(f\"{negative_count}.jpg {0} {0} {0} {0} {0} \\n\")\n                    resize_img.save(f\"{self.negative_dest}/{negative_count}.jpg\")\n                    negative_meta.flush()\n\n\n        positive_meta.close()\n        negative_meta.close()\n        part_meta.close()\n\n\nif __name__ == '__main__':\n    celeba = Celeba(r\"E:\\dataset\")\n    train, dev, test = celeba.split_data()\n    data = Gendata(test, r'F:\\celeba', 12, 
'pnet_eval')\n data.run()\n print(data.positive_dest)\n" }, { "alpha_fraction": 0.39321357011795044, "alphanum_fraction": 0.46573519706726074, "avg_line_length": 22.123077392578125, "blob_id": "a1ec28b105d033aa639b5f0ed67ef67621735fb0", "content_id": "269a8f6d22f857776858f8e34ab4625649ebbb72", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1503, "license_type": "no_license", "max_line_length": 65, "num_lines": 65, "path": "/SEQ2SEQ/backup/net.py", "repo_name": "greenkarson/python", "src_encoding": "UTF-8", "text": "import torch\nfrom torch import nn\n\n\nclass Encoder(nn.Sequential):\n\n def __init__(self):\n super().__init__(\n nn.Conv2d(3, 16, 7, 2, 3),\n nn.ReLU(),\n nn.Conv2d(16, 32, 3, 1, 1),\n nn.ReLU(),\n nn.MaxPool2d(2),\n nn.Conv2d(32, 64, 3, 1, 1),\n nn.ReLU(),\n nn.MaxPool2d(2),\n nn.Conv2d(64, 128, 3, 1, 1),\n nn.ReLU(),\n nn.Conv2d(128, 128, 1, 1, 0),\n )\n\n\nclass Decoder(nn.Module):\n\n def __init__(self):\n super().__init__()\n\n self.rnn = nn.GRU(128 * 7 * 30, 128, 2, batch_first=True)\n self.output_layer = nn.Linear(128, 10)\n\n def forward(self, x):\n x = x.reshape(-1, 128 * 7 * 30)\n x = x[:, None, :].repeat(1, 4, 1)\n h0 = torch.randn(2, x.size(0), 128)\n output, hn = self.rnn(x, h0)\n outputs = self.output_layer(output)\n return outputs\n\n\nclass Cnn2Seq(nn.Module):\n\n def __init__(self):\n super().__init__()\n\n self.encode = Encoder()\n self.decode = Decoder()\n\n def forward(self, x):\n f = self.encode(x)\n y = self.decode(f)\n return y\n\n\nif __name__ == '__main__':\n # net = Encoder()\n # y = net(torch.randn(1, 3, 60, 240))\n # print(y.shape)\n # x = torch.randn([1, 32, 7, 30])\n # x = x.reshape(-1, 32 * 7 * 30)\n # x = x[:, None, :].repeat(1, 4, 1)\n # print(x.shape)\n\n net = Cnn2Seq()\n y = net(torch.randn(1, 3, 60, 240))\n print(y.shape)\n" }, { "alpha_fraction": 0.5941176414489746, "alphanum_fraction": 0.6823529601097107, "avg_line_length": 17.88888931274414, "blob_id": "7aba05a1d47576508f7eba0d192c6ad545816d22", "content_id": "b361ab8b30006ea6906208ca193f26faa0118cb1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 170, "license_type": "no_license", "max_line_length": 35, "num_lines": 9, "path": "/OpenCV_Practice/usm.py", "repo_name": "greenkarson/python", "src_encoding": "UTF-8", "text": "import cv2\n\nimg = cv2.imread(\"1.jpg\")\ndst = cv2.GaussianBlur(img,(5,5),1)\ncv2.addWeighted(img,2,dst,-1,0,dst)\n\ncv2.imshow(\"src\",img)\ncv2.imshow(\"dst\",img)\ncv2.waitKey(0)\n" }, { "alpha_fraction": 0.6346456408500671, "alphanum_fraction": 0.7007874250411987, "avg_line_length": 34.33333206176758, "blob_id": "f4aae425ffe0f9c63e00a4a7db4287510b2b31f8", "content_id": "0154087168bf3cbe5df11bdb8346613e495930c2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 635, "license_type": "no_license", "max_line_length": 90, "num_lines": 18, "path": "/OpenCV_Practice/adpative.py", "repo_name": "greenkarson/python", "src_encoding": "UTF-8", "text": "import cv2\nimport matplotlib.pyplot as plt\n\nimg = cv2.imread(\"2.jpg\",0)\nret,th1 = cv2.threshold(img,0,255,cv2.THRESH_BINARY | cv2.THRESH_OTSU)\nth2 = cv2.adaptiveThreshold(img,255,cv2.ADAPTIVE_THRESH_MEAN_C,cv2.THRESH_BINARY,11,2)\nth3 = cv2.adaptiveThreshold(img,255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C,cv2.THRESH_BINARY,11,2)\n\ntitles = ['Original Image', 'Global Thresholding (v = 127)',\n 'Adaptive Mean Thresholding', 'Adaptive Gaussian 
Thresholding']\n\nimages = [img,th1,th2,th3]\n\nfor i in range(4):\n plt.subplot(2, 2, i + 1), plt.imshow(images[i], 'gray')\n plt.title(titles[i])\n plt.xticks([]), plt.yticks([])\nplt.show()" }, { "alpha_fraction": 0.555084764957428, "alphanum_fraction": 0.6355932354927063, "avg_line_length": 22.700000762939453, "blob_id": "f510ca26157a9cf2f7ed076fdab0b4ab667bef17", "content_id": "dacc55b76df49c6d0f164423dddf3a818b9391f2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 236, "license_type": "no_license", "max_line_length": 61, "num_lines": 10, "path": "/yolo/test.py", "repo_name": "greenkarson/python", "src_encoding": "UTF-8", "text": "import torch\n\nx = torch.randn(1,45,13,13)\ny = x.permute(0,2,3,1)\nreshape_y = y.reshape(y.size(0), y.size(1), y.size(2), 3, -1)\nmask = reshape_y[..., 0] > 0.6\nidx = mask.nonzero()\nprint(idx.shape)\nvecs = reshape_y[mask]\nprint(vecs.shape)" }, { "alpha_fraction": 0.6570248007774353, "alphanum_fraction": 0.7231404781341553, "avg_line_length": 26, "blob_id": "bb5d72eacf2034b3f8e51287e518ee07287f3ca4", "content_id": "dee43c7ae43e50662434c345b59e4ed0939e541d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 242, "license_type": "no_license", "max_line_length": 47, "num_lines": 9, "path": "/OpenCV_Practice/color_space.py", "repo_name": "greenkarson/python", "src_encoding": "UTF-8", "text": "import cv2\nsrc = cv2.imread(\"1.jpg\")\n# dst = cv2.cvtColor(src,cv2.COLOR_RGBA2GRAY)\ndst = cv2.cvtColor(src,cv2.COLOR_BGR2HSV)\n\n# dst = cv2.convertScaleAbs(src,alpha=6,beta=1)\ncv2.imshow(\"src show\",src)\ncv2.imshow(\"dst show\",dst)\ncv2.waitKey(0)" }, { "alpha_fraction": 0.4915563762187958, "alphanum_fraction": 0.5244180560112, "avg_line_length": 37.438594818115234, "blob_id": "dea90ec83aaee005e5349dea722004982de9fb0c", "content_id": "57644db6b98aaab44015b3fabf6db08d2fa4a5aa", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2191, "license_type": "no_license", "max_line_length": 102, "num_lines": 57, "path": "/GAN/train.py", "repo_name": "greenkarson/python", "src_encoding": "UTF-8", "text": "from net import *\nfrom data import FaceMyData\nfrom torch.utils.data import DataLoader\nfrom torch import optim\nfrom torchvision import utils\nfrom torch.utils.tensorboard import SummaryWriter\nimport time\n\n\nclass Trainer:\n def __init__(self, root):\n self.summarywrite = SummaryWriter(\"./runs\")\n self.dataset = FaceMyData(root)\n self.train_dataloader = DataLoader(self.dataset, batch_size=1000, shuffle=True, num_workers=4)\n self.net = DCGAN().cuda()\n self.net.load_state_dict(torch.load(\"./param/param2020-05-15-19-53-49.pt\"))\n\n self.d_opt = optim.Adam(self.net.dnet.parameters(), 0.0002, betas=(0.5, 0.9))\n self.g_opt = optim.Adam(self.net.gnet.parameters(), 0.0002, betas=(0.5, 0.9))\n\n def __call__(self):\n for epoch in range(10000):\n\n for i, img in enumerate(self.train_dataloader):\n real_img = img.cuda()\n noise_d = torch.normal(0, 1, (100, 128, 1, 1)).cuda()\n loss_d = self.net.get_D_loss(noise_d, real_img)\n\n self.d_opt.zero_grad()\n loss_d.backward()\n self.d_opt.step()\n\n noise_g = torch.normal(0, 1, (100, 128, 1, 1)).cuda()\n loss_g = self.net.get_G_loss(noise_g)\n\n self.g_opt.zero_grad()\n loss_g.backward()\n self.g_opt.step()\n\n print(i, loss_d.cpu().detach().item(), loss_g.cpu().detach().item())\n\n print(\"........................\")\n noise = torch.normal(0, 1, (8, 128, 
1, 1)).cuda()\n y = self.net(noise)\n utils.save_image(y, f\"./gen_img/{epoch}.jpg\", range=(-1, 1), normalize=True)\n\n self.summarywrite.add_scalars(\"loss\", {\"d_loss\": loss_d.cpu().detach().item(),\n \"g_loss\": loss_g.cpu().detach().item()},\n epoch)\n # self.summarywrite.add_graph(self.net,(real_img,))\n t = time.strftime(\"%Y-%m-%d-%H-%M-%S\", time.localtime())\n torch.save(self.net.state_dict(), f\"./param/param{t}.pt\")\n\n\nif __name__ == '__main__':\n test = Trainer(\"./faces\")\n test()\n" }, { "alpha_fraction": 0.6711409687995911, "alphanum_fraction": 0.744966447353363, "avg_line_length": 27, "blob_id": "bf48ad8c405e0d5f68fe65f7c0e18f7a35b259ed", "content_id": "684ec4e6b04819c7053d01ae74641d16b7f2d4c2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 471, "license_type": "no_license", "max_line_length": 74, "num_lines": 16, "path": "/OpenCV_Practice/contour_convex.py", "repo_name": "greenkarson/python", "src_encoding": "UTF-8", "text": "import cv2\nimg = cv2.imread(\"15.jpg\")\ndst = cv2.cvtColor(img,cv2.COLOR_RGB2GRAY)\nret,thres = cv2.threshold(dst,50,255,cv2.THRESH_BINARY)\n\ncontours,_ = cv2.findContours(thres,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)\n\nhull = cv2.convexHull(contours[0])\n\nprint(cv2.isContourConvex(contours[0]),cv2.isContourConvex(hull))\n# False True\n# ่ฝฎๅป“ๆ˜ฏ้žๅ‡ธ็š„๏ผŒๅ‡ธๅŒ…ๆ˜ฏๅ‡ธ็š„\nimg_contour = cv2.drawContours(img,[hull],-1,(0,0,255),2)\n\ncv2.imshow(\"img\",img_contour)\ncv2.waitKey(0)" }, { "alpha_fraction": 0.625, "alphanum_fraction": 0.6535714268684387, "avg_line_length": 27.100000381469727, "blob_id": "c0b4045609a09d0e3721fb0cb8cd3de64414fd28", "content_id": "7a6b4a00e57945c3c02a03ed497da6a777a4b018", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 280, "license_type": "no_license", "max_line_length": 57, "num_lines": 10, "path": "/deep_learning/day01/deploy.py", "repo_name": "greenkarson/python", "src_encoding": "UTF-8", "text": "from torch import jit\nfrom day01.net import *\nif __name__ == '__main__':\n modle = NetV1()\n modle.load_state_dict(torch.load(\"./checkpoint/4.t\"))\n\n input = torch.rand(1,784)\n\n traced_script_moudle = torch.jit.trace(modle,input)\n traced_script_moudle.save(\"mnist.pt\")" }, { "alpha_fraction": 0.36975598335266113, "alphanum_fraction": 0.4232479929924011, "avg_line_length": 43.720428466796875, "blob_id": "8ab11674ef6ea129513176341ec4707b1ec016e3", "content_id": "8aecbb7bca6f78b008e0630a9afdf06f3efb91bf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8343, "license_type": "no_license", "max_line_length": 109, "num_lines": 186, "path": "/MTCNN2/Gen_Data.py", "repo_name": "greenkarson/python", "src_encoding": "UTF-8", "text": "import os\nfrom PIL import Image\nimport numpy as np\nfrom tools import utils\n\n\nclass GenData():\n\n def __init__(self, src_root, gen_root, image_size):\n self.image_size = image_size\n\n self.src_image_path = f\"{src_root}/img_celeba\"\n self.src_anno_path = f\"{src_root}/Anno/list_bbox_celeba.txt\"\n self.src_landmark_path = f\"{src_root}/Anno/list_landmarks_celeba.txt\"\n\n self.positive_image_dir = f\"{gen_root}/{image_size}/positive\"\n self.negative_image_dir = f\"{gen_root}/{image_size}/negative\"\n self.part_image_dir = f\"{gen_root}/{image_size}/part\"\n\n self.positive_label = f\"{gen_root}/{image_size}/positive.txt\"\n self.negative_label = f\"{gen_root}/{image_size}/negative.txt\"\n self.part_label 
= f\"{gen_root}/{image_size}/part.txt\"\n\n # ่‹ฅๆ–‡ไปถๅคนไธๅญ˜ๅœจๅˆ™ๅˆ›ๅปบ่ทฏๅพ„\n for path in [self.positive_image_dir, self.negative_image_dir, self.part_image_dir]:\n if not os.path.exists(path):\n os.makedirs(path)\n\n def run(self, epoch):\n\n positive_label_txt = open(self.positive_label, \"w\")\n negative_label_txt = open(self.negative_label, \"w\")\n part_label_txt = open(self.part_label, \"w\")\n\n positive_count = 0\n negative_count = 0\n part_count = 0\n\n for _ in range(epoch):\n box_nano = open(self.src_anno_path, \"r\")\n landmark_anno = open(self.src_landmark_path, \"r\")\n # for i, line in enumerate(open(self.src_anno_path, \"r\")):\n for i, (box_line, landmark_line) in enumerate(zip(box_nano, landmark_anno)):\n if i < 2:\n continue\n strs = box_line.split()\n landmarks = landmark_line.split()\n\n print(strs, landmarks)\n\n img_path = f\"{self.src_image_path}/{strs[0]}\"\n img = Image.open(img_path)\n # img.show()\n\n x1 = int(strs[1])\n y1 = int(strs[2])\n w = int(strs[3])\n h = int(strs[4])\n x2 = x1 + w\n y2 = y1 + h\n\n px1 = int(landmarks[1])\n py1 = int(landmarks[2])\n px2 = int(landmarks[3])\n py2 = int(landmarks[4])\n px3 = int(landmarks[5])\n py3 = int(landmarks[6])\n px4 = int(landmarks[7])\n py4 = int(landmarks[8])\n px5 = int(landmarks[9])\n py5 = int(landmarks[10])\n\n if max(w, h) < 40 or x1 < 0 or y1 < 0 or w < 0 or h < 0:\n continue\n\n x1 = int(x1 + w * 0.12)\n y1 = int(y1 + h * 0.1)\n x2 = int(x1 + w * 0.9)\n y2 = int(y1 + h * 0.85)\n w = x2 - x1\n h = y2 - y1\n\n cx = int(x1 + (w / 2))\n cy = int(y1 + (w / 2))\n\n _cx = cx + np.random.randint(-w * 0.2, w * 0.2)\n _cy = cy + np.random.randint(-h * 0.2, h * 0.2)\n _w = w + np.random.randint(-w * 0.2, w * 0.2)\n _h = h + np.random.randint(-h * 0.2, h * 0.2)\n _x1 = int(_cx - (_w / 2))\n _y1 = int(_cy - (_h / 2))\n _x2 = int(_x1 + _w)\n _y2 = int(_y1 + _h)\n\n _x1_off = (_x1 - x1) / _w\n _y1_off = (_y1 - y1) / _h\n _x2_off = (_x2 - x2) / _w\n _y2_off = (_y2 - y2) / _h\n\n _px1_off = (px1 - _x1) / _w\n _py1_off = (py1 - _y1) / _h\n _px2_off = (px2 - _x1) / _w\n _py2_off = (py2 - _y1) / _h\n _px3_off = (px3 - _x1) / _w\n _py3_off = (py3 - _y1) / _h\n _px4_off = (px4 - _x1) / _w\n _py4_off = (py4 - _y1) / _h\n _px5_off = (px5 - _x1) / _w\n _py5_off = (py5 - _y1) / _h\n\n clip_img = img.crop([_x1, _y1, _x2, _y2])\n clip_img = clip_img.resize((self.image_size, self.image_size))\n\n iou = utils.iou(np.array([x1, y1, x2, y2]), np.array([[_x1, _y1, _x2, _y2]]))\n\n if iou > 0.6 and positive_count <= 30000:\n clip_img.save(f\"{self.positive_image_dir}/{positive_count}.jpg\")\n positive_label_txt.write(\n \"{0}.jpg {1} {2} {3} {4} {5} {6} {7} {8} {9} {10} {11} {12} {13} {14} {15}\\n\".format(\n positive_count, 1, _x1_off, _y1_off, _x2_off, _y2_off, _px1_off, _py1_off,\n _px2_off,_py2_off,_px3_off, _py3_off, _px4_off, _py4_off, _px5_off, _py5_off\n ))\n positive_label_txt.flush()\n positive_count += 1\n elif iou > 0.4 and part_count <= 30000:\n clip_img.save(f\"{self.part_image_dir}/{part_count}.jpg\")\n part_label_txt.write(\n \"{0}.jpg {1} {2} {3} {4} {5} {6} {7} {8} {9} {10} {11} {12} {13} {14} {15}\\n\".format(\n part_count, 2, _x1_off, _y1_off, _x2_off, _y2_off, _px1_off, _py1_off,\n _px2_off, _py2_off, _px3_off, _py3_off, _px4_off, _py4_off, _px5_off, _py5_off\n ))\n part_label_txt.flush()\n part_count += 1\n elif iou < 0.3 and negative_count <= 90000:\n clip_img.save(f\"{self.negative_image_dir}/{negative_count}.jpg\")\n negative_label_txt.write(\n \"{0}.jpg {1} {2} {3} {4} {5} {6} {7} {8} {9} {10} {11} {12} 
{13} {14} {15}\\n\".format(\n negative_count, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0\n ))\n negative_label_txt.flush()\n negative_count += 1\n\n w, h = img.size\n _x1, _y1 = np.random.randint(0, w), np.random.randint(0, h)\n _w, _h = np.random.randint(0, w - _x1), np.random.randint(0, h - _y1)\n _x2, _y2 = _x1 + _w, _y1 + _h\n clip_img = img.crop([_x1, _y1, _x2, _y2])\n clip_img = clip_img.resize((self.image_size, self.image_size))\n iou = utils.iou(np.array([x1, y1, x2, y2]), np.array([[_x1, _y1, _x2, _y2]]))\n if iou > 0.6 and positive_count <= 30000:\n clip_img.save(f\"{self.positive_image_dir}/{positive_count}.jpg\")\n positive_label_txt.write(\n \"{0}.jpg {1} {2} {3} {4} {5} {6} {7} {8} {9} {10} {11} {12} {13} {14} {15}\\n\".format(\n positive_count, 1, _x1_off, _y1_off, _x2_off, _y2_off, _px1_off, _py1_off,\n _px2_off, _py2_off, _px3_off, _py3_off, _px4_off, _py4_off, _px5_off, _py5_off\n ))\n positive_label_txt.flush()\n positive_count += 1\n elif iou > 0.4 and part_count <= 30000:\n clip_img.save(f\"{self.part_image_dir}/{part_count}.jpg\")\n part_label_txt.write(\n \"{0}.jpg {1} {2} {3} {4} {5} {6} {7} {8} {9} {10} {11} {12} {13} {14} {15}\\n\".format(\n part_count, 2, _x1_off, _y1_off, _x2_off, _y2_off, _px1_off, _py1_off,\n _px2_off, _py2_off, _px3_off, _py3_off, _px4_off, _py4_off, _px5_off, _py5_off\n ))\n part_label_txt.flush()\n part_count += 1\n elif iou < 0.3 and negative_count <= 90000:\n clip_img.save(f\"{self.negative_image_dir}/{negative_count}.jpg\")\n negative_label_txt.write(\n \"{0}.jpg {1} {2} {3} {4} {5} {6} {7} {8} {9} {10} {11} {12} {13} {14} {15}\\n\".format(\n negative_count, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0\n ))\n negative_label_txt.flush()\n negative_count += 1\n\n positive_label_txt.close()\n negative_label_txt.close()\n part_label_txt.close()\n\n\nif __name__ == '__main__':\n dst_path = r\"F:\\celeba/\"\n path = r\"E:\\dataset\\CelebA/\"\n gendata = GenData(path, dst_path, image_size=48)\n gendata.run(1)\n\n" }, { "alpha_fraction": 0.5879310369491577, "alphanum_fraction": 0.5982758402824402, "avg_line_length": 31.27777862548828, "blob_id": "46762654ddc873d9a5dc2ff110db86124d9965f9", "content_id": "2c85c8befaf1ff2afe810582c1971bc338056878", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 580, "license_type": "no_license", "max_line_length": 76, "num_lines": 18, "path": "/Loss/center_loss.py", "repo_name": "greenkarson/python", "src_encoding": "UTF-8", "text": "import torch\nfrom torch import nn\n\n\nclass CenterLoss(nn.Module):\n\n def __init__(self, class_num, feature_num):\n super(CenterLoss, self).__init__()\n self.class_num = class_num\n self.center = nn.Parameter(torch.randn(self.class_num, feature_num))\n\n def forward(self, feature, label):\n c = torch.index_select(self.center, 0, label)\n _n = torch.histc(label.float(), self.class_num, max=self.class_num)\n n = torch.index_select(_n, 0, label)\n d = torch.sum((feature - c) ** 2, dim=1) ** 0.5\n loss = d / n\n return loss" }, { "alpha_fraction": 0.6113989353179932, "alphanum_fraction": 0.621761679649353, "avg_line_length": 15.166666984558105, "blob_id": "4592f814e4c016541073b49b2a5465270792ef3c", "content_id": "7d1a68046a1f1e6e29ba1e4722237cfd4d351d1c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 259, "license_type": "no_license", "max_line_length": 48, "num_lines": 12, "path": "/PythonStudy/User.py", "repo_name": "greenkarson/python", "src_encoding": "UTF-8", 
"text": "\"\"\"\n็”จๆˆท่บซไปฝ้ชŒ่ฏ\nversion = 0.1\nauthor = karson\n\"\"\"\n\nuser = input(\"่ฏท่พ“ๅ…ฅ็”จๆˆทๅ\")\npassword = input(\"่ฏท่พ“ๅ…ฅๅฏ†็ \")\nif user == \"admin\" and password == \"123456\":\n print(\"็”จๆˆทๅๆญฃ็กฎๅฏ†็ ๆญฃ็กฎ\")\nelse:\n print(\"็”จๆˆทๅๅฏ†็ ้”™่ฏฏ\")" }, { "alpha_fraction": 0.4033038020133972, "alphanum_fraction": 0.46535053849220276, "avg_line_length": 25.698925018310547, "blob_id": "22fea075f9557caf7b02f940b449d51265f0cce2", "content_id": "82d1ad7098d3cbf8b9e782f87c8470857259d00a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2532, "license_type": "no_license", "max_line_length": 47, "num_lines": 93, "path": "/MTCNN/Nets.py", "repo_name": "greenkarson/python", "src_encoding": "UTF-8", "text": "from torch import nn\nimport torch\n\n\nclass PNet(nn.Module):\n\n def __init__(self):\n super().__init__()\n self.pre_layer = nn.Sequential(\n nn.Conv2d(3, 10, 3, 1, padding=1),\n nn.PReLU(),\n nn.MaxPool2d(3, 2),\n nn.Conv2d(10, 16, 3, 1),\n nn.PReLU(),\n nn.Conv2d(16, 32, 3, 1),\n nn.PReLU()\n )\n self.conv4_1 = nn.Conv2d(32, 1, 1, 1)\n self.conv4_2 = nn.Conv2d(32, 4, 1, 1)\n\n def forward(self, x):\n # print(x.shape,x.dtype)\n h = self.pre_layer(x)\n cond = torch.sigmoid(self.conv4_1(h))\n offset = self.conv4_2(h)\n return cond, offset\n\n\nclass RNet(nn.Module):\n\n def __init__(self):\n super().__init__()\n self.pre_layrer = nn.Sequential(\n nn.Conv2d(3, 28, 3, 1, padding=1),\n nn.PReLU(),\n nn.MaxPool2d(3, 2),\n nn.Conv2d(28, 48, 3, 1, padding=0),\n nn.PReLU(),\n nn.MaxPool2d(3, 2),\n nn.Conv2d(48, 64, 2, 1, padding=0),\n nn.PReLU()\n )\n self.fc = nn.Linear(3 * 3 * 64, 128)\n self.prelu = nn.PReLU()\n # ็ฝฎไฟกๅบฆ่พ“ๅ‡บ\n self.detect = nn.Linear(128, 1)\n # ๅ็งป้‡่พ“ๅ‡บ\n self.offset = nn.Linear(128, 4)\n\n def forward(self, x):\n h = self.pre_layrer(x)\n # h = h.reshape(-1,3*3*64)\n h = h.view(h.size(0), -1)\n h = self.fc(h)\n h = self.prelu(h)\n # ็ฝฎไฟกๅบฆ่พ“ๅ‡บ\n label = F.sigmoid(self.detect(h))\n offset = self.offset(h)\n return label, offset\n\n\nclass ONet(nn.Module):\n\n def __init__(self):\n super().__init__()\n self.pre_layer = nn.Sequential(\n nn.Conv2d(3,32,3,1,padding=1),\n nn.PReLU(),\n nn.MaxPool2d(3,2),\n nn.Conv2d(32,64,3,1,padding=0),\n nn.PReLU(),\n nn.MaxPool2d(3,2),\n nn.Conv2d(64,64,3,1,padding=0),\n nn.PReLU(),\n nn.MaxPool2d(2,2),\n nn.Conv2d(64,128,2,1,padding=0),\n nn.PReLU()\n )\n self.fc = nn.Linear(3*3*128,256)\n self.prelu = nn.PReLU()\n # ็ฝฎไฟกๅบฆ่พ“ๅ‡บ\n self.detect = nn.Linear(256, 1)\n # ๅ็งป้‡่พ“ๅ‡บ\n self.offset = nn.Linear(256, 4)\n\n def forward(self, x):\n h = self.pre_layer(x)\n h = h.view(h.size(0), -1)\n h = self.fc(h)\n h = self.prelu(h)\n label = F.sigmoid(self.detect(h))\n offset = self.offset(h)\n return label, offset" }, { "alpha_fraction": 0.5517241358757019, "alphanum_fraction": 0.6206896305084229, "avg_line_length": 14.315789222717285, "blob_id": "3fa62727bb49ef8698804500706ec6349c2eabcc", "content_id": "84835e13269540bc4358e12226f3a29e214a9fc7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 290, "license_type": "no_license", "max_line_length": 39, "num_lines": 19, "path": "/PythonStudy/linear_algebra/martx.py", "repo_name": "greenkarson/python", "src_encoding": "UTF-8", "text": "import numpy as np\nimport torch\n\na = np.diag([1,2,3,4])\nprint(a)\nb = torch.diag(torch.tensor([1,2,3,4]))\nprint(b)\nc = np.eye(4,3)\nprint(c)\nd = torch.eye(3,4)\nprint(d)\ne = 
np.tri(3,3)\nprint(e)\nf = torch.tril(torch.ones(3,3))\nprint(f)\ng = np.ones([3,3])\nprint(g)\nh = torch.zeros(3,3)\nprint(h)" }, { "alpha_fraction": 0.5308641791343689, "alphanum_fraction": 0.5308641791343689, "avg_line_length": 15.199999809265137, "blob_id": "610804dfb4dd913c8b75db7e711325f7e46c0590", "content_id": "c80ea06132afdd96b830a638a66ab44dcb4f722c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 81, "license_type": "no_license", "max_line_length": 26, "num_lines": 5, "path": "/GAN/detect.py", "repo_name": "greenkarson/python", "src_encoding": "UTF-8", "text": "from net import *\nimport os\n\nif __name__ == '__main__':\n net = DCGAN().cuda()\n" }, { "alpha_fraction": 0.6379310488700867, "alphanum_fraction": 0.7155172228813171, "avg_line_length": 24.88888931274414, "blob_id": "30cad2d10666338a4f890cd25b01ef87096f3da7", "content_id": "524133d087cac76ec7fc2678f7a69e5c12600613", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 232, "license_type": "no_license", "max_line_length": 58, "num_lines": 9, "path": "/OpenCV_Practice/hist_adpative.py", "repo_name": "greenkarson/python", "src_encoding": "UTF-8", "text": "import cv2\nimg = cv2.imread(\"8.jpg\",0)\ndst = cv2.equalizeHist(img)\ncv2.imshow(\"dst1\",dst)\nclahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8,8))\ndst2 = clahe.apply(img)\ncv2.imshow(\"src\",img)\ncv2.imshow(\"dst\",dst2)\ncv2.waitKey(0)" }, { "alpha_fraction": 0.5965909361839294, "alphanum_fraction": 0.6420454382896423, "avg_line_length": 10.733333587646484, "blob_id": "2b011582c7e3358ef64762e2245b7f185f5cd350", "content_id": "bd65532c002f3c3b8270ccef5e75e29c6c608866", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 236, "license_type": "no_license", "max_line_length": 28, "num_lines": 15, "path": "/PythonStudy/Var.py", "repo_name": "greenkarson/python", "src_encoding": "UTF-8", "text": "\"\"\"\nไฝฟ็”จๅ˜้‡ไฟๅญ˜ๆ•ฐๆฎๅนถ่ฟ›่กŒ็ฎ—ๆœฏ่ฟ็ฎ—\nVersion: 0.1\nAuthor: Karson\n\"\"\"\nprint(\"--ไฝฟ็”จๅ˜้‡ไฟๅญ˜ๆ•ฐๆฎๅนถ่ฟ›่กŒ็ฎ—ๆœฏ่ฟ็ฎ—--\")\na = 123\nb = 321\nprint(a+b)\nprint(a-b)\nprint(a*b)\nprint(a/b)\nprint(a//b)\nprint(a%b)\nprint(a**b)\n" }, { "alpha_fraction": 0.542870044708252, "alphanum_fraction": 0.5487364530563354, "avg_line_length": 35.3278694152832, "blob_id": "4a9a43ace55927fd022bc9e499077500ca0bb876", "content_id": "d4f299962d5af92c0aa3e197e8ded6b18eaa3fa3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2222, "license_type": "no_license", "max_line_length": 107, "num_lines": 61, "path": "/MTCNN/Train.py", "repo_name": "greenkarson/python", "src_encoding": "UTF-8", "text": "import torch\nfrom torch import nn\nfrom torch import optim\nfrom torch.utils.data import DataLoader\nfrom Data import FaceDataset\nimport Nets\n\n\nclass Trainer:\n def __init__(self, net, dataset_path):\n self.net = net\n self.dataset_path = dataset_path\n\n self.cls_loss = nn.BCELoss()\n self.offset_loss = nn.MSELoss()\n\n self.optim = optim.Adam(self.net.parameters())\n\n def __call__(self):\n dataset = FaceDataset(self.dataset_path)\n dataloader = DataLoader(dataset, batch_size=512, shuffle=True, num_workers=2, drop_last=True)\n\n while True:\n for i, (img_data_, category_, offset_) in enumerate(dataloader):\n _output_category, _output_offset = self.net(img_data_)\n\n # print(category_.shape, offset_.shape)\n # print(_output_category.shape, 
_output_offset.shape)\n output_category = _output_category.view(-1, 1)\n output_offset = _output_offset.view(-1, 4)\n\n # print(output_category.shape, output_offset.shape)\n # print(\"----------------------------------------\")\n\n category_mask = torch.lt(category_, 2)\n category = torch.masked_select(category_, category_mask).float()\n output_category = torch.masked_select(output_category, category_mask)\n cls_loss = self.cls_loss(output_category, category)\n\n offset_mask = torch.gt(category_, 0)\n offset_index = torch.nonzero(offset_mask)[:, 0]\n offset = offset_[offset_index]\n output_offset = output_offset[offset_index]\n offset_loss = self.offset_loss(output_offset, offset)\n\n # ๆ€ปๆŸๅคฑ\n loss = cls_loss + offset_loss\n\n print(\"i=\", i, \"loss:\", loss.cpu().data.numpy(), \" cls_loss:\", cls_loss.cpu().data.numpy(),\n \" offset_loss\",\n offset_loss.cpu().data.numpy())\n\n self.optim.zero_grad()\n loss.backward()\n self.optim.step()\n\n\nif __name__ == '__main__':\n net = Nets.PNet()\n train = Trainer(net, \"/Users/karson/Downloads/Dataset/12\")\n train()\n" }, { "alpha_fraction": 0.760064423084259, "alphanum_fraction": 0.7648953199386597, "avg_line_length": 26.04347801208496, "blob_id": "e0051bdd13bfc850bf9bf5a00d081fd737826b35", "content_id": "2c890a0004aec64672e393040b6a1ac354583346", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 685, "license_type": "no_license", "max_line_length": 82, "num_lines": 23, "path": "/PythonStudy/Machine_Learning/gbrt.py", "repo_name": "greenkarson/python", "src_encoding": "UTF-8", "text": "import numpy as np\nfrom sklearn import datasets,preprocessing,ensemble\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import accuracy_score\n\n# ๆ•ฐๆฎๅŠ ่ฝฝ\nwine = datasets.load_wine()\nx,y = wine.data,wine.target\n# ๅˆ’ๅˆ†่ฎญ็ปƒ้›†ไธŽๆต‹่ฏ•้›†\nx_train,x_test,y_train,y_test = train_test_split(x,y,test_size=0.3,random_state=0)\n# ๆ•ฐๆฎ้ข„ๅค„็†\nscaler = preprocessing.StandardScaler().fit(x_train)\nx_train = scaler.transform(x_train)\nx_test = scaler.transform(x_test)\n# ๅˆ›ๅปบๆจกๅž‹\ngbdt = ensemble.GradientBoostingClassifier()\n# ๆจกๅž‹ๆ‹Ÿๅˆ\ngbdt.fit(x_train,y_train)\n# ไบคๅ‰้ชŒ่ฏ\n# ้ข„ๆต‹\ny_pred = gbdt.predict(x_test)\n\nprint(accuracy_score(y_test,y_pred))" }, { "alpha_fraction": 0.5527369976043701, "alphanum_fraction": 0.5901201367378235, "avg_line_length": 26.740739822387695, "blob_id": "fc33597ec34698bdc23e7831ea9cf42113f0ce72", "content_id": "d4bacbaa4b41bea7c072d937a5ff60f9db0b388b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 773, "license_type": "no_license", "max_line_length": 86, "num_lines": 27, "path": "/PythonStudy/Machine_Learning/KernelRidge.py", "repo_name": "greenkarson/python", "src_encoding": "UTF-8", "text": "import numpy as np\nfrom sklearn.kernel_ridge import KernelRidge\nimport matplotlib.pyplot as plt\nfrom sklearn.model_selection import GridSearchCV\n# ๆ•ฐๆฎๅŠ ่ฝฝ\nrng = np.random.RandomState(0)\nX = 5 * rng.rand(100, 1)\ny = np.sin(X).ravel()\n\n# Add noise to targets\ny[::5] += 3 * (0.5 - rng.rand(X.shape[0] // 5))\n# ๅˆ›ๅปบๆจกๅž‹\nkr = GridSearchCV(KernelRidge(),\n param_grid={\"kernel\": [\"rbf\", \"laplacian\", \"polynomial\", \"sigmoid\"],\n \"alpha\": [1e0, 0.1, 1e-2, 1e-3],\n \"gamma\": np.logspace(-2, 2, 5)})\n# ๆจกๅž‹ๆ‹Ÿๅˆ\nkr.fit(X, y)\n\nprint(kr.best_score_, kr.best_params_)\n\nX_plot = np.linspace(0, 5, 100)\ny_kr = kr.predict(X_plot[:, 
None])\n\nplt.scatter(X, y)\nplt.plot(X_plot, y_kr, color=\"red\")\nplt.show()\n" }, { "alpha_fraction": 0.642405092716217, "alphanum_fraction": 0.6867088675498962, "avg_line_length": 23.384614944458008, "blob_id": "067186df5822d870f85a8af75b841438d9e17099", "content_id": "1d3a00ae07377f267da850e20365184218d799d6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 316, "license_type": "no_license", "max_line_length": 36, "num_lines": 13, "path": "/FACE_MTCNN/test.py", "repo_name": "greenkarson/python", "src_encoding": "UTF-8", "text": "import torch,os\n\nprint(torch.cuda.is_available())\nright = torch.tensor(226).cuda()\ntotal = 246\ntp = torch.tensor(62).cuda()\nfp = torch.tensor(10).cuda()\ntn = torch.tensor(64).cuda()\nfn = torch.tensor(10).cuda()\nprint(right, total, tp, fp, tn, fn)\n# acc = right / total\nacc = torch.true_divide(right,total)\nprint(acc)" }, { "alpha_fraction": 0.660287082195282, "alphanum_fraction": 0.6909090876579285, "avg_line_length": 28.885713577270508, "blob_id": "910cb43a8458fd3e6d81f0a4959d99141e88b560", "content_id": "21c2a34516476d7d2eb4b9c60085c32d2b817a07", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1163, "license_type": "no_license", "max_line_length": 72, "num_lines": 35, "path": "/OpenCV_Practice/fft_numpy.py", "repo_name": "greenkarson/python", "src_encoding": "UTF-8", "text": "from matplotlib import pyplot as plt\nfrom PIL import Image\nimport numpy as np\n\n # ็”จPILไธญ็š„Image.openๆ‰“ๅผ€ๅ›พๅƒ่ฝฌ็ฐๅบฆ\nimage_arr = np.array(Image.open(\"9.jpg\").convert('L'), 'f') # ่ฝฌๅŒ–ๆˆnumpyๆ•ฐ็ป„\n\nf = np.fft.fft2(image_arr) #ๅ‚…้‡Œๅถๅ˜ๆข\nfshift = np.fft.fftshift(f) #ๆŠŠไธญ็‚น็งปๅŠจๅˆฐไธญ้—ดๅŽป\n\nmagnitude_spectrum = 20 * np.log(np.abs(fshift)) #่ฎก็ฎ—ๆฏไธช้ข‘็Ž‡็š„ๆˆๅˆ†ๅคšๅฐ‘\n\nplt.figure(figsize=(10, 10))\nplt.subplot(221), plt.imshow(image_arr, cmap='gray')\nplt.title('Input Image'), plt.xticks([]), plt.yticks([])\nplt.subplot(222), plt.imshow(magnitude_spectrum, cmap='gray')\nplt.title('Magnitude Spectrum'), plt.xticks([]), plt.yticks([])\n\n#ๅŽปๆŽ‰ไฝŽ้ข‘ไฟกๅท๏ผŒ็•™ไธ‹้ซ˜้ข‘ไฟกๅท\nrows, cols = image_arr.shape\ncrow, ccol = rows // 2, cols // 2\nfshift[crow - 30:crow + 30, ccol - 30:ccol + 30] = 0\n\n\n#ๅ‚…้‡Œๅถ้€†ๅ˜ๆข\nf_ishift = np.fft.ifftshift(fshift)\nimg_back = np.fft.ifft2(f_ishift)\nimg_back = np.abs(img_back)\n\nplt.subplot(223), plt.imshow(img_back, cmap='gray')\nplt.title('Image after HPF'), plt.xticks([]), plt.yticks([])\nplt.subplot(224), plt.imshow(img_back)\nplt.title('Result in JET'), plt.xticks([]), plt.yticks([])\n\nplt.show()" }, { "alpha_fraction": 0.4857594966888428, "alphanum_fraction": 0.5167721509933472, "avg_line_length": 28.259260177612305, "blob_id": "c8e2a76541f57b2744f50b909a0a7085e6af961d", "content_id": "9ed635806ee52d7c98f859d98e3c8272b95c72ca", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3160, "license_type": "no_license", "max_line_length": 73, "num_lines": 108, "path": "/SEQ2SEQ/backup/SEQ2SEQ_num.py", "repo_name": "greenkarson/python", "src_encoding": "UTF-8", "text": "import os\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.utils.data as data\nimport Sampling_train_num\n\nclass Encoder(nn.Module):\n def __init__(self):\n super(Encoder, self).__init__()\n self.fc1 = nn.Sequential(\n nn.Linear(180,128),\n nn.BatchNorm1d(num_features=128),\n nn.ReLU()\n )\n self.lstm = nn.LSTM(input_size=128,\n 
hidden_size=128,\n num_layers=1,\n batch_first=True)\n\n def forward(self, x):\n x = x.reshape(-1,180,240).permute(0,2,1)\n x = x.reshape(-1,180)\n fc1 = self.fc1(x)\n fc1 = fc1.reshape(-1, 240, 128)\n lstm,(h_n,h_c) = self.lstm(fc1,None)\n out = lstm[:,-1,:]\n\n return out\n\n\nclass Decoder(nn.Module):\n def __init__(self):\n super(Decoder, self).__init__()\n self.lstm = nn.LSTM(input_size=128,\n hidden_size=128,\n num_layers=1,\n batch_first=True)\n self.out = nn.Linear(128,10)\n\n def forward(self,x):\n x = x.reshape(-1,1,128)\n x = x.expand(-1,4,128)\n lstm,(h_n,h_c) = self.lstm(x,None)\n y1 = lstm.reshape(-1,128)\n out = self.out(y1)\n output = out.reshape(-1,4,10)\n return output\n\n\nclass MainNet (nn.Module):\n def __init__(self):\n super(MainNet, self).__init__()\n self.encoder = Encoder()\n self.decoder = Decoder()\n\n def forward(self, x):\n encoder = self.encoder(x)\n decoder = self.decoder(encoder)\n\n return decoder\n\n\nif __name__ == '__main__':\n BATCH = 64\n EPOCH = 100\n save_path = r'params/seq2seq.pth'\n\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n net = MainNet().to(device)\n if os.path.exists(os.path.join(save_path)):\n net.load_state_dict(torch.load(save_path))\n opt = torch.optim.Adam(net.parameters())\n loss_func = nn.MSELoss()\n\n if os.path.exists(save_path):\n net.load_state_dict(torch.load(save_path))\n else:\n print(\"No Params!\")\n\n train_data = Sampling_train_num.Sampling(root=\"./code\")\n train_loader = data.DataLoader(dataset=train_data,\n batch_size=BATCH, shuffle=True, drop_last=True,num_workers=4)\n\n for epoch in range(EPOCH):\n for i, (x, y) in enumerate(train_loader):\n batch_x = x.to(device)\n batch_y = y.float().to(device)\n\n output = net(batch_x)\n loss = loss_func(output,batch_y)\n\n opt.zero_grad()\n loss.backward()\n opt.step()\n\n if i % 5 == 0:\n label_y = torch.argmax(y,2).detach().numpy()\n out_y = torch.argmax(output,2).cpu().detach().numpy()\n\n accuracy = np.sum(\n out_y == label_y,dtype=np.float32)/(BATCH * 4)\n print(\"epoch:{},i:{},loss:{:.4f},acc:{:.2f}%\"\n .format(epoch,i,loss.item(),accuracy * 100))\n print(\"label_y:\",label_y[0])\n print(\"out_y:\",out_y[0])\n\n torch.save(net.state_dict(), save_path)\n" }, { "alpha_fraction": 0.5074467062950134, "alphanum_fraction": 0.534929633140564, "avg_line_length": 33.92241287231445, "blob_id": "90040e1c78c6af06e48d951b8271c92fd0306ee3", "content_id": "ea46d68527a8ecca8c6de9e9e7ebb765bc739c2a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 12153, "license_type": "no_license", "max_line_length": 117, "num_lines": 348, "path": "/FACE_MTCNN/detect.py", "repo_name": "greenkarson/python", "src_encoding": "UTF-8", "text": "from Network import PNet, RNet, ONet\nfrom PIL import Image, ImageDraw\nfrom torchvision import transforms\nimport torch, math\nfrom tools.utils import nms\nfrom tools.utils import old_nms\nimport numpy as np\n\n\nclass FaceDetector():\n def __init__(self):\n\n self.pnet = PNet()\n self.pnet.load_state_dict(torch.load(\"./param/pnet.pt\"))\n self.rnet = RNet()\n self.rnet.load_state_dict(torch.load(\"./param/rnet.pt\"))\n self.onet = ONet()\n self.onet.load_state_dict(torch.load(\"./param/onet.pt\"))\n\n def detect(self, img):\n width, height = img.size\n scale = 1\n scale_img = img\n # print(scale_img.shape)\n min_side = min(width, height)\n # print(min_side)\n _boxes = []\n while min_side > 12:\n tf_img = self.img_preprocess(scale_img)\n cls, box_regs, _ = self.pnet(tf_img)\n 
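# move PNet's confidence map and box regressions to the CPU and detach them from the autograd graph before thresholding\n            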
cls = cls.cpu().detach()\n box_regs = box_regs.cpu().detach()\n probs = cls[0, 1, :, :]\n # print(probs.shape, box_regs.shape)\n probs_mask = probs > 0.4\n # print(probs_mask.shape)\n\n index = probs_mask.nonzero()\n # print(index.shape)\n tx1, ty1 = index[:, 1] * 2, index[:, 0] * 2\n tx2, ty2 = tx1 + 12, ty1 + 12\n # print(tx1.shape,ty1.shape,tx2.shape, ty2.shape)\n offset_mask = box_regs[0, :, probs_mask]\n # print(offset_mask.shape)\n x1 = (tx1 + offset_mask[0, :] * 12) / scale\n y1 = (ty1 + offset_mask[1, :] * 12) / scale\n x2 = (tx2 + offset_mask[2, :] * 12) / scale\n y2 = (ty2 + offset_mask[3, :] * 12) / scale\n # print(x1.shape, y1.shape, x2.shape, y2.shape)\n\n score = probs[probs_mask]\n # print(score.shape)\n _boxes.append(torch.stack([x1, y1, x2, y2, score], dim=1))\n\n scale *= 0.702\n cur_width, cur_height = int(width * scale), int(height * scale)\n print(cur_width, cur_height)\n scale_img = scale_img.resize((cur_width, cur_height))\n\n min_side = min(cur_width, cur_height)\n\n boxes = torch.cat(_boxes, dim=0)\n print(boxes.shape)\n return old_nms(boxes.cpu().detach().numpy(), 0.3)\n # return boxes\n\n def pnet_detect(self, img):\n scale_img = img\n # print(scale_img.shape)\n width = scale_img.shape[3]\n height = scale_img.shape[2]\n\n scales = []\n cur_width = width\n cur_height = height\n cur_factor = 1\n\n while cur_width >= 12 and cur_height >= 12:\n if 12 / cur_factor >= 12:\n w = cur_width\n h = cur_height\n scales.append((w, h, cur_factor))\n # print(w,h,cur_factor)\n\n cur_factor *= 0.7\n cur_width = math.ceil(cur_width * 0.7)\n cur_height = math.ceil(cur_height * 0.7)\n\n candidate_boxes = torch.empty((0, 4))\n candidate_score = torch.empty((0))\n candidate_offsets = torch.empty((0, 4))\n\n for w, h, f in scales:\n resize_img = torch.nn.functional.interpolate(scale_img, size=(h, w), mode=\"bilinear\", align_corners=True)\n\n # print(resize_img.shape,f)\n p_distribution, box_regs, _ = self.pnet(resize_img)\n # print(p_distribution.shape, box_regs.shape)\n p_distribution = p_distribution.cpu().detach()\n box_regs = box_regs.cpu().detach()\n # print(p_distribution[0,1,:,:],p_distribution.shape, box_regs.shape)\n\n candidate, scores, offsets = self.generate_bboxes(p_distribution, box_regs, f)\n candidate = candidate.float()\n # print(candidate.shape, scores.shape, offsets.shape)\n\n candidate_boxes = torch.cat([candidate_boxes, candidate])\n candidate_score = torch.cat([candidate_score, scores])\n candidate_offsets = torch.cat([candidate_offsets, offsets])\n # print(candidate.shape, scores.shape, offsets.shape)\n\n if candidate_boxes.shape[0] != 0:\n # candidate_boxes = self.calibrate_box(candidate_boxes, candidate_offsets)\n keep = nms(candidate_boxes.cpu().numpy(), candidate_score.cpu().numpy(), 0.3)\n return candidate_boxes[keep]\n else:\n return candidate_boxes\n\n def rnet_detect(self, img, boxes):\n if boxes.shape[0] == 0:\n return boxes\n width, height = img.size\n boxes = self.convert_to_square(boxes)\n # boxes = self.refine_boxes(boxes, width, height)\n\n candidate_faces = list()\n for box in np.array(boxes):\n # im = img[:, :, box[1]: box[3], box[0]: box[2]]\n # im = torch.nn.functional.interpolate(im, size=(24, 24), mode='bilinear')\n # candidate_faces.append(im)\n\n # print(box[0], box[1], box[2], box[3])\n crop_img = img.crop(box[0:4])\n crop_img = crop_img.resize((24, 24))\n candidate_faces.append(self.img_preprocess(crop_img))\n\n candidate_faces = torch.cat(candidate_faces, 0)\n # print(candidate_faces.shape)\n\n # rnet forward pass\n p_distribution, 
box_regs, _ = self.rnet(candidate_faces)\n p_distribution = p_distribution.cpu().detach()\n box_regs = box_regs.cpu().detach()\n # print(p_distribution.shape, box_regs.shape)\n\n # filter negative boxes\n scores = p_distribution[:, 1]\n # print(scores)\n # print(scores.shape)\n\n mask = (scores >= 0)\n boxes = boxes[mask]\n # print(boxes.shape)\n box_regs = box_regs[mask]\n\n scores = scores[mask]\n # print(scores.shape)\n # c = list()\n # for i in boxes:\n # x1, y1, x2, y2 = i[0], i[1], i[2], i[3]\n # c.append(torch.tensor([[x1, y1, x2, y2]]))\n # c = torch.cat(c, 0)\n # cc = torch.stack([c[:,0],c[:,1],c[:,2],c[:,3],scores],1)\n # xxxx = old_nms(cc.cpu().numpy(), 0.2)\n # print(xxxx.shape)\n # return xxxx\n\n if boxes.shape[0] > 0:\n # boxes = self.calibrate_box(boxes, box_regs)\n # nms\n keep = nms(boxes.cpu().numpy(), scores.cpu().numpy(), 0.3)\n boxes = boxes[keep]\n print(boxes.shape)\n return boxes\n\n def onet_detect(self, img, boxes):\n if boxes.shape[0] == 0:\n return boxes, torch.empty(0, dtype=torch.int32)\n\n width, height = img.size\n\n boxes = self.convert_to_square(boxes)\n # boxes = self.refine_boxes(boxes, width, height)\n\n # get candidate faces\n candidate_faces = list()\n\n for box in np.array(boxes):\n # im = img[:, :, box[1]: box[3], box[0]: box[2]]\n # im = torch.nn.functional.interpolate(\n # im, size=(48, 48), mode='bilinear')\n # candidate_faces.append(im)\n # print(box[0], box[1], box[2], box[3])\n\n crop_img = img.crop(box[0:4])\n crop_img = crop_img.resize((48, 48))\n candidate_faces.append(self.img_preprocess(crop_img))\n\n candidate_faces = torch.cat(candidate_faces, 0)\n # print(candidate_faces.shape)\n\n p_distribution, box_regs, landmarks = self.onet(candidate_faces)\n p_distribution = p_distribution.cpu().detach()\n box_regs = box_regs.cpu().detach()\n landmarks = landmarks.cpu().detach()\n # print(p_distribution.shape, box_regs.shape, landmarks.shape)\n\n # filter negative boxes\n scores = p_distribution[:, 1]\n mask = (scores >= 0.4)\n boxes = boxes[mask]\n box_regs = box_regs[mask]\n scores = scores[mask]\n landmarks = landmarks[mask]\n\n if boxes.shape[0] > 0:\n # compute face landmark points\n landmarks = self.calibrate_landmarks(boxes, landmarks)\n landmarks = torch.stack([landmarks[:, :5], landmarks[:, 5:10]], 2)\n # boxes = self.calibrate_box(boxes, box_regs)\n # boxes = self.refine_boxes(boxes, width, height)\n\n # nms\n keep = nms(boxes.cpu().numpy(), scores.cpu().numpy(), 0.3)\n boxes = boxes[keep]\n landmarks = landmarks[keep]\n\n return boxes, landmarks\n\n def img_preprocess(self, img):\n tf = transforms.Compose([transforms.ToTensor()])\n img = tf(img)\n img = (img - 127.5) * 0.0078125\n img = img.unsqueeze(0)\n return img\n\n def generate_bboxes(self, probs, offsets, scale):\n stride = 2\n cell_size = 12\n\n cls = probs[0, 1, :, :]\n # print(probs.shape)\n\n inds_mask = cls > 0.5\n inds = inds_mask.nonzero()\n # print(\"inds:\",inds.shape)\n # print(offsets.shape)\n\n if inds.shape[0] == 0:\n return torch.empty((0, 4)), torch.empty(0), torch.empty((0, 4))\n\n tx1, ty1, tx2, ty2 = [offsets[0, i, inds[:, 0], inds[:, 1]] for i in range(4)]\n # print(tx1.shape, ty1.shape, tx2.shape, ty2.shape)\n offsets = torch.stack([tx1, ty1, tx2, ty2], dim=1)\n # they are defined as:\n # w = x2 - x1 + 1\n # h = y2 - y1 + 1\n # x1_true = x1 + tx1*w\n # x2_true = x2 + tx2*w\n # y1_true = y1 + ty1*h\n # y2_true = y2 + ty2*h\n\n score = cls[inds[:, 0], inds[:, 1]]\n # print(score.shape)\n\n bounding_boxes = torch.stack([\n stride * inds[:, 1] + 1.0,\n stride * 
inds[:, 0] + 1.0,\n stride * inds[:, 1] + 1.0 + cell_size,\n stride * inds[:, 0] + 1.0 + cell_size], 0).transpose(0, 1)\n # print(bounding_boxes.shape)\n bounding_boxes = torch.round(bounding_boxes / scale).int()\n # print(bounding_boxes.shape)\n # exit()\n return bounding_boxes, score, offsets\n\n def calibrate_box(self, bboxes, offsets):\n x1, y1, x2, y2 = [bboxes[:, i] for i in range(4)]\n w = x2 - x1 + 1.0\n h = y2 - y1 + 1.0\n w = torch.unsqueeze(w, 1)\n h = torch.unsqueeze(h, 1)\n\n translation = torch.cat([w, h, w, h], 1).float() * offsets\n bboxes += torch.round(translation).int()\n return bboxes\n\n def convert_to_square(self, bboxes):\n\n square_bboxes = torch.zeros_like(bboxes, dtype=torch.float32)\n x1, y1, x2, y2 = [bboxes[:, i].float() for i in range(4)]\n h = y2 - y1 + 1.0\n w = x2 - x1 + 1.0\n max_side = torch.max(h, w)\n square_bboxes[:, 0] = x1 + w * 0.5 - max_side * 0.5\n square_bboxes[:, 1] = y1 + h * 0.5 - max_side * 0.5\n square_bboxes[:, 2] = square_bboxes[:, 0] + max_side - 1.0\n square_bboxes[:, 3] = square_bboxes[:, 1] + max_side - 1.0\n\n square_bboxes = torch.ceil(square_bboxes + 1).int()\n return square_bboxes\n\n def refine_boxes(self, bboxes, w, h):\n\n bboxes = torch.max(torch.zeros_like(bboxes), bboxes)\n sizes = torch.IntTensor([[h, w, h, w]]) * bboxes.shape[0]\n bboxes = torch.min(bboxes, sizes)\n return bboxes\n\n def calibrate_landmarks(self, bboxes, landmarks, align=False):\n\n x1, y1, x2, y2 = [bboxes[:, i] for i in range(4)]\n\n w = x2 - x1 + 1.0\n h = y2 - y1 + 1.0\n w = torch.unsqueeze(w, 1)\n h = torch.unsqueeze(h, 1)\n\n translation = torch.cat([w] * 5 + [h] * 5, 1).float() * landmarks\n\n if align:\n landmarks = torch.ceil(translation).int()\n else:\n landmarks = torch.stack([bboxes[:, 0]] * 5 + [bboxes[:, 1]] * 5, 1) + torch.round(translation).int()\n return landmarks\n\n\nif __name__ == '__main__':\n img = Image.open(\"1.jpg\")\n img_draw = ImageDraw.Draw(img)\n detect = FaceDetector()\n p_img = detect.img_preprocess(img)\n\n p_boxes = detect.pnet_detect(p_img)\n print(p_boxes.shape)\n r_boxes = detect.rnet_detect(img, p_boxes)\n print(r_boxes.shape)\n o_boxes, landmarks = detect.onet_detect(img, r_boxes)\n print(o_boxes.shape, landmarks.shape)\n\n # boxes = detect.detect(img)\n\n for box in r_boxes:\n # Default draw red box on it.\n\n img_draw.rectangle((box[0], box[1], box[2], box[3]), outline='green', width=2)\n\n img.show()\n" }, { "alpha_fraction": 0.4502074718475342, "alphanum_fraction": 0.5062240958213806, "avg_line_length": 18.31999969482422, "blob_id": "2ad43c1b85e4a37eeb4721f71ac210f3e7b82087", "content_id": "68493f8b53bbc8426fc3c012c85a76341011a626", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 482, "license_type": "no_license", "max_line_length": 40, "num_lines": 25, "path": "/Loss/net.py", "repo_name": "greenkarson/python", "src_encoding": "UTF-8", "text": "from torch import nn\nimport torch\n\nclass Net(nn.Module):\n def __init__(self):\n\n super(Net, self).__init__()\n self.sequential = nn.Sequential(\n nn.Linear(784,32),\n nn.Linear(32,64),\n nn.Linear(64,10)\n\n )\n\n def forward(self, x):\n x = x.reshape(-1,784)\n return self.sequential(x)\n\n\nif __name__ == '__main__':\n x = torch.randn(1,1,28,28)\n x.reshape(-1,784)\n net = Net()\n y = net(x)\n print(y.shape)" }, { "alpha_fraction": 0.7096773982048035, "alphanum_fraction": 0.7677419185638428, "avg_line_length": 18.5, "blob_id": "07fd8921db9efcf8e33e8cdb178c7e105f5f5edb", "content_id": 
"f0816295440ff5e79916248872cbec58d9c77661", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 155, "license_type": "no_license", "max_line_length": 43, "num_lines": 8, "path": "/OpenCV_Practice/SHIF.py", "repo_name": "greenkarson/python", "src_encoding": "UTF-8", "text": "import cv2\nimport numpy as np\n\nimg = cv2.imread(\"33.jpg\")\ngray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)\ncv2.FastFeatureDetector_create()\n\ncv2.drawKeypoints()" }, { "alpha_fraction": 0.7059329152107239, "alphanum_fraction": 0.7282888889312744, "avg_line_length": 24.866666793823242, "blob_id": "6d51dbc2d4ed3d9fcc492a5e5ea3cd63980e69c2", "content_id": "bcf0a55ac3bf3d961e67c9cebe1902288258d566", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1253, "license_type": "no_license", "max_line_length": 100, "num_lines": 45, "path": "/PythonStudy/Machine_Learning/Linear_regression.py", "repo_name": "greenkarson/python", "src_encoding": "UTF-8", "text": "from sklearn import linear_model\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom sklearn import datasets\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import mean_absolute_error,mean_squared_error,r2_score,explained_variance_score\n\n\n# ๆ•ฐๆฎๅŠ ่ฝฝ\nx,y =datasets.make_regression(n_samples=100,n_features=1,n_targets=1,noise=10,random_state=0)\nx_train,x_test,y_train,y_test = train_test_split(x,y,test_size=0.3,random_state=0)\n\n# ๆ•ฐๆฎ้ข„ๅค„็†\n\n# ๅˆ›ๅปบๆจกๅž‹\nreg = linear_model.LinearRegression()\n# ๅฒญๅ›žๅฝ’\n# reg = linear_model.Ridge(0.5)\n# LASSOๅ›žๅฝ’\n# reg = linear_model.Lasso(0.1)\n# ๅผนๆ€งๅ›žๅฝ’\n# reg = linear_model.ElasticNet(0.5,0.5)\n# ้€ป่พ‘ๆ–ฏ่’‚ๅ›žๅฝ’?\n# reg = linear_model.LogisticRegression\n# ่ดๅถๆ–ฏๅ›žๅฝ’\n# reg = linear_model.BayesianRidge()\n# ๆจกๅž‹ๆ‹Ÿๅˆ\nreg.fit(x_train,y_train)\n# ไบคๅ‰้ชŒ่ฏ\n\n# ้ข„ๆต‹\nprint(reg.coef_,reg.intercept_)\n_x = np.array([-2.5,2.5])\n_y = reg.coef_ * _x + reg.intercept_\ny_pred =reg.predict(x_test)\n# ่ฏ„ไผฐ\nprint(mean_squared_error(y_test,y_pred))\nprint(mean_absolute_error(y_test,y_pred))\nprint(r2_score(y_test,y_pred))\nprint(explained_variance_score(y_test,y_pred))\nprint()\n\n# plt.scatter(x_test,y_test)\n# plt.plot(_x,_y,linewidth =3,color = \"orange\")\n# plt.show()" }, { "alpha_fraction": 0.5701274871826172, "alphanum_fraction": 0.6593806743621826, "avg_line_length": 29.5, "blob_id": "d28a53450d0a42da3a6b228c6e0bbc1f2e7eedea", "content_id": "b3aa5aef476eb220465adc439f9b8fad4d15c74d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 549, "license_type": "no_license", "max_line_length": 75, "num_lines": 18, "path": "/OpenCV_Practice/contour_line.py", "repo_name": "greenkarson/python", "src_encoding": "UTF-8", "text": "import cv2\nimg = cv2.imread(\"16.jpg\")\ndst = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)\nret,thresh = cv2.threshold(dst,55,255,cv2.THRESH_BINARY)\n\ncontours,_ = cv2.findContours(thresh,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)\n\nellipse = cv2.fitEllipse(contours[0])\ncv2.ellipse(img,ellipse,(0,0,255),2)\n\nh,w,_ = img.shape\n[vx,vy,x,y] = cv2.fitLine(contours[0],cv2.DIST_L2,0,0.01,0.01)\nlefty = int((-x * vy / vx) + y)\nrighty = int(((w - x) * vy / vx) + y)\ncv2.line(img, (w - 1, righty), (0, lefty), (0, 0, 255), 2)\n\ncv2.imshow(\"img_contour\", img)\ncv2.waitKey(0)\n" }, { "alpha_fraction": 0.6404958963394165, "alphanum_fraction": 0.71074378490448, "avg_line_length": 12.5, "blob_id": 
"b34195407028837f4b98ffef8be31c6234b0fedc", "content_id": "2911ac2a0c3510e36b865d148004151e0dd871cc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 442, "license_type": "no_license", "max_line_length": 47, "num_lines": 18, "path": "/PythonStudy/For.py", "repo_name": "greenkarson/python", "src_encoding": "UTF-8", "text": "\"\"\"\n็”จforๅพช็Žฏๅฎž็Žฐ1~100ๆฑ‚ๅ’Œ\n\nVersion: 0.1\nAuthor: karson\n\"\"\"\n\nsum = 0\nfor x in range(101):\n sum += x\nprint(sum)\n\n\"\"\"\nfor ่ฟญไปฃๅ˜้‡ in ๅญ—็ฌฆไธฒ|ๅˆ—่กจ|ๅ…ƒ็ป„|ๅญ—ๅ…ธ|้›†ๅˆ๏ผš\n ไปฃ็ ๅ—\nๆ ผๅผไธญ๏ผŒ่ฟญไปฃๅ˜้‡็”จไบŽๅญ˜ๆ”พไปŽๅบๅˆ—็ฑปๅž‹ๅ˜้‡ไธญ่ฏปๅ–ๅ‡บๆฅ็š„ๅ…ƒ็ด ๏ผŒๆ‰€ไปฅไธ€่ˆฌไธไผšๅœจๅพช็Žฏไธญๅฏน่ฟญไปฃๅ˜้‡ๆ‰‹ๅŠจ่ต‹ๅ€ผ\n้œ€่ฆ่ฏดๆ˜Ž็š„ๆ˜ฏไธŠ้ขไปฃ็ ไธญ็š„range(101)ๅฏไปฅ็”จๆฅๆž„้€ ไธ€ไธชไปŽ0ๅˆฐ100็š„ๅ–ๅ€ผ่Œƒๅ›ด\n\"\"\"" }, { "alpha_fraction": 0.5477706789970398, "alphanum_fraction": 0.6114649772644043, "avg_line_length": 16.55555534362793, "blob_id": "69cd0708c39a8574636383ef67f5c176308393fe", "content_id": "d86d3b842dd0a7acdc0eecc55840c5f91b67b7ce", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 157, "license_type": "no_license", "max_line_length": 29, "num_lines": 9, "path": "/OpenCV_Practice/pyr_gass.py", "repo_name": "greenkarson/python", "src_encoding": "UTF-8", "text": "import cv2\nimg = cv2.imread(\"13.jpg\")\n\nfor i in range(3):\n cv2.imshow(f\"img{i}\",img)\n img = cv2.pyrUp(img)\n # img = cv2.pyrDown(img)\n\ncv2.waitKey(0)" }, { "alpha_fraction": 0.5801886916160583, "alphanum_fraction": 0.6194968819618225, "avg_line_length": 20.200000762939453, "blob_id": "6d08f256eac42694544aeaa9df2279ed79577a74", "content_id": "d6a694610855f17b41b93aa6723fdaa028e1d239", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 652, "license_type": "no_license", "max_line_length": 43, "num_lines": 30, "path": "/MTCNN2/Note/OS_Path.py", "repo_name": "greenkarson/python", "src_encoding": "UTF-8", "text": "import os,torch\n\npath1 = \"home\"\npath2 = \"karson\"\npath3 = \"work\"\nPATH = os.path.join(path1,path2,path3)\nPATH2 = path1 + path2 + path3\nprint(PATH,PATH2)\n\nPATH_cebela = \"../cebela/48/positive.txt\"\nfile = open(PATH_cebela,\"r\")\n\nprint(torch.randn(3,14,14) / 255.-0.5)\n\ntry:\n # ้€่กŒ่ฏปๅ–\n text_lines = file.readlines()\n # ่ฏปๅ…ฅ้ฆ–่กŒ\n # text_lines = file.readline()\n strs = text_lines[0].strip().split(\" \")\n print(torch.tensor([int(strs[1])]))\n print(text_lines)\n\n\n # text_lines = file.readlines()\n # print(type(text_lines), text_lines)\n # for line in text_lines:\n # print(type(line), line)\nfinally:\n file.close()\n" }, { "alpha_fraction": 0.5644171833992004, "alphanum_fraction": 0.6748466491699219, "avg_line_length": 17.22222137451172, "blob_id": "12bf86025ef375350f92a8810a7c19838365f141", "content_id": "d920c2a5263543eca02cee0d2051e37f435d2779", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 163, "license_type": "no_license", "max_line_length": 35, "num_lines": 9, "path": "/OpenCV_Practice/canny.py", "repo_name": "greenkarson/python", "src_encoding": "UTF-8", "text": "import cv2\n\nimg = cv2.imread(\"1.jpg\",0)\ndst = cv2.GaussianBlur(img,(3,3),0)\ndst = cv2.Canny(dst,50,150)\n\ncv2.imshow(\"src\",img)\ncv2.imshow(\"dst\",dst)\ncv2.waitKey(0)" }, { "alpha_fraction": 0.6010928750038147, "alphanum_fraction": 0.6693989038467407, "avg_line_length": 19.38888931274414, "blob_id": 
"d3327c3be32ae47df389a9c8a80949c4c5086662", "content_id": "3eb1a126e24e18b1c29c59b4d67abd8b2351b41c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 366, "license_type": "no_license", "max_line_length": 53, "num_lines": 18, "path": "/OpenCV_Practice/corner_shi_tomasi_1.py", "repo_name": "greenkarson/python", "src_encoding": "UTF-8", "text": "import cv2\nimport numpy as np\nimg = cv2.imread(\"32.jpg\")\ngray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n# print(img.shape)\n\ncorners = cv2.goodFeaturesToTrack(gray,100, 0.01, 10)\nprint(corners)\ncorners = np.int0(corners)\n# print(corners)\n\nfor i in corners:\n x, y = i.ravel()\n # print(x,y)\n cv2.circle(img, (x, y), 3, 255)\n\ncv2.imshow(\"img\", img)\ncv2.waitKey(0)" }, { "alpha_fraction": 0.5712630152702332, "alphanum_fraction": 0.6512166857719421, "avg_line_length": 25.96875, "blob_id": "02b847d5f3095cc8421fcee25580d4411fc16973", "content_id": "483fb913f9c61410f5f42f4a08c64ab2a6f35a76", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 863, "license_type": "no_license", "max_line_length": 95, "num_lines": 32, "path": "/PythonStudy/Numpy/Picture_cutting.py", "repo_name": "greenkarson/python", "src_encoding": "UTF-8", "text": "import numpy as np\nfrom PIL import Image\nimg = Image.open(\"1.jpg\")\n#img.show()\n\nimg_data = np.array(img)\nprint(img_data.shape)\nimg_data = img_data.reshape(2, 690 // 2, 2, 750 // 2, 3)\nprint(img_data.shape)\nimg_data = img_data.transpose(0,2,1,3,4)\nprint(img_data.shape)\nimg_data = img_data.reshape(4,345,375,3)\nprint(img_data.shape)\nimgs = np.split(img_data,4)\n\nprint(imgs[0].shape)\n\n# for i in imgs:\n# img = Image.fromarray(i[0])\n# img.show()\nimg_0 = imgs[0][0]\nimg_1 = imgs[1][0]\nimg_2 = imgs[2][0]\nimg_3 = imgs[3][0]\n# imgs_data = np.concatenate([img_0[None,...],img_1[None,...],img_2[None,...],img_3[None,...]])\nimgs_data = np.stack([img_0,img_1,img_2,img_3])\nimgs_data = imgs_data.reshape(2,2,345,375,3)\nimgs_data = imgs_data.transpose(0,2,1,3,4)\nimgs_data = imgs_data.reshape(690,750,3)\na = Image.fromarray(imgs_data)\na.show()\nprint(imgs_data.shape)\n" }, { "alpha_fraction": 0.6794871687889099, "alphanum_fraction": 0.682692289352417, "avg_line_length": 28.25, "blob_id": "7ad8320447fa27044442835cdd41b3b29b765b51", "content_id": "aaa117fe14d667009fe0cb8c9a7eb1771c980349", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1004, "license_type": "no_license", "max_line_length": 86, "num_lines": 32, "path": "/PythonStudy/Machine_Learning/Decision_tree.py", "repo_name": "greenkarson/python", "src_encoding": "UTF-8", "text": "import numpy as np\nfrom sklearn import datasets,preprocessing,tree\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import accuracy_score\nimport graphviz\n\n# ๆ•ฐๆฎๅŠ ่ฝฝ\nwine = datasets.load_wine()\nx,y = wine.data,wine.target\n# ๅˆ’ๅˆ†่ฎญ็ปƒ้›†ไธŽๆต‹่ฏ•้›†\nx_train,x_test,y_train,y_test = train_test_split(x,y,test_size=0.3,random_state=0)\n\n# ๆ•ฐๆฎ้ข„ๅค„็†\nscaler = preprocessing.StandardScaler().fit(x_train)\nx_train = scaler.transform(x_train)\nx_test = scaler.transform(x_test)\n# ๅˆ›ๅปบๆจกๅž‹\ndc = tree.DecisionTreeClassifier(criterion=\"gini\")\n# ๆจกๅž‹ๆ‹Ÿๅˆ\ndc.fit(x_train,y_train)\n# ไบคๅ‰้ชŒ่ฏ\n# ้ข„ๆต‹\ny_pred = dc.predict(x_test)\n# ่ฏ„ไผฐ\nprint(accuracy_score(y_test, y_pred))\n\ndot_data = tree.export_graphviz(dc, out_file=None,\n 
feature_names=wine.feature_names,\n                                class_names=wine.target_names,rounded=True,filled=True\n                                )\ngraph = graphviz.Source(dot_data)\ngraph.render(\"wine\")\n" }, { "alpha_fraction": 0.30483272671699524, "alphanum_fraction": 0.3828996419906616, "avg_line_length": 14.882352828979492, "blob_id": "1b87d34a6f1f579c5b43d4ddc00d048a42300de", "content_id": "d0258a8ad7b1f579c5b43d4ddc00d048a42300de", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 281, "license_type": "no_license", "max_line_length": 31, "num_lines": 17, "path": "/PythonStudy/Fz.py", "repo_name": "greenkarson/python", "src_encoding": "UTF-8", "text": "\"\"\"\nSolve a piecewise function\n        3x - 5  (x > 1)\nf(x) =  x + 2   (-1 <= x <= 1)\n        5x + 3  (x < -1)\nversion = 0.1\nauthor = karson\n\"\"\"\n\nx = float(input(\"x=:\"))\n\nif x > 1:\n    print(\"y=%.1f\" % (3*x-5))\nelif x < -1:\n    print(\"y=%.1f\" % (5*x+3))\nelse:\n    print(\"y=%.1f\" % (x+2))" }, { "alpha_fraction": 0.5542870044708252, "alphanum_fraction": 0.5487364530563354, "avg_line_length": 33.80644989013672, "blob_id": "66347d4318693606f0927a220caf99b40cb4f7d9", "content_id": "15d54ee98979eef89eb466db576ad56f52367df4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1079, "license_type": "no_license", "max_line_length": 69, "num_lines": 31, "path": "/AlexNet/rearrange_tiny_imagenet.py", "repo_name": "greenkarson/python", "src_encoding": "UTF-8", "text": "import os, glob\n\nIMAGENET_DIR = r'D:\\work\\tiny-imagenet-200'\nfor root, dirs, files in os.walk(IMAGENET_DIR):\n    if 'train' in root and 'images' in root:\n        class_dir, _ = os.path.split(root)\n        print(f'moving for : {class_dir}')\n\n        for txtfile in glob.glob(os.path.join(class_dir, \"*.txt\")):\n            os.remove(txtfile)\n\n        for img_file in files:\n            original_path = os.path.join(root, img_file)\n            new_path = os.path.join(class_dir, img_file)\n            os.rename(original_path, new_path)\n        os.rmdir(root)\n\n\n# for root, dirs, files in os.walk(IMAGENET_DIR):\n#     if 'train' in root and 'images' in root:\n#         # print(root)\n#         class_dir, _ = os.path.split(root)\n#         # print(class_dir, _)\n#         for txtfile in glob.glob(os.path.join(class_dir, \"*.txt\")):\n#             # print(txtfile)\n#\n#         for img_file in files:\n#             original_path = os.path.join(root, img_file)\n#             # print(original_path)\n#             new_path = os.path.join(class_dir, img_file)\n#             # print(new_path)\n" }, { "alpha_fraction": 0.5973303914070129, "alphanum_fraction": 0.6017797589302063, "avg_line_length": 41.761905670166016, "blob_id": "106a300c8c7ee278f323d63c82cd930723915b32", "content_id": "4e30f819fea181efa928040b8130038cfba3d3c9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 899, "license_type": "no_license", "max_line_length": 86, "num_lines": 21, "path": "/MTCNN2/Note/test2.py", "repo_name": "greenkarson/python", "src_encoding": "UTF-8", "text": "import os\nimport numpy as np\nlandmarks_path = \"/Users/karson/Downloads/CelebaA/Anno/list_landmarks_celeba_test.txt\"\nbbox_path = \"/Users/karson/Downloads/CelebaA/Anno/list_bbox_celeba_test.txt\"\n\nf_box_anno = open(bbox_path)\nf_landmarks_anno = open(landmarks_path)\nfor i, (f_box_line, f_landmarks_line) in enumerate(zip(f_box_anno, f_landmarks_anno)):\n    if i < 2:  # skip the top two lines in anno files\n        continue\n    image_name = f_box_line.strip().split()[0]\n\n    boxes = f_box_line.strip().split()[1:]\n    # boxes = list(filter(lambda x: x != '', boxes))\n    # boxes = np.array(boxes).astype(int)\n    
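# note: the bbox fields parsed above are still raw strings here; the commented casts would turn them into ints\n    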
print(boxes)\n\n    landmarks = f_landmarks_line.strip().split()[1:]\n    # landmarks = list(filter(lambda x: x != '', landmarks))\n    # landmarks = np.array(landmarks).astype(int)\n    print(landmarks)\n\n" }, { "alpha_fraction": 0.650602400302887, "alphanum_fraction": 0.7018072009086609, "avg_line_length": 26.66666603088379, "blob_id": "06086ec77d20e8738cbf2f7aa5f9c0a6bee3d425", "content_id": "7903c91f90c0f71254a6f390ae368e53b8061434", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 668, "license_type": "no_license", "max_line_length": 71, "num_lines": 24, "path": "/deep_learning/day01/test.py", "repo_name": "greenkarson/python", "src_encoding": "UTF-8", "text": "import torch\nfrom torch.utils.data import DataLoader\nfrom day01.data import *\nfrom day01.net import *\ntags = torch.randn(4,10)\ntest_y = torch.randn(4,10)\ntest_loss = torch.mean((tags - test_y) ** 2)\n# test_loss->tensor(1.9911), a scalar tensor\na_argmax = torch.argmax(test_y,dim=1)\nb_argmax = torch.argmax(tags,dim=1)\nc = torch.eq(a_argmax,b_argmax).float()\n# torch.Size([4])\n\nh = torch.randn(4,10)\n# torch.Size([4, 1])\nz = torch.sum(h,dim=1,keepdim=True)\n# torch.Size([4, 10])\n# h/z\ntrain_dataset = MNISTDataset(\"../data/MNIST_IMG\",True)\ntrain_dataloder = DataLoader(train_dataset,batch_size=100,shuffle=True)\n# len(train_dataloder)\n# 600\nprint(a_argmax.size())\nprint(c)\n" }, { "alpha_fraction": 0.607723593711853, "alphanum_fraction": 0.6219512224197388, "avg_line_length": 23.4761905670166, "blob_id": "8c4dcfe5c2b9ca6259399b1fdec40c410f7683b8", "content_id": "aad68471d4d3bdce240c9cbca5180ce0853cf14e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 850, "license_type": "no_license", "max_line_length": 74, "num_lines": 21, "path": "/PythonStudy/check_num2.py", "repo_name": "greenkarson/python", "src_encoding": "UTF-8", "text": "\"\"\"\nRead a positive integer and check whether it is prime\nVersion: 0.1\nAuthor: Karson\n\"\"\"\n\nnum = int(input(\"Enter a positive integer: \"))\nif num > 1:\n    for i in range(2, num):\n        if num % i == 0:\n            print(\"%d is not prime\" % num)\n            break\n    else:\n        print(\"%d is prime\" % num)\n    \"\"\"\n    Savor this code carefully: the else here is actually paired with the for, not with the if. We usually see if...else... or if...elif...else and the like,\n    but for can be paired with else too. In this code, once some iteration finds a remainder of 0, break fires and the loop ends,\n    so the else clause paired with the for is not executed; when the whole traversal finishes without any remainder being 0, control falls through to the else and prints that the number is prime.\n    \"\"\"\nelse:\n    print(\"%d is not prime\" % num)" }, { "alpha_fraction": 0.7555555701255798, "alphanum_fraction": 0.800000011920929, "avg_line_length": 14, "blob_id": "e6796b13f3d679a79e4824a0f95597d41ca5b7d2", "content_id": "0fc8cca50d819da518176c7dea0a41dda894c4eb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 45, "license_type": "no_license", "max_line_length": 32, "num_lines": 3, "path": "/OpenCV_Practice/video_camshift.py", "repo_name": "greenkarson/python", "src_encoding": "UTF-8", "text": "import cv2\n\ntracker = cv2.TrackerCSRT_create()\n" }, { "alpha_fraction": 0.4211932420730591, "alphanum_fraction": 0.4728406071662903, "avg_line_length": 23.933332443237305, "blob_id": "aface82d6ab7ab22e43b4dcca06ab90562cfc36e", "content_id": "10a41a1c07325d204d5306a9368ebdeec7145f9e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1153, "license_type": "no_license", "max_line_length": 47, "num_lines": 45, "path": "/MTCNN/tool/RNet.py", "repo_name": "greenkarson/python", "src_encoding": "UTF-8", "text": "from torch import nn\nimport torch\nimport torch.nn.functional as F\n\n\nclass RNet(nn.Module):\n\n    def __init__(self):\n        super().__init__()\n        self.pre_layer = nn.Sequential(\n            nn.Conv2d(3, 28, 3, 1, padding=1),\n            nn.PReLU(),\n            nn.MaxPool2d(3, 2),\n            nn.Conv2d(28, 48, 3, 1, padding=0),\n            nn.PReLU(),\n            nn.MaxPool2d(3, 2),\n            nn.Conv2d(48, 64, 2, 1, padding=0),\n            nn.PReLU()\n        )\n        self.fc = nn.Linear(3 * 3 * 64, 128)\n        self.prelu = nn.PReLU()\n        # confidence output\n        self.detect = nn.Linear(128, 1)\n        # offset output\n        self.offset = nn.Linear(128, 4)\n\n    def forward(self, x):\n        h = self.pre_layer(x)\n        # h = h.reshape(-1,3*3*64)\n        h = h.view(h.size(0), -1)\n        h = self.fc(h)\n        h = self.prelu(h)\n        # confidence output\n        label = F.sigmoid(self.detect(h))\n        offset = self.offset(h)\n        return label, offset\n\n\nif __name__ == '__main__':\n    net = RNet()\n    x = torch.randn(1, 3, 24, 24)\n    # y = net(x)\n    # print(y.size(0))\n    a, b = net(x)\n    print(a.shape, b.shape)\n\n" }, { "alpha_fraction": 0.36152219772338867, "alphanum_fraction": 0.48202958703041077, "avg_line_length": 21.5238094329834, "blob_id": "3807233ac1767e6d917d4dba1310e102c73dc654", "content_id": "afaedcea972bbd6f7f7fb54d5ec6ff77aa208c2f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 473, "license_type": "no_license", "max_line_length": 86, "num_lines": 21, "path": "/PythonStudy/Numpy/pixel_shuffle.py", "repo_name": "greenkarson/python", "src_encoding": "UTF-8", "text": "import numpy as np\n\na = np.array([[[1, 1], [1, 1]], [[2, 2], [2, 2]], [[3, 3], [3, 3]], [[4, 4], [4, 4]]])\na = a.reshape(1, 4, 4)\na = a.transpose(0, 2, 1)\na = a.reshape(4, 2, 2)\na_data = np.split(a, 4)\n\na_0 = a_data[0][0]\na_1 = a_data[1][0]\na_2 = a_data[2][0]\na_3 = a_data[3][0]\n\nb = np.stack([a_0, a_1], axis=1)\nb = b.reshape(1, 2, 4)\nc = np.stack([a_2, a_3], axis=1)\nc = c.reshape(1, 2, 4)\nd = np.stack([b, c])\nd = d.transpose(1, 0, 2, 3)\nd = d.reshape(1, 4, 4)\nprint(d)\n" }, { "alpha_fraction": 0.5483871102333069, "alphanum_fraction": 0.5483871102333069, "avg_line_length": 18.272727966308594, "blob_id": "1fd7635f436d6d2518e01dd98554ccf6c51885d5", "content_id": "b4339a494e67a322cf1c32eae37b4c7aa6350a65", "detected_licenses": [], "is_generated":
false, "is_vendor": false, "language": "Python", "length_bytes": 201, "license_type": "no_license", "max_line_length": 31, "num_lines": 11, "path": "/deep_learning/day02/ce.py", "repo_name": "greenkarson/python", "src_encoding": "UTF-8", "text": "import torch\nfrom torch import nn\n\nclass CE(nn.Module):\n\n def __init__(self):\n super().__init__()\n\n def forward(self,ys,tags):\n h = -ys*torch.log(tags)\n return torch.mean(h)\n" }, { "alpha_fraction": 0.5513196587562561, "alphanum_fraction": 0.6392961740493774, "avg_line_length": 30.090909957885742, "blob_id": "8b431cb0d60a8876527974e4e0ed0863951e26cf", "content_id": "61b5252d752bd77cbb34577fa221144c5120cf21", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 341, "license_type": "no_license", "max_line_length": 50, "num_lines": 11, "path": "/OpenCV_Practice/hist.py", "repo_name": "greenkarson/python", "src_encoding": "UTF-8", "text": "import cv2\nimport matplotlib.pyplot as plt\nimg = cv2.imread(\"1.jpg\")\n\nimg_B = cv2.calcHist([img],[0],None,[256],[0,256])\nplt.plot(img_B,label=\"b\",color=\"b\")\nimg_G = cv2.calcHist([img],[1],None,[256],[0,256])\nplt.plot(img_G,label=\"g\",color=\"g\")\nimg_R = cv2.calcHist([img],[2],None,[256],[0,256])\nplt.plot(img_R,label=\"r\",color=\"r\")\nplt.show()" }, { "alpha_fraction": 0.5270643830299377, "alphanum_fraction": 0.6337416172027588, "avg_line_length": 26.813186645507812, "blob_id": "e59bb58cdef722aa17ad8658e0fc67ceb7cc45bd", "content_id": "acbf0124c0f8cc84dfb0c03e2a784fa210f42132", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3551, "license_type": "no_license", "max_line_length": 82, "num_lines": 91, "path": "/PythonStudy/Numpy/Numpy_Practice2.py", "repo_name": "greenkarson/python", "src_encoding": "UTF-8", "text": "# 22. ๅฆ‚ไฝ•้€š่ฟ‡eๅผ็ง‘ๅญฆ่ฎฐๆ•ฐๆณ•๏ผˆๅฆ‚1e10๏ผ‰ๆฅๆ‰“ๅฐไธ€ไธชnumpyๆ•ฐ็ป„๏ผŸ\n# **้šพๅบฆ็ญ‰็บง๏ผš**L1\n# **้—ฎ้ข˜๏ผš**้€š่ฟ‡eๅผ็ง‘ๅญฆ่ฎฐๆ•ฐๆณ•ๆฅๆ‰“ๅฐrand_arr๏ผˆๅฆ‚1e10๏ผ‰\n# ็ป™ๅฎš๏ผš\n# Create the random array\n# np.random.seed(100)\n# rand_arr = np.random.random([3,3])/1e3\n# rand_arr\n# > array([[ 5.434049e-04, 2.783694e-04, 4.245176e-04],\n# > [ 8.447761e-04, 4.718856e-06, 1.215691e-04],\n# > [ 6.707491e-04, 8.258528e-04, 1.367066e-04]])\n# ๆœŸๆœ›็š„่พ“ๅ‡บ๏ผš\n# > array([[ 0.000543, 0.000278, 0.000425],\n# > [ 0.000845, 0.000005, 0.000122],\n# > [ 0.000671, 0.000826, 0.000137]])\n\n\n\n#23. ๅฆ‚ไฝ•้™ๅˆถnumpyๆ•ฐ็ป„่พ“ๅ‡บไธญๆ‰“ๅฐ็š„้กน็›ฎๆ•ฐ๏ผŸ\n# **้šพๅบฆ็ญ‰็บง๏ผš**L1\n# **้—ฎ้ข˜๏ผš**ๅฐ†numpyๆ•ฐ็ป„aไธญๆ‰“ๅฐ็š„้กนๆ•ฐ้™ๅˆถไธบๆœ€ๅคš6ไธชๅ…ƒ็ด ใ€‚\n# ็ป™ๅฎš๏ผš\n# a = np.arange(15)\n# > array([ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14])\n# ๆœŸๆœ›็š„่พ“ๅ‡บ๏ผš\n# > array([ 0, 1, 2, ..., 12, 13, 14])\n\n\n#24. ๅฆ‚ไฝ•ๆ‰“ๅฐๅฎŒๆ•ด็š„numpyๆ•ฐ็ป„่€Œไธๆˆชๆ–ญ\n# **้šพๅบฆ็ญ‰็บง๏ผš**L1\n# **้—ฎ้ข˜๏ผš**ๆ‰“ๅฐๅฎŒๆ•ด็š„numpyๆ•ฐ็ป„a่€Œไธๆˆชๆ–ญใ€‚\n# ็ป™ๅฎš๏ผš\n# np.set_printoptions(threshold=6)\n# a = np.arange(15)\n# > array([ 0, 1, 2, ..., 12, 13, 14])\n# ๆœŸๆœ›็š„่พ“ๅ‡บ๏ผš\n# > array([ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14])\n\n\n#25. ๅฆ‚ไฝ•ๅฏผๅ…ฅๆ•ฐๅญ—ๅ’Œๆ–‡ๆœฌ็š„ๆ•ฐๆฎ้›†ไฟๆŒๆ–‡ๆœฌๅœจnumpyๆ•ฐ็ป„ไธญๅฎŒๅฅฝๆ— ๆŸ๏ผŸ\n# **้šพๅบฆ็ญ‰็บง๏ผš**L2\n# **้—ฎ้ข˜๏ผš**ๅฏผๅ…ฅ้ธขๅฐพๅฑžๆค็‰ฉๆ•ฐๆฎ้›†๏ผŒไฟๆŒๆ–‡ๆœฌไธๅ˜ใ€‚\n\n#26. 
ๅฆ‚ไฝ•ไปŽ1็ปดๅ…ƒ็ป„ๆ•ฐ็ป„ไธญๆๅ–็‰นๅฎšๅˆ—๏ผŸ\n# **้šพๅบฆ็ญ‰็บง๏ผš**L2\n# **้—ฎ้ข˜๏ผš**ไปŽๅ‰้ข้—ฎ้ข˜ไธญๅฏผๅ…ฅ็š„ไธ€็ปด้ธขๅฐพๅฑžๆค็‰ฉๆ•ฐๆฎ้›†ไธญๆๅ–ๆ–‡ๆœฌๅˆ—็š„็‰ฉ็งใ€‚\n# ็ป™ๅฎš๏ผš\n# url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data'\n# iris_1d = np.genfromtxt(url, delimiter=',', dtype=None)\n\n\n#27. ๅฆ‚ไฝ•ๅฐ†1็ปดๅ…ƒ็ป„ๆ•ฐ็ป„่ฝฌๆขไธบ2็ปดnumpyๆ•ฐ็ป„๏ผŸ\n# **้šพๅบฆ็ญ‰็บง๏ผš**L2\n# **้—ฎ้ข˜๏ผš**้€š่ฟ‡็œ็•ฅ้ธขๅฐพๅฑžๆค็‰ฉๆ•ฐๆฎ้›†็ง็ฑป็š„ๆ–‡ๆœฌๅญ—ๆฎต๏ผŒๅฐ†ไธ€็ปด้ธขๅฐพๅฑžๆค็‰ฉๆ•ฐๆฎ้›†่ฝฌๆขไธบไบŒ็ปดๆ•ฐ็ป„iris_2dใ€‚\n# ็ป™ๅฎš๏ผš\n# url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data'\n# iris_1d = np.genfromtxt(url, delimiter=',', dtype=None)\n\n\n#28. ๅฆ‚ไฝ•่ฎก็ฎ—numpyๆ•ฐ็ป„็š„ๅ‡ๅ€ผ๏ผŒไธญไฝๆ•ฐ๏ผŒๆ ‡ๅ‡†ๅทฎ๏ผŸ\n# **้šพๅบฆ็ญ‰็บง๏ผš**L1\n# **้—ฎ้ข˜๏ผš**ๆฑ‚ๅ‡บ้ธขๅฐพๅฑžๆค็‰ฉ่ผ็‰‡้•ฟๅบฆ็š„ๅนณๅ‡ๅ€ผใ€ไธญไฝๆ•ฐๅ’Œๆ ‡ๅ‡†ๅทฎ(็ฌฌ1ๅˆ—)\n# ็ป™ๅฎš๏ผš\n# url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data'\n# iris = np.genfromtxt(url, delimiter=',', dtype='object')\n\n\n\n#29. ๅฆ‚ไฝ•่ง„่ŒƒๅŒ–ๆ•ฐ็ป„๏ผŒไฝฟๆ•ฐ็ป„็š„ๅ€ผๆญฃๅฅฝไป‹ไบŽ0ๅ’Œ1ไน‹้—ด๏ผŸ\n# **้šพๅบฆ็ญ‰็บง๏ผš**L2\n# **้—ฎ้ข˜๏ผš**ๅˆ›ๅปบไธ€็งๆ ‡ๅ‡†ๅŒ–ๅฝขๅผ็š„้ธขๅฐพๅฑžๆค็‰ฉ้—ด้š”้•ฟๅบฆ๏ผŒๅ…ถๅ€ผๆญฃๅฅฝไป‹ไบŽ0ๅ’Œ1ไน‹้—ด๏ผŒ่ฟ™ๆ ทๆœ€ๅฐๅ€ผไธบ0๏ผŒๆœ€ๅคงๅ€ผไธบ1ใ€‚\n# ็ป™ๅฎš๏ผš\n# url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data'\n# sepallength = np.genfromtxt(url, delimiter=',', dtype='float', usecols=[0])\n\n\n#30. ๅฆ‚ไฝ•่ฎก็ฎ—Softmaxๅพ—ๅˆ†๏ผŸ\n# **้šพๅบฆ็ญ‰็บง๏ผš**L3\n# **้—ฎ้ข˜๏ผš**่ฎก็ฎ—sepallength็š„softmaxๅˆ†ๆ•ฐใ€‚\n# ็ป™ๅฎš๏ผš\n# url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data'\n# sepallength = np.genfromtxt(url, delimiter=',', dtype='float', usecols=[0])\n\n\n\n#31. 
How to find the percentile scores of a numpy array?\n# **Difficulty Level:** L1\n# **Question:** Find the 5th and 95th percentile of the iris sepal length.\n# Given:\n# url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data'\n# sepallength = np.genfromtxt(url, delimiter=',', dtype='float', usecols=[0])\n" }, { "alpha_fraction": 0.612500011920929, "alphanum_fraction": 0.6850000023841858, "avg_line_length": 26.600000381469727, "blob_id": "8b036ffa64e7cd2d40bd30cee033a9f9afd3c3ba", "content_id": "a26772a9746e0a066976009e84fdf57fe26999da", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 412, "license_type": "no_license", "max_line_length": 74, "num_lines": 15, "path": "/OpenCV_Practice/contour_feature.py", "repo_name": "greenkarson/python", "src_encoding": "UTF-8", "text": "import cv2\nimg = cv2.imread(\"15.jpg\",0)\nret, thres = cv2.threshold(img,50,255,cv2.THRESH_BINARY)\ncontours,_ = cv2.findContours(thres,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)\n\n\nM = cv2.moments(contours[0])\ncx,cy = int(M['m10']/M['m00']),int(M['m01']/M['m00'])\nprint(\"centroid\",cx,cy)\n\narea = cv2.contourArea(contours[0])\nprint(\"area\",area)\n\nperimeter = cv2.arcLength(contours[0], True)\nprint(\"perimeter:\", perimeter)\n\n" }, { "alpha_fraction": 0.4761904776096344, "alphanum_fraction": 0.523809552192688, "avg_line_length": 20.913043975830078, "blob_id": "2337a59d5d569e3d5a42b372bbd9bd16b876c29d", "content_id": "e472fa6ddf123c3c38151e1aee1bd0849f9b1321", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 504, "license_type": "no_license", "max_line_length": 52, "num_lines": 23, "path": "/deep_learning/day02/resblock.py", "repo_name": "greenkarson/python", "src_encoding": "UTF-8", "text": "from torch import nn\nimport torch\n\nclass ResNet(nn.Module):\n\n    def __init__(self):\n        super().__init__()\n\n        self.sequential = nn.Sequential(\n            nn.Conv2d(16,16,3,padding=1,bias=False),\n            nn.BatchNorm2d(16),\n            nn.ReLU(),\n            nn.Conv2d(16,16,3,padding=1,bias=False)\n        )\n\n    def forward(self,x):\n        return self.sequential(x) + x\n\nif __name__ == '__main__':\n    net = ResNet()\n    x = torch.randn(1, 16, 32, 32)\n    y = net(x)\n    print(y.shape)\n" }, { "alpha_fraction": 0.5547945499420166, "alphanum_fraction": 0.6164383292198181, "avg_line_length": 20, "blob_id": "cc70ea510eb86da2cbcc8cd6b2b545820022bd5d", "content_id": "7d212a39137705c6de10ba7275e66abf14292b7c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 146, "license_type": "no_license", "max_line_length": 37, "num_lines": 7, "path": "/DQN/test.py", "repo_name": "greenkarson/python", "src_encoding": "UTF-8", "text": "import torch\nQs = torch.tensor([[3,4],[5,6]])\na = torch.tensor([[0],[1]])\n\nb = torch.gather(Qs,1,a)\nprint(Qs.max(dim=1, keepdim=True)[0])\nprint(b)" }, { "alpha_fraction": 0.5458823442459106, "alphanum_fraction": 0.6188235282897949, "avg_line_length": 27.399999618530273, "blob_id": "4efee5a07dc4d767284e6ef1136330581c9c97cd", "content_id": "0c03dc4ae2b390bb31a9f7936608cbb7a4cd3961", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 425, "license_type": "no_license", "max_line_length": 60, "num_lines": 15, "path": "/Loss/test 2.py", "repo_name": "greenkarson/python", "src_encoding": "UTF-8", "text": "import torch\n\nlabel = torch.tensor([0, 1, 0])\nfeature = torch.tensor([[0.6, 0.8], [0.5, 0.7], [0.3, 0.1]])\ncenter = 
torch.tensor([[0.7, 0.8], [0.2, 0.3]])\n# print(label.shape,feature.shape,center.shape)\nc = torch.index_select(center, 0, label)\n_n = torch.histc(label.float(), 2, max=2)\nn = torch.index_select(_n,0,label)\nprint(c.shape,_n.shape)\nprint(c,_n,n)\n\nd = torch.sum((feature - c)**2, dim=1)**0.5\nloss = d / n\nprint(d)" }, { "alpha_fraction": 0.5483871102333069, "alphanum_fraction": 0.5722300410270691, "avg_line_length": 25.407407760620117, "blob_id": "4b0252069477e6e7019b7fe770455f38fa1dd310", "content_id": "170909ad87a4de41589ba9f38cfacb2679497a5f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 743, "license_type": "no_license", "max_line_length": 64, "num_lines": 27, "path": "/GAN/data.py", "repo_name": "greenkarson/python", "src_encoding": "UTF-8", "text": "from torch.utils.data import Dataset\nimport cv2, os, torch\nimport numpy as np\n\n\nclass FaceMyData(Dataset):\n def __init__(self, root):\n self.root = root\n self.dataset = os.listdir(root)\n\n def __len__(self):\n return len(self.dataset)\n\n def __getitem__(self, index):\n filename = self.dataset[index]\n img_data = cv2.imread(f\"{self.root}/{filename}\")\n img_data = img_data[..., ::-1]\n img_data = img_data.transpose(2, 0, 1)\n # ็”Ÿๆˆ็ฝ‘็ปœ้œ€่ฆ-1๏ผŒ1็›ดๅพ„ๆ•ฐๆฎ่ฐƒๆ•ด่พ“ๅ‡บ\n img_data = ((img_data / 255. -0.5)*2).astype(np.float32)\n # print(img_data.shape)\n return img_data\n\n\nif __name__ == '__main__':\n dataset = FaceMyData(\"./faces\")\n print(dataset[0])\n" }, { "alpha_fraction": 0.6781914830207825, "alphanum_fraction": 0.710106372833252, "avg_line_length": 22.4375, "blob_id": "c0d11844b6ba7248794bc9415c355c2ee63a8d32", "content_id": "f7dc112fd33fad2108113b0fbdaa585595b3d787", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 824, "license_type": "no_license", "max_line_length": 57, "num_lines": 16, "path": "/PythonStudy/Centigrade.py", "repo_name": "greenkarson/python", "src_encoding": "UTF-8", "text": "\"\"\"\nๅฐ†ๅŽๆฐๆธฉๅบฆ่ฝฌๆขไธบๆ‘„ๆฐๆธฉๅบฆ\nๅŽๆฐๆธฉๅบฆๅˆฐๆ‘„ๆฐๆธฉๅบฆ็š„่ฝฌๆขๅ…ฌๅผไธบ๏ผšC=(F - 32) \\div 1.8ใ€‚\nVersion: 0.1\nAuthor: ้ช†ๆ˜Š\n\"\"\"\nf = float(input(\"่ฏท่พ“ๅ…ฅๅŽๆฐๆธฉๅบฆ\"))\nc = (f - 32) / 1.8\n'''\nๅœจ print() ๅ‡ฝๆ•ฐไธญ๏ผŒ็”ฑๅผ•ๅทๅŒ…ๅ›ด็š„ๆ˜ฏๆ ผๅผๅŒ–ๅญ—็ฌฆไธฒ๏ผŒๅฎƒ็›ธๅฝ“ไบŽไธ€ไธชๅญ—็ฌฆไธฒๆจกๆฟ๏ผŒๅฏไปฅๆ”พ็ฝฎไธ€ไบ›่ฝฌๆข่ฏดๆ˜Ž็ฌฆ๏ผˆๅ ไฝ็ฌฆ๏ผ‰ใ€‚\nๆœฌไพ‹็š„ๆ ผๅผๅŒ–ๅญ—็ฌฆไธฒไธญๅŒ…ๅซไธ€ไธช%s่ฏดๆ˜Ž็ฌฆ๏ผŒๅฎƒๆœ€็ปˆไผš่ขซๅŽ้ข็š„f,cๅ˜้‡็š„ๅ€ผๆ‰€ๆ›ฟไปฃใ€‚\nไธญ้—ด็š„%ๆ˜ฏไธ€ไธชๅˆ†้š”็ฌฆ๏ผŒๅฎƒๅ‰้ขๆ˜ฏๆ ผๅผๅŒ–ๅญ—็ฌฆไธฒ๏ผŒๅŽ้ขๆ˜ฏ่ฆ่พ“ๅ‡บ็š„่กจ่พพๅผใ€‚\nๅฝ“็„ถ๏ผŒๆ ผๅผๅŒ–ๅญ—็ฌฆไธฒไธญไนŸๅฏไปฅๅŒ…ๅซๅคšไธช่ฝฌๆข่ฏดๆ˜Ž็ฌฆ๏ผŒ่ฟ™ไธชๆ—ถๅ€™ไนŸๅพ—ๆไพ›ๅคšไธช่กจ่พพๅผ๏ผŒ\n็”จไปฅๆ›ฟๆขๅฏนๅบ”็š„่ฝฌๆข่ฏดๆ˜Ž็ฌฆ๏ผ›ๅคšไธช่กจ่พพๅผๅฟ…้กปไฝฟ็”จๅฐๆ‹ฌๅท()ๅŒ…ๅ›ด่ตทๆฅ\n'''\nprint(\"%.1fๅŽๆฐๅบฆ = %.1fๆ‘„ๆฐๅบฆ\" % (f, c))\n\n" }, { "alpha_fraction": 0.45006778836250305, "alphanum_fraction": 0.5042927861213684, "avg_line_length": 25.987804412841797, "blob_id": "d1b350196ea88494dbe2bd136a8ce045f2c57e92", "content_id": "e75ef3d9b271aaf7abcfd1c1496ac46878aaf4de", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2437, "license_type": "no_license", "max_line_length": 74, "num_lines": 82, "path": "/FACE_MTCNN/tools/utils.py", "repo_name": "greenkarson/python", "src_encoding": "UTF-8", "text": "import numpy as np\n\n\ndef iou(box, boxes, is_min=False):\n # box = x1,y1,x2,y2\n box_area = (box[2] - box[0]) * (box[3] - box[1])\n boxes_area = 
(boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])\n\n xx1 = np.maximum(box[0], boxes[:, 0])\n yy1 = np.maximum(box[1], boxes[:, 1])\n xx2 = np.minimum(box[2], boxes[:, 2])\n yy2 = np.minimum(box[3], boxes[:, 3])\n\n w = np.maximum(0, xx2 - xx1)\n h = np.maximum(0, yy2 - yy1)\n if is_min:\n ovr = np.true_divide((w * h), np.minimum(box_area, boxes_area))\n return ovr\n else:\n ovr = np.true_divide((w * h), (box_area + boxes_area - (w * h)))\n return ovr\n\n\ndef old_nms(boxes, threshold, is_min=False):\n if boxes.shape[0] == 0:\n return np.array([])\n _boxes = boxes[(-boxes[:, 4]).argsort()]\n r_boxes = []\n\n while _boxes.shape[0] > 1:\n # ๅ–ๅ‡บ็ฌฌ1ไธชๆก†\n a_box = _boxes[0]\n # ๅ–ๅ‡บๅ‰ฉไฝ™็š„ๆก†\n b_boxes = _boxes[1:]\n\n # ๅฐ†1stไธชๆก†ๅŠ ๅ…ฅๅˆ—่กจ\n r_boxes.append(a_box) # ๆฏๅพช็Žฏไธ€ๆฌกๅพ€๏ผŒๆทปๅŠ ไธ€ไธชๆก†\n _boxes = b_boxes[iou(a_box, b_boxes, is_min) < threshold]\n\n if _boxes.shape[0] > 0:\n # ๆœ€ๅŽไธ€ๆฌก๏ผŒ็ป“ๆžœๅช็”จ1stไธช็ฌฆๅˆๆˆ–ๅชๆœ‰ไธ€ไธช็ฌฆๅˆ๏ผŒ่‹ฅๆก†็š„ไธชๆ•ฐๅคงไบŽ1๏ผ›\n # โ˜…ๆญคๅค„_boxes่ฐƒ็”จ็š„ๆ˜ฏwhilexๅพช็Žฏ้‡Œ็š„๏ผŒๆญคๅˆคๆ–ญๆกไปถๆ”พๅœจๅพช็Žฏ้‡Œๅ’Œๅค–้ƒฝๅฏไปฅ๏ผˆๅชๆœ‰ๅœจๅ‡ฝๆ•ฐ็ฑปๅค–ๆ‰ๅฏไบง็”Ÿๅฑ€้ƒจไฝœ็”จไบŽ๏ผ‰\n r_boxes.append(_boxes[0]) # ๅฐ†ๆญคๆก†ๆทปๅŠ ๅˆฐๅˆ—่กจไธญ\n\n return np.array(r_boxes)\n\ndef nms(dets, scores, thresh):\n \"\"\"Pure Python NMS baseline.\"\"\"\n x1 = dets[:, 0]\n y1 = dets[:, 1]\n x2 = dets[:, 2]\n y2 = dets[:, 3]\n\n areas = (x2 - x1 + 1) * (y2 - y1 + 1)\n order = scores.argsort()[::-1]\n\n keep = []\n while order.size > 0:\n i = order[0]\n keep.append(i)\n xx1 = np.maximum(x1[i], x1[order[1:]])\n yy1 = np.maximum(y1[i], y1[order[1:]])\n xx2 = np.minimum(x2[i], x2[order[1:]])\n yy2 = np.minimum(y2[i], y2[order[1:]])\n\n w = np.maximum(0.0, xx2 - xx1 + 1)\n h = np.maximum(0.0, yy2 - yy1 + 1)\n inter = w * h\n ovr = inter / (areas[i] + areas[order[1:]] - inter)\n\n inds = np.where(ovr <= thresh)[0]\n order = order[inds + 1]\n\n return keep\n\n\nif __name__ == '__main__':\n b = [38, 50, 120, 180]\n bs = [[38, 50, 120, 180], [45, 56, 110, 200]]\n bs = np.array(bs)\n res = iou(b, bs)\n print(res)\n" }, { "alpha_fraction": 0.48381659388542175, "alphanum_fraction": 0.5104517936706543, "avg_line_length": 26.155963897705078, "blob_id": "85b4ddce62d79d016d2737ff0c40ccffdd9c3649", "content_id": "507c7058a6340247a5b8b7f7c837878faf002527", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3044, "license_type": "no_license", "max_line_length": 71, "num_lines": 109, "path": "/MTCNN2/detect_self.py", "repo_name": "greenkarson/python", "src_encoding": "UTF-8", "text": "import torch,os\nimport Net\nfrom PIL import Image,ImageDraw\nfrom torchvision import transforms\nfrom tools import utils\n\ntf = transforms.Compose([transforms.ToTensor()])\n\n\nclass Detect:\n def __init__(self):\n self.pnet = Net.PNet()\n self.pnet.load_state_dict(torch.load(\"pnet.pt\"))\n self.pnet.eval()\n\n self.rnet = Net.RNet()\n # self.rnet.load_state_dict(torch.load(\"rnet.pt\"))\n\n self.onet = Net.ONet()\n # self.onet.load_state_dict(torch.load(\"onet.pt\"))\n\n def __call__(self, imgs):\n\n boxes = self.detect_pnet(imgs)\n\n if boxes is None:\n return []\n\n # boxes = self.detect_rnet(imgs, boxes)\n # if boxes is None:\n # return []\n\n # boxes = self.detect_onet(imgs, boxes)\n # if boxes is None:\n # return []\n return boxes\n\n def detect_pnet(self, imgs):\n\n scale = 1\n img_scale = imgs\n w, h = imgs.size\n res_boxes = []\n min_side = min(w, h)\n while min_side > 12:\n img_scale_tensor = tf(img_scale)\n\n # 
img_scale_tensor = img_scale_tensor[None,...]\n img_scale_tensor = torch.unsqueeze(img_scale_tensor, 0)\n\n predict_boxes = self.pnet(img_scale_tensor)\n\n predict_boxes.cpu().detach()\n torch.sigmoid_(predict_boxes[:, 0, ...])\n\n feature_map = predict_boxes[0, 0]\n cls_mask = feature_map > 0.7\n idx = cls_mask.nonzero()\n\n predict_x1 = idx[:, 1] * 2\n predict_y1 = idx[:, 0] * 2\n predict_x2 = predict_x1 + 12\n predict_y2 = predict_y1 + 12\n\n offset = predict_boxes[0, 1:5, cls_mask]\n\n x1 = (predict_x1 - offset[0, :] * 12) / scale\n y1 = (predict_y1 - offset[1, :] * 12) / scale\n x2 = (predict_x2 - offset[2, :] * 12) / scale\n y2 = (predict_y2 - offset[3, :] * 12) / scale\n\n cls = predict_boxes[0, 0, cls_mask]\n res_boxes.append(torch.stack([x1, y1, x2, y2, cls], dim=1))\n\n scale *= 0.702\n w, h = int(w * scale), int(h * scale)\n img_scale = img_scale.resize((w, h))\n print(w,h)\n min_side = min(w, h)\n\n\n # ๆฏๅฑ‚ไบง็”Ÿ็š„่พนๆก†่ฟ›่กŒๆ€ปๆ‹ผๆŽฅ\n total_boxes = torch.cat(res_boxes, dim=0)\n ret = utils.nms(total_boxes.cpu().detach().numpy(),0.3)\n return ret\n\n def detect_rnet(self, imgs, boxes):\n pass\n\n def detect_onet(self, imgs, boxes):\n pass\n\n\nif __name__ == \"__main__\":\n test_img = Image.open(\"2.jpg\")\n img_draw = ImageDraw.Draw(test_img)\n detector = Detect()\n box = detector(test_img)\n\n for i in box: # ๅคšไธชๆก†๏ผŒๆฒกๅพช็Žฏไธ€ๆฌกๆก†ไธ€ไธชไบบ่„ธ\n x1 = int(i[0])\n y1 = int(i[1])\n x2 = int(i[2])\n y2 = int(i[3])\n\n # print((x1, y1, x2, y2))\n # print(\"conf:\", i[4]) # ็ฝฎไฟกๅบฆ\n img_draw.rectangle((x1, y1, x2, y2), outline='red',width=2)\n test_img.show() # ๆฏๅพช็Žฏไธ€ๆฌกๆก†ไธ€ไธชไบบ่„ธ\n\n\n\n\n\n\n" }, { "alpha_fraction": 0.49028801918029785, "alphanum_fraction": 0.5418620109558105, "avg_line_length": 30.43157958984375, "blob_id": "703f67f012610bc909e8f1cd6790c8078bdebb31", "content_id": "0a574a35124c86d52d04da21959ef82c6d4217d2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2994, "license_type": "no_license", "max_line_length": 77, "num_lines": 95, "path": "/GAN/net.py", "repo_name": "greenkarson/python", "src_encoding": "UTF-8", "text": "import torch\nfrom torch import nn\n\n\nclass DNet(nn.Module):\n def __init__(self):\n super(DNet, self).__init__()\n self.sequential = nn.Sequential(\n nn.Conv2d(3, 64, 5, stride=3, padding=1, bias=False),\n nn.BatchNorm2d(64),\n nn.LeakyReLU(0.2, inplace=True),\n nn.Conv2d(64, 128, 4, stride=2, padding=1, bias=False),\n nn.BatchNorm2d(128),\n nn.LeakyReLU(0.2, inplace=True),\n nn.Conv2d(128, 256, 4, stride=2, padding=1, bias=False),\n nn.BatchNorm2d(256),\n nn.LeakyReLU(0.2, inplace=True),\n nn.Conv2d(256, 512, 4, stride=2, padding=1, bias=False),\n nn.BatchNorm2d(512),\n nn.LeakyReLU(0.2, inplace=True),\n nn.Conv2d(512, 1, 4, 1, padding=0, bias=False),\n )\n\n def forward(self, img):\n y = self.sequential(img)\n # print(y.shape)\n return y.reshape(-1)\n\n\nclass GNet(nn.Module):\n def __init__(self):\n super(GNet, self).__init__()\n self.sequential = nn.Sequential(\n nn.ConvTranspose2d(128, 512, 4, stride=1, padding=0, bias=False),\n nn.BatchNorm2d(512),\n # inplace ๅ†…ๅญ˜ๆ›ฟๆข\n nn.LeakyReLU(0.2, inplace=True),\n nn.ConvTranspose2d(512, 256, 4, stride=2, padding=1, bias=False),\n nn.BatchNorm2d(256),\n nn.LeakyReLU(0.2, inplace=True),\n nn.ConvTranspose2d(256, 128, 4, stride=2, padding=1, bias=False),\n nn.BatchNorm2d(128),\n nn.LeakyReLU(0.2, inplace=True),\n nn.ConvTranspose2d(128, 64, 4, stride=2, padding=1, bias=False),\n nn.BatchNorm2d(64),\n 
nn.LeakyReLU(0.2, inplace=True),\n nn.ConvTranspose2d(64, 3, 5, stride=3, padding=1, bias=False),\n nn.Tanh()\n )\n\n def forward(self, nosie):\n y = self.sequential(nosie)\n return y\n\n\nclass DCGAN(nn.Module):\n def __init__(self):\n super(DCGAN, self).__init__()\n self.gnet = GNet()\n self.dnet = DNet()\n\n self.loss_fn = nn.BCEWithLogitsLoss()\n\n def forward(self, noise):\n return self.gnet(noise)\n\n def get_D_loss(self, noise_d, real_img):\n real_y = self.dnet(real_img)\n g_img = self.gnet(noise_d)\n fake_y = self.dnet(g_img)\n\n real_tag = torch.ones(real_img.size(0)).cuda()\n fake_tag = torch.zeros(noise_d.size(0)).cuda()\n\n loss_real = self.loss_fn(real_y, real_tag)\n loss_fake = self.loss_fn(fake_y, fake_tag)\n\n loss_d = loss_fake + loss_real\n return loss_d\n\n def get_G_loss(self, noise_g):\n _g_img = self.gnet(noise_g)\n _real_y = self.dnet(_g_img)\n _real_tag = torch.ones(noise_g.size(0)).cuda()\n\n loss_g = self.loss_fn(_real_y, _real_tag)\n return loss_g\n\n\nif __name__ == '__main__':\n net = DCGAN()\n x = torch.randn(1, 128, 1, 1)\n real = torch.randn(4, 3, 96, 96)\n loss1, loss2 = net(x, x, real)\n # print(y.shape)\n" }, { "alpha_fraction": 0.6770833134651184, "alphanum_fraction": 0.71875, "avg_line_length": 18.399999618530273, "blob_id": "dba741d44870150bdf32a1ec9b18afcdb8dfe167", "content_id": "523a7690bd61ef51d7db116ed169e6e3f205823d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 96, "license_type": "no_license", "max_line_length": 32, "num_lines": 5, "path": "/test/test.py", "repo_name": "greenkarson/python", "src_encoding": "UTF-8", "text": "import numpy as np\nimport torch\nprint(torch.cuda.is_available())\na = np.array([12, 13])\nprint(a)" }, { "alpha_fraction": 0.6474500894546509, "alphanum_fraction": 0.7494456768035889, "avg_line_length": 29.133333206176758, "blob_id": "ed48f9707602d8a3763918c96c958860e8c4c054", "content_id": "3fd58b7e58a0a06178c517039550b4a519fc7e70", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 451, "license_type": "no_license", "max_line_length": 76, "num_lines": 15, "path": "/OpenCV_Practice/contour_match.py", "repo_name": "greenkarson/python", "src_encoding": "UTF-8", "text": "import cv2\n\nimg = cv2.imread(\"16.jpg\",0)\nimg2 = cv2.imread(\"17.jpg\",0)\n\nret1,thresh1 = cv2.threshold(img,55,255,cv2.THRESH_BINARY)\ncontours,_ = cv2.findContours(thresh1,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)\ncnt1 = contours[0]\n\nret2,thresh2 = cv2.threshold(img,55,255,cv2.THRESH_BINARY)\ncontours,_ = cv2.findContours(thresh2,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)\ncnt2 = contours[0]\n\nret = cv2.matchShapes(cnt1,cnt2,cv2.CONTOURS_MATCH_I2,0)\nprint(ret)" }, { "alpha_fraction": 0.4067796468734741, "alphanum_fraction": 0.47118642926216125, "avg_line_length": 23.19672203063965, "blob_id": "24ea6ce13c7dcfdbc4970c368f70bae326a8443d", "content_id": "dbe846080d17f32a4122eb48c56dbc1faa9e20af", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1475, "license_type": "no_license", "max_line_length": 65, "num_lines": 61, "path": "/SEQ2SEQ/net.py", "repo_name": "greenkarson/python", "src_encoding": "UTF-8", "text": "import torch\nfrom torch import nn\n\n\nclass Encoder(nn.Module):\n def __init__(self):\n super().__init__()\n self.sequential = nn.Sequential(\n nn.Conv2d(3, 16, 7, 2, 3),\n nn.ReLU(),\n nn.Conv2d(16, 32, 3, 1, 1),\n nn.ReLU(),\n nn.MaxPool2d(2),\n nn.Conv2d(32, 64, 3, 1, 1),\n 
nn.ReLU(),\n nn.MaxPool2d(2),\n nn.Conv2d(64, 128, 3, 1, 1),\n nn.ReLU(),\n nn.Conv2d(128, 128, 1, 1, 0),\n )\n\n def forward(self, x):\n h = self.sequential(x)\n return h\n\n\nclass Decoder(nn.Module):\n def __init__(self):\n super().__init__()\n self.rnn = nn.GRU(128 * 7 * 30, 128, 2, batch_first=True)\n self.output = nn.Linear(128, 10)\n\n def forward(self, x):\n x = x.reshape(-1, 128 * 7 * 30)\n x = x[:, None, :].repeat(1, 4, 1)\n h0 = torch.zeros(2 * 1, x.size(0), 128)\n output, hn = self.rnn(x, h0)\n outputs = self.output(output)\n return outputs\n\n\nclass CNN2SEQ(nn.Module):\n def __init__(self):\n super().__init__()\n self.encoder = Encoder()\n self.decoder = Decoder()\n\n def forward(self, x):\n f = self.encoder(x)\n y = self.decoder(f)\n return y\n\n\nif __name__ == '__main__':\n # net = Encoder()\n # net = Decoder()\n net = CNN2SEQ()\n x = torch.randn(2, 3, 60, 240)\n # x = torch.randn(2, 128, 7, 30)\n y = net(x)\n print(y.shape)" }, { "alpha_fraction": 0.5352941155433655, "alphanum_fraction": 0.6470588445663452, "avg_line_length": 23.285715103149414, "blob_id": "cf41622a2deda8ff14663eccba6e21e40e91bba3", "content_id": "c75df338c98c6f389c77c32e40a38ac8ec0b7caf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 176, "license_type": "no_license", "max_line_length": 34, "num_lines": 7, "path": "/OpenCV_Practice/transform.py", "repo_name": "greenkarson/python", "src_encoding": "UTF-8", "text": "import cv2\nimg = cv2.imread(\"1.jpg\")\n# dst = cv2.resize(img,(300,300))\n# dst = cv2.transpose(img)\ndst = cv2.flip(img,1) # 0,1,-1 ไธ‰็งๅ€ผ\ncv2.imshow(\"dst\",dst)\ncv2.waitKey(0)\n" }, { "alpha_fraction": 0.5900276899337769, "alphanum_fraction": 0.6648199558258057, "avg_line_length": 24.714284896850586, "blob_id": "25e8e1bc7bc2bf210adcc2f208c6e12d1fb7bc5c", "content_id": "c5695086779c1671570c886917a1d12b18705713", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 361, "license_type": "no_license", "max_line_length": 52, "num_lines": 14, "path": "/OpenCV_Practice/hist_equalize.py", "repo_name": "greenkarson/python", "src_encoding": "UTF-8", "text": "import cv2\nimport matplotlib.pyplot as plt\nimg = cv2.imread(\"7.jpg\",0)\n\nhist = cv2.calcHist([img],[0],None,[256],[0,256])\nplt.plot(hist,label = \"hist\",color=\"r\")\n\ndst = cv2.equalizeHist(img)\nhist_eq = cv2.calcHist([dst],[0],None,[256],[0,256])\nplt.plot(hist_eq,label = \"hist_eq\",color=\"b\")\nplt.show()\ncv2.imshow(\"src\",img)\ncv2.imshow(\"dst\",dst)\ncv2.waitKey(0)\n\n" }, { "alpha_fraction": 0.790123462677002, "alphanum_fraction": 0.8148148059844971, "avg_line_length": 19.5, "blob_id": "a4836e55308afeca4ec737a9574b3ba61b053885", "content_id": "90c476f8ba5c3278fbaceda4d12b2b57ea6506a8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 81, "license_type": "no_license", "max_line_length": 38, "num_lines": 4, "path": "/deep_learning/day02/ResNet.py", "repo_name": "greenkarson/python", "src_encoding": "UTF-8", "text": "from torchvision import models\n\nnet = models.resnet18(pretrained=True)\nprint(net)" }, { "alpha_fraction": 0.40928271412849426, "alphanum_fraction": 0.5189873576164246, "avg_line_length": 18.83333396911621, "blob_id": "2d30c86a6dfff7415b7d33669548791461b561bd", "content_id": "370848ef8bda6290e4918f261f600fd85a00c628", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 237, "license_type": 
"no_license", "max_line_length": 86, "num_lines": 12, "path": "/PythonStudy/Numpy/pixel_shuffle_better.py", "repo_name": "greenkarson/python", "src_encoding": "UTF-8", "text": "import numpy as np\n\na = np.array([[[1, 1], [1, 1]], [[2, 2], [2, 2]], [[3, 3], [3, 3]], [[4, 4], [4, 4]]])\nprint(a.shape)\na = a.reshape(2,2,2,2)\nprint(a.shape)\n\na = a.transpose(2,0,3,1)\nprint(a.shape)\nprint(a)\na = a.reshape(4,4)\nprint(a)" }, { "alpha_fraction": 0.7497155666351318, "alphanum_fraction": 0.7565415501594543, "avg_line_length": 24.882352828979492, "blob_id": "38bbf85e1af95b588e00fcb37b7098ab237d96a0", "content_id": "edd90e46ecadaa4eed69b0c92cc6760d6b32dc4e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 947, "license_type": "no_license", "max_line_length": 69, "num_lines": 34, "path": "/PythonStudy/Machine_Learning/KNN.py", "repo_name": "greenkarson/python", "src_encoding": "UTF-8", "text": "from sklearn import neighbors,datasets,preprocessing\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.model_selection import cross_val_score\nimport numpy as np\n\nnp.random.RandomState(0)\n\n# ๆ•ฐๆฎๅŠ ่ฝฝ\niris = datasets.load_iris()\n# print(iris)\n# ๅˆ’ๅˆ†่ฎญ็ปƒ้›†ไธŽๆต‹่ฏ•้›†\nx,y = iris.data,iris.target\nx_train,x_test,y_train,y_test = train_test_split(x,y,test_size=0.3)\n# print(x.shape,y.shape)\n# print(x_train.shape,x_test.shape)\n\n# ๆ•ฐๆฎ้ข„ๅค„็†\nscaler = preprocessing.StandardScaler().fit(x_train)\nx_train = scaler.transform(x_train)\nx_test = scaler.transform(x_test)\n# print(x_train,x_test)\n\n# ๅˆ›ๅปบๆจกๅž‹\nknn =neighbors.KNeighborsClassifier(n_neighbors=12)\n# ๆจกๅž‹ๆ‹Ÿๅˆ\nknn.fit(x_train,y_train)\n# ไบคๅ‰้ชŒ่ฏ\nscores = cross_val_score(knn,x_train,y_train,cv=5,scoring='accuracy')\nprint(scores)\n# ้ข„ๆต‹\ny_pred = knn.predict(x_test)\n# ่ฏ„ไผฐ\nprint(accuracy_score(y_test,y_pred))" }, { "alpha_fraction": 0.35758835077285767, "alphanum_fraction": 0.48232847452163696, "avg_line_length": 23.075000762939453, "blob_id": "cab1c88a6c640147be5481ab8ccd1b68ab6dc70d", "content_id": "65f66a839da19c6705c31213fc419bca0f3fd0fe", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 962, "license_type": "no_license", "max_line_length": 47, "num_lines": 40, "path": "/MTCNN2/Note/check_test.py", "repo_name": "greenkarson/python", "src_encoding": "UTF-8", "text": "import numpy as np\nstrs = ['000001.jpg','95','71','226','313']\nx1 = int(strs[1])\ny1 = int(strs[2])\nw = int(strs[3])\nh = int(strs[4])\nx2 = x1 + w\ny2 = y1 + h\n\n\nx1 = int(x1 + w * 0.12)\ny1 = int(y1 + h * 0.1)\nx2 = int(x1 + w * 0.9)\ny2 = int(y1 + h * 0.85)\nw = x2 - x1\nh = y2 - y1\nprint(x1,y1,x2,y2)\ncx = int(x1 + (w / 2))\ncy = int(y1 + (w / 2))\n\n_cx = cx + np.random.randint(-w * 0.2, w * 0.2)\n_cy = cy + np.random.randint(-h * 0.2, h * 0.2)\n_w = w + np.random.randint(-w * 0.2, w * 0.2)\n_h = h + np.random.randint(-h * 0.2, h * 0.2)\n_x1 = int(_cx - (_w / 2))\n_y1 = int(_cy - (_h / 2))\n_x2 = int(_x1 + _w)\n_y2 = int(_y1 + _h)\nprint(_x1,_y1,_x2,_y2)\n_x1_off = (_x1 - x1) / _w\n_y1_off = (_y1 - y1) / _h\n_x2_off = (_x2 - x2) / _w\n_y2_off = (_y2 - y2) / _h\nprint(_x1_off,_y1_off,_x2_off,_y2_off)\noffset = [_x1_off,_y1_off,_x2_off,_y2_off]\nx1 = (_x1 - offset[0] * _w)\ny1 = (_y1 - offset[1] * _h)\nx2 = (_x2 - offset[2] * _w)\ny2 = (_y2 - offset[3] * _h)\nprint(x1,y1,x2,y2)" }, { "alpha_fraction": 0.6187335252761841, "alphanum_fraction": 
0.6464380025863647, "avg_line_length": 24.266666412353516, "blob_id": "e9e96a7918c1d5ab2243097093e8fe1e565f0787", "content_id": "46338bca375db8a508cc9047e8b4e99703b89dec", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1606, "license_type": "no_license", "max_line_length": 74, "num_lines": 60, "path": "/PythonStudy/Machine_Learning/Preprocess.py", "repo_name": "greenkarson/python", "src_encoding": "UTF-8", "text": "from sklearn import preprocessing\nimport numpy as np\nx = np.array([[1., -1., 2.],\n [2., 0., 0.],\n [0., 1., -3.]])\n\n# ๆ ‡ๅ‡†ๅŒ–\n# ๅฐ†ๆฏไธ€ๅˆ—็‰นๅพๆ ‡ๅ‡†ๅŒ–ไธบๆ ‡ๅ‡†ๆญฃๅคชๅˆ†ๅธƒ๏ผŒๆณจๆ„๏ผŒๆ ‡ๅ‡†ๅŒ–ๆ˜ฏ้’ˆๅฏนๆฏไธ€ๅˆ—่€Œ่จ€็š„\n# x_scaler = preprocessing.scale(x)\n# print(x_scaler)\n# print(x_scaler.mean(axis=0), x_scaler.std(0))\n\n# ๆ ‡ๅ‡†ๅŒ–\n# scaler = preprocessing.StandardScaler()\n# x_scale = scaler.fit_transform(x)\n# print(x_scale)\n# print(x_scale.mean(0), x_scale.std(0))\n\n#minmax\n# x_scaler = preprocessing.minmax_scale(x)\n# print(x_scaler)\n# print(x_scaler.mean(axis=0), x_scaler.std(0))\n\n# MaxAbsScaler\n# scaler = preprocessing.MaxAbsScaler()\n# x_scale = scaler.fit_transform(x)\n# print(x_scale)\n# print(x_scale.mean(0), x_scale.std(0))\n\n# RobustScaler\n# scaler = preprocessing.RobustScaler()\n# x_scale = scaler.fit_transform(x)\n# print(x_scale)\n# print(x_scale.mean(0), x_scale.std(0))\n\n# Normalizer\n# scaler = preprocessing.Normalizer(norm=\"l2\")\n# x_scale = scaler.fit_transform(x)\n# print(x_scale)\n# print(x_scale.mean(0), x_scale.std(0))\n\n# ไบŒๅ€ผๅŒ–\n# scaler = preprocessing.Binarizer(threshold=0)\n# x_scale = scaler.fit_transform(x)\n# print(x_scale)\n\n\n# one_hot\n# enc = preprocessing.OneHotEncoder(n_values=3, sparse=False)\n# ans = enc.fit_transform([[0], [1], [2],[1]])\n# print(ans)\n\n# ็ผบๅคฑๆ•ฐๆฎ\nimp = preprocessing.Imputer(missing_values='NaN', strategy='mean', axis=0)\n# y_imp = imp.fit_transform([[np.nan, 2], [6, np.nan], [7, 6]])\n# print(y_imp)\n#\nimp.fit([[1, 2], [np.nan, 3], [7, 6]])\ny_imp = imp.transform([[np.nan, 2], [6, np.nan], [7, 6]])\nprint(y_imp)\n" }, { "alpha_fraction": 0.4219066798686981, "alphanum_fraction": 0.46044623851776123, "avg_line_length": 15.466666221618652, "blob_id": "a0b9f4b7eee4931c37a27a7e64078b3ec0ac4cf5", "content_id": "4f2ca1ca6f74ec666e335bf8832eb84d37aef2e9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 493, "license_type": "no_license", "max_line_length": 51, "num_lines": 30, "path": "/PythonStudy/Numpy/Gradientdescent.py", "repo_name": "greenkarson/python", "src_encoding": "UTF-8", "text": "import random\nimport matplotlib.pyplot as plt\n\n_x = [i/100. 
for i in range(100)]\n_y = [3 * e + 4 + random.random() / 10 for e in _x]\n\nprint(_x)\nprint(_y)\nw = random.random()\nb = random.random()\n\nfor i in range(30):\n    for x, y in zip(_x, _y):\n        z = w*x + b\n        o = z - y\n        loss = o ** 2\n\n        dw = -2*o*x\n        db = -2*o\n\n        w = w + 0.1*dw\n        b = b + 0.1*db\n\n    print(w, b, loss)\n\nplt.plot(_x, _y, \".\")\n\nv = [w*e + b for e in _x]\nplt.plot(_x, v)\nplt.show()" }, { "alpha_fraction": 0.47277557849884033, "alphanum_fraction": 0.5059760808944702, "avg_line_length": 19.736841201782227, "blob_id": "a87d465e06e889be93f3370c2aace0958d6be659", "content_id": "2ce766f20a8ba6a5da5ee5e851b968b20ce8d45f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 753, "license_type": "no_license", "max_line_length": 62, "num_lines": 38, "path": "/PythonStudy/Numpy/Gradientdescent_torch.py", "repo_name": "greenkarson/python", "src_encoding": "UTF-8", "text": "import torch\nfrom torch import optim\n\nxs = torch.arange(0.01, 1, 0.01)\n# print(xs)\nys = 3 * xs + 4 + torch.randn(99)/100\n# print(ys)\n\n\nclass Line(torch.nn.Module):\n    def __init__(self):\n        super().__init__()\n\n        self.w = torch.nn.Parameter(torch.randn(1))\n        self.b = torch.nn.Parameter(torch.randn(1))\n\n    def forward(self, x):\n        return x * self.w + self.b\n\n\nif __name__ == '__main__':\n\n    line = Line()\n    opt = optim.SGD(line.parameters(), lr=0.01, momentum=0.01)\n\n    for epoch in range(30):\n\n        for _x, _y in zip(xs, ys):\n            z = line(_x)\n            loss = (z - _y)**2\n\n            opt.zero_grad()\n            loss.backward()\n            opt.step()\n\n        print(loss)\n\n    print(line.b, line.w)\n\n\n\n" }, { "alpha_fraction": 0.6190476417541504, "alphanum_fraction": 0.6761904954910278, "avg_line_length": 10.666666984558105, "blob_id": "6488a1888bd49fcd78baf0ddbf66559c86dafb7e", "content_id": "8ca3a77efb015260344b911d2b3df33e74b5676d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 105, "license_type": "no_license", "max_line_length": 26, "num_lines": 9, "path": "/Practice/helloworld.py", "repo_name": "greenkarson/python", "src_encoding": "UTF-8", "text": "import torch\n\n\nprint(\"hello World\")\n\na = torch.randn(1,3,14,14)\nprint(a.shape)\n\nprint(torch.__version__)\n" }, { "alpha_fraction": 0.39833641052246094, "alphanum_fraction": 0.5083179473876953, "avg_line_length": 23.05555534362793, "blob_id": "16efdb51529d57f85b1354f7901f5ad729b2c791", "content_id": "b36a5e78c7c4a9ca09c25100cfa39d2b7337f1fb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3030, "license_type": "no_license", "max_line_length": 65, "num_lines": 90, "path": "/PythonStudy/Numpy/Numpy_Practice.py", "repo_name": "greenkarson/python", "src_encoding": "UTF-8", "text": "# 1. Import numpy as np and check the version\n# **Difficulty Level:** L1 **Question:** Import numpy as np and print the version number.\nimport numpy as np\nprint(np.__version__)\n\n# 2. How to create a 1D array?\n# **Difficulty Level:** L1 **Question:** Create a 1D array of numbers from 0 to 9\n# Desired output:\n# > array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])\na = np.arange(0,10)\nprint(a)\n# 3. How to create a boolean array?\n# **Difficulty Level:** L1\n# **Question:** Create a numpy array whose elements are all True\nb = np.full((3,3),True,dtype=bool)\nc = np.ones((3,3),dtype=bool)\nprint(b,c)\n# 4. 
ๅฆ‚ไฝ•ไปŽไธ€็ปดๆ•ฐ็ป„ไธญๆๅ–ๆปก่ถณๆŒ‡ๅฎšๆกไปถ็š„ๅ…ƒ็ด ๏ผŸ\n# **้šพๅบฆ็ญ‰็บง๏ผš**L1\n# **้—ฎ้ข˜๏ผš**ไปŽ arr ไธญๆๅ–ๆ‰€ๆœ‰็š„ๅฅ‡ๆ•ฐ\n# arr = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])\n# ๆœŸๆœ›็š„่พ“ๅ‡บ๏ผš\n# > array([1, 3, 5, 7, 9])\nprint(a[1::2])\nprint(a[a%2 == 1])\n# 5. ๅฆ‚ไฝ•็”จnumpyๆ•ฐ็ป„ไธญ็š„ๅฆไธ€ไธชๅ€ผๆ›ฟๆขๆปก่ถณๆกไปถ็š„ๅ…ƒ็ด ้กน๏ผŸ\n# **้šพๅบฆ็ญ‰็บง๏ผš**L1\n# **้—ฎ้ข˜๏ผš**ๅฐ†arrไธญ็š„ๆ‰€ๆœ‰ๅฅ‡ๆ•ฐๆ›ฟๆขไธบ-1ใ€‚\n# ็ป™ๅฎš๏ผš\n# arr = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])\n# ๆœŸๆœ›็š„่พ“ๅ‡บ๏ผš\n# > array([ 0, -1, 2, -1, 4, -1, 6, -1, 8, -1])\n# a[a%2 == 1] = -1\n# a[1::2] = -1\n# print(a)\n# 6. ๅฆ‚ไฝ•ๅœจไธๅฝฑๅ“ๅŽŸๅง‹ๆ•ฐ็ป„็š„ๆƒ…ๅ†ตไธ‹ๆ›ฟๆขๆปก่ถณๆกไปถ็š„ๅ…ƒ็ด ้กน๏ผŸ\n# **้šพๅบฆ็ญ‰็บง๏ผš**L2\n# **้—ฎ้ข˜๏ผš**ๅฐ†arrไธญ็š„ๆ‰€ๆœ‰ๅฅ‡ๆ•ฐๆ›ฟๆขไธบ-1๏ผŒ่€Œไธๆ”นๅ˜arrใ€‚\n# ็ป™ๅฎš๏ผš\n# arr = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])\n# ๆœŸๆœ›็š„่พ“ๅ‡บ๏ผš\n# > array([ 0, -1, 2, -1, 4, -1, 6, -1, 8, -1])\nd = np.where(a%2 == 1,-1,a)\nprint(d)\n# 7. ๅฆ‚ไฝ•ๆ”นๅ˜ๆ•ฐ็ป„็š„ๅฝข็Šถ๏ผŸ\n# **้šพๅบฆ็ญ‰็บง๏ผš**L1\n# **้—ฎ้ข˜๏ผš**ๅฐ†ไธ€็ปดๆ•ฐ็ป„่ฝฌๆขไธบ2่กŒ็š„2็ปดๆ•ฐ็ป„\n# ็ป™ๅฎš๏ผš\n# np.arange(10)\n# > array([ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9])\n# ๆœŸๆœ›็š„่พ“ๅ‡บ๏ผš\n# > array([[0, 1, 2, 3, 4],\n# > [5, 6, 7, 8, 9]])\nprint(a.reshape(2,5))\n# 8. ๅฆ‚ไฝ•ๅž‚็›ดๅ ๅŠ ไธคไธชๆ•ฐ็ป„๏ผŸ\n# **้šพๅบฆ็ญ‰็บง๏ผš**L2\n# **้—ฎ้ข˜๏ผš**ๅž‚็›ดๅ †ๅ ๆ•ฐ็ป„aๅ’Œๆ•ฐ็ป„b\n# ็ป™ๅฎš๏ผš\n# a = np.arange(10).reshape(2,-1)\n# b = np.repeat(1, 10).reshape(2,-1)\n# ๆœŸๆœ›็š„่พ“ๅ‡บ๏ผš\n# > array([[0, 1, 2, 3, 4],\n# > [5, 6, 7, 8, 9],\n# > [1, 1, 1, 1, 1],\n# > [1, 1, 1, 1, 1]])\n# m = np.arange(10).reshape(2,-1)\n# n = np.repeat(1,10).reshape(2,-1)\n# print(np.vstack((m,n)))\n# 9. ๅฆ‚ไฝ•ๆฐดๅนณๅ ๅŠ ไธคไธชๆ•ฐ็ป„๏ผŸ\n# **้šพๅบฆ็ญ‰็บง๏ผš**L2\n# **้—ฎ้ข˜๏ผš**ๅฐ†ๆ•ฐ็ป„aๅ’Œๆ•ฐ็ป„bๆฐดๅนณๅ †ๅ ใ€‚\n# ็ป™ๅฎš๏ผš\n# a = np.arange(10).reshape(2,-1)\n# b = np.repeat(1, 10).reshape(2,-1)\n# ๆœŸๆœ›็š„่พ“ๅ‡บ๏ผš\n# > array([[0, 1, 2, 3, 4, 1, 1, 1, 1, 1],\n# > [5, 6, 7, 8, 9, 1, 1, 1, 1, 1]])\nm = np.arange(10).reshape(2,-1)\nn = np.repeat(1,10).reshape(2,-1)\nprint(np.hstack((m,n)))\n\n# 10. 
How to generate custom sequences in numpy without hardcoding?\n# **Difficulty Level:** L2\n# **Question:** Create the following pattern without hardcoding. Use only numpy functions and the input array a below.\n# Given:\n# a = np.array([1,2,3])\n# Desired output:\n# > array([1, 1, 1, 2, 2, 2, 3, 3, 3, 1, 2, 3, 1, 2, 3, 1, 2, 3])\ns = np.array([1,2,3])\nprint(np.r_[np.repeat(s, 3), np.tile(s, 3)])" }, { "alpha_fraction": 0.49280574917793274, "alphanum_fraction": 0.5755395889282227, "avg_line_length": 20.384614944458008, "blob_id": "be5c7c23edf3438c1e5f378a386abf8c0d0e2b01", "content_id": "38a176aa626e4a43700ae6549189a15e81d4f9f0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 278, "license_type": "no_license", "max_line_length": 67, "num_lines": 13, "path": "/PythonStudy/Numpy/tesor.py", "repo_name": "greenkarson/python", "src_encoding": "UTF-8", "text": "import numpy as np\nimport torch\n\na = np.array(1)\nprint(a.shape)\nb = torch.tensor(2)\nprint(b.shape)\nc = torch.tensor([1, 2])\nprint(c.shape)\nd = torch.tensor([[2, 3, 4], [4, 5, 6]])\nprint(d.shape)\ne = torch.tensor([[[3, 4, 6], [4, 5, 6]], [[7, 8, 9], [9, 10, 6]]])\nprint(e.shape)\n" }, { "alpha_fraction": 0.5634218454360962, "alphanum_fraction": 0.6578171253204346, "avg_line_length": 21.25, "blob_id": "3299c0ad9a6bc37f808c907037df855538758efe", "content_id": "190bc7733dae57a6cc2e167be780612f206c4518", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 339, "license_type": "no_license", "max_line_length": 35, "num_lines": 16, "path": "/OpenCV_Practice/filter.py", "repo_name": "greenkarson/python", "src_encoding": "UTF-8", "text": "import cv2\nimg = cv2.imread(\"2.jpg\")\n# bilateralFilter was called with no arguments; d=9, sigmaColor=75, sigmaSpace=75 are common starting values\ndst = cv2.bilateralFilter(img,9,75,75)\ndst = cv2.blur(img,(3,3))\ndst = cv2.medianBlur(img,3)  # ksize is a single odd int, not a tuple\ndst = cv2.GaussianBlur(img,(3,3),1)\n\ndst = cv2.Laplacian(img,-1)\n\ndst = cv2.Sobel(img,-1,1,0)\ndst = cv2.Sobel(img,-1,0,1)\ndst = cv2.Scharr(img,-1,1,0)\n\ncv2.imshow(\"src show\", img)\ncv2.imshow(\"dst show\", dst)\ncv2.waitKey(0)" }, { "alpha_fraction": 0.6575342416763306, "alphanum_fraction": 0.7123287916183472, "avg_line_length": 21, "blob_id": "eb2539c39d98ff095755aa1838d5817437538e77", "content_id": "8ea0813604d93ff43d8f60e480e5dd87e8bc8147", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 219, "license_type": "no_license", "max_line_length": 45, "num_lines": 10, "path": "/OpenCV_Practice/pyr_laplace.py", "repo_name": "greenkarson/python", "src_encoding": "UTF-8", "text": "import cv2\nimg = cv2.imread(\"12.jpg\")\nimg_down = cv2.pyrDown(img)\nimg_up = cv2.pyrUp(img_down)\n\nimg_new = cv2.subtract(img, img_up)\nimg_new =cv2.convertScaleAbs(img_new,alpha=5)\n\ncv2.imshow(\"new\",img_new)\ncv2.waitKey(0)" }, { "alpha_fraction": 0.6126126050949097, "alphanum_fraction": 0.6186186075210571, "avg_line_length": 17.5, "blob_id": "74d15f03aebb1f14ee245727fd2fb51c00d1f239", "content_id": "265410607905689eae9da32bc092842175b71d50", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 465, "license_type": "no_license", "max_line_length": 68, "num_lines": 18, "path": "/PythonStudy/Student.py", "repo_name": "greenkarson/python", "src_encoding": "UTF-8", "text": "\"\"\"\nๅฎšไน‰ไธ€ไธช็ฑป๏ผŒๅนถๅฏน็ฑป่ฟ›่กŒๅˆๅง‹ๅŒ–ๆ“ไฝœ\n่ฎพ่ฎก็ฑป็š„่กŒไธบๅนถ่ฐƒ็”จๆ‰ง่กŒ\n\n\"\"\"\n\nclass Student:\n    def __init__(self, name, age):\n        self.name = name  # ็ฑปไธญๆ‰€ๅฎšไน‰็š„ๅ˜้‡ไธบๅฑžๆ€ง\n        
self.age = age\n\n def get_study(self, course_name): # ็ฑปไธญๆ‰€ๅฎšไน‰ๅ‡ฝๆ•ฐไธบๆ–นๆณ•ๆˆ–็งฐๆˆๅ‘˜ๅ‡ฝๆ•ฐ ๅฟ…้กปๅธฆๆœ‰selfๅ‚ๆ•ฐ\n print(\"%sๆญฃๅœจๅญฆไน %s่ฏพ็จ‹\" % (self.name, course_name))\n\n\nstu = Student(\"karson\", 28)\n\nstu.get_study(\"python\")\n" }, { "alpha_fraction": 0.7488372325897217, "alphanum_fraction": 0.7581395506858826, "avg_line_length": 25.91666603088379, "blob_id": "914d697980162c19f6a0a18a1a5040cd56d61a56", "content_id": "945bbb665e78485f80c0b7d28a005c10f7f49cca", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 713, "license_type": "no_license", "max_line_length": 84, "num_lines": 24, "path": "/PythonStudy/Machine_Learning/random_forest.py", "repo_name": "greenkarson/python", "src_encoding": "UTF-8", "text": "import numpy as np\nfrom sklearn import datasets,preprocessing,ensemble\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import accuracy_score\n\n# ๆ•ฐๆฎๅŠ ่ฝฝ\nwine = datasets.load_wine()\nx,y = wine.data,wine.target\n\n# ๅˆ’ๅˆ†่ฎญ็ปƒ้›†ไธŽๆต‹่ฏ•้›†\nx_train,x_test,y_train,y_test = train_test_split(x,y,test_size=0.3 , random_state=0)\n# ๆ•ฐๆฎ้ข„ๅค„็†\nscaler = preprocessing.StandardScaler().fit(x_train)\nx_train = scaler.transform(x_train)\nx_test = scaler.transform(x_test)\n# ๅˆ›ๅปบๆจกๅž‹\nrf = ensemble.RandomForestClassifier(n_estimators=25,max_depth=3)\n# ๆจกๅž‹ๆ‹Ÿๅˆ\nrf.fit(x_train,y_train)\n# ไบคๅ‰้ชŒ่ฏ\n# ้ข„ๆต‹\ny_pred = rf.predict(x_test)\n# ่ฏ„ไผฐ\nprint(accuracy_score(y_test,y_pred))" }, { "alpha_fraction": 0.447247713804245, "alphanum_fraction": 0.5733944773674011, "avg_line_length": 26.3125, "blob_id": "a6ffb682f8742cf10dd9b3143789516e8758e512", "content_id": "4cd9fb1ac6abb3c05f347afd5ae05e4e1e25359d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 436, "license_type": "no_license", "max_line_length": 53, "num_lines": 16, "path": "/OpenCV_Practice/affine.py", "repo_name": "greenkarson/python", "src_encoding": "UTF-8", "text": "import cv2\nimport numpy as np\n\nsrc = cv2.imread(\"1.jpg\")\nrows, cols, channel = src.shape\n# M = np.float32([[1, 0, 50], [0, 1, 50]])\n# M = np.float32([[0.5, 0, 0], [0, 0.5, 0]])\n# M = np.float32([[-0.5, 0, cols // 2], [0, 0.5, 0]])\n# M = np.float32([[1, 0.5, 0], [0, 1, 0]])\nM = cv2.getRotationMatrix2D((cols/2,rows/2),45,0.7)\ndst = cv2.warpAffine(src,M,(cols,rows))\n\ncv2.imshow('src pic', src)\ncv2.imshow('dst pic', dst)\n\ncv2.waitKey(0)" }, { "alpha_fraction": 0.53125, "alphanum_fraction": 0.606249988079071, "avg_line_length": 16.66666603088379, "blob_id": "77bc3d83c4227aed866c1c6b4ff656d5042221b6", "content_id": "deba445f0f88a5d64b748ff5def2ed6e50071f68", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 202, "license_type": "no_license", "max_line_length": 64, "num_lines": 9, "path": "/PythonStudy/Check_year.py", "repo_name": "greenkarson/python", "src_encoding": "UTF-8", "text": "\"\"\"\n่พ“ๅ…ฅไธ€ไธชๅนดไปฝๅˆคๆ–ญๆ˜ฏๅฆๆ˜ฏ้—ฐๅนด\nversion:0.1\nauthor:karson\n\"\"\"\n\nyear = int(input(\"่ฏท่พ“ๅ…ฅไธ€ไธชๅนดไปฝ๏ผš\"))\nis_leap = (year % 4 == 0 and year % 100 != 0) or year % 400 == 0\nprint(is_leap)\n\n" }, { "alpha_fraction": 0.501210629940033, "alphanum_fraction": 0.5028248429298401, "avg_line_length": 22.37735939025879, "blob_id": "bafc8dccb531deb47ec8b7fe13f3e994173126a7", "content_id": "2ae6649054cbaf508560774dad300b1953e0f1b9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": 
"Python", "length_bytes": 1281, "license_type": "no_license", "max_line_length": 77, "num_lines": 53, "path": "/PythonStudy/Data_structure/Linked_list/singly_list.py", "repo_name": "greenkarson/python", "src_encoding": "UTF-8", "text": "class Node:\n def __init__(self,data):\n self.data = data\n self._next = None\n\n # def __repr__(self):\n # return f\"Node({self.data})\"\n\nclass Linkedlist:\n def __init__(self):\n self.head = None\n\n def is_empty(self):\n return self.head is None\n\n def insert_head(self,data):\n new_node = Node(data)\n new_node._next = self.head\n self.head = new_node\n\n def insert_tail(self, data):\n if self.head is None:\n self.insert_head(data) # if this is first node, call insert_head\n else:\n temp = self.head\n while temp.next: # traverse to last node\n temp = temp._next\n temp.next = Node(data) # create node & link to tail\n\n\n def print_list(self): # print every node data\n temp = self.head\n while temp:\n print(temp.data)\n temp = temp._next\n\n def items(self):\n \"\"\"้ๅŽ†้“พ่กจ\"\"\"\n # ่Žทๅ–headๆŒ‡้’ˆ\n cur = self.head\n # ๅพช็Žฏ้ๅŽ†\n while cur is not None:\n # ่ฟ”ๅ›ž็”Ÿๆˆๅ™จ\n yield cur.item\n # ๆŒ‡้’ˆไธ‹็งป\n cur = cur.next\n\nif __name__ == '__main__':\n A = Linkedlist()\n A.insert_head(5)\n A.insert_head(5)\n\n A.print_list()\n" }, { "alpha_fraction": 0.6410714387893677, "alphanum_fraction": 0.6428571343421936, "avg_line_length": 19.035715103149414, "blob_id": "bf977d9fbc53e1246aff0ac0ac8aabcb812774a0", "content_id": "11c2f9b4b82bf8f96c710ddd8591097b8519d08a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 560, "license_type": "no_license", "max_line_length": 41, "num_lines": 28, "path": "/yolo/dataset.py", "repo_name": "greenkarson/python", "src_encoding": "UTF-8", "text": "import torch\nfrom torch.utils.data import Dataset\nfrom torch import nn\nimport numpy as np\nfrom torchvision import transforms\nimport os\nimport math\n\nLABEL_FILE_PATH = 'data/person_label.txt'\nIMG_BASE_DIR = 'data'\n\ntf = transforms.Compose([\n transforms.ToTensor()\n])\ndef one_hot(cls_num, i):\n b = np.zeros(cls_num)\n b[i] = 1\n return b\nclass MyDataset(Dataset):\n def __init__(self):\n with open(LABEL_FILE_PATH) as f:\n self.dataset = f.readlines()\n \n def __len__(self):\n return len(self.dataset)\n \n \nclass Net():" }, { "alpha_fraction": 0.5789473652839661, "alphanum_fraction": 0.6666666865348816, "avg_line_length": 15.428571701049805, "blob_id": "3dfc97ec379eb708798e3794dc0c229a333a2c55", "content_id": "2cb340b9863485aa05261c83a56184f1894336dc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 114, "license_type": "no_license", "max_line_length": 24, "num_lines": 7, "path": "/OpenCV_Practice/add.py", "repo_name": "greenkarson/python", "src_encoding": "UTF-8", "text": "import numpy as np\nimport cv2\na = np.uint8([250])\nb = np.uint8([10])\n\nprint(cv2.add(a,b))\nprint(cv2.subtract(a,b))" }, { "alpha_fraction": 0.5383631587028503, "alphanum_fraction": 0.5831202268600464, "avg_line_length": 24.225807189941406, "blob_id": "03908610abe9cfaad2c1b40dc5e815c079d133fd", "content_id": "9578fda1d544205683ce291e1dee891b03076ca4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 782, "license_type": "no_license", "max_line_length": 72, "num_lines": 31, "path": "/deep_learning/day03/resnet.py", "repo_name": "greenkarson/python", "src_encoding": "UTF-8", "text": "from torch import nn\nfrom 
torchvision import models\n\n\nclass ResNet(nn.Module):\n\n def __init__(self):\n super().__init__()\n self.conv1 = nn.Conv2d(3,64,7,2,padding=3)\n self.bn = nn.BatchNorm2d(64)\n self.relu = nn.ReLU()\n self.maxpool = nn.MaxPool2d(3,2,padding=1)\n\n self.layer1 = self.make_layer()\n self.layer2 = self.make_layer()\n self.layer3 = self.make_layer()\n self.layer4 = self.make_layer()\n\n self.avgpool = nn.AdaptiveAvgPool2d((1,1))\n self.fc = nn.Linear(512,1000)\n def forward(self, x):\n pass\n\n def make_layer(self, block, planes, blocks, stride=1, dilate=False):\n pass\n\n\nif __name__ == '__main__':\n net = models.resnet18()\n net2 = models.MobileNetV2()\n print(net2)\n" }, { "alpha_fraction": 0.5113636255264282, "alphanum_fraction": 0.5568181872367859, "avg_line_length": 13.833333015441895, "blob_id": "a88d1b669c715a3afc970ac9d0f0217bfcec2b17", "content_id": "99ef74fda57a08d994f0e46d8b3461b812cc4c84", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 88, "license_type": "no_license", "max_line_length": 20, "num_lines": 6, "path": "/PythonStudy/linear_algebra/neiji.py", "repo_name": "greenkarson/python", "src_encoding": "UTF-8", "text": "import numpy as np\na = np.array([1, 2])\nb = np.array([3, 4])\n\nc = np.sum(a * b)\nprint(c)" }, { "alpha_fraction": 0.5909090638160706, "alphanum_fraction": 0.6159090995788574, "avg_line_length": 16.559999465942383, "blob_id": "be449aa4bdf983f751e013e2c4ba1990079929dd", "content_id": "d75ebfcc3187fdb3b5be8cc14b1a5a3ad0015a24", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 616, "license_type": "no_license", "max_line_length": 34, "num_lines": 25, "path": "/PythonStudy/do_while.py", "repo_name": "greenkarson/python", "src_encoding": "UTF-8", "text": "\"\"\"\n็Œœๆ•ฐๅญ—ๆธธๆˆ\n่ฎก็ฎ—ๆœบๅ‡บไธ€ไธช1~50ไน‹้—ด็š„้šๆœบๆ•ฐ็”ฑไบบๆฅ็Œœ\n่ฎก็ฎ—ๆœบๆ นๆฎไบบ็Œœ็š„ๆ•ฐๅญ—ๅˆ†ๅˆซ็ป™ๅ‡บๆ็คบๅคงไธ€็‚น/ๅฐไธ€็‚น/็Œœๅฏนไบ†\n\nVersion: 0.1\nAuthor: karson\n\"\"\"\nimport random\nanswer = random.randint(1, 50)\ncounter = 0\nwhile True:\n counter += 1\n number = int(input(\"่ฏท่พ“ๅ…ฅไธ€ไธชๆ•ฐๅญ—\"))\n if answer > number:\n print(\"ๅคงไธ€็‚น\")\n elif answer < number:\n print(\"ๅฐไธ€็‚น\")\n else:\n print(\"ๆญๅ–œไฝ ็Œœๅฏนไบ†\")\n break # ็ปˆๆญขๆœฌๆฌกๅพช็Žฏ๏ผŒwhileๅพช็Žฏๅฟ…้กปๆœ‰\n\nprint(\"ๆ€ปๅ…ฑ็Œœไบ†%dๆฌก\" % counter)\nif counter > 7:\n print(\"ๆ‚จ็š„ๆ™บๅ•†ไธ่ถณ\")\n\n" }, { "alpha_fraction": 0.6607142686843872, "alphanum_fraction": 0.7366071343421936, "avg_line_length": 24, "blob_id": "f053831815033410b0d441286dab63d8a090b345", "content_id": "851cae4b724e66f72bd974bf6877b69cd14b21c1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 224, "license_type": "no_license", "max_line_length": 74, "num_lines": 9, "path": "/OpenCV_Practice/threshold.py", "repo_name": "greenkarson/python", "src_encoding": "UTF-8", "text": "import cv2\n\nimg = cv2.imread(\"1.jpg\")\ngray = cv2.cvtColor(img,cv2.COLOR_RGB2GRAY)\nret,binary = cv2.threshold(gray,0,255,cv2.THRESH_BINARY | cv2.THRESH_OTSU)\n\ncv2.imshow(\"gray\",gray)\ncv2.imshow(\"binary\",binary)\ncv2.waitKey(0)" }, { "alpha_fraction": 0.4333333373069763, "alphanum_fraction": 0.5416666865348816, "avg_line_length": 29.125, "blob_id": "2eafe7aa4dd9bf77abfd8d0dc3c23c927be9bc5e", "content_id": "9118da25284ad3f2b84772fc1ebadccfe10504ca", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 240, "license_type": 
"no_license", "max_line_length": 82, "num_lines": 8, "path": "/Center_Loss/test2.py", "repo_name": "greenkarson/python", "src_encoding": "UTF-8", "text": "import torch\n\ndata = torch.tensor([[3, 4], [5, 6], [7, 8], [9, 8], [6, 5]], dtype=torch.float32)\nlabel = torch.tensor([0, 0, 1, 0, 1], dtype=torch.float32)\n\nc = data[label == 1, 0]\nd = data[label == 1] #tensor([[7., 8.],[6., 5.]])\nprint(c)" }, { "alpha_fraction": 0.4761904776096344, "alphanum_fraction": 0.5306122303009033, "avg_line_length": 12.181818008422852, "blob_id": "98f92196be35608d4e70882924d0e117d9ba02ef", "content_id": "03013a6e7c3eb2674bea74621d2faf54a3b359b1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 147, "license_type": "no_license", "max_line_length": 32, "num_lines": 11, "path": "/PythonStudy/List.py", "repo_name": "greenkarson/python", "src_encoding": "UTF-8", "text": "\n\na_list = [1, 2, 3, 4, 5, 6]\nb_list = []\n\nfor a in a_list:\n b_list.append(a*2)\n\nprint(b_list)\n\nc_list = [x * 2 for x in a_list]\n\nprint(c_list)\n" }, { "alpha_fraction": 0.4148418605327606, "alphanum_fraction": 0.4768856465816498, "avg_line_length": 23.176469802856445, "blob_id": "a99067bf6c7bdc301db9867d395e33819c0073e3", "content_id": "5ec26ba8c1fb5efe308d3d098c74781b92e1c97a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 822, "license_type": "no_license", "max_line_length": 64, "num_lines": 34, "path": "/CenterLoss/LeNet.py", "repo_name": "greenkarson/python", "src_encoding": "UTF-8", "text": "import torch\nfrom torch import nn\n\n\nclass LeNet(nn.Module):\n\n def __init__(self):\n super(LeNet, self).__init__()\n self.sequential = nn.Sequential(\n nn.Conv2d(1, 6, kernel_size=5, stride=1, padding=2),\n nn.ReLU(),\n nn.MaxPool2d(2, 2),\n nn.Conv2d(6, 16, kernel_size=5, stride=1),\n nn.ReLU(),\n nn.MaxPool2d(2, 2),\n )\n self.fc1 = nn.Linear(16 * 5 * 5, 120)\n self.fc2 = nn.Linear(120, 84)\n self.fc3 = nn.Linear(84, 10)\n\n def forward(self, x):\n h = self.sequential(x)\n h = h.view(-1, 16 * 5 * 5)\n h = self.fc1(h)\n h = self.fc2(h)\n h = self.fc3(h)\n return h\n\n\nif __name__ == '__main__':\n net = LeNet()\n a = torch.randn(2, 1, 28, 28)\n b = net(a)\n print(b.shape)\n" }, { "alpha_fraction": 0.6417322754859924, "alphanum_fraction": 0.7017716765403748, "avg_line_length": 29.787878036499023, "blob_id": "5866f4b11f7cb99dcf06b9b1767fa840ba099df2", "content_id": "4b99f8d9bb2581e32e588b98d521a4ba97093370", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1052, "license_type": "no_license", "max_line_length": 82, "num_lines": 33, "path": "/PythonStudy/Machine_Learning/regression_tree.py", "repo_name": "greenkarson/python", "src_encoding": "UTF-8", "text": "import numpy as np\nfrom sklearn import linear_model\nfrom sklearn.tree import DecisionTreeRegressor\nimport matplotlib.pyplot as plt\n\n# ๆ•ฐๆฎๅŠ ่ฝฝ\nx = np.array(list(range(1, 11))).reshape(-1, 1)\ny = np.array([5.56, 5.70, 5.91, 6.40, 6.80, 7.05, 8.90, 8.70, 9.00, 9.05]).ravel()\n\n# ๅˆ›ๅปบๆจกๅž‹\nreg = DecisionTreeRegressor(max_depth=1)\nreg2 = DecisionTreeRegressor(max_depth=3)\nline_reg = linear_model.LinearRegression()\n# ๆจกๅž‹ๆ‹Ÿๅˆ\nreg.fit(x,y)\nreg2.fit(x,y)\nline_reg.fit(x,y)\n# ้ข„ๆต‹\nx_test = np.arange(0.0, 10.0, 0.01)[:, np.newaxis]\ny1 = reg.predict(x_test)\ny2 = reg2.predict(x_test)\ny3 = line_reg.predict(x_test)\n# ๅ›พๅƒๆ‰“ๅฐ\nplt.figure()\nplt.scatter(x, y, s=20, edgecolor=\"black\", 
c=\"darkorange\", label=\"data\")\nplt.plot(x_test, y1, color=\"cornflowerblue\", label=\"max_depth=1\", linewidth=2)\nplt.plot(x_test, y2, color=\"yellowgreen\", label=\"max_depth=3\", linewidth=2)\nplt.plot(x_test, y3, color='red', label='liner regression', linewidth=2)\nplt.xlabel(\"data\")\nplt.ylabel(\"target\")\nplt.title(\"Decision Tree Regression\")\nplt.legend()\nplt.show()\n" }, { "alpha_fraction": 0.48275861144065857, "alphanum_fraction": 0.5977011322975159, "avg_line_length": 13.666666984558105, "blob_id": "2cf6d14f7791f319a82b9a2f2e1bfb1212f2a5ba", "content_id": "ab31eee0308e309a85de2ddb4ed06e72d9cdcd9a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 87, "license_type": "no_license", "max_line_length": 31, "num_lines": 6, "path": "/Loss/test.py", "repo_name": "greenkarson/python", "src_encoding": "UTF-8", "text": "import torch\n\na = torch.tensor([0,1,0])\nb = torch.tensor([0.2,0.3,0.5])\n\nprint(b[a==1])" }, { "alpha_fraction": 0.5705521702766418, "alphanum_fraction": 0.5705521702766418, "avg_line_length": 15.199999809265137, "blob_id": "72337db28a6784ff656804cc28ae4b4b5ef836a5", "content_id": "0abb387c8383ccf45eb56176a9095c18da749669", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 163, "license_type": "no_license", "max_line_length": 35, "num_lines": 10, "path": "/VGGNet/VGG.py", "repo_name": "greenkarson/python", "src_encoding": "UTF-8", "text": "import torch\nfrom torch import nn\n\nclass VGG(nn.Module):\n def __init__(self):\n super(VGG, self).__init__()\n\n\n def forward(self, x):\n return x\n\n" }, { "alpha_fraction": 0.6136363744735718, "alphanum_fraction": 0.6931818127632141, "avg_line_length": 20.75, "blob_id": "cf4c5e5636649c1eba4a6a47d38ec31ff5bf7323", "content_id": "985f5b62185f85570925994adccf2da7f28b703c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 88, "license_type": "no_license", "max_line_length": 33, "num_lines": 4, "path": "/OpenCV_Practice/read_image.py", "repo_name": "greenkarson/python", "src_encoding": "UTF-8", "text": "\nimport cv2\nimg = cv2.imread(\"1.jpg\",flags=2)\ncv2.imshow(\"pic show\",img)\ncv2.waitKey(0)\n" }, { "alpha_fraction": 0.6449999809265137, "alphanum_fraction": 0.7250000238418579, "avg_line_length": 27.64285659790039, "blob_id": "fa05a471967b4afb501aca8a37b9f9043460a562", "content_id": "1c6bf1d19efc4f2c4b22c4c1674e739c07d1a98e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 404, "license_type": "no_license", "max_line_length": 73, "num_lines": 14, "path": "/OpenCV_Practice/contour_approx.py", "repo_name": "greenkarson/python", "src_encoding": "UTF-8", "text": "import cv2\nimg = cv2.imread(\"26.jpg\")\ndst = cv2.cvtColor(img,cv2.COLOR_RGB2GRAY)\nret,thr = cv2.threshold(dst,50,255,cv2.THRESH_BINARY)\n\ncontours, _ = cv2.findContours(thr,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)\n\nepsilon = 40 #็ฒพๅบฆ\napprox = cv2.approxPolyDP(contours[0],epsilon,True)\n\nimg_contour= cv2.drawContours(img, [approx], -1, (0, 0, 255), 3)\n\ncv2.imshow(\"img_contour\", img_contour)\ncv2.waitKey(0)" }, { "alpha_fraction": 0.5778103470802307, "alphanum_fraction": 0.600146472454071, "avg_line_length": 29.685392379760742, "blob_id": "e567a5fd77eadb934bb93c92073e40ae5575afa2", "content_id": "edb1378136ebeb089f7e8d09fb9b973971b73ce1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", 
"length_bytes": 2731, "license_type": "no_license", "max_line_length": 117, "num_lines": 89, "path": "/deep_learning/day03/utils.py", "repo_name": "greenkarson/python", "src_encoding": "UTF-8", "text": "import torch\nimport math\nfrom torch import nn\n\n\nclass Swish(nn.Module):\n def forward(self, x):\n return x * torch.sigmoid(x)\n\n\nclass Flatten(nn.Module):\n def forward(self, x):\n return x.reshape(x.shape[0], -1)\n\n\ndef re_ch(filters, width_coefficient=None):\n if not width_coefficient:\n return filters\n filters *= width_coefficient\n new_filters = max(8, int(filters + 8 / 2) // 8 * 8)\n if new_filters < 0.9 * filters:\n new_filters += 8\n return int(new_filters)\n\n\ndef re_dp(repeats, depth_coefficient=None):\n if not depth_coefficient: return repeats\n return int(math.ceil(depth_coefficient * repeats))\n\n\ndef pad_same(size, kernel_size, stride, dilation):\n o = math.ceil(size / stride)\n pad = max((o - 1) * stride + (kernel_size - 1) * dilation + 1 - size, 0)\n pad_0 = pad // 2\n pad_1 = pad - pad_0\n return pad, pad_0, pad_1\n\n\nclass Conv2dSamePadding(nn.Module):\n\n def __init__(self, image_size, in_channels, out_channels, kernel_size, stride=1, dilation=1, group=1, bias=True):\n super().__init__()\n\n h_pad, h_pad_0, h_pad_1 = pad_same(image_size[0], kernel_size, stride, dilation)\n w_pad, w_pad_0, w_pad_1 = pad_same(image_size[1], kernel_size, stride, dilation)\n\n self.pad = [w_pad_0, w_pad_1, h_pad_0, h_pad_1]\n\n if h_pad > 0 or w_pad > 0:\n self.static_padding = nn.ZeroPad2d((w_pad_0, w_pad_1, h_pad_0, h_pad_1))\n else:\n self.static_padding = nn.Identity()\n\n self.module = nn.Conv2d(in_channels, out_channels, kernel_size, stride=stride,\n dilation=dilation, groups=group, bias=bias)\n\n def forward(self, x):\n x = self.static_padding(x)\n return self.module(x)\n\n\nclass Conv2dSamePaddingBNSwish(nn.Module):\n\n def __init__(self, image_size, in_channels, out_channels, kernel_size, stride=1, group=1, bias=True):\n super().__init__()\n self.sequential = nn.Sequential(\n Conv2dSamePadding(image_size, in_channels, out_channels, kernel_size, stride, group=group, bias=bias),\n nn.BatchNorm2d(out_channels, 1e-3, 1e-2),\n Swish()\n )\n\n def forward(self, x):\n return self.sequential(x)\n\n\nclass SEModule(nn.Module):\n\n def __init__(self, in_channels, squeeze_channels):\n super().__init__()\n self.se = nn.Sequential(\n nn.AdaptiveAvgPool2d(1),\n nn.Conv2d(in_channels, squeeze_channels, kernel_size=1, stride=1, padding=0, bias=False),\n Swish(),\n nn.Conv2d(squeeze_channels, in_channels, kernel_size=1, stride=1, padding=0, bias=False),\n )\n\n def forward(self, x):\n h = self.se(x)\n return x * torch.sigmoid(h)\n" }, { "alpha_fraction": 0.42899850010871887, "alphanum_fraction": 0.5176880955696106, "avg_line_length": 33.60344696044922, "blob_id": "e2c001c2663f1d4743dd19ac848d92a21575b606", "content_id": "5d85f583ee49987f7ee769769d20d34ed7b1d7b1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2019, "license_type": "no_license", "max_line_length": 81, "num_lines": 58, "path": "/AlexNet/AlexNet_Tiny.py", "repo_name": "greenkarson/python", "src_encoding": "UTF-8", "text": "import torch\nfrom torch import nn\n\n\nclass AlexNet(nn.Module):\n def __init__(self):\n super(AlexNet, self).__init__()\n self.sequential = nn.Sequential(\n nn.Conv2d(3, 64, kernel_size=8, stride=2, padding=2), # 64, 31, 31\n nn.ReLU(),\n nn.LocalResponseNorm(size=5, alpha=0.0001, beta=0.75, k=2),\n nn.MaxPool2d(kernel_size=3, stride=1), # 64, 
29, 29\n            nn.Conv2d(64, 192, kernel_size=5, stride=1, padding=2),  # 192, 29, 29\n            nn.ReLU(),\n            nn.LocalResponseNorm(size=5, alpha=0.0001, beta=0.75, k=2),\n            nn.MaxPool2d(kernel_size=3, stride=2),  # 192, 14, 14\n            nn.Conv2d(192, 384, kernel_size=3, padding=1),  # 384, 14, 14\n            nn.ReLU(),\n            nn.Conv2d(384, 256, kernel_size=3, padding=1),  # 256, 14, 14\n            nn.ReLU(),\n            nn.Conv2d(256, 256, kernel_size=3, padding=1),  # 256, 14, 14\n            nn.ReLU(),\n            nn.MaxPool2d(kernel_size=3, stride=2)  # 256, 6, 6\n        )\n        self.classifier = nn.Sequential(\n            # inplace: whether to overwrite the input tensor in place\n            nn.Dropout(p=0.5, inplace=True),\n            nn.Linear(256*6*6, 4096),\n            nn.ReLU(),\n            nn.Dropout(p=0.5, inplace=True),\n            nn.Linear(4096, 4096),\n            nn.ReLU(),\n            nn.Linear(4096, 200)\n        )\n        self.init_bias()\n\n    def forward(self, x):\n        x = self.sequential(x)\n        x = x.view(-1, 256 * 6 * 6)\n        x = self.classifier(x)\n        return x\n\n    def init_bias(self):\n\n        for layer in self.sequential:\n            if isinstance(layer, nn.Conv2d):\n                nn.init.normal_(layer.weight,mean=0,std=0.01)\n                nn.init.constant_(layer.bias, 0)\n        nn.init.constant_(self.sequential[4].bias, 1)\n        nn.init.constant_(self.sequential[10].bias, 1)\n        nn.init.constant_(self.sequential[12].bias, 1)\n\n\nif __name__ == '__main__':\n    a = torch.randn(4, 3, 64, 64)\n    net = AlexNet()\n    b = net(a)\n    print(b.shape)\n" }, { "alpha_fraction": 0.6078431606292725, "alphanum_fraction": 0.610859751701355, "avg_line_length": 23.592592239379883, "blob_id": "daf21d0e249c7dc9777e7cc394bc0b6091e7ac54", "content_id": "6607a0366f4b3cecbc8e73935a25927bcb59315e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 663, "license_type": "no_license", "max_line_length": 55, "num_lines": 27, "path": "/SEQ2SEQ/data.py", "repo_name": "greenkarson/python", "src_encoding": "UTF-8", "text": "import torch,os\nimport numpy as np\nfrom PIL import Image\nfrom torch.utils.data import Dataset\nfrom torchvision import transforms\ntf = transforms.ToTensor()\n\n\nclass Mydataset(Dataset):\n    def __init__(self, root):\n        self.dataset = os.listdir(root)\n        self.root = root\n\n    def __len__(self):\n        return len(self.dataset)\n\n    def __getitem__(self, index):\n        filename = self.dataset[index]\n        strs = filename.split(\".\")[0]\n        label = np.array([int(x) for x in strs])\n        img = tf(Image.open(f\"{self.root}/{filename}\"))\n        return img, label\n\n\nif __name__ == '__main__':\n    dataset = Mydataset(\"./code\")\n    print(dataset[0])" } ]
177
juyonLee00/flask-study
https://github.com/juyonLee00/flask-study
3d5fbcc28398da413b1e18f91764af1c5e866068
002cbb2c3796f96a1976e8d18398b1eb2e47c7e0
708886fe3bed083ad2fb5e4ecb24d7a739157954
refs/heads/main
2023-02-27T08:47:52.298551
2021-02-08T14:05:51
2021-02-08T14:05:51
334,678,185
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6370370388031006, "alphanum_fraction": 0.7111111283302307, "avg_line_length": 21.5, "blob_id": "53e984fd548ffed424f23cf94da5a5cdfcc44d2c", "content_id": "ce62f87a563b07a0ec82614823abd620c72427bf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 450, "license_type": "no_license", "max_line_length": 68, "num_lines": 12, "path": "/README.md", "repo_name": "juyonLee00/flask-study", "src_encoding": "UTF-8", "text": "# flask-study\n1. flask๋ฅผ ์ด์šฉํ•œ ๊ธฐ๋ณธ ์›น์‚ฌ์ดํŠธ ๋งŒ๋“ค๊ธฐ\n2. flask๋กœ RESTful api๋ฅผ ๋งŒ๋“ค์–ด์„œ ML ๋ชจ๋ธ์ด ์˜ฌ๋ผ๊ฐ„ ์›น์‚ฌ์ดํŠธ ๊ตฌํ˜„ํ•˜๊ธฐ\n<br>\n<br>\n# ๊ณต๋ถ€ ๋ฐ ๋‚ด์šฉ ์ •๋ฆฌ ์ผ์ •\n2021-01-31 ~ 2021-02-06\n<br>\n<br>\n# ์ตœ์ข… ๋ชฉํ‘œ\n1. flask๋กœ ์ž์—ฐ์–ด ์ฒ˜๋ฆฌ ๋ชจ๋ธ์ด ๊ตฌํ˜„๋œ RESTful api ๋งŒ๋“ค๊ธฐ(Crowd-Funding-helper์— ์‘์šฉํ•˜๊ธฐ)\n2. ๋งŒ๋“  api ๋ฐ์ดํ„ฐ๋ฅผ spring์—์„œ ์ฒ˜๋ฆฌํ•˜์—ฌ ์›น์‚ฌ์ดํŠธ์— ๋„์›Œ์ง€๋„๋ก ํ•˜๊ธฐ\n" }, { "alpha_fraction": 0.6524999737739563, "alphanum_fraction": 0.6725000143051147, "avg_line_length": 21.27777862548828, "blob_id": "cc5786e77585d5687991a00f51cc49eeb27caf4d", "content_id": "253d464e5d1194da9f6c0eb14b24cdd117f8f74e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 400, "license_type": "no_license", "max_line_length": 46, "num_lines": 18, "path": "/app.py", "repo_name": "juyonLee00/flask-study", "src_encoding": "UTF-8", "text": "from flask import Flask, render_template\nimport sys\napplication = Flask(__name__)\n\n@application.route(\"/\")\ndef hello():\n return render_template(\"hello.html\")\n\n@application.route(\"/list\")\ndef list():\n return render_template(\"list.html\")\n\n@application.route(\"/apply\")\ndef apply():\n return render_template(\"apply.html\")\n\nif __name__ == \"__main__\":\n application.run(host='0.0.0.0', port=5002)" } ]
2
guy111a/Home-Assignment
https://github.com/guy111a/Home-Assignment
f52bb5c4054320df421923bdb4c09524817bfbf8
132c296f3724025fc19b002e98e22c0b640a4546
63d1bd5fe263794ab4f198a02ab4701b3b076403
refs/heads/main
2023-07-04T09:53:46.120802
2021-08-07T10:14:53
2021-08-07T10:14:53
393,647,391
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5481782555580139, "alphanum_fraction": 0.5564588904380798, "avg_line_length": 37.16666793823242, "blob_id": "3ca0ce46ad04f4d4940141296fb087790f641e47", "content_id": "36db61d3f0b95be1d140bab18ef3df72a3a3c01c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6642, "license_type": "no_license", "max_line_length": 135, "num_lines": 174, "path": "/api.py", "repo_name": "guy111a/Home-Assignment", "src_encoding": "UTF-8", "text": "\nimport flask\nfrom flask import request, jsonify\nimport time\n# from pyspark import SparkContext\nfrom pyspark import SparkConf, SparkContext\nfrom pyspark import SQLContext\n\nsc = SparkContext(conf=SparkConf())\nsqlContext = SQLContext(sc)\n\n# class model(object):\n# @staticmethod\n# def transformation_function(a_model):\n# delim = a_model.delim\n# def _transformation_function(row):\n# return row.split(delim)\n# return _transformation_function\n\n# def __init__(self):\n# self.delim = ','\n# self.requests = sc.textFile('requests.csv')\n# self.clicks = sc.textFile('clicks.csv')\n# self.impressions = sc.textFile('impressions.csv')\n \n# def run_model(self, uid):\n# self.requests = self.requests.map(model.transformation_function(self)).filter(lambda x: uid in x)\n# self.clicks = self.clicks.map(model.transformation_function(self)) #.filter(lambda x: uid in x)\n# self.rdd_join = self.requests.join(self.clicks.map(lambda x: x.split()))\n# #manager_df = manager.map(lambda x: list(x.split(','))).toDF([\"col1\",\"col2\"])\n# print(f'lookup: {self.requests.collect()}')\n# print(f'join: {self.rdd_join.collect()}')\n \n# test = model()\n# test.run_model(\"79c13312-ee60-4ce7-bac1-b03e42e62e8b\")\n# test.requests.take(10)\n\n\nrequests = sc.textFile('requests.csv')\nimpressions = sc.textFile('impressions.csv')\nclicks = sc.textFile('clicks.csv')\n\napp = flask.Flask(__name__)\napp.config[\"DEBUG\"] = True\n\n# default api reply\n@app.route('/', methods = ['GET'])\ndef home():\n return \"<h1>A.P.I</h1><p>Access Point Interface.</p>\"\n\n\n# retuens the current server time\n@app.route('/keepalive', methods = ['GET'])\ndef translate():\n rply = { \"system_status\" : \"ready\",\n \"current_time\" : time.time()}\n print(rply)\n return rply\n\n\n'''\nuserStats:\nInput: user_id\nOutput:\nNum of requests\nNum of impressions\nNum of clikcs\nAverage price for bid (include only wins)\nMedian Impression duration\nMax time passed till click\n'''\n\n@app.route('/userStats', methods = ['GET'])\ndef userStat():\n err = \"Usage: IP_ADDRESS | URL:8089/userStats?uid=user id\"\n if 'uid' in request.args:\n if str(request.args['uid']) != \"\" : \n userID = str(request.args['uid'])\n \n data_rdd = requests.flatMap(lambda line: line.split()).filter(lambda x: userID in x)\n\n df_requests = data_rdd.map(lambda x: list(x.split(','))).toDF([\"timeStamp\",\"sessionID\",\"partnerName\",\"userID\",\"bid\",\"win\"])\n df_clicks = clicks.map(lambda x: list(x.split(','))).toDF([\"timeStamp\",\"sessionID\",\"duration\"])\n df_impressions = impressions.map(lambda x: list(x.split(','))).toDF([\"timeStamp\",\"sessionID\",\"timePassed\"])\n \n answers = [] \n max_bid = 0\n \n for x in data_rdd.collect():\n answers.append(x)\n if float(x.split(\",\")[4]) > max_bid:\n max_bid = float(x.split(\",\")[4])\n print(f'{x}')\n print(f'max bid: {max_bid}')\n \n df_counting_impressions = df_requests.join(df_impressions, df_impressions.sessionID \\\n == df_requests.sessionID, 'left').select(df_requests.sessionID, \\\n 
df_impressions.sessionID).collect()\n \n df_counting_clicks = df_requests.join(df_clicks, df_clicks.sessionID \\\n == df_requests.sessionID, 'left').select(df_requests.sessionID, \\\n df_clicks.sessionID).collect()\n \n rply = { \"user id\" : userID, \\\n \"session id\": df_requests.collect()[0][1], \\\n \"number of requests\" : len(answers),\\\n \"number of impressions\": len(df_counting_impressions), \\\n \"number of clicks\" : len(df_counting_clicks) , \\\n \"longest impression\": df_impressions.agg({'timePassed': 'max'}).collect(), \\\n \"avg price of bid\": df_requests.agg({'bid': 'avg'}).collect(), \\\n \"max bid price\" : max_bid , \\\n \"median impression duration\": 1 }\n return rply \n else:\n return err\n else:\n return err\n\n\n\n'''\nsessionId:\nInput: session_id\nOutput:\nBegin: request timestamp\nFinish: latest timestamp (request/click/impression)\nPartner name\n'''\n\n@app.route('/sessionID', methods = ['GET'])\ndef sessionId():\n err = \"Usage: IP_ADDRESS | URL:8089/sessionID?sid=session id\"\n if 'sid' in request.args:\n if str(request.args['sid']) != \"\" : \n userID = str(request.args['sid'])\n \n data_rdd = requests.flatMap(lambda line: line.split()).filter(lambda x: userID in x)\n\n df_requests = data_rdd.map(lambda x: list(x.split(','))).toDF([\"timeStamp\",\"sessionID\",\"partnerName\",\"userID\",\"bid\",\"win\"])\n df_clicks = clicks.map(lambda x: list(x.split(','))).toDF([\"timeStamp\",\"sessionID\",\"duration\"])\n df_impressions = impressions.map(lambda x: list(x.split(','))).toDF([\"timeStamp\",\"sessionID\",\"timePassed\"])\n # df = df_requests.join(df_impressions, df_impressions.sessionID \\\n # == df_requests.sessionID, 'left').select(df_requests.sessionID, \\\n # df_impressions.timePassed)\\\n # .join(df_clicks, df_clicks.sessionID \\\n # == df_requests.sessionID, 'left').select(df_requests.sessionID, \\\n # df_clicks.duration).collect()\n \n \n df_requests.printSchema()\n \n df_click = df_requests.join(df_clicks, df_clicks.sessionID \\\n == df_requests.sessionID, 'left').select(df_requests.sessionID, \\\n df_clicks.duration).collect()\n clicked = df_click[0][1]\n \n df_impress = df_requests.join(df_impressions, df_impressions.sessionID \\\n == df_requests.sessionID, 'left').select(df_requests.sessionID, \\\n df_impressions.timePassed).collect()\n impressed = df_impress[0][1]\n \n rply = { \\\n \"begin\": str(df_requests.collect()[0][0]),\\\n \"clicked\" : clicked, \\\n \"impressed\" : impressed, \\\n \"partner name\": str(df_requests.collect()[0][2]) } # sorted(x.join(y).collect())\n return rply \n else:\n return err\n else:\n return err\n\n\n\napp.run(host='0.0.0.0', port='8089')\n" } ]
1
craigcomstock/self
https://github.com/craigcomstock/self
804e15b365e2c12154bbfba449bf64c30e901609
c6e8289315e7a4e55849e1dfeb575ef9142c2818
fdf429bfc548283caf4f5aed917929e6ead97161
refs/heads/master
2022-12-07T02:03:40.525917
2018-08-31T07:15:20
2018-08-31T07:15:20
147,691,962
0
0
MIT
2018-09-06T15:08:17
2018-08-31T07:15:23
2018-08-31T07:15:21
null
[ { "alpha_fraction": 0.5882070660591125, "alphanum_fraction": 0.5889261960983276, "avg_line_length": 42.010311126708984, "blob_id": "8708db9fafc95cc688ee3b9482c38ce123e86203", "content_id": "afc92008cb8cf56d1ac12f73ca0662adc6341c8f", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4172, "license_type": "permissive", "max_line_length": 89, "num_lines": 97, "path": "/tom/git.py", "repo_name": "craigcomstock/self", "src_encoding": "UTF-8", "text": "import os\nimport logging as log\nimport subprocess\n\n\nclass GitRepo():\n \"\"\"Class responsible for working with locally checked-out repository\"\"\"\n\n def __init__(self, dirname, repo_name, upstream_name, my_name, checkout_branch=None):\n \"\"\"Clones a remore repo to a directory (or freshens it if it's already\n checked out), configures it and optionally checks out a requested branch\n Args:\n dirname - name of directory in local filesystem where to clone the repo\n repo_name - name of repository (like 'core' or 'masterfiles')\n upstream_name - name of original owner of the repo (usually 'cfengine')\n We will pull from git@github.com:/upstream_name/repo_name\n my_name - name of github user where we will push and create PR from\n (usually 'cf-bottom')\n We will push to git@github.com:/my_name/repo_name\n checkout_branch - optional name of branch to checkout. If not provided,\n a branch from previous work might be left checked out\n \"\"\"\n self.dirname = dirname\n self.repo_name = repo_name\n self.username = my_name\n self.usermail = my_name + '@cfengine.com'\n\n fetch_url = 'git@github.com:{}/{}.git'.format(upstream_name,repo_name)\n push_url = 'git@github.com:{}/{}.git'.format(my_name,repo_name)\n\n if os.path.exists(dirname):\n self.run_command('remote', 'set-url', 'origin', fetch_url)\n self.run_command('fetch')\n else:\n self.run_command('clone', '--no-checkout', fetch_url, dirname)\n self.run_command('remote', 'set-url', '--push', 'origin', push_url)\n if checkout_branch is not None:\n self.checkout(checkout_branch)\n\n def run_command(self, *command, **kwargs):\n \"\"\"Runs a git command against git repo.\n Syntaxically this function tries to be as close to subprocess.run\n as possible, just adding 'git' with some extra parameters in the beginning\n \"\"\"\n git_command = [\n 'git', '-C', self.dirname, '-c', 'user.name=' + self.username, '-c',\n 'user.email=' + self.usermail, '-c', 'push.default=simple'\n ]\n git_command.extend(command)\n if 'check' not in kwargs:\n kwargs['check'] = True\n if 'capture_output' in kwargs:\n kwargs['stdout'] = subprocess.PIPE\n kwargs['stderr'] = subprocess.PIPE\n del kwargs['capture_output']\n if command[0] == 'clone':\n # we can't `cd` to target folder when it does not exist yet,\n # so delete `-C self.dirname` arguments from git command line\n del git_command[1]\n del git_command[1]\n kwargs['universal_newlines'] = True\n log.debug('running command: {}'.format(' '.join(git_command)))\n return subprocess.run(git_command, **kwargs)\n\n def checkout(self, branch, new=False):\n \"\"\"Checkout given branch, optionally creating it.\n Note that it's an error to create-and-checkout branch which already exists.\n \"\"\"\n if new:\n self.run_command('checkout', '-b', branch)\n else:\n self.run_command('checkout', branch)\n self.run_command('reset', '--hard', 'origin/' + branch)\n\n def get_file(self, path):\n \"\"\"Returns contents of a file as a single string\"\"\"\n with open(self.dirname + '/' + path) as f:\n return f.read()\n\n def 
put_file(self, path, data, add=True):\n        \"\"\"Overwrites file with data, optionally running `git add {path}` afterwards\"\"\"\n        with open(self.dirname + '/' + path, 'w') as f:\n            f.write(data)\n        if add:\n            self.run_command('add', path)\n\n    def commit(self, message):\n        \"\"\"Creates commit with message\"\"\"\n        self.run_command('commit', '-m', message, '--allow-empty')\n\n    def push(self, branch_name):\n        \"\"\"Pushes local branch to remote repo, optionally also setting upstream\n        \"\"\"\n        if branch_name:\n            self.run_command('push', '--set-upstream', 'origin', branch_name)\n        else:\n            self.run_command('push')\n" }, { "alpha_fraction": 0.5873684287071228, "alphanum_fraction": 0.5894736647605896, "avg_line_length": 16.592592239379883, "blob_id": "4d33af61cdb0e2b79f1b994eb165ea04d4ec8a0b", "content_id": "0a66a4d29c478a5b697ba118f2cc91d9a850ddef", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 475, "license_type": "permissive", "max_line_length": 43, "num_lines": 27, "path": "/tom/utils.py", "repo_name": "craigcomstock/self", "src_encoding": "UTF-8", "text": "import sys\nimport json\n\n\ndef read_json(path):\n    data = None\n    try:\n        with open(path, \"r\") as f:\n            data = json.loads(f.read())\n    except FileNotFoundError:\n        pass\n    return data\n\n\ndef confirmation(msg):\n    print(msg)\n    choice = input(\"Accept? \")\n    choice = choice.strip().lower()\n    return choice == \"y\" or choice == \"yes\"\n\n\ndef pretty(data):\n    return json.dumps(data, indent=2)\n\n\ndef user_error(msg):\n    sys.exit(\"Error: {}\".format(msg))\n" }, { "alpha_fraction": 0.7460317611694336, "alphanum_fraction": 0.7460317611694336, "avg_line_length": 20, "blob_id": "29359ad30b4082e0a1bd0974273e8f662d7822ae", "content_id": "749a90617f9d873920bb32ddccb2b0916592d387", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 63, "license_type": "permissive", "max_line_length": 41, "num_lines": 3, "path": "/README.md", "repo_name": "craigcomstock/self", "src_encoding": "UTF-8", "text": "# CFEngine Bot, Tom\n\nI'm your friendly neighbourhood bot, Tom.\n" }, { "alpha_fraction": 0.5712818503379822, "alphanum_fraction": 0.5736778974533081, "avg_line_length": 37.44078826904297, "blob_id": "b96a3df4eea23e98f427b5e78eb033ee3551be1d", "content_id": "74e2de5d1e6de5f59da3f43771cffe8c056b7036", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5843, "license_type": "permissive", "max_line_length": 99, "num_lines": 152, "path": "/tom/slack.py", "repo_name": "craigcomstock/self", "src_encoding": "UTF-8", "text": "import traceback\nimport re\nimport sys\nimport json\nimport requests\nimport logging as log\nfrom tom.utils import pretty\n\n\nclass Slack():\n    \"\"\"Class responsible for all interactions with Slack, EXCEPT for receiving\n    messages (They are received as HTTPS requests from Slack to a webserver,\n    which currently feeds them to stdin of this script running with `--talk`\n    argument)\n    \"\"\"\n\n    reply_to_channel = None\n    reply_to_user = None\n\n    def __init__(self, read_token, bot_token, app_token, username, interactive):\n        self.read_token = read_token\n        self.bot_token = bot_token\n        self.app_token = app_token\n        self.my_username = username\n        self.interactive = interactive\n\n    def api(self, name):\n        return 'https://slack.com/api/' + name\n\n    def post(self, url, data={}):\n        if not url.startswith('http'):\n            url = self.api(url)\n        if not 'token' in data:\n            data['token'] = self.bot_token\n        r = 
requests.post(url, data)\n assert r.status_code >= 200 and r.status_code < 300\n try:\n log.debug(pretty(r.json()))\n return r.json()\n except:\n log.debug(pretty(r.text))\n return False\n\n def send_message(self, channel, text):\n \"\"\"Sends a message to a channel\"\"\"\n if not channel:\n return\n self.post('chat.postMessage', data={\"channel\": channel, \"text\": text})\n\n def reply(self, text, mention=False):\n \"\"\"Replies to saved channel, optionally mentioning saved user\"\"\"\n if mention:\n text = '<@{}>: {}'.format(self.reply_to_user, text)\n if log.getLogger().getEffectiveLevel() >= log.INFO:\n log.info('SLACK: {}'.format(text))\n elif self.interactive:\n print(text)\n if self.reply_to_channel is not None:\n self.send_message(self.reply_to_channel, text)\n\n def parse_stdin(self, dispatcher):\n \"\"\"Reads raw message (in JSON format, as received from Slack servers)\n from stdin, checks it, and calls dispatcher.parse with message text\n \"\"\"\n\n message = json.load(sys.stdin)\n\n log.debug(pretty(message))\n if self.read_token == None:\n log.warning('no read token provided - bluntly trusting incoming message')\n else:\n if 'token' not in message or message['token'] != self.read_token:\n log.warning('Unauthorized message - ignoring')\n return\n if 'authed_users' in message and len(message['authed_users']) > 0:\n self.my_username = message['authed_users'][0]\n message = message['event']\n if not 'user' in message:\n # not a user-generated message\n # probably a bot-generated message\n # TODO: maybe check only for self.my_username here - to allow bots\n # talk to each other?\n log.warning('Not a user message - ignoring')\n return\n self.reply_to_channel = message['channel']\n self.reply_to_user = message['user']\n # remove bot username from string\n text = re.sub('<@{}> *:? 
*'.format(self.my_username), '', message['text'])\n dispatcher.parse_text(text)\n\n\nclass CommandDispatcher():\n \"\"\"Class responsible for processing user input (Slack messages) and\n dispatching relevant commands\n \"\"\"\n\n def __init__(self, slack):\n self.slack = slack\n self.help_lines = [\n 'List of commands bot recognises ' + '(prefix each command with bot name)'\n ]\n self.commands = [{}, {}]\n self.register_command(\n 'help', lambda: self.show_help(), False, 'Show this text',\n 'Shows overview of all commands')\n\n def register_command(self, keyword, callback, parameter_name, short_help, long_help=''):\n \"\"\"Register a command as recognised by Tom.\n Args:\n keyword - text that Tom should react to\n callback - function that should be called when Tom receives a\n message with keyword\n parameter_name - name of parameter for commands with parameter, or\n False for commands without\n short_help - short description of command (Tom prints it in reply\n to `@cf-bottom help` command)\n long_help - long description of command (Tom will print it in reply\n to `@cf-bottom help on <keyword>` command - TODO: implement)\n \"\"\"\n parameters_count = 1 if parameter_name else 0\n self.commands[parameters_count][keyword] = {'callback': callback, 'long_help': long_help}\n if parameter_name:\n self.help_lines.append(\n '{}: _{}_\\n- {}'.format(keyword, parameter_name.upper(), short_help))\n else:\n self.help_lines.append('{}\\n- {}'.format(keyword, short_help))\n\n def parse_text(self, text):\n \"\"\"Analyze user message and react on it - call a registered command\"\"\"\n m = re.match(' *([^:]*)(?:[:] *([^ ]*))?', text)\n keyword = m.group(1)\n argument = m.group(2)\n if argument:\n parameters_count = 1\n arguments = [argument]\n else:\n parameters_count = 0\n arguments = []\n if keyword in self.commands[parameters_count]:\n try:\n self.commands[parameters_count][keyword]['callback'](*arguments)\n except:\n self.slack.reply(\n 'I crashed on your command:' + '\\n```\\n{}\\n```'.format(traceback.format_exc()),\n True)\n else:\n self.slack.reply((\"Unknown command. 
Say \\\"<@{}> help\\\" for \"+\n \"list of known commands\")\\\n .format(self.slack.my_username))\n\n def show_help(self):\n \"\"\"Print basic help info\"\"\"\n self.slack.reply('\\n\\n'.join(self.help_lines))\n" }, { "alpha_fraction": 0.5833333134651184, "alphanum_fraction": 0.5909090638160706, "avg_line_length": 23, "blob_id": "7968a55531fe14d44dd3e79431616b4f4ff61ea7", "content_id": "7d861a41d25412e9cd1fbeea19187f06345ce803", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 264, "license_type": "permissive", "max_line_length": 68, "num_lines": 11, "path": "/tom/__main__.py", "repo_name": "craigcomstock/self", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\nif __name__ == \"__main__\":\n import os\n import sys\n\n above_dir = os.path.dirname(os.path.realpath(__file__)) + \"/../\"\n abspath = os.path.abspath(above_dir)\n sys.path.insert(0, abspath)\n\n from tom.main import main\n main()\n" }, { "alpha_fraction": 0.5882028341293335, "alphanum_fraction": 0.5945903062820435, "avg_line_length": 45.4505500793457, "blob_id": "63ca34312daaf7717581872ff8c8d79fc5357480", "content_id": "3d92a18a9ffc8098c283e42ed8c0a26fbbb86322", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 12683, "license_type": "permissive", "max_line_length": 134, "num_lines": 273, "path": "/tom/dependencies.py", "repo_name": "craigcomstock/self", "src_encoding": "UTF-8", "text": "import re\nimport json\nimport requests\nimport datetime\nimport hashlib\nimport urllib.request\nimport logging as log\nfrom tom.git import GitRepo\nfrom tom.utils import pretty\n\nclass DependencyException(Exception):\n \"\"\"Base class for all exceptions in this file\"\"\"\n pass\n\nclass ReleaseMonitoringException(DependencyException):\n \"\"\"Exception that is risen if release-monitoring.org behaves unexpectedly\"\"\"\n pass\n\n\n\nclass UpdateChecker():\n \"\"\"Class responsible for doing dependency updates\n Currently it's working only with cfengine/buildscripts repo, as described at\n https://github.com/mendersoftware/infra/blob/master/files/buildcache/release-scripts/RELEASE_PROCESS.org#minor-dependencies-update\n \"\"\"\n\n def __init__(self, github, slack, dispatcher, username):\n self.github = github\n self.slack = slack\n self.username = username\n dispatcher.register_command(\n keyword='deps',\n callback=lambda branch: self.run(branch),\n parameter_name='branch',\n short_help='Run dependency updates',\n long_help='Try to find new versions of dependencies on given branch and create PR with them')\n\n def get_deps_list(self, branch='master'):\n \"\"\"Get list of dependencies for given branch.\n Assumes proper branch checked out by `self.buildscripts` repo.\n Returns a list, like this: [\"lcov\", \"pthreads-w32\", \"libgnurx\"]\n \"\"\"\n # TODO: get value of $EMBEDDED_DB from file\n embedded_db = 'lmdb'\n if branch == '3.7.x':\n options_file = self.buildscripts.get_file('build-scripts/install-dependencies')\n else:\n options_file = self.buildscripts.get_file('build-scripts/compile-options')\n options_lines = options_file.splitlines()\n if branch == '3.7.x':\n filtered_lines = (x for x in options_lines if re.match('\\s*DEPS=\".*\\\\$DEPS', x))\n only_deps = (re.sub('\\\\$?DEPS', '', x) for x in filtered_lines)\n only_deps = (re.sub('[=\";]', '', x) for x in only_deps)\n only_deps = (x.strip() for x in only_deps)\n else:\n filtered_lines = (x for x in options_lines if 'var_append DEPS' in x)\n only_deps = 
(re.sub('.*DEPS \"(.*)\".*', \"\\\\1\", x) for x in filtered_lines)\n        # currently only_deps is generator of space-separated deps,\n        # i.e. each item can contain several items, like this:\n        # list(only_deps) = [\"lcov\", \"pthreads-w32 libgnurx\"]\n        # to \"flatten\" it we first join using spaces and then split on spaces\n        # in the middle we also do some clean-ups\n        only_deps = ' '.join(only_deps)\\\n            .replace('$EMBEDDED_DB', embedded_db)\\\n            .replace('libgcc ','')\\\n            .split(' ')\n        # now only_deps looks like this: [\"lcov\", \"pthreads-w32\", \"libgnurx\"]\n        log.debug(pretty(only_deps))\n        return only_deps\n\n    def increase_version(self, version, increment, separator='.'):\n        \"\"\"increase last part of version - so 1.2.9 becomes 1.2.10\n        Args:\n            version - old version represented as string\n            increment - by how much to increase\n            separator - separator character between version parts. Typical\n                values are '.' and '-'. Special case: if separator is 'char'\n                string, then increase last character by 1 - so version\n                '1.2b' becomes '1.2c'\n                (we assume that we never meet version ending with 'z')\n        Returns:\n            new version as a string\n        \"\"\"\n        if separator == 'char':\n            return version[:-1] + chr(ord(version[-1]) + increment)\n        version_components = version.split(separator)\n        version_components[-1] = str(int(version_components[-1]) + increment)\n        return separator.join(version_components)\n\n    def checkfile(self, url, md5=False):\n        \"\"\"Checks if file on given URL exists and optionally returns its md5 sum\n        Args:\n            url - URL to check (starting with http or ftp, other protocols might not work)\n            md5 - set it to True to force downloading file and returning md5 sum\n                (otherwise, for http[s] we use HEAD request)\n        Returns:\n            True, False, or md5 of a linked file\n        \"\"\"\n        log.debug('checking URL: ' + url)\n        try:\n            if not md5 and url.startswith('http'):\n                log.debug('testing with HEAD')\n                r = requests.head(url)\n                return r.status_code >= 200 and r.status_code < 300\n            else:\n                log.debug('getting whole file')\n                m = hashlib.md5()\n                with urllib.request.urlopen(url) as f:\n                    data = f.read(4096)\n                    while data:\n                        m.update(data)\n                        data = f.read(4096)\n                return m.hexdigest()\n        except:\n            return False\n\n    def maybe_replace(self, string, match, old, new):\n        \"\"\"replaces `old` with `new` in `string` only if it contains `match`\n        Does caseless compare by converting `string` to lowercase for comparison\n        Args:\n            string - string to work on\n            match - string to look for, MUST BE lowercase\n            old - string to replace\n            new - string to replace with\n        \"\"\"\n        if match not in string.lower():\n            return string\n        return string.replace(old, new)\n\n    def extract_version_from_filename(self, dep, filename):\n        if dep == 'openssl':\n            version = re.search('-([0-9a-z.]*).tar', filename).group(1)\n            separator = 'char'\n        elif dep == 'pthreads-w32':\n            version = re.search('w32-([0-9-]*)-rel', filename).group(1)\n            separator = '-'\n        else:\n            version = re.search('[-_]([0-9.]*)[\\.-]',filename).group(1)\n            separator = '.'\n        return (version, separator)\n\n    def find_new_version(self, old_url, old_version, separator):\n        \"\"\"Finds new version by iteratively increasing version in URL and\n        checking if it's still possible to download a file.\n        Returns highest version for which a file exists.\n        Note that if old_version is 1.2.3, and somebody released version\n        1.2.5 WITHOUT releasing 1.2.4 before that, then this function will NOT\n        find it\n        \"\"\"\n        increment = 0\n        url_result = True\n        while url_result:\n            increment += 1\n            new_version = 
self.increase_version(old_version, increment, separator)\n # note that we change version on URL level, not on filename level -\n # because sometimes version might be in directory name, too\n new_url = old_url.replace(old_version, new_version)\n url_result = self.checkfile(new_url)\n # note that url_result might be True, False, or string with md5 hash\n # Loop ends when `increment` points to non-existing version -\n # so we need to decrease it to point to last existing one\n increment -= 1\n if increment == 0:\n return old_version\n return self.increase_version(old_version, increment, separator)\n\n def get_version_from_monitoring(self, dep):\n \"\"\"Gets latest version of a dependency from release-monitoring.org site.\n Returns latest version (string), or False if dependency not found in\n release-monitoring.json file.\n \"\"\"\n if dep not in self.monitoring_ids:\n return False\n id = self.monitoring_ids[dep]\n url = 'https://release-monitoring.org/api/project/{}'.format(id)\n try:\n data = requests.get(url).json()\n except:\n raise ReleaseMonitoringException('Failed to do a request to release-monitoring.org website')\n try:\n return data['version']\n except:\n raise ReleaseMonitoringException('Failed to get version from data received from release-monitoring.org website')\n\n def update_single_dep(self, dep):\n \"\"\"Check if new version of dependency dep was released and create\n commit updating it in *.spec, dist, source, and README.md files\n \"\"\"\n log.info('Checking new version of {}'.format(dep))\n dist_file_path = 'deps-packaging/{}/distfiles'.format(dep)\n dist_file = self.buildscripts.get_file(dist_file_path)\n dist_file = dist_file.strip()\n source_file_path = 'deps-packaging/{}/source'.format(dep)\n source_file = self.buildscripts.get_file(source_file_path)\n source_file = source_file.strip()\n old_filename = re.sub('.* ', '', dist_file)\n old_url = '{}{}'.format(source_file, old_filename)\n (old_version, separator) = self.extract_version_from_filename(dep, old_filename)\n new_version = self.get_version_from_monitoring(dep)\n if not new_version:\n log.warning('Dependency {} not found in release-monitoring.org or in data file'.format(dep))\n new_version = self.find_new_version(old_url, old_version, separator)\n if new_version == old_version:\n # no update needed\n return False\n new_filename = old_filename.replace(old_version, new_version)\n new_url = old_url.replace(old_version, new_version)\n md5sum = self.checkfile(new_url, True)\n if not md5sum:\n message = 'Update {} from {} to {} FAILED to download {}'.format(dep, old_version, new_version, new_url)\n log.warn(message)\n self.slack.reply(message)\n return False\n message = 'Update {} from {} to {}'.format(dep, old_version, new_version)\n log.info(message)\n dist_file = '{} {}'.format(md5sum, new_filename)\n self.buildscripts.put_file(dist_file_path, dist_file + '\\n')\n source_file = source_file.replace(old_version, new_version)\n self.buildscripts.put_file(source_file_path, source_file + '\\n')\n self.readme_lines = [\n self.maybe_replace(\n x, '* [{}]('.format(dep.replace('-hub', '')), old_version, new_version)\n for x in self.readme_lines\n ]\n readme_file = '\\n'.join(self.readme_lines)\n self.buildscripts.put_file(self.readme_file_path, readme_file)\n spec_file_path = 'deps-packaging/{}/cfbuild-{}.spec'.format(dep, dep)\n try:\n spec_file = self.buildscripts.get_file(spec_file_path)\n except:\n pass\n else:\n spec_file = spec_file.replace(old_version, new_version)\n self.buildscripts.put_file(spec_file_path, 
spec_file + '\\n')\n self.buildscripts.commit(message)\n return message\n\n def run(self, branch):\n \"\"\"Run the dependency update for a branch, creating PR in the end\"\"\"\n self.slack.reply(\"Running dependency updates for \" + branch)\n # prepare repo\n repo_name = 'buildscripts'\n upstream_name = 'cfengine'\n local_path = \"../\" + repo_name\n self.buildscripts = GitRepo(local_path, repo_name, upstream_name, self.username, branch)\n timestamp = re.sub('[^0-9-]', '_', str(datetime.datetime.today()))\n new_branchname = '{}-deps-{}'.format(branch, timestamp)\n self.buildscripts.checkout(new_branchname, True)\n self.readme_file_path = 'deps-packaging/README.md'\n readme_file = self.buildscripts.get_file(self.readme_file_path)\n self.readme_lines = readme_file.split('\\n')\n self.monitoring_file_path = 'deps-packaging/release-monitoring.json'\n self.monitoring_ids = json.loads(self.buildscripts.get_file(self.monitoring_file_path))\n updates_summary = []\n only_deps = self.get_deps_list(branch)\n for dep in only_deps:\n single_result = self.update_single_dep(dep)\n if single_result:\n updates_summary.append(single_result)\n self.slack.reply(single_result)\n if len(updates_summary) == 0:\n self.slack.reply(\"Dependency checked, nothing to update\")\n return\n self.buildscripts.push(new_branchname)\n updates_summary = '\\n'.join(updates_summary)\n pr_text = self.github.create_pr(\n target_repo='{}/{}'.format(upstream_name, repo_name),\n target_branch=branch,\n source_user=self.username,\n source_branch=new_branchname,\n title='Dependency updates for ' + branch,\n text=updates_summary)\n self.slack.reply(\"Dependency updates:\\n```\\n{}\\n```\\n{}\".format(updates_summary, pr_text), True)\n" } ]
6
briangohjw/Game-Route-Optimisation
https://github.com/briangohjw/Game-Route-Optimisation
87dd4d62340193e3cc0e525acc82b96b17c7afee
c00afc3af60435ab5b92d57b26970c6764862068
771eb9d687f625ce247934374f82519088170486
refs/heads/main
2023-03-12T21:58:41.514981
2021-03-03T17:17:20
2021-03-03T17:17:20
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6283400654792786, "alphanum_fraction": 0.65074223279953, "avg_line_length": 27.720930099487305, "blob_id": "c5e07169462763b0e3e7153b678714ff9d9de0d5", "content_id": "068bf94e2e635ac9c8ae7d17376f432b5459df41", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3705, "license_type": "no_license", "max_line_length": 104, "num_lines": 129, "path": "/single_player.py", "repo_name": "briangohjw/Game-Route-Optimisation", "src_encoding": "UTF-8", "text": "import numpy as np\n\ndef get_route(p, v, flags):\n\tflag_dict = generate_flags_dict(flags)\n\n\tstart = ['start', '0', '0', '0']\n\tlist_of_routes = []\n\t\n\tfor i in range(25):\n\t\tflag_pool = flags.copy()\n\t\tpoints = 0\n\t\troute = [start]\n\n\t\twhile points < p and flag_pool:\n\t\t\tlast_flag = route[-1]\n\n\t\t\tlowest_dist_over_value = get_dist_over_value(route[0], flag_pool[0])\n\t\t\t# list which stores all dist_over_value from last_flag LOWER than stored flag\n\t\t\tlowest_dist_over_value_list = [[flag_pool[0], lowest_dist_over_value]]\n\n\t\t\tfor flag_candidate in flag_pool:\n\t\t\t\tthis_dist_over_value = get_dist_over_value(route[-1], flag_candidate)\n\t\t\t\tif(this_dist_over_value < lowest_dist_over_value):\n\t\t\t\t\tlowest_dist_over_value_list.append([flag_candidate, this_dist_over_value])\n\t\t\t\t\tlowest_dist_over_value = this_dist_over_value\n\n\t\t\t# keep best 2 flags\n\t\t\tflags_to_choose = lowest_dist_over_value_list[-2:]\n\n\t\t\tif len(flags_to_choose) == 2:\n\t\t\t\tflag1, flag2 = flags_to_choose\n\t\t\t\tflag1_dist_over_value = flag1[1]\n\t\t\t\tflag2_dist_over_value = flag2[1]\n\n\t\t\t\tperc_diff = get_change(flag1_dist_over_value, flag2_dist_over_value)\n\n\t\t\t\tif perc_diff < 30:\n\t\t\t\t\t# randomly select 1 flag\n\t\t\t\t\trand_int = np.random.randint(len(flags_to_choose))\n\t\t\t\t\tchosen_flag = flags_to_choose[rand_int][0]\n\t\t\t\telse:\n\t\t\t\t\tif flag1_dist_over_value < flag2_dist_over_value:\n\t\t\t\t\t\tchosen_flag = flag1[0]\n\t\t\t\t\telse:\n\t\t\t\t\t\tchosen_flag = flag2[0]\n\t\t\telse:\n\t\t\t\tchosen_flag = flags_to_choose[0][0]\n\n\t\t\troute.append(chosen_flag)\n\t\t\tflag_pool.remove(chosen_flag)\n\t\t\tpoints += float(chosen_flag[1])\n\n\t\tlist_of_routes.append(route)\n\n\tbest_route = list_of_routes[0]\n\tlowest_dist_of_all_routes = get_route_dist(list_of_routes[0], flag_dict, v)\n\n\tfor route in list_of_routes[1:]:\n\t\tthis_dist = get_route_dist(route, flag_dict, v)\n\t\tif(this_dist < lowest_dist_of_all_routes):\n\t\t\tlowest_dist_of_all_routes = this_dist\n\t\t\tbest_route = route\n\n\tbest_route = randomised_two_opt(best_route, flag_dict, v)\n\tbest_route.remove(start)\n\t\n\treturn ([flag[0] for flag in best_route])\n\ndef get_dist_over_value(old_node, new_node):\n\treturn get_distance(old_node, new_node) / float(new_node[1])\n\ndef get_distance(node_A, node_B):\n\treturn ((float(node_A[2]) - float(node_B[2])) ** 2 + (float(node_A[3]) - float(node_B[3])) ** 2) ** 0.5\n\ndef get_route_dist(your_route, flags_dict, v):\n\troute = your_route.copy()\n\n\tdist = 0\n\n\tstart_node = route[0]\n\tlast_node = start_node\n\n\tfor flag in route[1:]:\n\t\tflagID = flag[0]\n\t\tcurr_node = flags_dict[flagID]\n\t\tdist_to_curr_node = get_distance(last_node, curr_node)\n\t\tdist += dist_to_curr_node\n\n\t\tlast_node = curr_node\n\n\tif v == 2:\n\t\tdist += get_distance(last_node, start_node)\n\n\treturn dist\n\ndef generate_flags_dict(flags_list):\n d = {'start': ['start', 0, 0, 0]}\n for item in flags_list:\n # 
flagID, points, x, y\n d[item[0]] = [item[0], int(item[1]), float(item[2]), float(item[3])]\n return d\n\ndef randomised_two_opt(route, flag_dict, v):\n\tif v == 2:\n\t\troute = route[:]\n\t\tstart_point_idx = route.index(['start', '0', '0', '0'])\n\t\troute = route[start_point_idx:] + route[:start_point_idx]\n\n\toverall_best_route = route\n\tbest_route_dist = get_route_dist(overall_best_route, flag_dict, v)\n\n\tfor iteration in range(len(route)*10):\n\t\ti = np.random.randint(1, len(route)-2)\n\t\tj = np.random.randint(i+2, len(route))\n\t\t\n\t\tnew_route = overall_best_route.copy()\n\t\tnew_route[i:j+1] = new_route[j:i-1:-1]\n\t\tthis_dist = get_route_dist(new_route, flag_dict, v)\n\n\t\tif this_dist < best_route_dist:\n\t\t\toverall_best_route = new_route\n\t\t\tbest_route_dist = this_dist\n\n\treturn overall_best_route\n\ndef get_change(current, previous):\n\tif current == previous:\n\t\treturn 100.0\n\treturn (abs(current - previous) / previous) * 100.0\n" }, { "alpha_fraction": 0.755831241607666, "alphanum_fraction": 0.7662531137466431, "avg_line_length": 76.5, "blob_id": "a5238b98c4bf2909d469fbddc35778173f9e98f2", "content_id": "ea2ef9d9df5b50624d4e523ef211ee761a50db65", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2015, "license_type": "no_license", "max_line_length": 392, "num_lines": 26, "path": "/README.md", "repo_name": "briangohjw/Game-Route-Optimisation", "src_encoding": "UTF-8", "text": "# Flag-Game-Route-Optimisation\nOptimising distance travelled by players in a flag collection game using a randomised 2-opt heuristic\n\n## Task 1: Single Player (single_player.py)\nYou are a player in this game. The objective is to collect at least p points. (Since players run at the same speed, this means that you want to minimize the distance taken in your route.) It does not matter how many points you manage to accumulate; as long as you get at least p points. Plan the route that you will take in your attempt to win the game. There are two variations of this game:\n(i) In the first variation, players stop at the last flag in their route to end the game; there\nis no need to move back to the SP.\n(ii) in the second variation, all players must get back to the SP to end the game.\n\nIn both variations, the objective is still the same: minimize the distance the player has to travel to collect at least p points.\n\n## Task 2: n-Players (n_players.py)\nYou manage a team of n players in this game (where n is a number from 1 to 8). The rules and objective of the game is the same as for Q1 except that: \n(i) Players in your team do not get points for touching the same flag more than once. If player 1 has already touched F0009, no other player in your team should touch the same flag. \n(ii) The total number of points collected by the whole team need to be at least p to end the game.\n\nPlan the routes for each player in your team so as to minimize the total distance travelled by all players in order to collect at least p points as a team.\n\n## Randomised 2-Opt Optimisation Algorithm \nRandomised variant was used to increase efficiency, as compared to brute force 2 opt\n\n1. Iterate len(route) * 10 times. In each iteration:\n a. Generate a random i index, and generate a random j index\n b. Swap 2 nodes based on these indices, where new_route[i:j+1] = new_route[j:i-1:-1]\n c. If distance of new_route is lower than distance of current best route, update best route and best route distance\n2. 
Return best route\n" }, { "alpha_fraction": 0.6270784139633179, "alphanum_fraction": 0.6493198275566101, "avg_line_length": 27.763975143432617, "blob_id": "b8a71bebcc0490a7a8c184f36cdc1694b04fa80f", "content_id": "de069f5888cece785b84ed7331f174c2e2cc32de", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4631, "license_type": "no_license", "max_line_length": 105, "num_lines": 161, "path": "/n_players.py", "repo_name": "briangohjw/Game-Route-Optimisation", "src_encoding": "UTF-8", "text": "import numpy as np\n\ndef get_routes(p, v, flags, n):\n\tflag_dict = generate_flags_dict(flags)\n\tflag_dict['start'] = ['start', 0, 0, 0]\n\tflag_pool = flags\n\tlist_of_route_lists = []\n\tround_robin = list(range(1, n+1))\n\n\tstart = ['start', '0', '0', '0']\n\n\tfor i in range(10):\n\t\troute_dict = {}\n\n\t\tfor i in range(1, n+1):\n\t\t\troute_dict[i] = [['start', '0', '0', '0']]\n\n\t\tflag_pool = flags.copy()\n\t\tpoints = 0\n\t\t# route = [start]\n\n\t\twhile points < p and flag_pool:\n\t\t\tplayer_idx = round_robin[0]\n\t\t\tthis_route = route_dict[player_idx]\n\n\t\t\tlast_flag = this_route[-1]\n\n\t\t\tlowest_dist_over_value = get_dist_over_value(this_route[0], flag_pool[0])\n\t\t\t# list which stores all dist_over_value from last_flag LOWER than stored flag\n\t\t\tlowest_dist_over_value_list = [[flag_pool[0], lowest_dist_over_value]]\n\n\t\t\tfor flag_candidate in flag_pool:\n\t\t\t\tthis_dist_over_value = get_dist_over_value(this_route[-1], flag_candidate)\n\t\t\t\tif(this_dist_over_value < lowest_dist_over_value):\n\t\t\t\t\tlowest_dist_over_value_list.append([flag_candidate, this_dist_over_value])\n\t\t\t\t\tlowest_dist_over_value = this_dist_over_value\n\n\t\t\t# keep best 2 flags\n\t\t\tflags_to_choose = lowest_dist_over_value_list[-2:]\n\n\t\t\tif len(flags_to_choose) == 2:\n\t\t\t\tflag1, flag2 = flags_to_choose\n\t\t\t\tflag1_dist_over_value = flag1[1]\n\t\t\t\tflag2_dist_over_value = flag2[1]\n\n\t\t\t\tperc_diff = get_change(flag1_dist_over_value, flag2_dist_over_value)\n\n\t\t\t\tif perc_diff < 30:\n\t\t\t\t\t# randomly select 1 flag\n\t\t\t\t\trand_int = np.random.randint(len(flags_to_choose))\n\t\t\t\t\tchosen_flag = flags_to_choose[rand_int][0]\n\t\t\t\telse:\n\t\t\t\t\tif flag1_dist_over_value < flag2_dist_over_value:\n\t\t\t\t\t\tchosen_flag = flag1[0]\n\t\t\t\t\telse:\n\t\t\t\t\t\tchosen_flag = flag2[0]\n\t\t\telse:\n\t\t\t\tchosen_flag = flags_to_choose[0][0]\n\n\t\t\tthis_route.append(chosen_flag)\n\t\t\tflag_pool.remove(chosen_flag)\n\t\t\tpoints += float(chosen_flag[1])\n\n\n\t\tthis_list_of_routes = []\n\t\tfor i in range(1, len(route_dict) + 1):\n\t\t\tthis_list_of_routes.append(route_dict[i])\n\n\t\tlist_of_route_lists.append(this_list_of_routes)\n\n\tbest_route_list = list_of_route_lists[0]\n\tlowest_dist_of_all_routes = get_q2_total_dist(list_of_route_lists[0], flag_dict, v, n)\n\n\tfor route_list in list_of_route_lists[1:]:\n\t\tthis_dist = get_q2_total_dist(route_list, flag_dict, v, n)\n\t\tif(this_dist < lowest_dist_of_all_routes):\n\t\t\tlowest_dist_of_all_routes = this_dist\n\t\t\tbest_route_list = route_list\n\n\tfor i in range(len(best_route_list)):\n\t\tthis_route = best_route_list[i]\n\t\tif len(this_route) >= 4:\n\t\t\tthis_route = randomised_two_opt(this_route, flag_dict, v)\n\t\tthis_route.remove(start)\n\t\tbest_route_list[i] = [flag[0] for flag in this_route]\n\n\treturn best_route_list;\n\ndef get_dist_over_value(old_node, new_node):\n\treturn get_distance(old_node, new_node) / 
float(new_node[1])\n\ndef get_distance(node_A, node_B):\n    return ((float(node_A[2]) - float(node_B[2])) ** 2 + (float(node_A[3]) - float(node_B[3])) ** 2) ** 0.5\n\ndef get_route_dist(your_route, flags_dict, v):\n\troute = your_route.copy()\n\n\tdist = 0\n\n\tstart_node = route[0]\n\tlast_node = start_node\n\n\tfor flag in route[1:]:\n\t\tflagID = flag[0]\n\t\tcurr_node = flags_dict[flagID]\n\t\tdist_to_curr_node = get_distance(last_node, curr_node)\n\t\tdist += dist_to_curr_node\n\n\t\tlast_node = curr_node\n\n\tif v == 2: # cycle back to SP\n\t\tdist += get_distance(last_node, start_node)\n\n\treturn dist\n\ndef get_q2_total_dist(your_routes, flags_dict, v, n):\n\t# sum get_route_dist over every route in your_routes\n\ttot_dist = 0\n\ttot_points = 0\n \n\tfor route in your_routes:\n\t\tcurr_dist = get_route_dist(route, flags_dict, v)\n\t\ttot_dist += curr_dist\n\n\treturn tot_dist\n\ndef generate_flags_dict(flags_list):\n    d = {'start': ['start', 0, 0, 0]}\n    for item in flags_list:\n        # flagID, points, x, y\n        d[item[0]] = [item[0], int(item[1]), float(item[2]), float(item[3])]\n    return d\n\ndef randomised_two_opt(route, flag_dict, v):\n\tif v == 2:\n\t\troute = route[:]\n\t\tstart_point_idx = route.index(['start', '0', '0', '0'])\n\t\troute = route[start_point_idx:] + route[:start_point_idx]\n\n\toverall_best_route = route\n\tbest_route_dist = get_route_dist(overall_best_route, flag_dict, v)\n\n\tfor iteration in range(len(route)*10):\n\t\ti = np.random.randint(1, len(route)-2)\n\t\tj = np.random.randint(i+2, len(route))\n\t\t\n\t\tnew_route = overall_best_route.copy()\n\t\tnew_route[i:j+1] = new_route[j:i-1:-1]\n\t\tthis_dist = get_route_dist(new_route, flag_dict, v)\n\n\t\tif this_dist < best_route_dist:\n\t\t\toverall_best_route = new_route\n\t\t\tbest_route_dist = this_dist\n\n\treturn overall_best_route\n\ndef get_change(current, previous):\n\tif current == previous:\n\t\treturn 100.0\n\treturn (abs(current - previous) / previous) * 100.0\n" } ]
3
tiagosamaha/kruskal
https://github.com/tiagosamaha/kruskal
0b5ab5f7211f07004f008df9cf6ff0d1ca7c49c2
c75ff7ed9b7587c0af5c2f6ac8be0623c51acaf7
c13d19a3f241e7d6459f2bab6d940f0378fce772
refs/heads/master
2020-06-08T08:23:30.930452
2013-05-21T16:10:39
2013-05-21T16:10:39
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.2957746386528015, "alphanum_fraction": 0.3274647891521454, "avg_line_length": 16.75, "blob_id": "5dea0d91043262af4a714e98fa9ac1b0ddc602de", "content_id": "033b5779559fcd69a2966d32ca8bf11cd5baddbc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 284, "license_type": "no_license", "max_line_length": 40, "num_lines": 16, "path": "/exemplo_2.py", "repo_name": "tiagosamaha/kruskal", "src_encoding": "UTF-8", "text": "from kruskal import kruskal\n\nvertexs = ['A', 'B', 'C', 'D', 'E', 'F']\nedges = [\n (5, 'A', 'B'),\n (1, 'A', 'F'),\n (2, 'A', 'D'),\n (3, 'B', 'C'),\n (3, 'B', 'E'),\n (1, 'C', 'D'),\n (4, 'C', 'F'),\n (4, 'D', 'E'),\n (2, 'E', 'F'),\n]\n\nprint kruskal(vertexs, edges)\n" }, { "alpha_fraction": 0.5914811491966248, "alphanum_fraction": 0.6176185607910156, "avg_line_length": 23.595237731933594, "blob_id": "43a36a67b7feebc0ae5cd3cd48ced857f83f35ba", "content_id": "ca20c0d8ace51f13b68b307df4be689426b9abdf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1033, "license_type": "no_license", "max_line_length": 58, "num_lines": 42, "path": "/kruskal.py", "repo_name": "tiagosamaha/kruskal", "src_encoding": "UTF-8", "text": "parent = dict()\nrank = dict()\n\n# estrutura de dados conjunto-disjunto\n# http://en.wikipedia.org/wiki/Disjoint-set_data_structure\n\ndef make_set(vertex):\n parent[vertex] = vertex\n rank[vertex] = 0\n\ndef find(vertex):\n if parent[vertex] != vertex:\n parent[vertex] = find(parent[vertex]) \n return parent[vertex]\n\ndef union(vertex1, vertex2):\n root_vertex1 = find(vertex1)\n root_vertex2 = find(vertex2)\n \n if root_vertex1 == root_vertex2: return\n \n if root_vertex1 < root_vertex2:\n parent[root_vertex1] = root_vertex2\n elif root_vertex1 > root_vertex2:\n parent[root_vertex2] = root_vertex1\n else:\n parent[root_vertex2] = root_vertex1\n rank[root_vertex1] += 1 \n\ndef kruskal(vertexs, edges):\n mst = []\n for vertex in vertexs:\n make_set(vertex)\n \n edges.sort()\n \n for edge in edges:\n weight, vertex1, vertex2 = edge\n if find(vertex1) != find(vertex2):\n union(vertex1, vertex2)\n mst.append(edge)\n return mst\n" }, { "alpha_fraction": 0.3502304255962372, "alphanum_fraction": 0.37788018584251404, "avg_line_length": 15.692307472229004, "blob_id": "b1b9f03d366e8583bd72b6c434ad52d2ce4e9c39", "content_id": "7811f8ae4254036843c6d9d59ba25f9928bf1014", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 217, "license_type": "no_license", "max_line_length": 30, "num_lines": 13, "path": "/exemplo_1.py", "repo_name": "tiagosamaha/kruskal", "src_encoding": "UTF-8", "text": "from kruskal import kruskal\n\nvertexs = ['A', 'B', 'C', 'D']\nedges = [\n (1, 'A', 'B'),\n (5, 'A', 'C'),\n (3, 'A', 'D'),\n (4, 'B', 'C'),\n (2, 'B', 'D'),\n (1, 'C', 'D'),\n]\n\nprint kruskal(vertexs, edges)\n" } ]
3
KordianD/Pulsars
https://github.com/KordianD/Pulsars
fc36a0069dbc7dffbf67e3fb1553cc36ad806b2c
444c8a98f52a814a32789b1b5f31676446f30ec0
f220ad0f7cdf3591e5cc3251a023ba79d2610a97
refs/heads/master
2021-01-22T11:10:54.516664
2017-06-06T06:56:46
2017-06-06T06:56:46
92,675,126
0
2
null
null
null
null
null
[ { "alpha_fraction": 0.6779840588569641, "alphanum_fraction": 0.6880636811256409, "avg_line_length": 32.08771896362305, "blob_id": "98633122323b9793bcc9272be70c330cbfb2dcc2", "content_id": "22ff22368d46b492d39eaf4299b53caba8811e28", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1885, "license_type": "no_license", "max_line_length": 99, "num_lines": 57, "path": "/data.py", "repo_name": "KordianD/Pulsars", "src_encoding": "UTF-8", "text": "# This module will prepare data for our project\nimport numpy as np\nimport pandas\nimport matplotlib.pyplot as plt\nfrom sklearn.decomposition import PCA\n\n# loading and reshaping data\nsamples = 17898\ndata = pandas.read_csv(\"HTRU_2.csv\", header=None)\ndata = np.array(data)\nnp.random.shuffle(data)\n\nclasses = np.array(data[:, 8].astype(float))\ndataset = np.array(data[:, 0:8].astype(float))\ndataset = (dataset - np.mean(dataset, axis=0)) / np.std(dataset, axis=0)\n\n\ndef get_pulsar_data():\n \"\"\" It returns train_data, test_data, train_labels, test_labels \"\"\"\n train_data_length = int(0.7*samples)\n test_data_length = samples - train_data_length\n\n train_data = dataset[:train_data_length]\n test_data = dataset[train_data_length:]\n train_labels = classes[:train_data_length]\n test_labels = classes[train_data_length:]\n return train_data, test_data, train_labels, test_labels\n\n\ndef get_pulsar_data_reduced_to(n_features):\n \"\"\" It returns train_data, test_data, train_labels, test_labels reduced to n_features by PCA\"\"\"\n pca = PCA(n_components=n_features)\n datasetPCA = pca.fit_transform(dataset)\n train_data_length = int(0.7 * samples)\n test_data_length = samples - train_data_length\n\n train_data = datasetPCA[:train_data_length]\n test_data = datasetPCA[train_data_length:]\n train_labels = classes[:train_data_length]\n test_labels = classes[train_data_length:]\n return train_data, test_data, train_labels, test_labels\n\n\ndef get_number_of_samples():\n return len(data)\n\n\ndef show_data_in_2_dimensions(n_samples):\n pca = PCA(n_components=2)\n datasetPCA = pca.fit_transform(dataset)\n x = np.zeros(len(datasetPCA))\n y = np.zeros(len(datasetPCA))\n for i in range(len(datasetPCA)):\n x[i] = datasetPCA[i][0]\n y[i] = datasetPCA[i][1]\n plt.scatter(x[:n_samples], y[:n_samples], c=classes[:n_samples])\n plt.show()" }, { "alpha_fraction": 0.517578125, "alphanum_fraction": 0.521484375, "avg_line_length": 35.51785659790039, "blob_id": "da5577dea4b16dbc9c048b0ff25873f8f06ce94f", "content_id": "931040ecd9b217d068a4ad935b25c857c266d7b6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2048, "license_type": "no_license", "max_line_length": 114, "num_lines": 56, "path": "/SVM_Classification.py", "repo_name": "KordianD/Pulsars", "src_encoding": "UTF-8", "text": "from sklearn.svm import SVC\nimport logging\nfrom Statistics import get_performance\nimport numpy as np\nlogging.basicConfig(format='%(levelname)s:%(message)s', level=logging.DEBUG)\n\n\nclass SVM(object):\n\n def __init__(self, train_data, test_data, train_labels, test_labels, kernels = [], gammas = [], coefs = []):\n self.kernels = kernels\n self.gammas = gammas\n self.coefs = coefs\n self.train_data = train_data\n self.train_labels = train_labels\n self.test_data = test_data\n self.test_labels = test_labels\n self.sensitivity = []\n self.specificity = []\n self.precision = []\n\n\n def perform(self):\n counter = 0\n for k in self.kernels:\n for g in self.gammas:\n for c 
in self.coefs:\n counter += 1\n logging.info(' Computing SVM ... '\n + str(int(100 * counter / (len(self.coefs) * len(self.gammas) * len(self.kernels)\n - (self.kernels.count('linear') * len(self.gammas) * len(self.coefs) - 1)\n - (self.kernels.count('rbf') * len(self.gammas) * (len(self.coefs) - 1)))))\n + '% done')\n\n clf = SVC(kernel=k, gamma=g, coef0=c)\n clf.fit(self.train_data, self.train_labels)\n res = clf.predict(self.test_data)\n\n performance = get_performance(self.test_labels, res)\n self.sensitivity.append(performance['sensitivity'])\n self.specificity.append(performance['specificity'])\n self.precision.append(performance['precision'])\n\n if k == 'linear' or k == 'rbf':\n break\n if k == 'linear':\n break\n\n def get_sensitivity(self):\n return self.sensitivity\n\n def get_specificity(self):\n return self.specificity\n\n def get_precision(self):\n return self.precision\n\n\n\n" }, { "alpha_fraction": 0.5720899701118469, "alphanum_fraction": 0.5753968358039856, "avg_line_length": 31.7608699798584, "blob_id": "eaaeec2acd958c597044e12241ecdcc580133c32", "content_id": "c4271023b142a75c51c3fe0ab0ea9c67d90aa5aa", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1512, "license_type": "no_license", "max_line_length": 98, "num_lines": 46, "path": "/Random_Forest.py", "repo_name": "KordianD/Pulsars", "src_encoding": "UTF-8", "text": "from sklearn.ensemble import RandomForestClassifier\nimport logging\nfrom Statistics import get_performance\nimport numpy as np\n\n\nclass RFC(object):\n\n def __init__(self, train_data, test_data, train_labels, test_labels, trees = [], depths = []):\n self.trees = trees\n self.depths = depths\n self.train_data = train_data\n self.train_labels = train_labels\n self.test_data = test_data\n self.test_labels = test_labels\n self.sensitivity = []\n self.specificity = []\n self.precision = []\n\n def perform(self):\n counter = 0\n for t in self.trees:\n for d in self.depths:\n counter += 1\n logging.info(' Computing RF ... 
'\n + str(int(100 * counter / (len(self.trees) * len(self.depths))))\n + '% done')\n\n clf = RandomForestClassifier(n_estimators=t, max_depth=d)\n clf.fit(self.train_data, self.train_labels)\n res = clf.predict(self.test_data)\n\n performance = get_performance(self.test_labels, res)\n self.sensitivity.append(performance['sensitivity'])\n self.specificity.append(performance['specificity'])\n self.precision.append(performance['precision'])\n\n\n def get_sensitivity(self):\n return self.sensitivity\n\n def get_specificity(self):\n return self.specificity\n\n def get_precision(self):\n return self.precision\n\n\n\n\n\n" }, { "alpha_fraction": 0.5403677225112915, "alphanum_fraction": 0.5563549399375916, "avg_line_length": 26.19565200805664, "blob_id": "885f29b95bcab7d08700c61838d4a3f64853e803", "content_id": "adfe041d831f2e1c3a87ed89a6b5f6d57eb5032d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1251, "license_type": "no_license", "max_line_length": 108, "num_lines": 46, "path": "/Statistics.py", "repo_name": "KordianD/Pulsars", "src_encoding": "UTF-8", "text": "import numpy as np\n\ndef get_performance(labels, predictions):\n TP, FN, FP, TN = 0, 0, 0, 0\n for g, p in zip(labels, predictions):\n if g == p == 1:\n TP += 1\n if g == 1 and p == 0:\n FN += 1\n if g == 0 and p == 1:\n FP += 1\n if g == 0 and p == 0:\n TN += 1\n\n if TP or FN:\n sensitivity = TP / (TP + FN)\n else:\n sensitivity = 0\n\n if TN or FP:\n specificity = TN / (TN + FP)\n else:\n specificity = 0\n\n if TP or FP:\n precision = TP / (TP + FP)\n else:\n precision = 0\n\n return {'precision': precision,\n 'sensitivity': sensitivity,\n 'specificity': specificity}\n\n\ndef fscore(precision, sensitivity):\n return 2 * (np.array(precision) * np.array(sensitivity)) / (np.array(precision) + np.array(sensitivity))\n\ndef roc(sensitivity, specificity):\n return np.array(sensitivity), (1 - np.array(specificity))\n\ndef output(sensitivity, specificity, precision):\n print('Sensitivity ' + str(sensitivity))\n print('Specificity ' + str(specificity))\n print('Precision ' + str(precision))\n print('F-score ' + str(fscore(precision, sensitivity)))\n print('ROC ' + str(roc(sensitivity, specificity)))\n" }, { "alpha_fraction": 0.6591760516166687, "alphanum_fraction": 0.6619850397109985, "avg_line_length": 28.61111068725586, "blob_id": "ead7282101202cd8c0cd61c3d1a53d8e50158f58", "content_id": "34e13f51bc7711eb643ab47602c98d68d607e8a9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1068, "license_type": "no_license", "max_line_length": 73, "num_lines": 36, "path": "/Log_Regression.py", "repo_name": "KordianD/Pulsars", "src_encoding": "UTF-8", "text": "import logging\nfrom sklearn.linear_model import LogisticRegression\nfrom Statistics import get_performance\n\nclass Log_Regression(object):\n def __init__(self, train_data, test_data, train_labels, test_labels):\n self.train_data = train_data\n self.train_labels = train_labels\n self.test_data = test_data\n self.test_labels = test_labels\n self.sensitivity = 0\n self.specificity = 0\n self.precision = 0\n\n\n def perform(self):\n\n logging.info('Computing Logistic Regression')\n clf = LogisticRegression()\n clf.fit(self.train_data, self.train_labels)\n res = clf.predict(self.test_data)\n\n performance = get_performance(self.test_labels, res)\n self.sensitivity = performance['sensitivity']\n self.specificity = performance['specificity']\n self.precision = 
performance['precision']\n\n\n def get_sensitivity(self):\n return self.sensitivity\n\n def get_specificity(self):\n return self.specificity\n\n def get_precision(self):\n return self.precision\n\n\n" }, { "alpha_fraction": 0.5903061032295227, "alphanum_fraction": 0.5959183573722839, "avg_line_length": 37.3725471496582, "blob_id": "9f5bfd0ad965c51ab1da2bd17bf2c60150bcde0a", "content_id": "0499fac1da674601a0211cd1f8d20d8318cc275e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1960, "license_type": "no_license", "max_line_length": 124, "num_lines": 51, "path": "/KNN_Classification.py", "repo_name": "KordianD/Pulsars", "src_encoding": "UTF-8", "text": "# This module will be operating on out data using KNN classifier\n# We decided to use this classifier for 1, 3, 5, 7 neighbours\n# with 2 diffrent metric : euclidean, cityblock, minkowski\n# Also we will use 2 different \"weights\" functions : uniform and weights\n\nimport logging\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom Statistics import get_performance\nimport numpy as np\n\nclass KNN(object):\n def __init__(self, train_data, test_data, train_labels, test_labels, distance =[], n_neighbours =[], weights=[]):\n self.train_data = train_data\n self.train_labels = train_labels\n self.test_data = test_data\n self.test_labels = test_labels\n self.distance = distance\n self.n_neighbours = n_neighbours\n self.weights = weights\n self.sensitivity = []\n self.specificity = []\n self.precision = []\n\n\n def perform(self):\n counter = 0\n for d in self.distance:\n for n in self.n_neighbours:\n for w in self.weights:\n counter += 1\n logging.info(' Computing KNN ... '\n + str(int(100*counter / (len(self.distance) * len(self.n_neighbours) * len(self.weights))))\n + '% done')\n clf = KNeighborsClassifier(n_neighbors=n, metric=d, weights=w)\n clf.fit(self.train_data, self.train_labels)\n res = clf.predict(self.test_data)\n\n performance = get_performance(self.test_labels, res)\n self.sensitivity.append(performance['sensitivity'])\n self.specificity.append(performance['specificity'])\n self.precision.append(performance['precision'])\n\n\n def get_sensitivity(self):\n return self.sensitivity\n\n def get_specificity(self):\n return self.specificity\n\n def get_precision(self):\n return self.precision\n\n\n\n" }, { "alpha_fraction": 0.7044830918312073, "alphanum_fraction": 0.7188166975975037, "avg_line_length": 31.13725471496582, "blob_id": "09e6e9f5c752ec1e79c64d1e3007b4c74e7f93c9", "content_id": "0905cf44b1c5ea4c6f88758c8358faf61b839bfb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3279, "license_type": "no_license", "max_line_length": 126, "num_lines": 102, "path": "/main.py", "repo_name": "KordianD/Pulsars", "src_encoding": "UTF-8", "text": "# We will perform machine learning on pulsars data\n\nfrom SVM_Classification import SVM\nfrom Statistics import *\nfrom KNN_Classification import KNN\nfrom Log_Regression import Log_Regression\nfrom NN import NN\nfrom Random_Forest import RFC\nimport data\nimport pandas as pd\n\n\ntrain_data, test_data, train_labels, test_labels = data.get_pulsar_data_reduced_to(5)\n\n\n'''\n\nkernels_for_SVM = ['linear', 'poly', 'sigmoid']\ngammas_for_SVM = [0.000001]\ncoefs_for_SVM = [-1.0, 1.0, 0]\nSVM_classification = SVM(train_data, test_data, train_labels, test_labels,\n kernels_for_SVM,\n gammas_for_SVM,\n coefs_for_SVM)\nSVM_classification.perform()\n\nprecision = 
SVM_classification.get_precision()\nsensitivity = SVM_classification.get_sensitivity()\nspecificity = SVM_classification.get_specificity()\n\noutput(sensitivity, specificity, precision)\n'''\n'''\ndistance = ['euclidean']\nn_neighbours = [1, 2, 3, 5]\nweights = ['uniform', 'distance']\nKNN_classification = KNN(train_data, test_data, train_labels, test_labels,\n distance, n_neighbours, weights)\nKNN_classification.perform()\n\n\nprecision = KNN_classification.get_precision()\nsensitivity = KNN_classification.get_sensitivity()\nspecificity = KNN_classification.get_specificity()\n\noutput(sensitivity, specificity, precision)\n\n\nLR_classification = Log_Regression(train_data, test_data, train_labels, test_labels)\nLR_classification.perform()\n\nprecision = LR_classification.get_precision()\nsensitivity = LR_classification.get_sensitivity()\nspecificity = LR_classification.get_specificity()\n\noutput(sensitivity, specificity, precision)\n\n\nnumber_of_classes = 2\nnumber_of_hidden_units = [2, 5, 10, 20]\nnumber_of_layers = [1, 2, 5, 10]\nNN_classification = NN(train_data, test_data, train_labels, test_labels,\n number_of_layers, number_of_hidden_units, number_of_classes)\nNN_classification.perform()\n\nprecision = NN_classification.get_precision()\nsensitivity = NN_classification.get_sensitivity()\nspecificity = NN_classification.get_specificity()\n\noutput(sensitivity, specificity, precision)\n'''\n\n\ntrees = [5, 10, 20]\ndepths = [10, 50, 100]\nRFC_classification = RFC(train_data, test_data, train_labels, test_labels,\n trees, depths)\nRFC_classification.perform()\n\nprecision = RFC_classification.get_precision()\nsensitivity = RFC_classification.get_sensitivity()\nspecificity = RFC_classification.get_specificity()\n\noutput(sensitivity, specificity, precision)\n'''\n\n\ndata.show_data_in_2_dimensions(15000)\n\n\n\n\ndata = pd.DataFrame([\n [\"SVM:\"], SVM_classification.get_sensitivity(), SVM_classification.get_precision(), SVM_classification.get_specificity(),\n [\"KNN:\"], KNN_classification.get_sensitivity(), KNN_classification.get_precision(), KNN_classification.get_specificity(),\n [\"NN:\"], NN_classification.get_sensitivity(), NN_classification.get_precision(), NN_classification.get_specificity(),\n [\"RF:\"], RFC_classification.get_sensitivity(), RFC_classification.get_precision(), RFC_classification.get_specificity(),\n [\"LR:\"], [LR_classification.get_sensitivity()], [LR_classification.get_precision()], [LR_classification.get_specificity()]\n ])\ndata.to_csv(\"data.csv\", index=False, header=False)\n\n'''\n\n" }, { "alpha_fraction": 0.5849999785423279, "alphanum_fraction": 0.5885000228881836, "avg_line_length": 32.79661178588867, "blob_id": "9cd733b06507f2359a643d7b5e9746943ce870ad", "content_id": "7ad8a5db7c778ab1f1d08bbf94a53690a896645e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2000, "license_type": "no_license", "max_line_length": 108, "num_lines": 59, "path": "/NN.py", "repo_name": "KordianD/Pulsars", "src_encoding": "UTF-8", "text": "import tensorflow as tf\nfrom Statistics import get_performance\nimport numpy as np\n\n\nclass NN(object):\n\n def __init__(self, train_data, test_data, train_labels, test_labels, layers, hidden_units, classes):\n self.train_data = train_data\n self.train_labels = train_labels\n self.test_data = test_data\n self.test_labels = test_labels\n self.layers = layers\n self.hidden_units = hidden_units\n self.classes = classes\n self.feature_columns = 
[tf.contrib.layers.real_valued_column(\"\", dimension=len(self.train_data[0]))]\n self.sensitivity = []\n self.specificity = []\n self.precision = []\n\n def get_train_inputs(self):\n x = tf.constant(self.train_data)\n y = tf.constant(self.train_labels)\n\n return x, y\n\n def get_test_inputs(self):\n x = tf.constant(self.test_data)\n y = tf.constant(self.test_labels)\n\n return x, y\n\n def get_sensitivity(self):\n return self.sensitivity\n\n def get_precision(self):\n return self.precision\n\n def get_specificity(self):\n return self.specificity\n\n\n\n def perform(self):\n feature_columns = [tf.contrib.layers.real_valued_column(\"\", dimension=len(self.train_data[0]))]\n\n for lay in self.layers:\n for hid in self.hidden_units:\n classifier = tf.contrib.learn.DNNClassifier(feature_columns=feature_columns,\n hidden_units=[hid] * lay,\n n_classes=2)\n classifier.fit(input_fn=self.get_train_inputs, steps=5000)\n\n res = classifier.predict(input_fn=self.get_test_inputs)\n\n performance = get_performance(self.test_labels, res)\n self.sensitivity.append(performance['sensitivity'])\n self.specificity.append(performance['specificity'])\n self.precision.append(performance['precision'])\n\n\n\n\n\n\n" }, { "alpha_fraction": 0.8275862336158752, "alphanum_fraction": 0.8275862336158752, "avg_line_length": 28, "blob_id": "7640b16624727a8fe00e7b91bb88e93d75310523", "content_id": "8413d19c9bfb8cbc1d0fdb9f5a8749dfe92e137d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 58, "license_type": "no_license", "max_line_length": 47, "num_lines": 2, "path": "/README.md", "repo_name": "KordianD/Pulsars", "src_encoding": "UTF-8", "text": "# Pulsars\nMachine learning project for detecting pulsars.\n" } ]
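The get_performance helper in Statistics.py above hand-rolls the confusion matrix behind sensitivity, specificity, and precision. A quick cross-check of those definitions against scikit-learn — offered only as an illustration; sklearn is used elsewhere in this repo but is not a dependency of Statistics.py itself, and the sample labels below are made up:

from sklearn.metrics import confusion_matrix, precision_score, recall_score

labels      = [1, 1, 0, 0, 1, 0]   # ground truth (1 = pulsar), invented for the check
predictions = [1, 0, 0, 1, 1, 0]   # classifier output, invented for the check

# For binary 0/1 labels, ravel() yields tn, fp, fn, tp in that order.
tn, fp, fn, tp = confusion_matrix(labels, predictions).ravel()
sensitivity = tp / (tp + fn)       # get_performance's "sensitivity" is recall
specificity = tn / (tn + fp)
precision   = tp / (tp + fp)

assert sensitivity == recall_score(labels, predictions)
assert precision == precision_score(labels, predictions)

The `if TP or FN` style guards in the original exist because these ratios divide by zero on degenerate test sets, for example when the classifier predicts no positives at all.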
9
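The Pulsars record above duplicates the same 70/30 slicing in both get_pulsar_data and get_pulsar_data_reduced_to, and shuffles once at module import. A minimal sketch of how that split could be factored into one helper, assuming scikit-learn's train_test_split is an acceptable dependency — the load_split name and its defaults are illustrative, not part of the repo:

import pandas as pd
from sklearn.decomposition import PCA
from sklearn.model_selection import train_test_split

def load_split(csv_path="HTRU_2.csv", n_features=None, test_size=0.3, seed=0):
    # Load and standardize exactly as data.py does: column 8 is the label,
    # columns 0-7 are the features.
    data = pd.read_csv(csv_path, header=None).to_numpy()
    labels = data[:, 8].astype(float)
    features = data[:, 0:8].astype(float)
    features = (features - features.mean(axis=0)) / features.std(axis=0)
    if n_features is not None:
        # Optional dimensionality reduction, mirroring get_pulsar_data_reduced_to.
        features = PCA(n_components=n_features).fit_transform(features)
    # Returns train_data, test_data, train_labels, test_labels in the same
    # order as the original helpers; shuffling is handled by the splitter.
    return train_test_split(features, labels, test_size=test_size, random_state=seed)

A seeded splitter also removes a subtle property of the original module: np.random.shuffle runs at import time, so calls with different n_features see the same row order within one process, but every fresh run produces a different split.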
alvinye1/poseidon_mobile_appserver
https://github.com/alvinye1/poseidon_mobile_appserver
b444efc2fa41613baf1897182d358b0583286ebe
ce2445fe138c7b7a998c58e285b98b331621af7a
8bbfd685425bd298737f877d5e9de4f153325ea0
refs/heads/master
2020-04-24T02:29:46.824486
2019-02-21T06:45:19
2019-02-21T06:45:19
171,638,733
0
0
null
null
null
null
null
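The Database class in modules/database_controller/ctl_mysql.py among the files below keeps one pooled connection for the object's lifetime and rebinds self.cursor on every query, shadowing its own cursor() method. A minimal sketch of the same DBUtils/pymysql pooling pattern with per-call cursors — an illustration only: the connection parameters are placeholders, and the import path differs between DBUtils 1.x (DBUtils.PooledDB) and 2.x (dbutils.pooled_db):

import pymysql
from dbutils.pooled_db import PooledDB  # DBUtils 2.x; 1.x uses: from DBUtils.PooledDB import PooledDB

# One process-wide pool; placeholder credentials.
pool = PooledDB(pymysql, host="127.0.0.1", port=3306, user="user",
                passwd="secret", db="poseidon", charset="utf8", autocommit=True)

def select_all(sql, params=None):
    # Borrow a connection per call; closing it returns it to the pool.
    conn = pool.connection()
    try:
        with conn.cursor() as cur:
            # Parameterized execution avoids building SQL with string formatting,
            # which the original class does throughout.
            cur.execute(sql, params or ())
            return cur.fetchall()
    finally:
        conn.close()

Per-call cursors inside context managers guarantee cleanup even when a query raises, which the try/except/finally blocks in the original only partially achieve.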
[ { "alpha_fraction": 0.5808823704719543, "alphanum_fraction": 0.5980392098426819, "avg_line_length": 18.428571701049805, "blob_id": "31461b4225ceb404c419af4e80661f4c1b94a050", "content_id": "b8ac72835fe8da18b592449537f2eddb69f2415f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 412, "license_type": "no_license", "max_line_length": 61, "num_lines": 21, "path": "/modules/common_util.py", "repo_name": "alvinye1/poseidon_mobile_appserver", "src_encoding": "UTF-8", "text": "# -*-coding: utf-8 -*-\n'''\nCreated by jojo at 2018/7/5\n'''\nimport os\nimport configparser\nimport tarfile\n\ndef get_config():\n    \"\"\"\n    Read project.conf\n    \"\"\"\n    pwd = os.path.dirname(os.path.realpath(__file__))\n    conf_file = os.path.join(pwd, \"../\", \"conf/project.conf\")\n    config = configparser.ConfigParser()\n    config.read(conf_file)\n    return config\n\n\nif __name__ == '__main__':\n    print (\"test\")\n" }, { "alpha_fraction": 0.75, "alphanum_fraction": 0.75, "avg_line_length": 28.33333396911621, "blob_id": "d7061926e32000342463c2401bab7c3c45478a0c", "content_id": "8b7663feea51bd19a8a9b7149e2d158ab8a863f5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 88, "license_type": "no_license", "max_line_length": 40, "num_lines": 3, "path": "/tests/network.py", "repo_name": "alvinye1/poseidon_mobile_appserver", "src_encoding": "UTF-8", "text": "import ifcfg\nprint(ifcfg.interfaces().items())\nprint(ifcfg.default_interface()['inet'])\n" }, { "alpha_fraction": 0.6061946749687195, "alphanum_fraction": 0.6106194853782654, "avg_line_length": 18.91666603088379, "blob_id": "665c8ba684b19720edc40aa34a56f9bbdd00ea18", "content_id": "205b14556b6db52196bd266aab93d2ccfde6144c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 226, "license_type": "no_license", "max_line_length": 87, "num_lines": 12, "path": "/tests/jsontest.py", "repo_name": "alvinye1/poseidon_mobile_appserver", "src_encoding": "UTF-8", "text": "# -*-coding: utf-8 -*-\nimport json\n\nmodel = {}\nwith open(\"/TIVOLI/flask_ICBC/frontend/src/components/testdata/diamond.json\",'r') as f:\n    a = json.load(f)  # json.load reads a file object; json.loads expects a string\n    for x in a:\n        name = x['name']\n\n\nprint(\"sdsa\")\nprint(model)" }, { "alpha_fraction": 0.4390243887901306, "alphanum_fraction": 0.4471544623374939, "avg_line_length": 13.55555534362793, "blob_id": "e0e7192dc43327dc842fb45af2cace9b631e8569", "content_id": "b67ee137704f7fb8134be4b616113fa8721778de", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 123, "license_type": "no_license", "max_line_length": 28, "num_lines": 9, "path": "/tests/merger.py", "repo_name": "alvinye1/poseidon_mobile_appserver", "src_encoding": "UTF-8", "text": "# -*-coding: utf-8 -*-\n\n\nclass Merge():\n    '''\n    mysql data to table json\n    '''\n    def __init__(self):\n        pass\n\n" }, { "alpha_fraction": 0.4000000059604645, "alphanum_fraction": 0.5166666507720947, "avg_line_length": 13.75, "blob_id": "5587f679a53d54bc4d5099353203efa9a0915de4", "content_id": "c1e5112020dc440a9893be0200f37355317bea9d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 60, "license_type": "no_license", "max_line_length": 27, "num_lines": 4, "path": "/modules/__init__.py", "repo_name": "alvinye1/poseidon_mobile_appserver", "src_encoding": "UTF-8", "text": "# -*-coding: utf-8 -*-\n'''\nCreated by jojo at 2018/9/7\n'''\n\n" }, { 
"alpha_fraction": 0.5274853706359863, "alphanum_fraction": 0.530994176864624, "avg_line_length": 29, "blob_id": "a0950e3956671506ba87ae0363bbeca13c405b8d", "content_id": "ce9c2c2b7f5ece2452206b82ff4d77484dcfc2fd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3420, "license_type": "no_license", "max_line_length": 117, "num_lines": 114, "path": "/modules/database_controller/ctl_mysql.py", "repo_name": "alvinye1/poseidon_mobile_appserver", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# ======================================\n# POSEIDON PROGRAM FILE\n# APPLICATION:\n# Author:Alvin Ye .ICBC\n# Version(Internal):1.0\n# All rights reserved\n# Modified by KnightQin\n# date: 2018-11-15\n# ======================================\n\nimport pymysql\nfrom modules.utility import *\nfrom DBUtils.PooledDB import *\nimport logging\n\n\nclass Label:\n \"\"\"A label means a alarm\"\"\"\n pass\n\n\nclass Database:\n \"\"\"mysql database connection.\"\"\"\n\n def __init__(self):\n # read from config file\n self.host = config.get('DATABASE', 'MYSQL_HOST')\n self.port = int(config.get('DATABASE', 'MYSQL_PORT'))\n self.user = config.get('DATABASE', 'MYSQL_USER')\n self.passwd = config.get('DATABASE', 'MYSQL_PASSWD')\n self.db = config.get('DATABASE', 'MYSQL_DB')\n # mysql database initialize\n '''\n self._db = pymysql.connect(host=self.host,\n port=self.port,\n user=self.user,\n passwd=self.passwd,\n db=self.db,\n charset=\"utf8\",\n autocommit=True)\n '''\n # init self.pool\n self.pool = PooledDB(pymysql, db=self.db, host=self.host, user=self.user, passwd=self.passwd, port=self.port,\n charset=\"utf8\", use_unicode=True, autocommit=True)\n self.conn = self.pool.connection(shareable=True)\n\n def __del__(self):\n self.conn.close()\n\n def cursor(self):\n # return self._db.cursor()\n cursor = self.conn.cursor()\n return cursor\n\n def update(self, sql):\n self.cursor = self.conn.cursor()\n try:\n self.cursor.execute(sql)\n except Exception:\n logging.error(\"Update Exception Occurred\", Exception)\n logging.error(\"ERROR:\", sql)\n self.conn.rollback()\n finally:\n self.cursor.close()\n\n def insert(self, sql):\n self.cursor = self.conn.cursor()\n try:\n self.cursor.execute(sql)\n except Exception:\n logging.error(\"Insert Exception Occurred\", Exception)\n logging.error(\"ERROR:\", sql)\n self.conn.rollback()\n finally:\n self.cursor.close()\n\n def selectone(self, sql):\n self.cursor = self.conn.cursor()\n try:\n self.cursor.execute(sql)\n except Exception:\n logging.error(\"Select Exception Occurred\", Exception)\n finally:\n result = self.cursor.fetchone()\n self.cursor.close()\n return result\n\n def selectall(self, sql):\n self.cursor = self.conn.cursor()\n try:\n self.cursor.execute(sql)\n except Exception:\n logging.error(\"Select Exception Occurred\", Exception)\n finally:\n results = self.cursor.fetchall()\n self.cursor.close()\n return results\n\n def delete(self, sql):\n self.cursor = self.conn.cursor()\n try:\n self.cursor.execute(sql)\n except Exception:\n logging.error(\"Delete Exception Occurred\", Exception)\n self.conn.rollback()\n finally:\n self.cursor.close()\n\n\nif __name__ == \"__main__\":\n db = Database()\n sql = \"select * from poseidon_alarm_dcap \"\n logging.info(db.selectall(sql))\n" }, { "alpha_fraction": 0.5096638202667236, "alphanum_fraction": 0.5401970148086548, "avg_line_length": 27.17160987854004, "blob_id": "1b30a434a6c6ec06822f8d6592bf73dfb01100a6", "content_id": "e65943bffcf8482e3c11716f8cd435ea2a10950a", 
"detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 13437, "license_type": "no_license", "max_line_length": 99, "num_lines": 472, "path": "/tests/baknews.py", "repo_name": "alvinye1/poseidon_mobile_appserver", "src_encoding": "UTF-8", "text": "# -*-coding: utf-8 -*-\n'''\nCreated by jojo at 2018/9/7\n'''\nimport flask\nfrom flask import jsonify\nimport time\nfrom flask import request\nimport json\nfrom backend.modules.db_util_118_alarm import MSSoup118alarm\nfrom backend.modules.db_util import MSSoup\nfrom backend.modules.db_util_118 import MSSoup118\nnews_bp = flask.Blueprint(\n 'news',\n __name__,\n url_prefix='/api'\n)\n\n\n\n@news_bp.route('/currentUser', methods=['GET'])\ndef get_currentUser():\n ms = MSSoup()\n res = ms.get_name()\n print('dddd')\n response = {\n 'name': 'Serati Ma',\n 'avatar': 'https://gw.alipayobjects.com/zos/rmsportal/BiazfanxmamNRoxxVxka.png',\n 'userid': '00000001',\n 'email': 'antdesign@alipay.com',\n 'signature': 'ๆตท็บณ็™พๅท๏ผŒๆœ‰ๅฎนไนƒๅคง',\n 'title': 'ไบคไบ’ไธ“ๅฎถ',\n 'group': '่š‚่š้‡‘ๆœ๏ผๆŸๆŸๆŸไบ‹ไธš็พค๏ผๆŸๆŸๅนณๅฐ้ƒจ๏ผๆŸๆŸๆŠ€ๆœฏ้ƒจ๏ผUED',\n 'tags': [\n {\n 'key': '0',\n 'label': 'ๅพˆๆœ‰ๆƒณๆณ•็š„',\n },\n {\n 'key': '1',\n 'label': 'ไธ“ๆณจ่ฎพ่ฎก',\n },\n {\n 'key': '2',\n 'label': '่พฃ~',\n },\n {\n 'key': '3',\n 'label': 'ๅคง้•ฟ่…ฟ',\n },\n {\n 'key': '4',\n 'label': 'ๅทๅฆนๅญ',\n },\n {\n 'key': '5',\n 'label': 'ๆตท็บณ็™พๅท',\n },\n ],\n 'notifyCount': 12,\n 'country': 'China',\n 'geographic': {\n 'province': {\n 'label': 'ๆต™ๆฑŸ็œ',\n 'key': '330000',\n },\n 'city': {\n 'label': 'ๆญๅทžๅธ‚',\n 'key': '330100',\n },\n },\n 'address': '่ฅฟๆน–ๅŒบๅทฅไธ“่ทฏ 77 ๅท',\n 'phone': '0752-268888888',\n }\n return jsonify(response)\n@news_bp.route('/test20181011', methods=['GET'])\ndef get_appname():\n app = MSSoup118()\n appname = app.get_appname()\n response = []\n print appname\n for i in appname:\n NAME = {}\n NAME['ENAME'] = i[0]\n response.append(NAME)\n return jsonify(response\n ) \n\n@news_bp.route('/detail', methods=['GET'])\ndef get_machinedetail():\n detail = MSSoup118()\n machinedetail = detail.get_machine_detail()\n print machinedetail\n #response =[]\n for i in machinedetail:\n\tDetail = {}\n\tDetail['IP_ADDR'] = i[2]\n\tDetail['HOSTNAME'] = i[3]\n\tDetail['DEV_SN'] = i[4]\n\tDetail['CPU_AMT'] = i[5]\n\tDetail['CPU_CORE_AMT'] = i[6]\n\tDetail['MEM_CONTENT'] = i[7]\n\tDetail['DEV_POSITION'] = i[9]\n\tDetail['PART_TYPE'] = i[10]\n\tDetail['SOFT_VERSION'] = i[11]\n\tDetail['CI_DEPT'] = i[12]\n\tDetail['APPNAME'] = i[14]\n\tDetail['ENNAME_SIMPLE'] = i[15]\n\tDetail['APPNODE_DESC'] = i[16]\n\tDetail['DEPLOYMENT'] = i[18]\n\tDetail['MODEL'] = i[8]\n\t#response.append(Detail)\n return jsonify(Detail\n )\n\n\n\n@news_bp.route('/detail2', methods=['GET'])\ndef get_machinedetail2():\n detail = MSSoup118()\n machinedetail = detail.get_machine_detail2()\n #print machinedetail\n response =[]\n id = 1\n for i in machinedetail:\n\t Detail = {}\n\t Detail['IP_ADDR'] = i[2]\n\t # Detail['HOSTNAME'] = i[3]\n\t # Detail['DEV_SN'] = i[4]\n\t # Detail['CPU_AMT'] = i[5]\n\t # Detail['CPU_CORE_AMT'] = i[6]\n\t # Detail['MEM_CONTENT'] = i[7]\n\t # Detail['DEV_POSITION'] = i[9]\n\t # Detail['PART_TYPE'] = i[10]\n\t # Detail['SOFT_VERSION'] = i[11]\n\t # Detail['CI_DEPT'] = i[12]\n\t # Detail['APPNAME'] = i[14]\n\t # Detail['ENNAME_SIMPLE'] = i[15]\n\t # Detail['APPNODE_DESC'] = i[16]\n\t # Detail['DEPLOYMENT'] = i[18]\n\t # Detail['MODEL'] = i[8]\n\t # Detail['ID'] = id\n\t # id += 1\n\t 
response.append(Detail)\n return jsonify(response\n )\n\n@news_bp.route('/detail3', methods=['GET','POST'])\ndef test2222():\n print \"test\"\n ip = request.args\n ip2 = ip.to_dict()\n ip3 = ip2['ip']\n detail = MSSoup118()\n machinedetail = detail.get_machine_detail3(ip3)\n # print machinedetail\n response = []\n machinedetail2 = machinedetail[0]\n Detail = {}\n Detail['IP_ADDR'] = machinedetail2[2]\n Detail['HOSTNAME'] = machinedetail2[3]\n Detail['DEV_SN'] = machinedetail2[4]\n Detail['CPU_AMT'] = machinedetail2[5]\n Detail['CPU_CORE_AMT'] = machinedetail2[6]\n Detail['MEM_CONTENT'] = machinedetail2[7]\n Detail['DEV_POSITION'] = machinedetail2[9]\n Detail['PART_TYPE'] = machinedetail2[10]\n Detail['SOFT_VERSION'] = machinedetail2[11]\n Detail['CI_DEPT'] = machinedetail2[12]\n Detail['APPNAME'] = machinedetail2[14]\n Detail['ENNAME_SIMPLE'] = machinedetail2[15]\n Detail['APPNODE_DESC'] = machinedetail2[16]\n Detail['DEPLOYMENT'] = machinedetail2[18]\n Detail['MODEL'] = machinedetail2[8]\n # print Detail\n response.append(Detail)\n return jsonify(response\n )\n\n@news_bp.route('/ename3', methods=['GET','POST'])\ndef test3333():\n print \"test=======================================================\"\n ip = request.args(type=unicode)\n print(ip)\n print(\"+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++\")\n ip2 = ip.to_dict()\n ip3 = ip2['ename']\n print (ip3)\n # detail = MSSoup118()\n # machinedetail = detail.get_machine_detail3(ip3)\n # print ip\n # # print machinedetail\n # response = []\n # machinedetail2 = machinedetail[0]\n # Detail = {}\n # Detail['IP_ADDR'] = machinedetail2[2]\n # Detail['HOSTNAME'] = machinedetail2[3]\n # Detail['DEV_SN'] = machinedetail2[4]\n # Detail['CPU_AMT'] = machinedetail2[5]\n # Detail['CPU_CORE_AMT'] = machinedetail2[6]\n # Detail['MEM_CONTENT'] = machinedetail2[7]\n # Detail['DEV_POSITION'] = machinedetail2[9]\n # Detail['PART_TYPE'] = machinedetail2[10]\n # Detail['SOFT_VERSION'] = machinedetail2[11]\n # Detail['CI_DEPT'] = machinedetail2[12]\n # Detail['APPNAME'] = machinedetail2[14]\n # Detail['ENNAME_SIMPLE'] = machinedetail2[15]\n # Detail['APPNODE_DESC'] = machinedetail2[16]\n # Detail['DEPLOYMENT'] = machinedetail2[18]\n # Detail['MODEL'] = machinedetail2[8]\n # # print Detail\n # response.append(Detail)\n\n@news_bp.route('/rule', methods=['POST'])\ndef postRule():\n data = json.dumps(request.get_json())\n data_dict = json.loads(data.encode(\"utf-8\"))\n print (data_dict)\n data1 = data_dict['SyslogID']\n ms = MSSoup118alarm()\n for i in data1:\n print (i)\n ms.get_del(i)\n # response = {\"test\":\"aaaa\"}\n # return jsonify(response)\n res = ms.get_alarmname()\n response = {\"list\": []}\n print \"res\"\n print(res)\n key = 0\n for i in res:\n test = {}\n test['key'] = key\n test['NodeIP'] = i[0]\n test['NodeAlias'] = i[1]\n test['Component'] = i[2]\n test['SummaryCN'] = i[3]\n test['EventType'] = i[4]\n test['START_TIME'] = time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime(int(i[5])))\n test['Occurence'] = time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime(int(i[6])))\n test['FREQUENCY'] = i[7]\n test['EventNameCN'] = i[8]\n test['CustomerSeverity'] = i[9]\n test['SyslogID'] = i[10]\n print \"test\", test\n key += 1\n response['list'].append(test)\n print (response)\n return jsonify(response\n )\n\n\n\n # data_dict = json.loads(data.encode(\"utf-8\"))\n # if data_dict['method'] == 'merge':\n # print data\n#\n#\n# @news_bp.route('/rule', methods=['GET'])\n# def get_github_trend():\n# ms = MSSoup118alarm()\n# num = 
ms.get_num()\n# response = {\"list\": []}\n# key = 0\n# for i in num:\n# test_father = {}\n# ress = ms.get_context(i[1])\n# key += 1\n# for item in ress:\n# key_child = key*1000\n# test = {}\n# test['NodeIP'] = item[0]\n# test['NodeAlias'] = item[1]\n# test['Component'] = item[2]\n# test['SummaryCN'] = item[3]\n# test['EventType'] = item[4]\n# test['START_TIME'] = time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime(int(item[5])))\n# test['Occurence'] = time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime(int(item[6])))\n# test['FREQUENCY'] = item[7]\n# test['EventNameCN'] = item[8]\n# test['SyslogID'] = item[9]\n# test['FatherEvent'] = item[10]\n# if item[9] == item[10]:\n# test['key'] = key\n# test_father = test\n# if i[0] > 1:\n# test_children = []\n# else:\n# key_child += 1\n# test['key'] = key_child\n# test_children.append(test)\n# test_father['children'] = test_children\n#\n# response['list'].append(test_father)\n#\n# return jsonify(response)\n\n\n\n\n\n# @news_bp.route('/detail3',methods=['GET'])\n# def test2222():\n# ip = request.args\n# ip2 = ip.to_dict()\n# ip3 = ip2['ip']\n# detail = MSSoup118()\n# machinedetail = detail.get_machine_detail3(ip3)\n# # print machinedetail\n# print \"test\"\n# print machinedetail\n# # print machinedetail\n# response = []\n# # id = 1\n# Detail = {}\n# Detail['IP_ADDR'] = machinedetail[2]\n# Detail['HOSTNAME'] = machinedetail[3]\n# Detail['DEV_SN'] = machinedetail[4]\n# Detail['CPU_AMT'] = machinedetail[5]\n# Detail['CPU_CORE_AMT'] = machinedetail[6]\n# Detail['MEM_CONTENT'] = machinedetail[7]\n# Detail['DEV_POSITION'] = machinedetail[9]\n# Detail['PART_TYPE'] = machinedetail[10]\n# Detail['SOFT_VERSION'] = machinedetail[11]\n# Detail['CI_DEPT'] = machinedetail[12]\n# Detail['APPNAME'] = machinedetail[14]\n# Detail['ENNAME_SIMPLE'] = machinedetail[15]\n# Detail['APPNODE_DESC'] = machinedetail[16]\n# Detail['DEPLOYMENT'] = machinedetail[18]\n# Detail['MODEL'] = machinedetail[8]\n# # Detail['ID'] = id\n#\n# response.append(Detail)\n# return jsonify(response)\n\n@news_bp.route('/detail3',methods=['GET'])\ndef get_machinedetail3():\n detail = MSSoup118()\n machinedetail = detail.get_machine_detail2()\n print machinedetail\n response = []\n for i in machinedetail:\n Detail = {}\n Detail['IP_ADDR'] = i[2]\n Detail['APPNAME'] = i[14]\n # if i[14] == Detail['APPNAME']:\n # Detail['IP_ADDR'] = i[2]\n # Detail['APPNAME'] = i[14]\n # print Detail\n response.append(Detail)\n print response\n # for z in response;\n # Detail2 = {}\n # Detail2[]\n return jsonify(response)\n\n@news_bp.route('/fake_list', methods=['GET'])\ndef get_appname_info():\n ms = MSSoup118()\n name = ms.get_appname()\n print name\n response = []\n for i in name:\n test = {\"test1\":{}}\n test['title'] = i[0]\n\ttest['cpu'] = \"10\"\n test['IP_ADDR'] = \"1\"\n test['HOSTNAME'] = \"2\"\n test['DEV_SN'] = \"3\"\n test['CPU_AMT'] = \"4\"\n test['CPU_CORE_AMT'] = \"5\"\n test['MEM_CONTENT'] = \"6\"\n test['DEV_POSITION'] = \"7\"\n test['PART_TYPE'] = \"8\"\n test['SOFT_VERSION'] = \"9\"\n test['CI_DEPT'] = \"10\"\n test['APPNAME'] = \"1\"\n test['ENNAME_SIMPLE'] = \"12\"\n test['APPNODE_DESC'] = \"13\"\n test['DEPLOYMENT'] = \"14\"\n test['MODEL'] = \"15\"\n\tresponse.append(test)\n return jsonify(response)\n\n@news_bp.route('/ename', methods=['GET'])\ndef get_ename_info():\n ms = MSSoup118()\n ename = ms.get_appname()\n print ename\n response = {}\n for i in ename:\n\tresponse['0'] = i[0]\n\tresponse.append(response)\n return jsonify(response)\n\n@news_bp.route('/ename2', methods=['GET'])\ndef 
get_enameip2_info():\n ms = MSSoup118()\n ename = ms.get_machine_enameandip()\n #print ename\n response = []\n response1 = {\"title\": [],\"ip\":[]}\n #print response1['title']\n #print ename\n for i in ename:\n if i[0] in response1['title']:\n b = [1]\n b.append(response1['ip'])\n response1['ip'].append(i[1])\n else:\n response1['title'] = i[0]\n response1['ip'] = i[1]\n print a\n a.append(response1['ip'])\n response1['ip'] = a\n print response1\n # print response1\n response.append(response1)\n #print response\n return jsonify(response)\n\n@news_bp.route('/rule', methods=['GET'])\ndef get_github_trend():\n ms = MSSoup118alarm()\n res = ms.get_alarmname()\n response = {\"list\":[]}\n print \"res\"\n print(res)\n key = 0\n for i in res:\n test = {}\n test['key'] = key\n test['NodeIP'] = i[0]\n test['NodeAlias'] = i[1]\n test['Component'] = i[2]\n test['SummaryCN'] = i[3]\n test['EventType'] = i[4]\n test['START_TIME'] = time.strftime(\"%Y-%m-%d %H:%M:%S\",time.localtime(int(i[5])))\n test['Occurence'] = time.strftime(\"%Y-%m-%d %H:%M:%S\",time.localtime(int(i[6])))\n test['FREQUENCY'] = i[7]\n test['EventNameCN'] = i[8]\n test['CustomerSeverity'] = i[9]\n test['SyslogID'] = i[10]\n print \"test\",test\n key += 1\n response['list'].append(test)\n print (response)\n return jsonify(response\n )\n\n@news_bp.route('/toutiao/posts', methods=['GET'])\ndef get_toutiao_posts():\n toutiao = Toutiao()\n post_list = toutiao.get_posts()\n\n return jsonify(\n message='OK',\n data=post_list\n )\n\n\n\n\n\n# def after_request(response):\n# response.headers['Access-Control-Allow-Origin'] = '*'\n# response.headers['Access-Control-Allow-Methods'] = 'PUT,GET,POST,DELETE'\n# response.headers['Access-Control-Allow-Headers'] ='Content-Type.Authorization'\n# return response\n# def create_app():\n# app = Flask(__name__)\n# app.after_request(after_request())\n" }, { "alpha_fraction": 0.44483399391174316, "alphanum_fraction": 0.4561765193939209, "avg_line_length": 26.70857048034668, "blob_id": "1c475c88856853659518234b25d2818aad9ba3cc", "content_id": "647fb70f69dabc5562142aaa7846fb821c3f22fd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4849, "license_type": "no_license", "max_line_length": 90, "num_lines": 175, "path": "/modules/readfile.py", "repo_name": "alvinye1/poseidon_mobile_appserver", "src_encoding": "UTF-8", "text": "# -*- coding:utf-8 -*-\n__author__ = 'sysop'\n\nimport os\nimport socket\nimport sys\nimport simplejson as json\n#from modules.file_sender import *\n\n\n\nclass NMONfile(object):\n\n def __init__(self,hostname):\n self.host = hostname # hostname\n # self.path = str # directory\n # self.files = os.listdir(self.path) # filenames\n\n def get_names(self):\n return self.files\n\n # def get_first_line(self):\n # return self.files\n\n\n # get messages from socket\n def get_messages(self,ip,port,date):\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.connect((ip, port))\n c = {'type': 'instant_nmon', 'data': date}\n # c = {'type': 'instant_nmon', 'data': '181130'}\n s.send(json.dumps(c).encode(encoding='utf8'))\n\n total_data = ''\n while True:\n data = s.recv(10240000).decode('utf-8')\n if not data:\n break\n total_data += data\n\n # print(\"total_datatotal_datatotal_datatotal_data\",total_data)\n json_total = json.loads(total_data)\n self.files = list(json_total.keys()) # all file names\n self.files.sort()\n #print(len(self.files))\n #print(self.files)\n self.files.remove(\"VM.json\")\n # self.files.remove(\"test.sh\")\n 
self.infos = []\n self.files_content = {}\n i = 0\n #for file in self.files:\n while i < len(self.files):\n\n file = self.files[i]\n\n if(len(json_total[file]) == 0):\n\n self.files.remove(file)\n #i -= 1\n continue\n #else:#print(file)\n\n i += 1\n\n\n content = json_total[file].replace(\",\\n\", \",\")\n content_str = \"[\" + content[0:len(content) - 1] + \"]\"\n content_list = json.loads(content_str, encoding=\"utf-8\")\n\n line_first = content_list[0]\n\n\n content_list[0] = line_first\n\n self.infos.append(line_first)\n self.files_content[file] = content_list\n\n\n self.get_cols_fields()\n print(self.type)\n\n return [self.files,self.cols,self.fields,self.files_content,self.type]\n\n\n\n # result\n # cpu_6 = cpu.replace(\",\\n\",\",\")\n # print(cpu_6)\n # cpu_ff = \"[\"+cpu_6[0:len(cpu_6)-1]+\"]\"\n # cpu_4 = json.loads(cpu_ff,encoding=\"utf-8\")\n # print(cpu_4)\n # print(type(cpu_4))\n\n\n\n\n # handle massages from socket\n def handle_massages(self):\n return self.json_total\n\n\n\n # all files first line\n def get_file_infos(self,):\n self.infos = []\n for file in self.files:\n domain = os.path.abspath(self.path)\n file = os.path.join(domain, file)\n file = open(file, 'r')\n file_json = json.load(file)\n #print(file_json[0])\n file.close()\n line = file_json[0]\n for key in line.keys():\n if (key.find(self.host) != -1):\n #print(\"1 \"+line[key])\n continue\n if (len(key) == 0 or line[key] == '-nan'):\n #print(\"2 \"+line[key])\n continue\n line[key] = float(line[key])\n self.infos.append(line)\n\n\n\n\n # get all cols and fields\n def get_cols_fields(self):\n self.fields = {}\n self.cols = []\n self.type = []\n for info in self.infos:\n\n key_list = []\n keys = list(info.keys())\n i = 0\n type = []\n while i < len(keys):\n if(keys[i].find(self.host) != -1):\n col = keys[i]\n self.cols.append(keys[i])\n keys.remove(keys[i])\n i -=1\n elif(len(keys[i]) == 0 or info[keys[i]] == \"\" or info[keys[i]] == '-nan'):\n keys.remove(keys[i])\n i -= 1\n\n if(keys[i].find(\"%\") != -1):\n type_tmp = \"%\"\n if keys[i] not in type:\n type.append(type_tmp)\n elif(keys[i].find(\"KB/s\") != -1):\n type_tmp = \"KB/s\"\n if keys[i] not in type:\n type.append(type_tmp)\n elif(keys[i].find(\"/s\") != -1):\n type_tmp = \"/s\"\n if keys[i] not in type:\n type.append(type_tmp)\n else:\n type_temp = \" \"\n #else:print(list[i])\n\n i += 1\n self.fields[col] = keys\n\n if (len(type) > 1):\n self.type.append(type[1])\n elif (len(type) == 1):\n self.type.append(type[0])\n else:\n self.type.append(\" \")\n\n return [self.cols, self.fields,self.type]\n" }, { "alpha_fraction": 0.5220356583595276, "alphanum_fraction": 0.5356768369674683, "avg_line_length": 20.670454025268555, "blob_id": "382fa5edd4e0135286f71ce4ae339ec2f5d8c8b7", "content_id": "02b474068650d2840ef5d340c578684443a9d550", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1906, "license_type": "no_license", "max_line_length": 72, "num_lines": 88, "path": "/modules/minion_controller/signal.py", "repo_name": "alvinye1/poseidon_mobile_appserver", "src_encoding": "UTF-8", "text": "# ======================================\n# POSEIDON PROGRAM FILE\n# APPLICATION:\n# Author:Alvin Ye .ICBC\n# Version(Internal):1.0\n# All rights reserved\n# ======================================\n\nimport logging\nimport json\nimport socket\nimport sys\n\n'''\nUsage:\nFirst connect the server:\ns = Singal(host,port)\n\nThen exec command:\nresult = s.run(type,data)\n\nThen close:\ndel s\n'''\n\n\nclass Signal:\n 
\"\"\"to connect to the minion and control or get messages etc.\"\"\"\n\n def __init__(self, host, port):\n self.host = host\n self.port = port\n self.s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.s.connect((self.host, self.port))\n self.s.settimeout(5)\n\n def reconnect(self):\n self.s.close()\n self.s.connect((self.host, self.port))\n self.s.settimeout(5)\n\n def run(self, type, data):\n \"\"\"execute command identified by type arg\"\"\"\n try:\n # eval(\"self.%s\" % type)(data)\n c = {'type': type, 'data': data}\n self.s.send(json.dumps(c).encode(encoding='utf8'))\n except:\n logging.error(\"[SIGNAL]no function found or excute failed.\")\n\n # decode messages\n total_data = ''\n while True:\n data = self.s.recv(10240000).decode('utf-8')\n if not data:\n break\n total_data += data\n\n # get json data\n json_total = json.loads(total_data)\n # return messages\n return json_total\n\n def __del__(self):\n self.s.close()\n\n def nmon(self):\n pass\n\n def instant_nmon(self):\n pass\n\n def instant_script(self):\n pass\n\n def script(self):\n pass\n\n def instant_heartbeat(self):\n pass\n\n def deploy_update(self):\n pass\n\n# if __name__ == '__main__':\n# s = Signal(\"80.2.238.218\",8885)\n# result = s.run(\"instant_script\", \"asdfsdf\")\n# print (result)" }, { "alpha_fraction": 0.44810864329338074, "alphanum_fraction": 0.4675072729587555, "avg_line_length": 28.46666717529297, "blob_id": "4c64699578591377ca3f505cfca8e43b4a44fafa", "content_id": "392096b8e5ca566f8c83eb6fac396884cde2a137", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3117, "license_type": "no_license", "max_line_length": 108, "num_lines": 105, "path": "/modules/database_controller/Mongodb.py", "repo_name": "alvinye1/poseidon_mobile_appserver", "src_encoding": "UTF-8", "text": "#!-*- coding:utf-8-*-\nimport pymongo\n\n\nclass MongoDB(object):\n '''\n # (>) ๅคงไบŽ - $gt\n # (<) ๅฐไบŽ - $lt\n # (>=) ๅคงไบŽ็ญ‰ไบŽ - $gte\n # (<= ) ๅฐไบŽ็ญ‰ไบŽ - $lte\n '''\n def __init__(self, host, port, db, site, user, passwd):\n self.host = host\n self.port = port\n self.db = db\n self.site = site\n self.user = user\n self.passwd = passwd\n self.con = self.connetion()\n\n def connetion(self):\n self.client = pymongo.MongoClient(host=self.host, port=self.port)\n self.client.admin.authenticate(self.user, self.passwd)\n dba = self.client[self.db]\n col = dba[self.site]\n return col\n\n def search(self, sql, types):\n '''\n :param sql: all \"\", one_field {\"fieldname\":\"target\"},some_field {\"fieldname1\":\"1\",\"fieldname2\":\"1\"}\n :param types:\n :return:\n '''\n data = []\n if types == \"all\":\n find = self.con.find()\n elif types == \"one_field\":\n find = self.con.find(sql,{\"_id\": 0})\n elif types == \"some_field\":\n find = self.con.find({}, sql)\n for x in find:\n data.append(x)\n return data\n\n def remove(self, sql, types):\n '''\n many sql={\"fieldname\": {\"$regex\": \"^F\"} }\n one sql = {\"fieldname\":\"target\"}\n '''\n if types == \"one\":\n self.con.delete_one(sql)\n elif types == \"many\":\n self.con.delete_many(sql)\n elif types == \"all\":\n self.con.delete_many({})\n elif types == \"drop\":\n self.con.drop()\n\n def insert(self, sql, types):\n '''\n type = one ,sql = {};\n type = many, sql = [{},{}];\n '''\n if types == \"one\":\n self.con.insert_one(sql)\n elif types == \"many\":\n self.con.insert_many(sql)\n\n def update(self, old, new, types):\n '''\n one old = { \"alexa\": \"10000\" } new = { \"$set\": { \"alexa\": \"12345\" } }\n many old = 
{ \"name\": { \"$regex\": \"^F\" } } new = { \"$set\": { \"alexa\": \"123\" } }\n '''\n if types == \"one\":\n self.con.update_one(old, new)\n elif types == \"many\":\n self.con.update_many(old, new)\n\n def sorted(self, field, types):\n '''\n :param field: only one field\n :param types: up down\n :return:\n '''\n if types == \"up\":\n find = self.con.find().sort(field)\n elif types == \"down\":\n find = self.con.find().sort(field, -1)\n return find\n\n def __del__(self):\n self.client.close()\n\nif __name__ == '__main__':\n # a = MongoDB('80.7.238.136', 8889, 'testposeidon', 'adi', 'root', 'qwert789')\n # b = a.search('aaa', 'all')\n db = MongoDB('80.7.238.136', 8889, 'testposeidon', 'adi', 'root', 'qwert789')\n\n data = db.search(({'$or':[{'CPU':{'$regex':'/2.0/'}}]}), 'one_field')\n print(data)\n search = db.search(({\"IP Address\": \"10.252.1.10\"}), 'one_field')\n print(search)\n # c = a.insert({'name':\"itachi\"},'one')\n # d = a.search('dsad','all')\n # print(d)" }, { "alpha_fraction": 0.5555555820465088, "alphanum_fraction": 0.5677320957183838, "avg_line_length": 21.586206436157227, "blob_id": "bf2587d63df95e2e8b1bc79658ee07ed4ba409d7", "content_id": "e69174caff2a09e49731807ec60367615fa604a2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 657, "license_type": "no_license", "max_line_length": 78, "num_lines": 29, "path": "/modules/net_detection.py", "repo_name": "alvinye1/poseidon_mobile_appserver", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# ======================================\n# POSEIDON PROGRAM FILE\n# APPLICATION:\n# Author:Alvin Ye .ICBC\n# Version(Internal):1.0\n# All rights reserved\n# ======================================\n\nimport socket\nimport ifcfg\n\nPRODUCTION_AREA = '80.7.83.'\n\n\ndef get_address():\n address = ifcfg.default_interface()['inet']\n return address\n\n\ndef area_detect():\n \"\"\"to define whether it's in the production envirorment or the test one\"\"\"\n address = get_address()\n if address.find(PRODUCTION_AREA) == -1:\n # this is a test area\n return 'test'\n else:\n # this is a production area\n return 'production'\n\n\n" }, { "alpha_fraction": 0.4066985547542572, "alphanum_fraction": 0.41626793146133423, "avg_line_length": 22.22222137451172, "blob_id": "f560e6763b41c36b1b7e521c7074b6d218dccd8b", "content_id": "ef5a5f80be5e75ce54c258ec266dae79ba939774", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 209, "license_type": "no_license", "max_line_length": 40, "num_lines": 9, "path": "/modules/minion_controller/__init__.py", "repo_name": "alvinye1/poseidon_mobile_appserver", "src_encoding": "UTF-8", "text": "# ======================================\n# POSEIDON PROGRAM FILE\n# APPLICATION:\n# Author:Alvin Ye .ICBC\n# Version(Internal):1.0\n# All rights reserved\n# ======================================\n\n__all__=['signal']\n" }, { "alpha_fraction": 0.5374841094017029, "alphanum_fraction": 0.5603557825088501, "avg_line_length": 18.524999618530273, "blob_id": "5e0be9c638334902b22aed4a67d3912da1833c27", "content_id": "f42a6ce9a6bc8ed0b3173d533b155e606df81bc3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 787, "license_type": "no_license", "max_line_length": 76, "num_lines": 40, "path": "/app.py", "repo_name": "alvinye1/poseidon_mobile_appserver", "src_encoding": "UTF-8", "text": " #!/usr/bin/env python3\n# ======================================\n# POSEIDON 
PROGRAM FILE\n# APPLICATION:\n# Author:Alvin Ye .ICBC\n# Version(Internal):1.0\n# All rights reserved\n# ======================================\n\nfrom modules.app import create_app\nfrom modules.net_detection import *\n\napp = create_app(app_name=\"poseidon_mobile_appserver\")\n\nif __name__ == '__main__':\n app.run(host=get_address())\n\n# from flask_script import Manager, Server\n# manager = Manager(create_app)\n# manager.add_command('runserver', Server(host='192.168.31.193', port=3345))\n#\n\n# if __name__ == '__main__':\n# manager.run()\n#\n\n\n# from flask import Flask\n# app = Flask(__name__)\n#\n#\n# @app.route('/')\n# def hello_world():\n# return 'Hello World!'\n#\n#\n\n\n# if __name__ == '__main__':\n# app.run()\n" }, { "alpha_fraction": 0.472087562084198, "alphanum_fraction": 0.5250977277755737, "avg_line_length": 38.47530746459961, "blob_id": "6911e940d72bfa62450409f0ffb59c8583dc1a27", "content_id": "c487082043a8184101c2c76973736a66bc81187e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6701, "license_type": "no_license", "max_line_length": 627, "num_lines": 162, "path": "/modules/datamachine.py", "repo_name": "alvinye1/poseidon_mobile_appserver", "src_encoding": "UTF-8", "text": "# -*-coding: utf-8 -*-\nimport time\n\nclass Sourcedata():\n '''\n the source from poseidon_alarm_*\n '''\n def set_format(self, data, name):\n '''\n ๆ นๆฎ่พ“ๅ…ฅ๏ผŒ็”Ÿๆˆๅ‰็ซฏ้œ€่ฆ็š„ๆ•ฐๆฎๆ ผๅผ\n :param data: ๆฅ่‡ชๆ•ฐๆฎๅบ“็š„ๅŽŸๅง‹ๆ•ฐๆฎ\n :param name: ๅŽŸๅง‹ๆ•ฐๆฎไธญ๏ผŒๆฏๆฎตๅฏนๅบ”็š„ๆ•ฐๆฎๅ็งฐ\n :return: ้”ฎๅ€ผๅฏนๆ•ฐๆฎ\n '''\n lendata = len(data)\n lenstr = len(name)\n try:\n sourcedata = {}\n i = 0\n for i in range(lenstr):\n if name[i] in (\"START_TIME\",\"Occurence\"):\n sourcedata[name[i]] =time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime(int(data[i])))\n else:\n sourcedata[name[i]] = data[i]\n return sourcedata\n except:\n print(\"Error data lenth =\", lendata, \"str lenth=\", lenstr)\n\n\nclass Alarmdata():\n '''\n alarm data to table json\n '''\n\n def __init__(self, name, father_field, children_field, aargs):\n '''\n :param name: ๆ•ฐๆฎ่กจๅคด\n :param father_field: ็ˆถIDๅˆ—\n :param children_field: ๅญIDๅˆ—\n :param args: ๅญ˜ๅ…ฅๆ•ฐๆฎ\n '''\n self.name = name\n self.data = aargs\n self.father_field = father_field\n self.children_field = children_field\n self.father_list = []\n self.children_list = []\n self.fatherid_pro = []\n self.fatherid = []\n self.fatherid_sed =[]\n self.format_list = []\n self.merchildrenlist = {}\n self.key = 0\n self.format()\n self.select_fatherid()\n self.select_fatherid_second()\n self.classify_father_chlidren()\n self.merge_children()\n\n def format(self):\n '''\n ๅฏน่พ“ๅ…ฅๆ•ฐๆฎ่ฟ›่กŒๆ ผๅผๅŒ–\n :return: ๆ ผๅผๅŒ–ๅŽๆ•ฐๆฎ๏ผŒ้”ฎๅ€ผๅฏน\n '''\n for i in self.data:\n single_alarm = Sourcedata().set_format(i, self.name)\n self.format_list.append(single_alarm)\n # print(self.format_list)\n\n def select_fatherid(self):\n '''\n ๆ นๆฎๅˆถๅฎškeyๅ€ผ๏ผŒๆ‰พๅ‡บๆ‰€ๆœ‰็š„็ˆถ็ฑป\n :return:fatherid\n '''\n for i in self.format_list:\n if i[self.father_field] == i[self.children_field]:\n self.fatherid_pro.append(i[self.father_field])\n\n def select_fatherid_second(self):\n '''\n ๆ นๆฎๅˆถๅฎškeyๅ€ผ๏ผŒๆ‰พๅ‡บๆ‰€ๆœ‰็š„็ˆถ็ฑป\n :return:fatherid\n '''\n for i in self.format_list:\n if (i[self.father_field] != i[self.children_field]) and (i[self.father_field] not in self.fatherid_pro):\n self.fatherid_sed.append(i[self.father_field])\n self.fatherid = self.fatherid_pro + self.fatherid_sed\n\n def 
classify_father_chlidren(self):\n '''\n ๅฐ†fatherๅ’Œchildrenๅˆ†็ฑป\n :return: ไธ€ไธชlist๏ผŒlist[0]ๆ˜ฏfather๏ผŒlist[1]ๆ˜ฏchildren\n '''\n for i in self.format_list:\n if i[self.father_field] == i[self.children_field]:\n self.father_list.append(i)\n elif i[self.father_field] in self.fatherid_sed:\n self.father_list.append(i)\n else:\n self.children_list.append(i)\n\n def merge_children(self):\n for i in self.fatherid:\n chlidren = []\n for x in self.children_list:\n if i == x[self.father_field]:\n chlidren.append(x)\n self.merchildrenlist[i] = chlidren\n\n def get_value(self, number, keyvalue, *args):\n self.value_number = number\n self.value_key = keyvalue\n self.value_data = args[0]\n for i in self.value_data:\n if i[self.value_key] == self.value_number:\n res = i\n return res\n\n def make_json(self):\n json = {\"list\": []}\n for i in self.fatherid:\n self.key += 1\n # alarm_cluster = {}\n father = self.get_value(i, self.father_field, self.father_list) # ็œŸๆญฃ็š„็ˆถๆŠฅ่ญฆ๏ผŒ้œ€่ฆ่กฅๅ……ๆ–นๆณ•\n father[\"key\"] = self.key\n # father[\"chlidren\"] = [] # ๅญ—ๅ…ธๅขžๅŠ ๅ…ƒ็ด ๆ–นๆณ•alarm_cluster = alarm_cluster.update(father)\n children = []\n # for x in self.merchildrenlist[i]: # ๆ‰พๅˆฐ็ˆถidไธบi็š„ๆŠฅ่ญฆ\n key_children = (self.key +1) * 1000\n if self.merchildrenlist == {}: #ๆ–ฐๅขžๅฝ“ๅญๆŠฅ่ญฆไธบ็ฉบๆ—ถๅˆคๆ–ญ\n json[\"list\"].append(father)\n else:\n x = self.merchildrenlist[i]\n if x != []:\n for y in x:\n key_children += 1\n y['key'] = key_children\n children.append(y)\n father['children'] = children\n # print(alarm_cluster)\n json[\"list\"].append(father)\n else:\n json[\"list\"].append(father)\n # json[\"list\"].append(father)\n return json\n\nif __name__ == '__main__':\n start = time.time()\n print(\"==========test===========\")\n a = Alarmdata((\"name\", \"age\", \"key2\"), \"key2\", \"age\", ((\"a\", \"1\", \"1\"), (\"b\", \"2\", \"2\"), (\"e\", \"5\", \"1\"), (\"f\", \"6\", \"3\"),(\"g\", \"7\", \"4\"),(\"h\", \"8\", \"1\")))\n # a = Alarmdata((\"NodeIP\", \"NodeAlias\", \"Component\", \"SummaryCN\", \"EventType\", \"START_TIME\", \"Occurence\", \"FREQUENCY\", \"EventNameCN\", \"CustomerSeverity\", \"SyslogID\", \"FatherEvent\",\"FLAGBIT\"), \"FatherEvent\",\n # \"SyslogID\",((u'80.16.161.92', u'pdccbccisdbsb53', u'SuSE', u'NTP OFFSET is too large', u'0', u'1543804201', u'1543900201', 161, u'NTP OFFSET TOO LARGE', u'7', 17024, 3, 2), (u'80.20.97.36', u'PDCCBNCTSAPPZH2', u'LINUX', u'\\u4ea4\\u6362\\u7a7a\\u95f4\\u5229\\u7528\\u7387:swap_used_pct=25', 1, u'1543800149', u'1543900325', 835, u'\\u4ea4\\u6362\\u7a7a\\u95f4\\u5229\\u7528\\u7387', u'7', 3, 3, 1), (u'80.32.225.73', u'PDCCBCCISNDM001', u'Windows', u'\\u7cfb\\u7edf\\u5185\\u5b58\\u4f7f\\u7528\\u7387:memoryusage=100', 1, u'1543800159', u'1543899839', 167, u'\\u7cfb\\u7edf\\u5185\\u5b58\\u4f7f\\u7528\\u7387', u'7', 5, 3, 1)))\n # print(a.format_list)\n b = a.make_json()\n print(\"asdsasaddsdsa\",b)\n\n # #a = Alarmdata(,,,\"\",((u'80.16.145.127', u'pdccbebpfdbsro3', u'Linux', u'persondb delete arhcivelog before 1 day successfull...', u'0', u'1542618020', u'1542618020', 1, u'DELETE ARCHIVE LOG', u'7', 14894, 14894), (u'10.255.65.33', u'pdccjmimsdbsk33', u'None', u'name=/,disk_used_pct=82,disk_avail=1808,fs_total=10079', 1, u'1542352208', u'1542384037', 107, u'None', 5, 3644, 3644)))\n # end = time.time()\n # print(str(end - start))\n # a = Sourcedata()\n # b = a.set_format((\"111\"),(\"age\"))\n # print b\n" }, { "alpha_fraction": 0.6336800456047058, "alphanum_fraction": 0.6384593844413757, "avg_line_length": 50.550724029541016, 
"blob_id": "f7c2efdc641c5c7af8bcc1956dd6c96ef5271b7d", "content_id": "5a31787830fd85cf813e2928f97c03f42b2408a4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3603, "license_type": "no_license", "max_line_length": 123, "num_lines": 69, "path": "/modules/alarm_processor/Close_alarm.py", "repo_name": "alvinye1/poseidon_mobile_appserver", "src_encoding": "UTF-8", "text": "# -*-coding: utf-8 -*-\nfrom modules.database_controller.ctl_mysql import *\nimport calendar\nimport time\nimport datetime\nimport logging\n\nclass Close_alarm:\n\n\n def __init__(self):\n \"\"\"ไฝฟ็”จๅ…ฑไบซ็š„ๆ•ฐๆฎๅบ“้“พๆŽฅ\"\"\"\n self.db = Database()\n\n def __del__(self):\n \"\"\"ๅฝ“ๅฎžไพ‹้”€ๆฏๆ—ถๅ…ณ้—ญๆ•ฐๆฎๅบ“้“พๆŽฅ\"\"\"\n self.db.__del__()\n\n def alarm_close(self,id,flagbit):\n\n now_stamp = time.time()\n utc_time = datetime.datetime.utcfromtimestamp(now_stamp)\n #time_datetime = datetime.datetime.now()\n time_utc = calendar.timegm(datetime.datetime.timetuple(utc_time))\n\n if flagbit == 2:\n logging.info(\"Ready to delete syslog alarm ..\")\n\n sql1 = \"UPDATE poseidon_alarm_sys SET END_TIME = '%s'\" % time_utc + \" WHERE SyslogID = '%s'\" % id\n self.db.update(sql1)\n\n sql2 = \"INSERT INTO poseidon_alarm_sys_hist(SyslogID,FatherEvent,Class,START_TIME,END_TIME,Occurence,\" \\\n \"FREQUENCY,Customer,NodeAlias,NodeIP,BusinessName,AppName,AppShortName,ComponentType,Component,\" \\\n \"SubComponent,InstanceId,InstanceValue,EventName,EventNameCN,Type,EventType,CustomerSeverity,\" \\\n \"FirstOccurrence,LastOccurrence,SourceServerSerial,ACK_Time,Close_Time,OwnerGroup,Summary,SummaryCN,\" \\\n \"Tally,Site,OrgID,DevOrgID,ProcessMode,EnrichStatus,MaintainStatus,SMSFlag,Acknowledged,EventStatus,\" \\\n \"ENNAME,FLAGBIT)\" \\\n \"SELECT SyslogID,FatherEvent,Class,START_TIME,END_TIME,Occurence,FREQUENCY,Customer,NodeAlias,NodeIP,\" \\\n \"BusinessName,AppName,AppShortName,ComponentType,Component,SubComponent,InstanceId,InstanceValue,\" \\\n \"EventName,EventNameCN,Type,EventType,CustomerSeverity,FirstOccurrence,LastOccurrence,\" \\\n \"SourceServerSerial,ACK_Time,Close_Time,OwnerGroup,Summary,SummaryCN,Tally,Site,OrgID,DevOrgID,\" \\\n \"ProcessMode,EnrichStatus,MaintainStatus,SMSFlag,Acknowledged,EventStatus,ENNAME,FLAGBIT \" \\\n \"FROM poseidon_alarm_sys WHERE SyslogID = '%d'\" % id\n self.db.insert(sql2)\n\n sql3 = \"DELETE FROM poseidon_alarm_sys WHERE SyslogID = '%s'\" % id\n self.db.delete(sql3)\n\n elif flagbit == 1:\n logging.info(\"Ready to delete dcap alarm ..\")\n # cursor = db.cursor()\n sql4 = \"UPDATE poseidon_alarm_dcap SET END_TIME = '%s'\" % time_utc + \" WHERE DCAPID = '%s'\" % id\n self.db.update(sql4)\n\n sql5 = \"INSERT INTO poseidon_alarm_dcap_hist(DCAPID,FatherEvent,Class,START_TIME,END_TIME,FREQUENCY,\" \\\n \"EventSource,NodeIP,IndicatorName,IndicatorName2,unknown,MoniStatus,Recovery,Occurence,Instance,\" \\\n \"InstanceName,IndicatorValue,Strategy,NodeAlias,Component,EventNameCN,EventType,CustomerSeverity,\" \\\n \"SummaryCN,ENNAME,FLAGBIT)\" \\\n \"SELECT DCAPID,FatherEvent,Class,START_TIME,END_TIME,FREQUENCY,EventSource,NodeIP,IndicatorName,\" \\\n \"IndicatorName2,unknown,MoniStatus,Recovery,Occurence,Instance,InstanceName,IndicatorValue,Strategy,\" \\\n \"NodeAlias,Component,EventNameCN,EventType,CustomerSeverity,SummaryCN,ENNAME,FLAGBIT \" \\\n \"FROM poseidon_alarm_dcap WHERE DCAPID = '%d'\" % id\n self.db.insert(sql5)\n\n sql6 = \"DELETE FROM poseidon_alarm_dcap WHERE DCAPID = '%s'\" % id\n self.db.delete(sql6)\n\n else:\n 
logging.error(\"There is an error when closing alarm\")\n" }, { "alpha_fraction": 0.6487395167350769, "alphanum_fraction": 0.6756302714347839, "avg_line_length": 21.037036895751953, "blob_id": "7388365bcf72de4d3009a37f960dfb370c2b40b8", "content_id": "51d90f26e13b2c70dec28a742b41b40386a9ef52", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 595, "license_type": "no_license", "max_line_length": 48, "num_lines": 27, "path": "/modules/app.py", "repo_name": "alvinye1/poseidon_mobile_appserver", "src_encoding": "UTF-8", "text": "# -*-coding: utf-8 -*-\n\"\"\"\nCreated by jojo at 2018/9/7\nModified by Alvin at 2019/2/20 v2.0\n\"\"\"\n\nfrom flask import Flask\nimport modules.configs as configs\nfrom controllers import blueprints\n\n__all__ = ['create_app']\n\n\ndef create_app(app_name, config=None):\n app = Flask(app_name)\n configure_app(app, config)\n return app\n\n\ndef configure_app(app, config):\n app.config.from_object(configs.BaseConfig())\n if not config:\n config = configs.config_map['dev']\n app.config.from_object(config)\n # register blueprints\n for bp in blueprints:\n app.register_blueprint(bp)\n" }, { "alpha_fraction": 0.5329949259757996, "alphanum_fraction": 0.5369430184364319, "avg_line_length": 27.829267501831055, "blob_id": "c3fece50bd3dcacf51c7971993551e9090ea05de", "content_id": "86a1864b984d65e7c638d6d6ca9c996b7132f725", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3704, "license_type": "no_license", "max_line_length": 163, "num_lines": 123, "path": "/modules/database_controller/db_util.py", "repo_name": "alvinye1/poseidon_mobile_appserver", "src_encoding": "UTF-8", "text": "# -*-coding: utf-8 -*-\n'''\nCreated by jojo at 2018/7/5\n'''\nimport pymysql as MySQLdb\nfrom modules.common_util import get_config\nimport logging\nconfig = get_config()\n\n\nclass MSDB(object):\n \"\"\"\n ๅฐ่ฃ…MySQL็š„ๅขžๅˆ ๆ”นๆŸฅ๏ผŒๅ…ทๆ›ด็ป†่‡ด็š„ๆŸฅ่ฏขๅฐ่ฃ…ๅœจMSSoupไธญ\n \"\"\"\n def __init__(self):\n self.conn = self.get_connect()\n\n def __del__(self):\n self.conn.close()\n\n def get_connect(self):\n \"\"\"\n ไปŽ้…็ฝฎไธญ่ฏปๅ–ๆ•ฐๆฎๅบ“้…็ฝฎ๏ผŒๅปบ็ซ‹่ฟžๆŽฅ\n \"\"\"\n user = config.get(\"mysql\", \"user\")\n passwd = config.get(\"mysql\", \"passwd\")\n host = config.get(\"mysql\", \"host\")\n port = config.getint(\"mysql\", \"port\")\n db = config.get(\"mysql\", \"db\")\n # print(user, passwd, host, port)\n return MySQLdb.connect(user=user,\n passwd=passwd,\n host=host,\n port=port,\n db=db,\n charset=\"utf8\")\n\n def _update(self, sql):\n cursor = self.conn.cursor()\n cursor.execute(sql)\n self.conn.commit()\n cursor.close()\n\n def _select(self, sql, use_dict=False):\n cursor = self.conn.cursor(cursorclass=MySQLdb.cursors.DictCursor) \\\n if use_dict else self.conn.cursor()\n cursor.execute(sql)\n result = cursor.fetchall()\n cursor.close()\n return result\n\n\nclass MSSoup(object):\n \"\"\"\n ๅฐ่ฃ…ไธŽmysql็š„ไบคไบ’\n \"\"\"\n\n def __init__(self):\n \"\"\"ไฝฟ็”จๅ…ฑไบซ็š„ๆ•ฐๆฎๅบ“้“พๆŽฅ\"\"\"\n self.db_conn = MSDB().get_connect()\n\n def __del__(self):\n \"\"\"ๅฝ“ๅฎžไพ‹้”€ๆฏๆ—ถๅ…ณ้—ญๆ•ฐๆฎๅบ“้“พๆŽฅ\"\"\"\n self.db_conn.close()\n\n def _update_db(self, sql):\n \"\"\"ๆ›ดๆ–ฐๆ•ฐๆฎๅบ“ๅญ—ๆฎต็Šถๆ€\"\"\"\n cursor = self.db_conn.cursor()\n cursor.execute(sql)\n self.db_conn.commit()\n cursor.close()\n\n def read_db_data_to_dict(self, sql, dict_enabled=True):\n \"\"\"ๅฐ่ฃ…sqlๅ‘ฝไปคๆ‰ง่กŒ\"\"\"\n result = {}\n try:\n cursor = self.db_conn.cursor()\n cursor.execute(sql)\n result = 
cursor.fetchall()\n        except MySQLdb.Error as e:\n            logging.error(\"DIFF_DATA DB Error: %s\" % e)  # fix: concatenating an exception onto a str raises TypeError\n        finally:\n            cursor.close()\n        return result\n\n    def get_name(self):\n        sql = \"select NodeIP,NodeAlias,Component,SummaryCN,EventType,START_TIME,Occurence,FREQUENCY,EventNameCN,CustomerSeverity from poseidon_alarm_sys limit 2 ;\"\n        res = self.read_db_data_to_dict(sql)\n        return res\n\n    def get_detail(self):\n        sql = \"select * from poseidon_alarm_sys limit 1\"\n        detail = self.read_db_data_to_dict(sql)\n        return detail\n\n    def get_num(self):\n        sql = \"select count(1) as num,FatherEvent \" \\\n              \"from(select NodeIP,NodeAlias,Component,SummaryCN,EventType,\" \\\n              \"START_TIME,Occurence,FREQUENCY,EventNameCN,SyslogID,\" \\\n              \"FatherEvent \" \\\n              \"from poseidon_alarm_sys \" \\\n              \"limit 200) a \" \\\n              \"group by FatherEvent\" \\\n              \";\"\n        res = self.read_db_data_to_dict(sql)\n        return res\n\n    def get_context(self, fatherEvent):\n        sql = \"select NodeIP,NodeAlias,Component,SummaryCN,EventType,\" \\\n              \"START_TIME,Occurence,FREQUENCY,EventNameCN,SyslogID,\" \\\n              \"FatherEvent \" \\\n              \"from poseidon_alarm_sys \" \\\n              \"where FatherEvent='%d'\" % fatherEvent\n        res = self.read_db_data_to_dict(sql)\n        return res\n\n\nif __name__ == '__main__':\n    ms = MSSoup()\n    res = ms.get_name()\n    detail = ms.get_detail()\n    logging.info(detail)\n    logging.info(res)\n" }, { "alpha_fraction": 0.4895994961261749, "alphanum_fraction": 0.5175124406814575, "avg_line_length": 30.696609497070312, "blob_id": "3a2b8e5eb154070c3a310437c5bdc44099ce033a", "content_id": "a98b260a4bebb3ef12162f605688ee67c9a6b292", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 18785, "license_type": "no_license", "max_line_length": 114, "num_lines": 590, "path": "/controllers/news.py", "repo_name": "alvinye1/poseidon_mobile_appserver", "src_encoding": "UTF-8", "text": "# -*-coding: utf-8 -*-\n'''\nCreated by jojo at 2018/9/7\n'''\nimport flask\nimport random\nfrom flask import jsonify\nfrom random import choice\nfrom flask import request\nfrom modules.datamachine import *\nimport simplejson\nfrom modules.database_controller.db_util import MSSoup\nfrom modules.database_controller.db_util_118 import MSSoup118\nfrom modules.readfile import NMONfile\nfrom modules.alarm_processor.Close_alarm import Close_alarm\nfrom modules.minion_controller.signal import Signal\nfrom modules.database_controller.Mongodb import *\nfrom modules.piedata import *\n\nnews_bp = flask.Blueprint(\n    'news',\n    __name__,\n    url_prefix='/api',\n)\n\n\n@news_bp.route('/test20181011', methods=['GET'])\ndef get_appname():\n    app = MSSoup118()\n    appname = app.get_appname()\n    response = []\n    appliststatus = \"/appliststatus/\"\n    status = ['norm.svg']\n    for i in appname:\n        NAME = {}\n        NAME['ENAME'] = i[0]\n        NAME['APP_MAINT'] = i[1]\n        NAME['DEPLOYMENT'] = i[2]\n        NAME['ENNAME_SIMPLE'] = i[3]\n        NAME['CPU'] = random.randint(1, 60)\n        NAME['MEM'] = random.randint(1, 60)\n        NAME['IO'] = random.randint(1, 60)\n        NAME['CONNECT'] = random.randint(1, 60)\n        NAME['STATUS'] = appliststatus + choice(status)\n        NAME['status'] = \"list\"\n        response.append(NAME)\n    return jsonify(response)\n\n\n@news_bp.route('/detail2', methods=['GET'])\ndef get_machinedetail2():\n    detail = MSSoup118()\n    machinedetail = detail.get_machine_detail2()\n    response = []\n    id = 1\n    for i in machinedetail:\n        Detail = {}\n        Detail['IP_ADDR'] = i[2]\n        response.append(Detail)\n    return jsonify(response)\n\n\n@news_bp.route('/detail3', methods=['GET', 'POST'])\ndef test2222():\n    ip = 
request.args\n    ip2 = ip.to_dict()\n    ip3 = ip2['ip']\n    detail = MSSoup118()\n    machinedetail = detail.get_machine_detail3(ip3)\n    response = []\n    machinedetail2 = machinedetail[0]\n    response = [{\"IP_ADDR\": \"IP\", \"DEV_POSITION\": machinedetail2[2]},\n                {\"IP_ADDR\": \"Hostname\", \"DEV_POSITION\": machinedetail2[3]},\n                {\"IP_ADDR\": \"Memory\", \"DEV_POSITION\": machinedetail2[7]},\n                {\"IP_ADDR\": \"CPU\", \"DEV_POSITION\": machinedetail2[5]},\n                {\"IP_ADDR\": \"App short name\", \"DEV_POSITION\": machinedetail2[15]},\n                {\"IP_ADDR\": \"Operating system\", \"DEV_POSITION\": machinedetail2[11]},\n                {\"IP_ADDR\": \"Deployment site\", \"DEV_POSITION\": machinedetail2[18]},\n                {\"IP_ADDR\": \"CI owning department\", \"DEV_POSITION\": machinedetail2[12]}]\n    return jsonify(response)\n\n\n@news_bp.route('/rule', methods=['GET'])\ndef get_github_trend():\n    ms = MSSoup118alarm()\n    res = ms.get_alarmname()\n    dcap = ms.get_alarm_dcap()\n    sum = res + dcap\n    # print(sum)\n    if not sum:\n        json = {\"list\": []}\n        return jsonify(json)\n    else:\n        data = Alarmdata((\"NodeIP\", \"NodeAlias\", \"Component\", \"SummaryCN\", \"EventType\", \"START_TIME\", \"Occurence\",\n                          \"FREQUENCY\", \"EventNameCN\", \"CustomerSeverity\", \"SyslogID\", \"FatherEvent\", \"FLAGBIT\",\n                          \"START_SORT\", \"END_SORT\"),\n                         \"FatherEvent\",\n                         \"SyslogID\", sum)\n        json = data.make_json()\n        return jsonify(json)\n\n\n@news_bp.route('/rule', methods=['POST'])\ndef postRule():\n    data = simplejson.dumps(request.get_json())\n    data_dict = simplejson.loads(data.encode(\"utf-8\"))\n    id = data_dict['SyslogID']\n    print(id)\n    flagbit = data_dict['AlarmType']\n    cl = Close_alarm()\n    for i in range(len(id)):\n        cl.alarm_close(id[i], flagbit[i])\n    ms = MSSoup118alarm()\n    res = ms.get_alarmname()\n    dcap = ms.get_alarm_dcap()\n    sum = res + dcap\n    # print(sum)\n    if not sum:\n        jsons = {\"list\": []}\n        return jsonify(jsons)\n    else:\n        data = Alarmdata((\"NodeIP\", \"NodeAlias\", \"Component\", \"SummaryCN\", \"EventType\", \"START_TIME\", \"Occurence\",\n                          \"FREQUENCY\", \"EventNameCN\", \"CustomerSeverity\", \"SyslogID\", \"FatherEvent\", \"FLAGBIT\",\n                          \"START_SORT\"),\n                         \"FatherEvent\",\n                         \"SyslogID\", sum)\n        jsons = data.make_json()\n        return jsonify(jsons)\n\n\n@news_bp.route('/detail3', methods=['GET'])\ndef get_machinedetail3():\n    detail = MSSoup118()\n    machinedetail = detail.get_machine_detail2()\n    print(machinedetail)\n    response = []\n    for i in machinedetail:\n        Detail = {}\n        Detail['IP_ADDR'] = i[2]\n        Detail['APPNAME'] = i[14]\n        response.append(Detail)\n    print(response)\n\n    return jsonify(response)\n\n\n@news_bp.route('/ename', methods=['GET'])\ndef get_ename_info():\n    ms = MSSoup118()\n    ename = ms.get_appname()\n    # fix: the original built a dict and then called .append() on it (AttributeError); collect rows in a list instead\n    response = []\n    for i in ename:\n        response.append({'0': i[0]})\n    return jsonify(response)\n\n\n@news_bp.route('/ename2', methods=['GET'])\ndef get_enameip2_info():\n    ms = MSSoup118()\n    ename = ms.get_machine_enameandip()\n    # rows arrive ordered by APPNAME, so group consecutive rows into {title, ip-list} entries\n    # (the original loop referenced an undefined name 'a' and kept re-appending one shared dict)\n    response = []\n    current = None\n    for i in ename:\n        if current is not None and i[0] == current['title']:\n            current['ip'].append(i[1])\n        else:\n            current = {'title': i[0], 'ip': [i[1]]}\n            response.append(current)\n    return 
jsonify(response)\n\n\n@news_bp.route('/merge', methods=['POST'])\ndef get_merge_posts():\n data = simplejson.dumps(request.get_json())\n data_dict = simplejson.loads(data.encode(\"utf-8\"))\n ms = MSSoup118alarm()\n chlidrenid = data_dict[\"SyslogID\"]\n childrentype = data_dict[\"AlarmType\"]\n newfather = data_dict[\"newFahersyslogid\"]\n for i in range(len(chlidrenid)):\n print(type(childrentype[i]), childrentype[i])\n if childrentype[i] == 1:\n ms.merge_alarm_dcap(newfather, chlidrenid[i])\n else:\n ms.merge_alarm_sys(newfather, chlidrenid[i])\n return jsonify(\n message='OK'\n )\n\n\n\n@news_bp.route('/appenamesimple', methods=['POST']) # get all ips from cardlist\ndef get_app_ips():\n # app =request.args.to_dict()\n # print(\"apppppppppppppppppppp\",app)\n # app_dict = app\n app = simplejson.dumps(request.get_json())\n app_dict = simplejson.loads(app.encode(\"utf-8\"))\n response = []\n lista = {}\n webnum = 0\n dbsnum = 0\n appnum = 0\n\n if len(app_dict) == 2:\n app_name = app_dict['ename']\n ms = MSSoup118()\n ips = ms.get_app_ip(app_name)\n for i in ips:\n # response.append(i)\n Cluster = {}\n Cluster['IP'] = i[0]\n Cluster['HOSTNAME'] = i[1]\n if \"web\" in i[1]:\n Cluster['Pic'] = \"/clustersvg/web.svg\"\n webnum += 1\n elif \"dbs\" in i[1]:\n Cluster['Pic'] = \"/clustersvg/database.svg\"\n dbsnum += 1\n elif \"app\" in i[1]:\n Cluster['Pic'] = \"/clustersvg/APP.svg\"\n appnum += 1\n else:\n Cluster['Pic'] = \"/clustersvg/unknown.svg\"\n response.append(Cluster)\n # print(\"===========================\",response)\n # print(lista)\n testdata = [\n {\n \"IP_ADDR\": \"APP\",\n \"total\": appnum,\n \"erro\": \"0\",\n \"normal\": appnum,\n },\n {\n \"IP_ADDR\": \"ๆ•ฐๆฎๅบ“\",\n \"total\": dbsnum,\n \"erro\": \"0\",\n \"normal\": dbsnum,\n },\n {\n \"IP_ADDR\": \"WEB\",\n \"total\": webnum,\n \"erro\": \"0\",\n \"normal\": webnum,\n },\n {\n \"IP_ADDR\": \"IASS\",\n \"total\": \"0\",\n \"erro\": \"0\",\n \"normal\": \"0\",\n },\n {\n \"IP_ADDR\": \"PASS\",\n \"total\": \"0\",\n \"erro\": \"0\",\n \"normal\": \"0\",\n },\n {\n \"IP_ADDR\": \"VMWARE\",\n \"total\": \"10\",\n \"erro\": \"0\",\n \"normal\": \"10\",\n },\n\n ];\n lista['Cluster'] = response\n lista['Testdata'] = testdata\n return jsonify(lista)\n\n else:\n return jsonify(response)\n\n\n@news_bp.route('/get_History_Alarm', methods=['POST'])\ndef get_History_Alarm_Cluster():\n fname = simplejson.dumps(request.get_json())\n app_dict = simplejson.loads(fname.encode(\"utf-8\"))\n print(\"ssssssssssssssssssssss\", app_dict['Fname'])\n ms = MSSoup118alarm()\n dcapalarm = ms.history_alarm_dcap_cluster(app_dict['Fname'])\n sysalarm = ms.history_alarm_sys_cluster(app_dict['Fname'])\n sum = dcapalarm + sysalarm\n if not sum:\n jsons = {\"list\": []}\n return jsonify(jsons)\n else:\n data = Alarmdata((\"NodeIP\", \"NodeAlias\", \"Component\", \"SummaryCN\", \"EventType\", \"START_TIME\", \"Occurence\",\n \"FREQUENCY\", \"EventNameCN\", \"CustomerSeverity\", \"SyslogID\", \"FatherEvent\", \"FLAGBIT\"),\n \"FatherEvent\",\n \"SyslogID\", sum)\n jsons = data.make_json()\n return jsonify(jsons)\n\n\n@news_bp.route('/get_History_Alarm_Detail', methods=['POST'])\ndef get_History_Alarm_Detail():\n fname = simplejson.dumps(request.get_json())\n app_dict = simplejson.loads(fname.encode(\"utf-8\"))\n ms = MSSoup118alarm()\n dcapalarm = ms.history_alarm_dcap_detail(app_dict['Fname'])\n sysalarm = ms.history_alarm_sys_detail(app_dict['Fname'])\n sum = dcapalarm + sysalarm\n if not sum:\n jsons = {\"list\": []}\n return jsonify(jsons)\n else:\n 
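# Alarmdata (imported via modules.datamachine) evidently groups these flat rows by FatherEvent, keyed on SyslogID, before make_json() serializes them -- the same pattern as the /rule endpoint above.\n        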
data = Alarmdata((\"NodeIP\", \"NodeAlias\", \"Component\", \"SummaryCN\", \"EventType\", \"START_TIME\", \"Occurence\",\n \"FREQUENCY\", \"EventNameCN\", \"CustomerSeverity\", \"SyslogID\", \"FatherEvent\", \"FLAGBIT\"),\n \"FatherEvent\",\n \"SyslogID\", sum)\n jsons = data.make_json()\n return jsonify(jsons)\n\n\n@news_bp.route('/get_nmon_data', methods=['POST'])\ndef get_nmon_data():\n nmon = simplejson.dumps(request.get_json())\n nmon_dict = simplejson.loads(nmon.encode(\"utf-8\"))\n nmon_param = nmon_dict['payload']\n nmon_ip = nmon_param[0]\n nmon_host = nmon_param[1]\n\n #nmon_host = \"poseidon_YHZ\"\n\n #nmon_ip = \"80.2.238.218\"\n # nmon_ip = \"80.2.238.225\"\n try:\n nmon = NMONfile(nmon_host)\n [files, cols, fields, files_content,type] = nmon.get_messages(nmon_ip, 8885,'')\n print(files)\n return jsonify([files, cols, fields, files_content,type])\n except:\n mess = [{\"mess\":\"ok\"}]\n print(mess)\n return jsonify(mess)\n\n\n@news_bp.route('/get_Search_Info', methods=['POST'])\ndef get_search_data():\n fname2 = simplejson.dumps(request.get_json())\n app_dict2 = simplejson.loads(fname2.encode(\"utf-8\"))\n ms = MSSoup118alarm()\n # print(\"app_dict['Searchid']app_dict['Searchid']\", app_dict2['Searchid'])\n dcapalarm = ms.search_alarm_dcap(app_dict2['Searchid'])\n sysalarm = ms.search_alarm_sys(app_dict2['Searchid'])\n # print(\"dcapalarm\",dcapalarm)\n # print(\"sysalarm\",sysalarm)\n sum = dcapalarm + sysalarm\n # print(\"sumsumsumsumsumsumsum\",sum)\n if not sum:\n jsons = {\"list\": []}\n return jsonify(jsons)\n else:\n data = Alarmdata((\"NodeIP\", \"NodeAlias\", \"Component\", \"SummaryCN\", \"EventType\", \"START_TIME\", \"Occurence\",\n \"FREQUENCY\", \"EventNameCN\", \"CustomerSeverity\", \"SyslogID\", \"FatherEvent\", \"FLAGBIT\"),\n \"FatherEvent\",\n \"SyslogID\", sum)\n jsons = data.make_json()\n return jsonify(jsons)\n\n\n# get_one_data\n@news_bp.route('/get_one_data', methods=['POST'])\ndef get_one_data():\n command_origin = simplejson.dumps(request.get_json())\n command_dict = simplejson.loads(command_origin.encode(\"utf-8\"))\n command_param = command_dict['payload']\n command_host = command_param[0]\n command_port = command_param[1]\n command = command_param[2]\n command_last = (command.splitlines())[-1]\n try:\n s = Signal(command_host,8885)\n result = s.run('instant_script', command_last)\n if result == []:\n result = \"SUCCESS\"\n return jsonify(result)\n except:\n err = \"no minion\"\n return jsonify(err)\n\n\n\n# get_cardlistmasteripsearch\n@news_bp.route('/cardlistmasteripsearch', methods=['POST'])\ndef cardlistmasteripsearch():\n ip = simplejson.dumps(request.get_json())\n ip2 = simplejson.loads(ip.encode(\"utf-8\"))\n ip3 = ip2['ipinfosearch']\n detail = MSSoup118()\n machinedetail = detail.get_machine_detail3_cardlistsearch(ip3)\n respons = []\n appliststatus = \"/appliststatus/\"\n status = ['norm.svg']\n if machinedetail != {}:\n for i in machinedetail:\n NAME = {}\n NAME['ENAME'] = i[0]\n NAME['APP_MAINT'] = i[1]\n NAME['status'] = \"detail\"\n NAME['CPU'] = random.randint(1, 60)\n NAME['MEM'] = random.randint(1, 60)\n NAME['IO'] = random.randint(1, 60)\n NAME['CONNECT'] = random.randint(1, 60)\n NAME['STATUS'] = appliststatus + choice(status)\n respons.append(NAME)\n return jsonify(respons\n )\n else:\n messs = [{\"mess\": \"ok\", \"status\": \"detail\"}]\n return jsonify(messs)\n\n\n\n# get_cardlistipsearch\n@news_bp.route('/cardlistipsearch', methods=['POST'])\ndef cardlistipsearch():\n app = simplejson.dumps(request.get_json())\n app_dict = 
simplejson.loads(app.encode(\"utf-8\"))\n response = []\n lista = {}\n webnum = 0\n dbsnum = 0\n appnum = 0\n if len(app_dict) == 2:\n ms = MSSoup118()\n ips = ms.get_app_ip_hostname(app_dict['ipinfo'])\n for i in ips:\n # response.append(i)\n Cluster = {}\n Cluster['IP'] = app_dict['ipinfo']\n Cluster['HOSTNAME'] = i[0]\n if \"web\" in i[0]:\n Cluster['Pic'] = \"/clustersvg/web.svg\"\n webnum += 1\n elif \"dbs\" in i[0]:\n Cluster['Pic'] = \"/clustersvg/database.svg\"\n dbsnum += 1\n elif \"app\" in i[0]:\n Cluster['Pic'] = \"/clustersvg/APP.svg\"\n appnum += 1\n else:\n Cluster['Pic'] = \"/clustersvg/unknown.svg\"\n response.append(Cluster)\n # print(\"===========================\",response)\n # print(lista)\n testdata = [\n {\n \"IP_ADDR\": \"APP\",\n \"total\": appnum,\n \"erro\": \"0\",\n \"normal\": appnum,\n },\n {\n \"IP_ADDR\": \"ๆ•ฐๆฎๅบ“\",\n \"total\": dbsnum,\n \"erro\": \"0\",\n \"normal\": dbsnum,\n },\n {\n \"IP_ADDR\": \"WEB\",\n \"total\": webnum,\n \"erro\": \"0\",\n \"normal\": webnum,\n },\n {\n \"IP_ADDR\": \"IASS\",\n \"total\": \"0\",\n \"erro\": \"0\",\n \"normal\": \"0\",\n },\n {\n \"IP_ADDR\": \"PASS\",\n \"total\": \"0\",\n \"erro\": \"0\",\n \"normal\": \"0\",\n },\n {\n \"IP_ADDR\": \"VMWARE\",\n \"total\": \"10\",\n \"erro\": \"0\",\n \"normal\": \"10\",\n },\n\n ];\n lista['Cluster'] = response\n lista['Testdata'] = testdata\n return jsonify(lista)\n\n else:\n return jsonify(response)\n\n@news_bp.route('/adi_list', methods=['GET'])\ndef get_adi_list():\n db = MongoDB('80.7.238.136', 8889, 'testposeidon', 'adi', 'root', 'qwert789')\n data = db.search({\"_id\": 0}, 'some_field')\n return jsonify(data)\n\n@news_bp.route('/adi_search', methods=['POST'])\ndef get_adi_search():\n ip = simplejson.dumps(request.get_json())\n ip2 = simplejson.loads(ip.encode(\"utf-8\"))\n ip3 = ip2['ipinfosearch']\n # print(ip3)\n if ip3 == \"\":\n db = MongoDB('80.7.238.136', 8889, 'testposeidon', 'adi', 'root', 'qwert789')\n data = db.search({\"_id\": 0}, 'some_field')\n return jsonify(data)\n else:\n db = MongoDB('80.7.238.136', 8889, 'testposeidon', 'adi', 'root', 'qwert789')\n search = db.search(({\"IP Address\":ip3}), 'one_field')\n # print(search)\n return jsonify(search)\n\n@news_bp.route('/one_list', methods=['GET'])\ndef get_onelist():\n db = MongoDB('80.7.238.136', 8889, 'test', 'one', 'root', 'qwert789')\n data = db.search({\"_id\": 0}, 'some_field')\n return jsonify(data)\n\n@news_bp.route('/get_Search_Info_One', methods=['POST'])\ndef get_one_search():\n ip = simplejson.dumps(request.get_json())\n ip2 = simplejson.loads(ip.encode(\"utf-8\"))\n ip3 = ip2['ipinfosearch']\n print(ip3)\n if ip3 == \"\":\n db = MongoDB('80.7.238.136', 8889, 'test', 'one', 'root', 'qwert789')\n data = db.search({\"_id\": 0}, 'some_field')\n return jsonify(data)\n else:\n db = MongoDB('80.7.238.136', 8889, 'test', 'one', 'root', 'qwert789')\n search = db.search(({\"ip\":ip3}), 'one_field')\n # print(search)\n return jsonify(search)\n\n@news_bp.route('/homedata', methods=['GET'])\ndef get_home_data():\n data = {}\n try:\n d = homedata(86400)\n pie = d.piedata()\n total = d.alarm_total()\n data['pie'] = pie\n data['totalalarm'] = total\n return jsonify(data)\n except:\n data['pie'] = 0\n data['totalalarm'] = 0\n return jsonify(data)\n\n@news_bp.route('/mobile_alarm', methods=['POST','GET'])\ndef get_mobile_alarm():\n data = [\n {\n 'title': '80.7.83.119',\n 'extra': '30',\n 'key': '1',\n 'alarm': 'Poseidon minion 20ๅˆ†้’Ÿๆœชๆ”ถๅˆฐ่ฟ”ๅ›žๅ€ผ',\n 'starttime': '2019-02-19 02:50:02',\n 
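# NOTE: this endpoint returns hard-coded demo rows (see the print(\"test_mobile\") call below); the timestamps are placeholders.\n            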
'currenttime': '2019-02-19 12:00:01',\n        },\n        {\n            'title': '80.7.83.118',\n            'extra': '300',\n            'key': '2',\n            'alarm': 'Poseidon minion 30ๅˆ†้’Ÿๆœชๆ”ถๅˆฐ่ฟ”ๅ›žๅ€ผ',\n            'starttime': '2019-02-19 22:50:02',\n            'currenttime': '2019-02-19 23:00:01',\n        },\n    ]\n    print(\"test_mobile\")\n    return jsonify(data)\n" }, { "alpha_fraction": 0.5592089891433716, "alphanum_fraction": 0.570016086101532, "avg_line_length": 30.287769317626953, "blob_id": "e861102cbd63d021d196919e65f5832b46e9aaa2", "content_id": "c92d2036630a919f4edf8cbfd0a4dbd47cbb8f70", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4507, "license_type": "no_license", "max_line_length": 185, "num_lines": 139, "path": "/modules/database_controller/db_util_118.py", "repo_name": "alvinye1/poseidon_mobile_appserver", "src_encoding": "UTF-8", "text": "# -*-coding: utf-8 -*-\n'''\nCreated by jojo at 2018/7/5\n'''\nimport pymysql as MySQLdb\nfrom modules.common_util import get_config\n\nconfig = get_config()\n\n\nclass MSDB(object):\n    \"\"\"\n    Wraps basic MySQL CRUD operations; the finer-grained query helpers are wrapped in MSSoup\n    \"\"\"\n\n    def __init__(self):\n        self.conn = self.get_connect()\n\n    def __del__(self):\n        self.conn.close()\n\n    def get_connect(self):\n        \"\"\"\n        Read the database settings from the config and open the connection\n        \"\"\"\n        user = config.get(\"mysql118\", \"user\")\n        passwd = config.get(\"mysql118\", \"passwd\")\n        host = config.get(\"mysql118\", \"host\")\n        port = config.getint(\"mysql118\", \"port\")\n        db = config.get(\"mysql118\", \"db\")\n        # print(user, passwd, host, port)\n        return MySQLdb.connect(user=user,\n                               passwd=passwd,\n                               host=host,\n                               port=port,\n                               db=db,\n                               charset=\"utf8\")\n\n    def _update(self, sql):\n        cursor = self.conn.cursor()\n        cursor.execute(sql)\n        self.conn.commit()\n        cursor.close()\n\n    def _select(self, sql, use_dict=False):\n        cursor = self.conn.cursor(cursorclass=MySQLdb.cursors.DictCursor) \\\n            if use_dict else self.conn.cursor()\n        cursor.execute(sql)\n        result = cursor.fetchall()\n        cursor.close()\n        return result\n\n\nclass MSSoup118(object):\n    \"\"\"\n    Wraps the interaction with MySQL\n    \"\"\"\n\n    def __init__(self):\n        \"\"\"Use the shared database connection\"\"\"\n        self.db_conn = MSDB().get_connect()\n\n    def __del__(self):\n        \"\"\"Close the database connection when this instance is destroyed\"\"\"\n        self.db_conn.close()\n\n    def _update_db(self, sql):\n        \"\"\"Update field state in the database\"\"\"\n        cursor = self.db_conn.cursor()\n        cursor.execute(sql)\n        self.db_conn.commit()\n        cursor.close()\n\n    def read_db_data_to_dict(self, sql, dict_enabled=True):\n        \"\"\"Wrap SQL statement execution\"\"\"\n        result = {}\n        try:\n            cursor = self.db_conn.cursor()\n            cursor.execute(sql)\n            result = cursor.fetchall()\n        except MySQLdb.Error as e:\n            print(\"DIFF_DATA DB Error: %s\" % e)  # fix: str + exception raises TypeError\n        finally:\n            cursor.close()\n        return result\n\n    def get_appname(self):\n        sql = \"select distinct APPNAME,APP_MAINT,DEPLOYMENT,ENNAME_SIMPLE from poseidon_app_info;\"\n        res = self.read_db_data_to_dict(sql)\n        return res\n\n    def get_machine_detail(self):\n        sql = \"select * from poseidon_server_info a,poseidon_app_info b where a.APPNODECI = b.APPNODECI and IP_ADDR limit 200;\"\n        detail = self.read_db_data_to_dict(sql)\n        return detail\n\n    def get_machine_detail2(self):\n        sql = \"select * from poseidon_server_info a,poseidon_app_info b where a.APPNODECI = b.APPNODECI and IP_ADDR limit 40;\"\n        detail2 = self.read_db_data_to_dict(sql)\n        return detail2\n\n    def get_machine_detail3(self, ip):\n        sql = \"select * from poseidon_server_info a,poseidon_app_info b where a.APPNODECI = 
b.APPNODECI and IP_ADDR = '%s'\" % (\n ip)\n detail3 = self.read_db_data_to_dict(sql)\n return detail3\n\n def get_machine_detail3_cardlistsearch(self, ip):\n sql = \"select IP_ADDR,HOSTNAME from poseidon_server_info a,poseidon_app_info b where a.APPNODECI = b.APPNODECI and CONCAT(`IP_ADDR`,`HOSTNAME`,`APPNAME`) like '%{0}%';\" .format(\n ip)\n detail3 = self.read_db_data_to_dict(sql)\n return detail3\n\n def get_machine_enameandip(self):\n sql = \"SELECT DISTINCT b.APPNAME, a.IP_ADDR FROM poseidon_server_info a,poseidon_app_info b WHERE a.APPNODECI = b.APPNODECI ORDER BY b.APPNAME limit 500;\"\n enameandip = self.read_db_data_to_dict(sql)\n return enameandip\n\n def get_app_ip(self, appname):\n sql = \"select IP_ADDR,HOSTNAME from poseidon_server_info where APPNODECI in (select APPNODECI from poseidon_app_info where APPNAME = '%s')\" % (\n appname)\n appip = self.read_db_data_to_dict(sql)\n return appip\n\n def get_app_ip_hostname(self, appname):\n sql = \"select HOSTNAME from poseidon_server_info where IP_ADDR = '%s'\" % (\n appname)\n appip = self.read_db_data_to_dict(sql)\n return appip\n\n\n\nif __name__ == '__main__':\n app = MSSoup118()\n # res = ms.get_name()\n # detail = ms.get_detail()\n # print(detail)\n # print(res)\n # print(\"111\")\n" }, { "alpha_fraction": 0.5073794722557068, "alphanum_fraction": 0.5116431713104248, "avg_line_length": 28.038095474243164, "blob_id": "3538b66b004dc53633408d05b32122bb70d8d7d5", "content_id": "221f9d31b3912bf1c42e9d5fef9348715fcbea68", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3057, "license_type": "no_license", "max_line_length": 91, "num_lines": 105, "path": "/modules/database_controller/db_util_test.py", "repo_name": "alvinye1/poseidon_mobile_appserver", "src_encoding": "UTF-8", "text": "# -*-coding: utf-8 -*-\n'''\nCreated by jojo at 2018/7/5\n'''\nimport pymysql as MySQLdb\n# import MySQLdb\nfrom modules.common_util import get_config\n\nconfig = get_config()\n\n\nclass MSDB(object):\n def __init__(self, db_label):\n self.db_label = db_label\n self.conn = self.get_connect()\n\n def __del__(self):\n self.conn.close()\n\n def get_connect(self):\n user = config.get(self.db_label, \"user\")\n passwd = config.get(self.db_label, \"passwd\")\n host = config.get(self.db_label, \"host\")\n port = config.getint(self.db_label, \"port\")\n db = config.get(self.db_label, \"db\")\n print('============================= get_connect ==========================')\n print(user)\n print(passwd)\n print(host)\n print(port)\n print(db)\n return MySQLdb.connect(user=user,\n passwd=passwd,\n host=host,\n port=port,\n db=db,\n charset=\"utf8\")\n\n def _update(self, sql):\n cursor = self.conn.cursor()\n cursor.execute(sql)\n self.conn.commit()\n cursor.close()\n\n def _select(self, sql, use_dict=False):\n cursor = self.conn.cursor()\n cursor.execute(sql)\n result = cursor.fetchall()\n cursor.close()\n return result\n\n\nclass MSSoup(object):\n def __init__(self, db_name):\n # print('===============================ๅฐ่ฏ•่ฟžๆŽฅ==============================')\n self.db_conn = MSDB(db_name).get_connect()\n print('lianjie')\n\n def __del__(self):\n self.db_conn.close()\n\n def _update_db(self, sql):\n cursor = self.db_conn.cursor()\n cursor.execute(sql)\n self.db_conn.commit()\n cursor.close()\n\n def read_db_data_to_dict(self, sql, dict_enabled=True):\n result = {}\n try:\n cursor = self.db_conn.cursor()\n cursor.execute(sql)\n result = cursor.fetchall()\n except MySQLdb.Error as e:\n 
print(\"DB error: %s\" % e)  # fix: the original called an undefined 'logger' and concatenated an exception onto a str\n        finally:\n            cursor.close()\n        return result\n\n    def get_appname(self):\n        sql = \"select APPNAME from cmdb_app_info limit 20\"\n        result = self.read_db_data_to_dict(sql)\n        return result\n\n    def get_totalnum(self):\n        sql = \"select count(1) from monitor_message\"\n        result = self.read_db_data_to_dict(sql)\n        return result\n\n    def get_alarminfo(self, start, end):\n        print(start, end)\n        sql = \"select * from monitor_message where msg_id BETWEEN \" + str(start) \\\n              + \" and \" + str(end)\n        result = self.read_db_data_to_dict(sql)\n        return result\n\n\nif __name__ == '__main__':\n    db_map = {'mysql': 'cmdb_app_info', 'mysql_alart': 'monitor_message'}\n    # db_name = 'mysql'\n    db_label = 'mysql'\n    ms = MSSoup(db_label)\n    sql = \"select * from \" + db_map[db_label] + \" limit 20\"\n    result_cmdb = ms.read_db_data_to_dict(sql)\n    print(result_cmdb)\n" }, { "alpha_fraction": 0.42454493045806885, "alphanum_fraction": 0.4357016980648041, "avg_line_length": 23.314285278320312, "blob_id": "0303f065270caca6c8658c1da9dab097049853a7", "content_id": "9a71cc44fe2486ba66192169359b9372dab75f85", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1703, "license_type": "no_license", "max_line_length": 61, "num_lines": 70, "path": "/modules/minion_controller/remote_shell.py", "repo_name": "alvinye1/poseidon_mobile_appserver", "src_encoding": "UTF-8", "text": "# ======================================\n# POSEIDON PROGRAM FILE\n# APPLICATION:\n# Author:Alvin Ye .ICBC\n# Version(Internal):1.0\n# All rights reserved\n# ======================================\n\nimport pty\nimport tty\nimport select\nimport os\nimport time\nimport signal\nimport socket\n\n\ndef hup_handle(signum, frame):\n    socket.sock.send(\"\\n\")\n    socket.sock.close()\n    raise SystemExit\n\n\ndef remote_bash(conn, addr):\n    m, s = pty.openpty()\n    print(os.ttyname(s))\n    CHILD = 0\n    pid = os.fork()\n    if pid == CHILD:\n        os.setsid()\n        os.close(m)\n        os.dup2(s, 0)\n        os.dup2(s, 1)\n        os.dup2(s, 2)\n\n        tmp_fd = os.open(os.ttyname(s), os.O_RDWR)\n        os.close(tmp_fd)\n        os.execlp(\"/bin/bash\", \"/bin/bash\")\n        os.close(s)\n    else:\n        os.close(s)\n        signal.signal(signal.SIGINT, hup_handle)\n        fds = [m, conn]\n\n        mode = tty.tcgetattr(0)\n        # tty.setraw(0)\n        try:\n            while True:\n                if not conn.connect_ex(addr): raise Exception\n                r, w, e = select.select(fds, [], [])\n\n                if m in r:\n                    data = os.read(m, 1024)\n                    if data:\n                        conn.send(data)\n                    else:\n                        fds.remove(m)\n                if conn in r:\n                    data = conn.recv(1024)\n                    if not data:\n                        fds.remove(conn)\n                        conn.close()\n                        socket.sock.close()\n                    if data:\n                        os.write(m, data)\n        except:\n            conn.close()\n            socket.sock.close()\n            raise SystemExit\n    os.close(m)\n\n" }, { "alpha_fraction": 0.6008264422416687, "alphanum_fraction": 0.6024793386459351, "avg_line_length": 24.1875, "blob_id": "d3e10adbc9adcec95942aa23c3a92b4edcd624dbeb35", "content_id": "f600dd640896fc557d7ffc321cfe2bd6dd574108", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1210, "license_type": "no_license", "max_line_length": 96, "num_lines": 48, "path": "/modules/utility.py", "repo_name": "alvinye1/poseidon_mobile_appserver", "src_encoding": "UTF-8", "text": "# ======================================\n# POSEIDON PROGRAM FILE\n# APPLICATION:\n# Author:Alvin Ye .ICBC\n# Version(Internal):1.0\n# All rights reserved\n# ======================================\nimport time\nimport configparser\nimport logging\nimport os\n\n\ndef log(string):\n    t = 
time.strftime(r\"%Y-%m-%d_%H-%M-%S\", time.localtime())\n print(\"[%s]%s\" % (t, string))\n\n\ndef duplicate(one_list):\n # list prove to remove the same alarm in a moment\n temp_list = []\n for one in one_list:\n if one not in temp_list:\n temp_list.append(one)\n return temp_list\n\n\nclass GetConfig:\n def __init__(self):\n pwd = os.path.dirname(os.path.realpath(__file__))\n conf_file = os.path.join(pwd, \"../conf\", \"poseidon_server.conf\")\n self.config = configparser.ConfigParser()\n self.config.read(conf_file)\n\n def get(self, section, option):\n return self.config.get(section=section, option=option)\n\n\ndef log_level():\n cfg = GetConfig()\n loglevel = cfg.get('BASIC', 'LOG_LEVEL')\n logging.basicConfig(level=loglevel, format='%(asctime)s %(name)s %(levelname)s:%(message)s')\n\n\n# initialize log level status\nlog_level()\n# initialize configuration\nconfig = GetConfig()\n\n" }, { "alpha_fraction": 0.47482776641845703, "alphanum_fraction": 0.4833068251609802, "avg_line_length": 29.934425354003906, "blob_id": "2884b26e6ce79daed59558b016c4b75d9f2c7af2", "content_id": "0458df3e4942b0b7c0ad3e8f2c6fe4731b16cdef", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1911, "license_type": "no_license", "max_line_length": 76, "num_lines": 61, "path": "/modules/piedata.py", "repo_name": "alvinye1/poseidon_mobile_appserver", "src_encoding": "UTF-8", "text": "# -*-coding: utf-8 -*-\nfrom modules.database_controller.db_util_118_alarm import MSSoup118alarm\nimport datetime\nimport time\n\nclass homedata(object):\n def __init__(self, pietime):\n ms = MSSoup118alarm()\n self.pietime = pietime\n self.time = int(time.mktime(datetime.date.today().timetuple()))\n self.sys = ms.search_alarm_sys_today_hist(self.time, self.pietime)\n self.dcap = ms.search_alarm_dcap_today_hist(self.time, self.pietime)\n self.dcaphist = ms.search_alarm_dcap_today(self.time, self.pietime)\n self.syshist = ms.search_alarm_sys_today(self.time, self.pietime)\n self.alldcap = self.dcaphist + self.dcap\n self.allsys = self.sys + self.syshist\n self.allalarm = self.alldcap + self.allsys\n\n def piedata(self):\n sum = {}\n res = []\n for x in self.allalarm:\n for y in x:\n if y in sum:\n sum[y] += 1\n else:\n sum[y] = 1\n sum['Middleware'] = sum['Middleware'] + sum['WebServer']\n del sum['WebServer']\n for x, y in sum.items():\n v = {}\n if x in ['Middleware']:\n v['x'] = 'ไธญ้—ดไปถ'\n v['y'] = y\n res.append(v)\n elif x in ['OperatingSystem']:\n v['x'] = 'ๆ“ไฝœ็ณป็ปŸ'\n v['y'] = y\n res.append(v)\n elif x in ['Database']:\n v['x'] = 'ๆ•ฐๆฎๅบ“'\n v['y'] = y\n res.append(v)\n else:\n v['x'] = 'ๅ…ถไป–'\n v['y'] = y\n res.append(v)\n return res\n\n def alarm_total(self):\n return (len(self.allalarm))\n\n def today(self):\n unit = 3600\n nowtime = int(time.time())\n times = nowtime - (nowtime % unit)\n\n\nif __name__ == '__main__':\n d = homedata()\n d.today()\n" }, { "alpha_fraction": 0.5649324059486389, "alphanum_fraction": 0.5741499662399292, "avg_line_length": 30.496774673461914, "blob_id": "8a6b3e9b4880ab22b4048b93f1119c612d8e8698", "content_id": "b45c665eeb39d996557545805140b3a108dc31a7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5040, "license_type": "no_license", "max_line_length": 172, "num_lines": 155, "path": "/backup/bakdb_util_118_alarm.py", "repo_name": "alvinye1/poseidon_mobile_appserver", "src_encoding": "UTF-8", "text": "# -*-coding: utf-8 -*-\n'''\nCreated by jojo at 2018/7/5\n'''\nimport pymysql 
as MySQLdb\nfrom modules.common_util import get_config\n\nconfig = get_config()\n\nclass MSDB(object):\n \"\"\"\n ๅฐ่ฃ…MySQL็š„ๅขžๅˆ ๆ”นๆŸฅ๏ผŒๅ…ทๆ›ด็ป†่‡ด็š„ๆŸฅ่ฏขๅฐ่ฃ…ๅœจMSSoupไธญ\n \"\"\"\n def __init__(self):\n self.conn = self.get_connect()\n\n def __del__(self):\n self.conn.close()\n\n def get_connect(self):\n \"\"\"\n ไปŽ้…็ฝฎไธญ่ฏปๅ–ๆ•ฐๆฎๅบ“้…็ฝฎ๏ผŒๅปบ็ซ‹่ฟžๆŽฅ\n \"\"\"\n user = config.get(\"mysqlalarm\", \"user\")\n passwd = config.get(\"mysqlalarm\", \"passwd\")\n host = config.get(\"mysqlalarm\", \"host\")\n port = config.getint(\"mysqlalarm\", \"port\")\n db = config.get(\"mysqlalarm\", \"db\")\n print (user,passwd,host,port)\n return MySQLdb.connect(user=user,\n passwd=passwd,\n host=host,\n port=port,\n db=db,\n charset=\"utf8\")\n\n def _update(self, sql):\n cursor = self.conn.cursor()\n cursor.execute(sql)\n self.conn.commit()\n cursor.close()\n\n def _select(self, sql, use_dict=False):\n cursor = self.conn.cursor(cursorclass=MySQLdb.cursors.DictCursor) \\\n if use_dict else self.conn.cursor()\n cursor.execute(sql)\n result = cursor.fetchall()\n cursor.close()\n return result\n\n\nclass MSSoup118alarm(object):\n \"\"\"\n ๅฐ่ฃ…ไธŽmysql็š„ไบคไบ’\n \"\"\"\n\n def __init__(self):\n \"\"\"ไฝฟ็”จๅ…ฑไบซ็š„ๆ•ฐๆฎๅบ“้“พๆŽฅ\"\"\"\n self.db_conn = MSDB().get_connect()\n\n def __del__(self):\n \"\"\"ๅฝ“ๅฎžไพ‹้”€ๆฏๆ—ถๅ…ณ้—ญๆ•ฐๆฎๅบ“้“พๆŽฅ\"\"\"\n self.db_conn.close()\n\n def _update_db(self, sql):\n \"\"\"ๆ›ดๆ–ฐๆ•ฐๆฎๅบ“ๅญ—ๆฎต็Šถๆ€\"\"\"\n cursor = self.db_conn.cursor()\n cursor.execute(sql)\n self.db_conn.commit()\n # cursor.close()\n\n def read_db_data_to_dict(self, sql, dict_enabled=True):\n \"\"\"ๅฐ่ฃ…sqlๅ‘ฝไปคๆ‰ง่กŒ\"\"\"\n result = {}\n try:\n cursor = self.db_conn.cursor()\n cursor.execute(sql)\n result = cursor.fetchall()\n except MySQLdb.Error as e:\n print(\"DIFF_DATA DB Error:\" + e)\n finally:\n cursor.close()\n return result\n\n def get_appname(self):\n sql = \"select distinct APPNAME from poseidon_app_info limit 200;\"\n res = self.read_db_data_to_dict(sql)\n return res\n\n\n def get_machine_detail(self):\n\t sql = \"select * from poseidon_server_info a,poseidon_app_info b where a.APPNODECI = b.APPNODECI and IP_ADDR limit 200;\"\n\t detail = self.read_db_data_to_dict(sql)\n return detail\n\n def get_machine_detail2(self):\n sql = \"select * from poseidon_server_info a,poseidon_app_info b where a.APPNODECI = b.APPNODECI and IP_ADDR limit 10;\"\n detail2 = self.read_db_data_to_dict(sql)\n return detail2\n\n def get_machine_detail3(self,ip):\n sql = \"select * from poseidon_server_info a,poseidon_app_info b where a.APPNODECI = b.APPNODECI and IP_ADDR = '%s'\"%(ip)\n detail3 = self.read_db_data_to_dict(sql)\n return detail3\n\n def get_machine_enameandip(self):\n sql = \"SELECT DISTINCT b.APPNAME, a.IP_ADDR FROM poseidon_server_info a,poseidon_app_info b WHERE a.APPNODECI = b.APPNODECI ORDER BY b.APPNAME limit 500;\"\n enameandip = self.read_db_data_to_dict(sql)\n return enameandip\n\n def get_alarmname(self):\n sql = \"select NodeIP,NodeAlias,Component,SummaryCN,EventType,START_TIME,Occurence,FREQUENCY,EventNameCN,CustomerSeverity,SyslogID from poseidon_alarm_sys limit 40;\"\n res = self.read_db_data_to_dict(sql)\n return res\n\n def get_del(self,syslogid):\n sql = \"delete from poseidon_alarm_sys where SyslogID= '%s'\"%(syslogid)\n # sql = \"DELETE FROM poseidon_alarm_sys WHERE SyslogID = 14001;\"\n # self.read_db_data_to_dict(sql)\n self._update_db(sql)\n print (\"==================del====================\")\n # return self.read_db_data_to_dict(sql)\n\n\n\n def 
get_num(self):\n sql = \"select count(1) as num,FatherEvent \" \\\n \"from(select NodeIP,NodeAlias,Component,SummaryCN,EventType,\" \\\n \"START_TIME,Occurence,FREQUENCY,EventNameCN,SyslogID,\" \\\n \"FatherEvent \" \\\n \"from poseidon_alarm_sys \" \\\n \"limit 200) a \" \\\n \"group by FatherEvent\" \\\n \";\"\n res = self.read_db_data_to_dict(sql)\n return res\n\n\n def get_context(self, fatherEvent):\n sql = \"select NodeIP,NodeAlias,Component,SummaryCN,EventType,\" \\\n \"START_TIME,Occurence,FREQUENCY,EventNameCN,SyslogID,\" \\\n \"FatherEvent \" \\\n \"from poseidon_alarm_sys \" \\\n \"where FatherEvent='%d'\" % fatherEvent\n res = self.read_db_data_to_dict(sql)\n return res\n\n\nif __name__ == '__main__':\n app = MSSoup118alarm()\n #res = ms.get_name()\n # detail = ms.get_detail()\n # print(detail)\n # print(res)\n # print(\"111\")\n" }, { "alpha_fraction": 0.734375, "alphanum_fraction": 0.734375, "avg_line_length": 20.33333396911621, "blob_id": "ce6e5ec65f5af1b0fa57c04a32304c23f95c6a12", "content_id": "f850618c8fae8346d8e488260a6377d4a596c108", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 70, "license_type": "no_license", "max_line_length": 27, "num_lines": 3, "path": "/README.md", "repo_name": "alvinye1/poseidon_mobile_appserver", "src_encoding": "UTF-8", "text": "# poseidon_mobile_appserver\n## ๅฏๅŠจflask server๏ผš\n - python app.py\n" }, { "alpha_fraction": 0.5207207202911377, "alphanum_fraction": 0.5783783793449402, "avg_line_length": 28.210525512695312, "blob_id": "426ea40a497a9a4734e53c5a0d8e192871679471", "content_id": "d4de34b531aeb7a779edc5fe652ba4d4e5aea264", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 555, "license_type": "no_license", "max_line_length": 80, "num_lines": 19, "path": "/modules/adiread.py", "repo_name": "alvinye1/poseidon_mobile_appserver", "src_encoding": "UTF-8", "text": "#-*- coding:utf-8 -*-\nimport xlrd\nfrom modules.database_controller.Mongodb import *\n\ndef adi():\n a = MongoDB('80.7.238.136', 8889, 'testposeidon', 'adi', 'root', 'qwert789')\n book = xlrd.open_workbook('/tmp/20190109.xls')\n sheet = book.sheet_by_index(0)\n name = []\n rows = sheet.nrows\n for i in range(28):\n name.append(sheet.cell(0,i).value)\n\n for x in range(1,rows):\n cols = []\n for y in range(28):\n cols.append(sheet.cell(x,y).value)\n test = dict(zip(name,cols))\n a.insert(test, 'one')\n" }, { "alpha_fraction": 0.5462185144424438, "alphanum_fraction": 0.605042040348053, "avg_line_length": 10.899999618530273, "blob_id": "48000eb60a74d7aa45c163b45c772c894c0931a9", "content_id": "0ce017eb28ca7d4cdad5b4bd71fcb2d0536f226b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 119, "license_type": "no_license", "max_line_length": 27, "num_lines": 10, "path": "/controllers/__init__.py", "repo_name": "alvinye1/poseidon_mobile_appserver", "src_encoding": "UTF-8", "text": "# -*-coding: utf-8 -*-\n'''\nCreated by jojo at 2018/9/7\n'''\n\nimport controllers.news\n\nblueprints = [\n news.news_bp\n]\n" }, { "alpha_fraction": 0.6661561727523804, "alphanum_fraction": 0.6676875948905945, "avg_line_length": 25.1200008392334, "blob_id": "20bb4bbe0a1b97e7689d7e1d33c398ae950dbd69", "content_id": "4eded757205b14e157a86400932bb2a3bb257ca7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 661, "license_type": "no_license", "max_line_length": 81, "num_lines": 25, 
"path": "/tests/test.py", "repo_name": "alvinye1/poseidon_mobile_appserver", "src_encoding": "UTF-8", "text": "# -*- coding:utf-8 -*-\nfrom flask import Flask, request \nimport json \ndef after_request(response): \n\tresponse.headers['Access-Control-Allow-Origin'] = '*' \n\tresponse.headers['Access-Control-Allow-Methods'] = 'PUT,GET,POST,DELETE' \n\tresponse.headers['Access-Control-Allow-Headers'] = 'Content-Type,Authorization' \n\treturn response \ndef create_app(): \n\tapp = Flask(__name__) \n\tapp.after_request(after_request) \n\n\t@app.route(\"/Detail\",methods=['POST']) \n\tdef add_Spec(): \n\t\tprint 'ๆ–ฐๅขž่ง„ๆ ผ' \n\t\tdata = request.get_data() \n\t\tjson_re = json.loads(data) \n\t\tprint json_re \n\t\treturn 'json_re'\n\n\nif __name__ == '__main__':\n\taaa = create_app()\n\tprint aaa\n\tprint \"aaa\"\n" }, { "alpha_fraction": 0.616746187210083, "alphanum_fraction": 0.6227874755859375, "avg_line_length": 40.20087432861328, "blob_id": "f1d8ab4e15b5b7c6ee5230fd15b6f2328929e744", "content_id": "36066e3874ddb5b26658aace7f1f5fcb3e703b94", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9593, "license_type": "no_license", "max_line_length": 347, "num_lines": 229, "path": "/modules/database_controller/db_util_118_alarm.py", "repo_name": "alvinye1/poseidon_mobile_appserver", "src_encoding": "UTF-8", "text": "# -*-coding: utf-8 -*-\n'''\nCreated by jojo at 2018/7/5\n'''\nimport pymysql as MySQLdb\nfrom modules.common_util import get_config\n\nconfig = get_config()\n\n\nclass MSDB(object):\n \"\"\"\n ๅฐ่ฃ…MySQL็š„ๅขžๅˆ ๆ”นๆŸฅ๏ผŒๅ…ทๆ›ด็ป†่‡ด็š„ๆŸฅ่ฏขๅฐ่ฃ…ๅœจMSSoupไธญ\n \"\"\"\n\n def __init__(self):\n self.conn = self.get_connect()\n\n def __del__(self):\n self.conn.close()\n\n def get_connect(self):\n \"\"\"\n ไปŽ้…็ฝฎไธญ่ฏปๅ–ๆ•ฐๆฎๅบ“้…็ฝฎ๏ผŒๅปบ็ซ‹่ฟžๆŽฅ\n \"\"\"\n user = config.get(\"mysqlalarm\", \"user\")\n passwd = config.get(\"mysqlalarm\", \"passwd\")\n host = config.get(\"mysqlalarm\", \"host\")\n port = config.getint(\"mysqlalarm\", \"port\")\n db = config.get(\"mysqlalarm\", \"db\")\n # print(user, passwd, host, port)\n return MySQLdb.connect(user=user,\n passwd=passwd,\n host=host,\n port=port,\n db=db,\n charset=\"utf8\")\n\n def _update(self, sql):\n cursor = self.conn.cursor()\n cursor.execute(sql)\n self.conn.commit()\n cursor.close()\n\n def _select(self, sql, use_dict=False):\n cursor = self.conn.cursor(cursorclass=MySQLdb.cursors.DictCursor) \\\n if use_dict else self.conn.cursor()\n cursor.execute(sql)\n result = cursor.fetchall()\n cursor.close()\n return result\n\n\nclass MSSoup118alarm(object):\n \"\"\"\n ๅฐ่ฃ…ไธŽmysql็š„ไบคไบ’\n \"\"\"\n\n def __init__(self):\n \"\"\"ไฝฟ็”จๅ…ฑไบซ็š„ๆ•ฐๆฎๅบ“้“พๆŽฅ\"\"\"\n self.db_conn = MSDB().get_connect()\n\n def __del__(self):\n \"\"\"ๅฝ“ๅฎžไพ‹้”€ๆฏๆ—ถๅ…ณ้—ญๆ•ฐๆฎๅบ“้“พๆŽฅ\"\"\"\n self.db_conn.close()\n\n def _update_db(self, sql):\n \"\"\"ๆ›ดๆ–ฐๆ•ฐๆฎๅบ“ๅญ—ๆฎต็Šถๆ€\"\"\"\n cursor = self.db_conn.cursor()\n cursor.execute(sql)\n self.db_conn.commit()\n # cursor.close()\n\n def read_db_data_to_dict(self, sql, dict_enabled=True):\n \"\"\"ๅฐ่ฃ…sqlๅ‘ฝไปคๆ‰ง่กŒ\"\"\"\n result = {}\n try:\n cursor = self.db_conn.cursor()\n cursor.execute(sql)\n result = cursor.fetchall()\n except MySQLdb.Error as e:\n print(\"DIFF_DATA DB Error:\" + e)\n finally:\n cursor.close()\n return result\n\n def get_appname(self):\n sql = \"select distinct APPNAME from poseidon_app_info;\"\n res = self.read_db_data_to_dict(sql)\n return res\n\n def get_alarm_show(self, syslogid):\n sql = \"select 
NodeIP from poseidon_alarm_dcap WHERE SyslogID = '%s'\" % (syslogid)\n onealarm = self.read_db_data_to_dict(sql)\n return onealarm\n\n def get_machine_detail(self):\n sql = \"select * from poseidon_server_info a,poseidon_app_info b where a.APPNODECI = b.APPNODECI and IP_ADDR ;\"\n detail = self.read_db_data_to_dict(sql)\n return detail\n\n def get_machine_detail2(self):\n sql = \"select * from poseidon_server_info a,poseidon_app_info b where a.APPNODECI = b.APPNODECI and IP_ADDR ;\"\n detail2 = self.read_db_data_to_dict(sql)\n return detail2\n\n def get_machine_detail3(self, ip):\n sql = \"select * from poseidon_server_info a,poseidon_app_info b where a.APPNODECI = b.APPNODECI and IP_ADDR = '%s'\" % (\n ip)\n detail3 = self.read_db_data_to_dict(sql)\n return detail3\n\n def get_machine_enameandip(self):\n sql = \"SELECT DISTINCT b.APPNAME, a.IP_ADDR FROM poseidon_server_info a,poseidon_app_info b WHERE a.APPNODECI = b.APPNODECI ORDER BY b.APPNAME ;\"\n enameandip = self.read_db_data_to_dict(sql)\n return enameandip\n\n def get_alarmname(self):\n sql = \"select NodeIP,NodeAlias,Component,SummaryCN,EventType,START_TIME,Occurence,FREQUENCY,EventNameCN,CustomerSeverity,SyslogID,FatherEvent,FLAGBIT,START_TIME,Occurence from poseidon_alarm_sys ;\"\n res = self.read_db_data_to_dict(sql)\n return res\n\n def get_alarm_dcap(self):\n sql = \"select NodeIP,NodeAlias,Component,SummaryCN,EventType,START_TIME,Occurence,FREQUENCY,EventNameCN,CustomerSeverity,DCAPID,FatherEvent,FLAGBIT,START_TIME,Occurence from poseidon_alarm_dcap;\"\n res = self.read_db_data_to_dict(sql)\n return res\n\n def get_del(self, syslogid):\n sql = \"delete from poseidon_alarm_sys where SyslogID= '%s'\" % (syslogid)\n # sql = \"DELETE FROM poseidon_alarm_sys WHERE SyslogID = 14001;\"\n # self.read_db_data_to_dict(sql)\n self._update_db(sql)\n print(\"==================del====================\")\n # return self.read_db_data_to_dict(sql)\n\n def get_num(self):\n sql = \"select count(1) as num,FatherEvent \" \\\n \"from(select NodeIP,NodeAlias,Component,SummaryCN,EventType,\" \\\n \"START_TIME,Occurence,FREQUENCY,EventNameCN,SyslogID,\" \\\n \"FatherEvent,CustomerSeverity \" \\\n \"from poseidon_alarm_sys \" \\\n \") a \" \\\n \"group by FatherEvent\" \\\n \";\"\n res = self.read_db_data_to_dict(sql)\n return res\n\n def get_context(self, fatherEvent):\n sql = \"select NodeIP,NodeAlias,Component,SummaryCN,EventType,\" \\\n \"START_TIME,Occurence,FREQUENCY,EventNameCN,SyslogID,\" \\\n \"FatherEvent,CustomerSeverity \" \\\n \"from poseidon_alarm_sys \" \\\n \"where FatherEvent='%d' limit 40\" % fatherEvent\n res = self.read_db_data_to_dict(sql)\n return res\n\n def merge_alarm_sys(self, fatherevent, syslogid):\n sql = \"update poseidon_alarm_sys set FatherEvent={0} where SyslogID = '{1}' \".format(fatherevent, syslogid)\n res = self._update_db(sql)\n\n def merge_alarm_dcap(self, fatherevent, dcapid):\n sql = \"update poseidon_alarm_dcap set FatherEvent={0} where DCAPID = '{1}' \".format(fatherevent, dcapid)\n res = self._update_db(sql)\n\n def history_alarm_dcap_cluster(self, fname):\n sql = \"select NodeIP,NodeAlias,Component,SummaryCN,EventType,START_TIME,Occurence,FREQUENCY,EventNameCN,CustomerSeverity,DCAPID,FatherEvent,FLAGBIT from poseidon_alarm_dcap where ENNAME = '{0}';\".format(\n fname)\n res = self.read_db_data_to_dict(sql)\n return res\n\n def history_alarm_sys_cluster(self, fname):\n sql = \"select 
NodeIP,NodeAlias,Component,SummaryCN,EventType,START_TIME,Occurence,FREQUENCY,EventNameCN,CustomerSeverity,SyslogID,FatherEvent,FLAGBIT from poseidon_alarm_sys where ENNAME = '{0}';\".format(\n fname)\n res = self.read_db_data_to_dict(sql)\n return res\n\n def history_alarm_dcap_detail(self, fname):\n sql = \"select NodeIP,NodeAlias,Component,SummaryCN,EventType,START_TIME,Occurence,FREQUENCY,EventNameCN,CustomerSeverity,DCAPID,FatherEvent,FLAGBIT from poseidon_alarm_dcap where NodeIP = '{0}';\".format(\n fname)\n res = self.read_db_data_to_dict(sql)\n return res\n\n def history_alarm_sys_detail(self, fname):\n sql = \"select NodeIP,NodeAlias,Component,SummaryCN,EventType,START_TIME,Occurence,FREQUENCY,EventNameCN,CustomerSeverity,SyslogID,FatherEvent,FLAGBIT from poseidon_alarm_sys where NodeIP = '{0}';\".format(\n fname)\n res = self.read_db_data_to_dict(sql)\n return res\n\n def search_alarm_dcap(self, searchid):\n sql = \"select NodeIP,NodeAlias,Component,SummaryCN,EventType,START_TIME,Occurence,FREQUENCY,EventNameCN,CustomerSeverity,DCAPID,FatherEvent,FLAGBIT from `poseidon_alarm_dcap` where CONCAT(`NodeIP`,`NodeAlias`,`Component`,`SummaryCN`,`EventType`,`FREQUENCY`,`EventNameCN`,`CustomerSeverity`,`DCAPID`,`FatherEvent`) LIKE '%{0}%';\".format(\n searchid)\n res = self.read_db_data_to_dict(sql)\n return res\n\n def search_alarm_sys(self, searchid):\n sql = \"select NodeIP,NodeAlias,Component,SummaryCN,EventType,START_TIME,Occurence,FREQUENCY,EventNameCN,CustomerSeverity,SyslogID,FatherEvent,FLAGBIT from `poseidon_alarm_sys` where CONCAT(`NodeIP`,`NodeAlias`,`Component`,`SummaryCN`,`EventType`,`FREQUENCY`,`EventNameCN`,`CustomerSeverity`,`SyslogID`,`FatherEvent`) LIKE '%{0}%';\".format(\n searchid)\n res = self.read_db_data_to_dict(sql)\n return res\n\n def search_alarm_sys_today_hist(self, time, ftime):\n sql = \"SELECT ComponentType FROM poseidon_alarm_sys_hist WHERE START_TIME-'{0}'<'{1}' AND START_TIME-'{2}'>0\".format(time, ftime, time)\n res = self.read_db_data_to_dict(sql)\n return res\n\n def search_alarm_dcap_today_hist(self, time, ftime):\n sql = \"SELECT a.ComponentType FROM poseidon_alarm_severity a,poseidon_alarm_dcap_hist b WHERE a.IndicatorName = b.IndicatorName AND START_TIME-'{0}'<'{1}' AND START_TIME-'{2}'>0\".format(time, ftime, time)\n res = self.read_db_data_to_dict(sql)\n return res\n\n def search_alarm_sys_today(self, time, ftime):\n sql = \"SELECT ComponentType FROM poseidon_alarm_sys WHERE START_TIME-'{0}'<'{1}' AND START_TIME-'{2}'>0\".format(time, ftime, time)\n res = self.read_db_data_to_dict(sql)\n return res\n\n def search_alarm_dcap_today(self, time, ftime):\n sql = \"SELECT a.ComponentType FROM poseidon_alarm_severity a,poseidon_alarm_dcap b WHERE a.IndicatorName = b.IndicatorName AND START_TIME-'{0}'<'{1}' AND START_TIME-'{2}'>0\".format(time, ftime, time)\n res = self.read_db_data_to_dict(sql)\n return res\n\n\n\nif __name__ == '__main__':\n app = MSSoup118alarm()\n # res = ms.get_name()\n # detail = ms.get_detail()\n # print(detail)\n # print(res)\n # print(\"111\")\n" } ]
29
Hang0213/Shark_Classification
https://github.com/Hang0213/Shark_Classification
d796248aba253880e36c6cae11b81740010508a5
404a2f7205b4e681c80d4117c85e630539fb2c42
18df73d187b5f7e78eb22d9ed12ba4459f080ad5
refs/heads/main
2023-03-21T05:00:35.807923
2021-03-05T06:14:53
2021-03-05T06:14:53
344,671,040
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7307692170143127, "alphanum_fraction": 0.7307692170143127, "avg_line_length": 24, "blob_id": "b6a1b27347018600aa23c3a92b4edcd624dbeb35", "content_id": "f600dd640896fc557d7ffc321cfe2bd6dd574108", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 26, "license_type": "no_license", "max_line_length": 24, "num_lines": 1, "path": "/README.md", "repo_name": "Hang0213/Shark_Classification", "src_encoding": "UTF-8", "text": "\"# Shark_Classification\" \n" }, { "alpha_fraction": 0.38264888525009155, "alphanum_fraction": 0.39624693989753723, "avg_line_length": 29.388429641723633, "blob_id": "2f00872d56fb0f5ceb73a7918ed063b641f343e3", "content_id": "91693855d73255a5b1d646d1955a3fc636c05a27", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3677, "license_type": "permissive", "max_line_length": 74, "num_lines": 121, "path": "/bird_species_classification/modify_data.py", "repo_name": "Hang0213/Shark_Classification", "src_encoding": "UTF-8", "text": "from os.path import join, exists\nfrom os import listdir, makedirs\nfrom shutil import copyfile\n\nspecies = [\n \"Alopias\",\n \"Carcharias\",\n \"Carcharodon\",\n \"Galeocerdo\",\n \"Heterodontus\",\n \"Hexanchus\",\n \"Negaprion\",\n \"Orectolobus\",\n \"Prionace\",\n \"Rhincodon\",\n \"Sphyrna\",\n \"Triaenodon\",\n]\n\n\nsource_folder = \"./train_data/\"\ndestination_folder = \"./train/\"\n\n\ndef rename_files():\n \"\"\"\n Initially the file names are incosistent. This function\n changes the file name to make it more understanding.\n\n Example - for example, DSC_6272.jpg may be changed to 100101.jpg\n For shark_specie_counter < 10, in this,\n 100 -> original image, 1 -> Class Number, 01 -> Image Number\n\n Similarly, for the case if the species counter is greater than 10.\n \"\"\"\n shark_specie_counter = 1\n\n for shark_specie in species:\n\n #\n source_image_dir = join(source_folder, shark_specie)\n print(source_image_dir)\n source_images = listdir(source_image_dir)\n print(source_images)\n\n for source_image in source_images:\n\n destination = join(destination_folder, shark_specie)\n print(destination)\n if shark_specie_counter < 10:\n\n images = 0\n for source_image in source_images:\n\n if images < 10:\n copyfile(\n join(source_image_dir, source_image),\n join(\n destination,\n str(100)\n + str(shark_specie_counter)\n + str(0)\n + str(images)\n + \".jpg\",\n ),\n )\n\n elif images >= 10:\n copyfile(\n join(source_image_dir, source_image),\n join(\n destination,\n str(100)\n + str(shark_specie_counter)\n + str(images)\n + \".jpg\",\n ),\n )\n\n images += 1\n\n elif shark_specie_counter >= 10:\n\n images = 0\n\n for source_image in source_images:\n\n if images < 10:\n copyfile(\n join(source_image_dir, source_image),\n join(\n destination,\n str(10)\n + str(shark_specie_counter)\n + str(0)\n + str(images)\n + \".jpg\",\n ),\n )\n\n elif images >= 10:\n copyfile(\n join(source_image_dir, source_image),\n join(\n destination,\n str(10)\n + str(shark_specie_counter)\n + str(images)\n + \".jpg\",\n ),\n )\n images += 1\n\n shark_specie_counter += 1\n\n\nif __name__ == \"__main__\":\n for shark_specie in species:\n if not exists(join(destination_folder, shark_specie)):\n destination = makedirs(join(destination_folder, shark_specie))\n rename_files()\n" }, { "alpha_fraction": 0.5013466477394104, "alphanum_fraction": 0.534821093082428, "avg_line_length": 26.796791076660156, "blob_id": 
"566a9092a7bcca601aa45005ed489de79a2a79e8", "content_id": "157975223c48fc84a63c0c7d2bfd988fd30c8c44", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5198, "license_type": "permissive", "max_line_length": 78, "num_lines": 187, "path": "/bird_species_classification/data_augmentation.py", "repo_name": "Hang0213/Shark_Classification", "src_encoding": "UTF-8", "text": "import cv2\nfrom os.path import join\nimport os\nfrom imgaug import augmenters as iaa\n\naugmented_image_dir = \"./train/\"\n\nspecies = [\n \"Alopias\",\n \"Carcharias\",\n \"Carcharodon\",\n \"Galeocerdo\",\n \"Heterodontus\",\n \"Hexanchus\",\n \"Negaprion\",\n \"Orectolobus\",\n \"Prionace\",\n \"Rhincodon\",\n \"Sphyrna\",\n \"Triaenodon\",\n]\n\n\n\"\"\" Naming conventions can be different. This is\nwhat I've used at my time. I just followed the table\npresent to generate that much number of images.\n\nType of Augmentation:\n10 - Normal Image\n20 - Gaussian Noise - 0.1* 255\n30 - Gaussian Blur - sigma - 3.0\n40 - Flip - Horizaontal\n50 - Contrast Normalization - (0.5, 1.5)\n60 - Hue\n70 - Crop and Pad\n\nFlipped\n11 - Add - 2,3,4,5,6,12,13,14 7, 15, 16\n12 - Multiply - 2,3,4,5,6,12,13,14 7, 15, 16\n13 - Sharpen\n14 - Gaussian Noise - 0.2*255\n15 - Gaussian Blur - sigma - 0.0-2.0\n16 - Affine Translation 50px x, y\n17 - Hue Value\n\"\"\"\n\n\ndef save_images(\n augmentated_image,\n destination,\n number_of_images,\n bird_specie_counter,\n types\n):\n\n image_number = str(number_of_images)\n number_of_images = int(number_of_images)\n\n if bird_specie_counter < 10:\n\n if number_of_images < 10:\n cv2.imwrite(\n join(\n destination,\n str(types)\n + str(0)\n + str(bird_specie_counter)\n + image_number\n + \".jpg\",\n ),\n augmentated_image\n )\n\n elif number_of_images >= 10:\n cv2.imwrite(\n join(\n destination,\n str(types)\n + str(0)\n + str(bird_specie_counter)\n + image_number\n + \".jpg\",\n ),\n augmentated_image\n )\n\n elif bird_specie_counter >= 10:\n\n if number_of_images < 10:\n cv2.imwrite(\n join(\n destination,\n str(types)\n + str(bird_specie_counter)\n + image_number\n + \".jpg\",\n ),\n augmentated_image\n )\n\n elif number_of_images >= 10:\n cv2.imwrite(\n join(\n destination,\n str(types)\n + str(bird_specie_counter)\n + image_number\n + \".jpg\",\n ),\n augmentated_image\n )\n\n\n# Dataset Augmentation\n\ngauss = iaa.AdditiveGaussianNoise(scale=0.2 * 255)\n# blur = iaa.GaussianBlur(sigma=(3.0))\n# flip = iaa.Fliplr(1.0)\n# contrast = iaa.ContrastNormalization((0.5, 1.5), per_channel=0.5)\nsharp = iaa.Sharpen(alpha=(0, 0.3), lightness=(0.7, 1.3))\naffine = iaa.Affine(translate_px={\"x\": (-50, 50), \"y\": (-50, 50)})\n# add = iaa.Add((-20, 20), per_channel=0.5)\n# multiply = iaa.Multiply((0.8, 1.2), per_channel=0.5)\n\nhue = iaa.Sequential(\n [\n iaa.ChangeColorspace(from_colorspace=\"RGB\", to_colorspace=\"HSV\"),\n iaa.WithChannels(0, iaa.Add((50, 100))),\n iaa.ChangeColorspace(from_colorspace=\"HSV\", to_colorspace=\"RGB\"),\n ]\n)\n\naug = iaa.Sequential(\n [\n iaa.Fliplr(1.0),\n iaa.ChangeColorspace(from_colorspace=\"RGB\", to_colorspace=\"HSV\"),\n iaa.WithChannels(0, iaa.Add((50, 100))),\n iaa.ChangeColorspace(from_colorspace=\"HSV\", to_colorspace=\"RGB\"),\n ]\n)\n\n\ndef main():\n \"\"\"Read images, apply augmentation and save images.\n Two types of image augmentation is applied. One is on normal\n image whose image name starts with 1 nad another is one flipped\n image which starts with 4. 
Bird classes are mentioned above which\n type of augmentation is applied on which type of image and which\n type of specie. We check the first value of image path\n and compare it 1/4 to apply the data augmentation accordingly.\n \"\"\"\n for bird_specie in species:\n augmented_image_folder = join(augmented_image_dir, bird_specie)\n source_images = os.listdir(augmented_image_folder)\n print(source_images)\n source_images.sort(key=lambda f: int(\"\".join(filter(str.isdigit, f))))\n\n augmented_images_arr = []\n img_number = []\n bird_specie_number = source_images[0]\n bird_specie_number = int(bird_specie_number[2:4])\n for source_image in source_images:\n\n if int(source_image[0]) == 1:\n\n img_number.append(source_image[4:6])\n img_path = join(augmented_image_folder, source_image)\n\n img = cv2.imread(img_path)\n augmented_images_arr.append(img)\n\n counter = 0\n if len(augmented_images_arr) < 9:\n # Applying Gaussian image augmentation\n for augmented_image in gauss.augment_images(augmented_images_arr):\n save_images(\n augmented_image,\n augmented_image_folder,\n img_number[counter],\n bird_specie_number,\n 20,\n )\n counter += 1\n\n\nif __name__ == \"__main__\":\n main()\n" } ]
3
michelelt/MyTool
https://github.com/michelelt/MyTool
1a0da1ade7d702a7853c4da51d89ebdad9b1ab97
e6416a8bdf64be0fb5dcee775afc8e41b896a9f6
6cbb5b3d24f8854f54a928101cd4bcdcfa201b5b
refs/heads/master
2021-01-01T17:58:45.925872
2017-10-30T10:59:17
2017-10-30T10:59:17
98,207,570
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7384026646614075, "alphanum_fraction": 0.7608799338340759, "avg_line_length": 57.11111068725586, "blob_id": "469eaed7fb95bbb63de87ca5fe1b7556ba5af1d6", "content_id": "0365a25fcaf79a4553bd28a2a22c67a5d5ec1b67", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2091, "license_type": "no_license", "max_line_length": 98, "num_lines": 36, "path": "/Analysis/paths.py", "repo_name": "michelelt/MyTool", "src_encoding": "UTF-8", "text": "car2go_bookings_pickle_path = \"/home/mc/Scrivania/Tesi/MyTool/pickles/car2go_p\"\nenjoy_bookings_pickle_path = \"/home/mc/Scrivania/Tesi/MyTool/pickles/enjoy_p\"\n\ncar2go_bookings_picke_path_zoned = \"/home/mc/Scrivania/Tesi/MyTool/pickles/car2go_zoned\"\nenjoy_bookings_picke_path_zoned = \"/home/mc/Scrivania/Tesi/MyTool/pickles/enjoy_zoned\"\n\ncar2go_parkings_pickle_path = \"/home/mc/Scrivania/Tesi/MyTool/pickles/car2go_parkings_p\"\nenjoy_parkings_pickle_path = \"/home/mc/Scrivania/Tesi/MyTool/pickles/enjoy_parkings_p\"\n\ncar2go_parkings_pickle_path_zoned = \"/home/mc/Scrivania/Tesi/MyTool/pickles/car2go_parkings_zoned\"\nenjoy_parkings_pickle_path_zoned = \"/home/mc/Scrivania/Tesi/MyTool/pickles/enjoy_parkings_zoned\"\n\nenjoy_disap_pickle_path = \"/home/mc/Scrivania/Tesi/MyTool/pickles/desappeared_cars_enjoy_p\"\ncar2go_disap_pickle_path = \"/home/mc/Scrivania/Tesi/MyTool/pickles/desappeared_cars_car2go_p\"\n\ngrid_path = \"/home/mc/Scrivania/Tesi/MyTool/SHAPE/grid.shp\"\n\nplots_path = \"/home/mc/Scrivania/Tesi/MyTool/plots/01/\"\nplots_path2 = \"/home/mc/Scrivania/Tesi/MyTool/plots/02/\"\nplots_path3 = \"/home/mc/Scrivania/Tesi/MyTool/plots/03/\"\nplots_path4 = \"/home/mc/Scrivania/Tesi/MyTool/plots/04/\"\nplots_path6 = \"/home/mc/Scrivania/Tesi/MyTool/plots/06/\"\nplots_path7 = \"/home/mc/Scrivania/Tesi/MyTool/plots/07/\"\nplots_path8 = \"/home/mc/Scrivania/Tesi/MyTool/plots/08/\"\nplots_path9 = \"/home/mc/Scrivania/Tesi/MyTool/plots/09/\"\nplots_path10 = \"/home/mc/Scrivania/Tesi/MyTool/plots/10/\"\n\n\nsim_path_nrad = \"/home/mc/Scrivania/Tesi/MyTool/pickles/sym_res_nrad/\"\nsim_path_rnd = \"/home/mc/Scrivania/Tesi/MyTool/pickles/sym_res_rnd/\"\nsym_path_3_alg_final =\"/home/mc/Scrivania/Tesi/MyTool/pickles/sym_res_3_alg_no_rand_final/\"\nsym_path_best_rnd = \"/home/mc/Scrivania/Tesi/MyTool/pickles/sym_res_best_rnd\"\nsym_path_SOC = \"/home/mc/Scrivania/Tesi/MyTool/pickles/sym_res_SOC/\"\nsym_path_rnd_200_ppz_4 = \"/home/mc/Scrivania/Tesi/MyTool/pickles/sym_res_rnd_200_ppz_4/\"\nsym_path_corr_eur = \"/home/mc/Scrivania/Tesi/MyTool/pickles/sym_res_corr_eur/\"\nsym_path_bat = \"/home/mc/Scrivania/Tesi/MyTool/pickles/sym_res_bat/\"" }, { "alpha_fraction": 0.6299314498901367, "alphanum_fraction": 0.6545039415359497, "avg_line_length": 36.54292297363281, "blob_id": "c970a43ad67a65f759002c23d746523fe3b50484", "content_id": "4c135555b959e2bd5103fc0e9f9a1696e5798dec", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 16197, "license_type": "no_license", "max_line_length": 113, "num_lines": 431, "path": "/Analysis/firstTest.py", "repo_name": "michelelt/MyTool", "src_encoding": "UTF-8", "text": "import pandas as pd\nimport datetime\nimport matplotlib.pyplot as plt\nimport matplotlib\nfrom math import *\nimport numpy as np\nimport paths as paths\nfrom util import Utility\n\n\nfrom DataBaseProxy import DataBaseProxy\ndbp = DataBaseProxy()\nutil = Utility()\n\nyear = 2017\nmonth = 5\nday = 5\n\n#km macchine per enjoy e car2go in 
one week\nstart = datetime.datetime(year, month, day, 0, 0, 0)\nend = datetime.datetime(year, month+2, day, 23, 59, 0)\n\n#\n#enjoy = dbp.query_bookings_df(\"enjoy\",\"Torino\", start, end)\n#enjoy_p = dbp.query_parkings_df(\"enjoy\", \"Torino\", start,end)\n##enjoy_2 = enjoy[enjoy[\"duration\"]< 120]\n##enjoy_2.duration.hist(bins=50, color='red')\n#car2go = dbp.query_bookings_df(\"car2go\",\"Torino\", start, end)\n#car2go_p = dbp.query_parkings_df(\"car2go\",\"Torino\", start, end)\n\n#enjoy.to_pickle(paths.enjoy_bookings_pickle_path, None)\n#car2go.to_pickle(paths.car2go_bookings_pickle_path, None)\n#\n#enjoy_p.to_pickle(paths.enjoy_parkings_pickle_path, None)\n#car2go_p.to_pickle(paths.car2go_parkings_pickle_path, None)\n\ndef clean_distances(df):\n    df = df[df.distance >1]\n    df = df[df.distance < df.distance.quantile(0.95)]\n    df = df[df.distance > df.distance.quantile(0.05)]\n    return df\n\n\ndef distances_per_car(df):\n    out_df= pd.DataFrame()\n    out_df[\"plate\"] = df.plate\n    out_df['distance'] = df.distance\n    out_df['duration'] = df.duration\n    distaces_per_car = out_df.groupby('plate', as_index = False).sum()\n    distaces_per_car['distance'] = distaces_per_car.distance.apply(lambda x : x/1000)\n    freq_per_car = out_df.groupby('plate', as_index=False).count()\n    distaces_per_car['freq'] = freq_per_car['distance']\n    return distaces_per_car\n\ndef total_dist_per_car(df):\n#    df = clean_distances(df)\n    dist_per_car = distances_per_car(df)\n    provider = util.get_provider(df)\n    \n    color = util.get_color(df)\n    fig, ax = plt.subplots(1, 1, figsize=(20,10))\n    my_xticks = dist_per_car.plate\n    plt.xticks(dist_per_car.index, my_xticks, rotation=45)\n#    plt.set_xticklabels(my_xticks, rotation=45)\n    plt.plot(dist_per_car.index, dist_per_car.distance, linestyle='-', marker='x',\n             color=color)\n    plt.title(\"km per car - \" + provider)\n    plt.ylabel(\"km\")\n    plt.xlabel(\"plates\")\n#    plt.savefig(paths.plots_path+provider+\"_dist_per_car.png\")\n#    plt.savefig(paths.plots_path2+provider+\"_dist_per_car.png\")\n    plt.show()\n    return dist_per_car\n    \ndef total_dist_per_car_no_outliers (df):\n    df = clean_distances(df)\n    dist_per_car = distances_per_car(df)\n    \n    fig, ax = plt.subplots(1, 1, figsize=(20,10))\n    provider = util.get_provider(df)\n    color = util.get_color(df)\n\n    std = dist_per_car['distance'].std()\n    avg = dist_per_car['distance'].mean()\n    normalized_distance = dist_per_car[(dist_per_car['distance'] >= (avg-std)) &\n                                       (dist_per_car['distance'] <= (avg+std))]\n    \n    my_xticks = normalized_distance.plate\n    plt.xticks(normalized_distance.index, my_xticks, rotation=45)\n    plt.plot(normalized_distance.index, normalized_distance.distance, \n             linestyle='-', marker='x', color=color)\n    plt.title(\"km per car normalized - \" + provider)\n    plt.ylabel(\"Km\")\n    plt.xlabel(\"plates\")\n#    plt.savefig(paths.plots_path+provider+\"_dist_per_car_no_out.png\")\n#    plt.savefig(paths.plots_path2+provider+\"_dist_per_car_no_out.png\")\n    plt.show()\n    return\n    \ndef fuel_behavior_max_distnace(df):\n    provider = util.get_provider(df)\n    df2 = clean_distances(df)\n    dist_per_car = distances_per_car(df2)\n    \n    \n    id_max = dist_per_car.distance.idxmax(1) \n    row = dist_per_car.loc[id_max]\n    plate = row['plate']\n    \n    fuel_cons = df[df.plate == plate]\n#    fuel_cons = fuel_cons[fuel_cons.distance_dr != -1]\n    x = range(0,len(fuel_cons.index))\n    fig, ax = plt.subplots(1, 1, figsize=(9,10))\n\n    ax.plot(x, fuel_cons.init_fuel, 'bs', label='init fuel')\n    ax.plot(x, fuel_cons.final_fuel,'r^', label='final fuel', alpha = 0.5)\n    plt.title(\"Fuel 
consumption, big dist - \" + provider)\n    plt.legend()\n    plt.ylabel(\"Fuel level\")\n    plt.xlabel(\"Chronological rent ID\")\n#    plt.savefig(paths.plots_path+provider+\"_fuel_behav_max_dist.png\")\n#    plt.savefig(paths.plots_path2+provider+\"_fuel_behav_max_dist.png\")\n    plt.show()\n\n    return fuel_cons\n    \ndef fuel_behavior_max_booked(df): \n    provider = util.get_provider(df)\n    count_df = df.groupby('plate', as_index = True).count()\n    id_max = count_df.distance.idxmax(1) \n    #row = count_df.loc[id_max]\n    plate = id_max\n    fuel_cons = df[df.plate == plate]\n    \n    fig, ax = plt.subplots(1, 1, figsize=(9,10))\n    x = range(0,len(fuel_cons.index))\n    ax.plot(x, fuel_cons.init_fuel, 'bs', label='init fuel')\n    ax.plot(x, fuel_cons.final_fuel,'r^', label= 'end fuel', alpha = 0.5)\n    plt.title(\"Fuel consumption, most booked - \" + util.get_provider(df))\n    plt.legend()\n    plt.ylabel(\"Fuel level\")\n    plt.xlabel(\"Chronological rent ID\")\n#    plt.savefig(paths.plots_path+provider+\"_fuel_behav_most_booked.png\")\n#    plt.savefig(paths.plots_path2+provider+\"_fuel_behav_most_booked.png\")\n    plt.show()\n    return fuel_cons\n    \ndef hist_cdf_pdf(df_source, df_dist, column, valid_days, valid_days_clnd):\n    provider = util.get_provider(df_source)\n    color = util.get_color(df_source)\n    if column == \"distance\" :\n        xlabel = \"km\"\n    elif column == \"duration\" :\n        xlabel = \"min\"\n    else:\n        xlabel = \"\"\n    \n    res = {}\n    \n    fig, ax = plt.subplots(2, 4, figsize=(20,10))\n    fig.suptitle(\"Bookings \" + column + \" - \" + provider)\n    \n    ## dirty ##\n    ax[0,0].hist(df_dist[column], 50, facecolor=color, alpha=0.75, cumulative=True, normed=True)\n    ax[0,0].set_title(\"CDF - \" + column)\n    ax[0,0].set_xlabel(xlabel)\n    \n    ax[1,0].hist(df_dist[column], 50, facecolor=color, alpha=0.75)\n    ax[1,0].set_title(\"PDF - \" + column)\n    ax[1,0].set_xlabel(xlabel)\n    \n    res[column+\"_mean\"] = df_dist[column].mean()\n    res[column+\"_median\"] = df_dist[column].median()\n    res[column+\"_std\"] = df_dist[column].std()\n\n    ## filtering ##\n    df_dist2 = util.clean_df(df_dist, column, df_dist[column].median(), df_dist[column].std())\n    \n    ax[0,1].hist(df_dist2[column], 50, facecolor=color, alpha=0.75, cumulative=True, normed=True)\n    ax[0,1].set_title(\"filtered (med;std) CDF - \" + column)\n    ax[0,1].set_xlabel(xlabel)\n    \n    ax[1,1].hist(df_dist2[column], 50, facecolor=color, alpha=0.75)\n    ax[1,1].set_title(\"filtered (med;std) PDF - \" + column)\n    ax[1,1].set_xlabel(xlabel)\n    \n    res[column+\"_mean_flt\"] = df_dist2[column].mean()\n    res[column+\"_median_flt\"] = df_dist2[column].median()\n    res[column+\"_std_flt\"] = df_dist2[column].std()\n    \n\n    ## per day ##\n    ax[0,2].hist(df_dist2[column]/valid_days, 50, facecolor=color, alpha=0.75, cumulative=True, normed=True)\n    ax[0,2].set_title(\"filtered CDF per day - \" + column)\n    ax[0,2].set_xlabel(xlabel)\n    \n    ax[1,2].hist(df_dist2[column]/valid_days, 50, facecolor=color, alpha=0.75)\n    ax[1,2].set_title(\"filtered PDF per day - \" + column)\n    ax[1,2].set_xlabel(xlabel)\n    \n    res[column+\"_mean_flt_per_day\"] = (df_dist2[column]/valid_days).mean()\n    res[column+\"_median_flt_per_day\"] = (df_dist2[column]/valid_days).median()\n    res[column+\"_std_flt_per_day\"] = (df_dist2[column]/valid_days).std()\n    \n    ## per day clnd ##\n    ax[0,3].hist(df_dist2[column]/valid_days_clnd, 50, facecolor=color, alpha=0.75, cumulative=True, normed=True)\n    ax[0,3].set_title(\"filtered CDF per day clnd - \" + column)\n    ax[0,3].set_xlabel(xlabel)\n    \n    ax[1,3].hist(df_dist2[column]/valid_days_clnd, 50, facecolor=color, alpha=0.75)\n    
ax[1,3].set_title(\"filtered PDF per day clnd - \" + column)\n ax[1,3].set_xlabel(xlabel)\n \n res[column+\"_mean_flt_per_day_clnd\"] = (df_dist2[column]/valid_days_clnd).mean()\n res[column+\"_median_flt_per_day_clnd\"] = (df_dist2[column]/valid_days_clnd).median()\n res[column+\"_std_flt_per_day_clnd\"] = (df_dist2[column]/valid_days_clnd).std()\n \n\n# plt.savefig(paths.plots_path+provider+\"_PDF_CDF.png\")\n# plt.savefig(paths.plots_path2+provider+\"_PDF_CDF.png\")\n fig.savefig(paths.plots_path3+\"_\"+provider+\"_\"+column+\"_books_tats.png\", bbox_inches='tight')\n\n plt.show()\n return res\n\ndef valid_days(df):\n provider = util.get_provider(df)\n color = util.get_color(df)\n df = pd.DataFrame(df['init_date'])\n df['date'] = df.init_date.apply(lambda x : x.date())\n df = df.groupby('date').count()\n \n datelist = pd.date_range(pd.datetime(year,month,day), periods=32).tolist()\n dfdays = pd.DataFrame(datelist)\n dfdays['count'] = [0]*len(datelist)\n dfdays.set_index(0, inplace=True)\n df2= dfdays['count'] + df['init_date']\n df2.fillna(0, inplace=True)\n \n fig, ax = plt.subplots(1, 1, figsize=(9,10))\n plt.title(\"Entry per days - \" + provider)\n df2.plot(color=color)\n# fig.savefig(paths.plots_path+provider+\"_valid_days.png\")\n# fig.savefig(paths.plots_path2+provider+\"_valid_days.png\")\n\n return\n\ndef gd_vs_ed_hist(df_dist, provider, color):\n fig, ax = plt.subplots(1,1,figsize=(9,10))\n ax.axhline(0.9, color='black', linestyle='-')\n ax.axhline(0.95, color='black', linestyle='-')\n ax.axhline(0.99, color='black', linestyle='-')\n ax.set_title(provider+\" - Error google d. - Euclidean d.\", fontsize=20)\n df_dist.dr_over_dist.hist(ax=ax, cumulative=True, color=color, normed=True, bins=500)\n# fig.savefig(paths.plots_path2+provider+\"_errors_on_dist.png\", bbox_inches='tight')\n fig.savefig(paths.plots_path3+provider+\"_errors_on_dist.png\", bbox_inches='tight')\n plt.show()\n return\n \n\nenjoy = pd.read_pickle(paths.enjoy_bookings_pickle_path, None)\ncar2go = pd.read_pickle(paths.car2go_bookings_pickle_path, None)\nenj_data = {}\nc2g_data = {}\nenj_data[\"general\"] = util.get_valid_days(enjoy,start,end)\nc2g_data[\"general\"] = util.get_valid_days(car2go,start,end)\n\nenj_bookings = len(pd.read_pickle(paths.enjoy_bookings_pickle_path))\nenj_parkings = len(pd.read_pickle(paths.enjoy_parkings_pickle_path))\nenj_cars = len(enjoy.plate.unique())\nenj_days = 35.0\n\nprint \"enj B/D \" + str(enj_bookings/enj_days)\nprint \"enj_B/D/C \" + str(enj_bookings/enj_days/enj_cars)\nprint \"enj P/D \" + str(enj_parkings/enj_days)\nprint \"enj P/D/C \" + str(enj_parkings/enj_days/enj_cars)\nprint\nc2g_bookings = len(pd.read_pickle(paths.car2go_bookings_pickle_path))\nc2g_parkings = len(pd.read_pickle(paths.car2go_parkings_pickle_path))\nc2g_cars = len(car2go.plate.unique())\nc2g_days = 38.0\n\nprint \"c2g B/D \" + str(c2g_bookings/c2g_days)\nprint \"c2g B/D/C \" + str(c2g_bookings/c2g_days/c2g_cars)\nprint \"c2g P/D \" + str(c2g_parkings/c2g_days)\nprint \"c2g P/D/C \" + str(c2g_parkings/c2g_days/c2g_cars)\n\n\n\n\nvalid_days(enjoy)\nvalid_days(car2go)\n\n\nenj_dist = distances_per_car(enjoy)\n#total_dist_per_car_no_outliers(enjoy)\nenj_data[\"distance\"] = hist_cdf_pdf(\n enjoy, \n enj_dist, \n \"distance\", \n enj_data[\"general\"][\"valid_days\"], \n enj_data[\"general\"][\"cleaned_valid_days\"]\n )\nenj_data[\"duration\"] = hist_cdf_pdf(\n enjoy, \n enj_dist, \n \"duration\", \n enj_data[\"general\"][\"valid_days\"], \n enj_data[\"general\"][\"cleaned_valid_days\"]\n 
)\n\n\n##fuel_behavior_max_distnace(enjoy)\n##fuel_cons = fuel_behavior_max_booked(enjoy)\n#\n#c2g_dist = total_dist_per_car(car2go)\n##total_dist_per_car_no_outliers(car2go)\n#c2g_data[\"distance\"] = hist_cdf_pdf(\n#        car2go, \n#        c2g_dist, \n#        \"distance\", \n#        c2g_data[\"general\"][\"valid_days\"], \n#        c2g_data[\"general\"][\"cleaned_valid_days\"]\n#        )\n#c2g_data[\"duration\"] = hist_cdf_pdf(\n#        car2go, \n#        c2g_dist, \n#        \"duration\", \n#        c2g_data[\"general\"][\"valid_days\"], \n#        c2g_data[\"general\"][\"cleaned_valid_days\"]\n#        )\n\n#fuel_cons = fuel_behavior_max_distnace(car2go)\n#fuel_behavior_max_booked(car2go)\n\n'''\nlook for cars with a wrong city\n'''\n\n#enj_dist_ok = enj_dist[(enj_dist[\"distance\"] > 30)]\n#enj_dist_not_ok = enj_dist[~enj_dist.isin(enj_dist_ok)].dropna()\n#\n#plates = enj_dist_not_ok['plate'].tolist()\n#print len(plates)\n#for pos in range(0, len(plates)):\n#    plates[pos] = str(plates[pos])\n#\n#disappeared_cars = dbp.query_car_per_plate_df(\"enjoy\", plates, start,end)\n#disappeared_cars.to_pickle(paths.enjoy_disap_pickle_path, None)\n#grouped = pd.DataFrame(disappeared_cars.groupby([\"plate\", \"city\"]).size())\n#grouped = grouped.rename(columns={0 : \"bookings_per_car\"})\n#zzz = grouped.index\n#zzz = list(zzz)\n#grouped[\"temp\"] = zzz\n#grouped[\"plate_col\"] = grouped.temp.apply(lambda row: row[0])\n#grouped[\"city_col\"] = grouped.temp.apply(lambda row: row[1])\n#del grouped[\"temp\"]\n#\n#car_per_city = grouped.groupby('city').count()\n#car_per_city[\"log\"] = np.log10(car_per_city[\"bookings_per_car\"])\n#car_per_city.bookings_per_car.plot(color=dbp.get_color(enjoy), marker='o', linewidth=0)\n\n#enjoy_torino = dbp.query_bookings_df(\"enjoy\",\"Torino\", start, end)\n#enjoy_milano = dbp.query_bookings_df(\"enjoy\",\"Milano\", start, end)\n#enjoy_firenze = dbp.query_bookings_df(\"enjoy\",\"Firenze\", start, end)\n#enjoy_roma = dbp.query_bookings_df(\"enjoy\",\"Roma\", start, end)\n#enjoy_catania = dbp.query_bookings_df(\"enjoy\",\" Catania\", start, end)\n\n#enjoy = pd.DataFrame()\n#enjoy.append([enjoy_torino, enjoy_firenze, enjoy_milano, enjoy_roma, enjoy_catania], ignore_index=True)\n\n'''\ntake the rentals (and also drop the entries without a google distance)\n'''\n#enjoy = enj_bookings_filtered\n#enjoy_distances = enjoy[enjoy.distance>20] #cars that actually moved\n#enjoy_distances = enjoy_distances[enjoy_distances.distance_dr != -1] #cars with valid entry of google_dir\n#enjoy_distances[\"dr_over_dist\"] = enjoy_distances[\"distance_dr\"] / enjoy_distances[\"distance\"]\n#enjoy_distances = enjoy_distances[\n#        (enjoy_distances[\"dr_over_dist\"] >= enjoy_distances[\"dr_over_dist\"].quantile(0.01)) &\n#        (enjoy_distances[\"dr_over_dist\"] <= enjoy_distances[\"dr_over_dist\"].quantile(0.99)) ]\n##enjoy_distances.dr_minus_dist.hist(normed=True, cumulative=True,color=\"red\", bins =500)\n#gd_vs_ed_hist(enjoy_distances, util.get_provider(enjoy), util.get_color(enjoy))\n#\n#\n#car2go = c2g_bookings_filtered \n#c2g_distances = car2go[car2go.distance>20] #cars that actually moved\n#c2g_distances = c2g_distances[c2g_distances.distance_dr!= -1] #cars with valid entry of google_dir\n##c2g_invalid = c2g_distances[c2g_distances.distance_dr == -1] #cars with valid entry of google_dir\n#c2g_distances[\"dr_over_dist\"] = c2g_distances[\"distance_dr\"] / c2g_distances[\"distance\"]\n#c2g_distances = c2g_distances[\n#        (c2g_distances[\"dr_over_dist\"] >= c2g_distances[\"dr_over_dist\"].quantile(0.01)) &\n#        (c2g_distances[\"dr_over_dist\"] <= 
c2g_distances[\"dr_over_dist\"].quantile(0.99)) ]\n##c2g_distances.dr_minus_dist.hist(normed=True, cumulative=True, color=\"blue\", bins=500)\n#gd_vs_ed_hist(c2g_distances, util.get_provider(car2go), util.get_color(car2go))\n#\n#print \"car2go\" \n#print c2g_distances.dr_over_dist.quantile(0.9)\n#print c2g_distances.dr_over_dist.quantile(0.95)\n#print c2g_distances.dr_over_dist.quantile(0.99)\n#print \"enjoy\"\n#print enjoy_distances.dr_over_dist.quantile(0.9)\n#print enjoy_distances.dr_over_dist.quantile(0.95)\n#print enjoy_distances.dr_over_dist.quantile(0.99)\n\n'''\nfuel consuption vs distnace eucl\n'''\n\n#enjoy = enjoy [enjoy[\"distance\"] > 500]\n#enjoy[\"fuel_diff\"] = enjoy[\"final_fuel\"] - enjoy[\"init_fuel\"]\n#x=enjoy[\"distance\"] * enjoy_distances[\"dr_over_dist\"].quantile(0.9)\n#y=enjoy[\"fuel_diff\"]\n#fig, [ax1,ax2] = plt.subplots(1,2,figsize=(18,10))\n#ax1.scatter(x,y, color='red')\n#ax1.set_title(\"enjoy - fuel cosnuption vs distance\", fontsize=18)\n#ax1.set_ylabel(\"Fuel difference\")\n#ax1.set_xlabel(\"Distance [m]\")\n#\n#\n#car2go = car2go [car2go[\"distance\"] > 500]\n#car2go[\"fuel_diff\"] = car2go[\"final_fuel\"] - car2go[\"init_fuel\"]\n#x=car2go[\"distance\"] * c2g_distances[\"dr_over_dist\"].quantile(0.9)\n#y=car2go[\"fuel_diff\"]\n#ax2.scatter(x,y, color='blue')\n#ax2.set_title(\"car2go - fuel cosnuption vs distance\",fontsize=18)\n#ax2.set_ylabel(\"Fuel difference\")\n#ax2.set_xlabel(\"Distance [m]\")\n#\n#fig.savefig(paths.plots_path3+\"_scatter_fuel_diff.png\", bbox_inches='tight')\n#fig.show()\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n" }, { "alpha_fraction": 0.824999988079071, "alphanum_fraction": 0.824999988079071, "avg_line_length": 39, "blob_id": "bd7e3337556b1c2ddfa6a5265bb80dee5d2f210e", "content_id": "4d0076bc7faabe9d5d0e8c60877046cd71b8847e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 80, "license_type": "no_license", "max_line_length": 72, "num_lines": 2, "path": "/README.md", "repo_name": "michelelt/MyTool", "src_encoding": "UTF-8", "text": "Study of recharging stations placement for electric car sharing systems.\nMichele Cocca\n" }, { "alpha_fraction": 0.5414019823074341, "alphanum_fraction": 0.5642684102058411, "avg_line_length": 38.935184478759766, "blob_id": "52493150ef4e3070384bc1bcbbe7df7f0863198e", "content_id": "b540b7d20e37226635096ac3229cceda82a8966f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8659, "license_type": "no_license", "max_line_length": 123, "num_lines": 216, "path": "/Analysis/simulator/plot_algorithm.py", "repo_name": "michelelt/MyTool", "src_encoding": "UTF-8", "text": "import pandas as pd\nimport geopandas as gpd\nimport numpy as np\nimport datetime\nimport time\nimport random\nimport sys\nimport os.path\nsys.path.insert(0, '/home/mc/Scrivania/Tesi/MyTool/Analysis/')\nimport paths as paths\nfrom DataBaseProxy import DataBaseProxy\nsys.path.insert(0, '/home/mc/Scrivania/Tesi/MyTool/Analysis/simulator')\nfrom util import Utility\nfrom car import Car\nfrom city import City\nfrom shapely.geometry import Point, Polygon\nfrom station import Station\nimport threading\nfrom multiprocessing import Process\nimport matplotlib.pyplot as plt\nfrom matplotlib import colors\n\n\ndef plot_from_df_algorithm (df, torino, provider, column, paramter, ppz):\n \n if provider == \"car2go\":\n nob = float(len(torino.car2go))\n noz = float(len(torino.car2go_parkings_analysis))\n cap = 17.6\n else:\n nob = len(torino.enjoy)\n 
noz = float(len(torino.enjoy_parkings_analysis))\n cap = 25.2\n \n \n colors = {2:\"red\", 4:\"blue\", 6: \"green\", 8: \"black\"}\n markers = {2:\"o\", 4:\"x\", 6: \"^\", 8: \"d\"}\n labels = {2:\"ppz = 2\", 4:\"ppz = 4\", 6: \"ppz = 6\", 8:\"ppz = 8\",}\n alg_names = {\"max_avg_time\":\"Average parking time\", \n \"max_parking\":\"Number of parking\", \n \"max_time\": \"Parking time\", \n \"best_rnd\": \"Best random\", \n \"mean_rnd\":\" Average random (190 run)\"}\n \n div_facts = {\"tot_deaths\":nob, \"avg_bat_before\": cap, \"avg_bat_after\": cap, \"pieni\": nob}\n \n titles = {\"tot_deaths\": \" - Bat. discharge vs Zone coverage - ppz=\",\n \"avg_bat_before\": \" - Avg. SoC vs Zone coverage - ppz=\", \n \"avg_bat_after\": \" - Avg. after charnging SoC vs Zone coverage - ppz=\", \n \"pieni\": \" - Charging prob. vs Zone Coverage - ppz=\"}\n \n y_labels = {\"tot_deaths\": \"Battery discharge prob.\",\n \"avg_bat_before\": \"Avg. SoC [%]\", \n \"avg_bat_after\": \"Avg. SoC [%]\", \n \"pieni\": \"Charging prob.\"}\n \n saving_name = {\"tot_deaths\": \"bat_exaust_\",\n \"avg_bat_before\": \"SoC_Before_\", \n \"avg_bat_after\": \"SoC_After_\", \n \"pieni\": \"charging_\"}\n \n dir_name = {\"tot_deaths\": \"bat_exaust/\",\n \"avg_bat_before\": \"soc_before/\", \n \"avg_bat_after\": \"soc_after/\", \n \"pieni\": \"charging_prob/\"}\n \n \n if provider == \"car2go\":\n nob = len(torino.car2go)\n noz = float(len(torino.car2go_parkings_analysis))\n noc = float(len(torino.c2g_fleet))\n cap = 17.6\n a = noc*cap\n else:\n nob = len(torino.enjoy)\n noz = float(len(torino.enjoy_parkings_analysis))\n\n \n if column == \"mean_rnd\":\n mean_c2g = df[(df[\"provider\"] == provider) & (df[\"algorithm\"]==\"rnd\")]\n mean_c2g = mean_c2g.groupby([\"z\",\"ppz\"], as_index=False).mean()\n mean_c2g[\"algorithm\"] = \"mean_rnd\"\n df = mean_c2g\n \n elif column == \"best_rnd\":\n best_deaths = df[(df[\"provider\"] == provider) & (df[\"algorithm\"]==\"rnd\")]\n best_deaths = best_deaths.sort_values(\"tot_deaths\").groupby([\"z\",\"ppz\"], as_index=False).first()\n best_deaths = best_deaths.rename(columns={\"rnd\":\"mean_rnd\"})\n best_deaths[\"algorithm\"] = \"best_rnd\"\n\n df = best_deaths\n\n else : \n df = df[df[\"provider\"] == provider]\n \n\n \n fig = plt.figure(figsize=(30,10))\n ax = fig.gca()\n ax.set_title(provider + \" - \" + alg_names[column] + \"\", fontsize=36)\n ax.grid()\n for i in ppz :\n df2 = df[(df[\"ppz\"] ==i)]\n# df4 = df[(df[\"algorithm\"] == column) & (df[\"ppz\"] ==4)]\n# df6 = df[(df[\"algorithm\"] == column) & (df[\"ppz\"] ==6)]\n# df8 = df[(df[\"algorithm\"] == column) & (df[\"ppz\"] ==8)]\n ax.plot(df2[\"z\"], df2[paramter].div(div_facts[paramter]), color=colors[i], marker=markers[i], label=labels[i])\n# ax.plot(df4[\"z\"], df4[paramter].div(div_facts[paramter]), color=colors[4], marker=markers[4], label=labels[4])\n# ax.plot(df6[\"z\"], df6[paramter].div(div_facts[paramter]), color=colors[6], marker=markers[6], label=labels[6])\n# ax.plot(df8[\"z\"], df8[paramter].div(div_facts[paramter]), color=colors[8], marker=markers[8], label=labels[8])\n\n my_t = range( 10, 175, 10)\n my_ticks = [str((\"{0:.2f}\".format(x/noz))) for x in my_t ]\n labels = [\"\"] * len(my_t)\n for i in range(0,len(labels)):\n labels[i] = int(float(my_ticks[i])*100)\n \n plt.xticks(my_t, labels)\n plt.yticks([0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8])\n \n# ax.set_xticklabels(labels)\n for tick in ax.xaxis.get_major_ticks():\n tick.label.set_fontsize(27) \n \n for tick in ax.yaxis.get_major_ticks():\n 
tick.label.set_fontsize(27) \n \n ax.set_xlabel(\"Zones(%)\", fontsize=36)\n ax.set_ylabel(y_labels[paramter], fontsize=36)\n \n\n \n plt.legend(fontsize=36)\n# /home/mc/Scrivania/Tesi/toptesi/figures/_results/car2go/algorithms\n# /home/mc/Scrivania/Tesi/toptesi/figures/_results/car2go/algorithms/car2go_best_rnd_tot_deaths\n# my_path = \"/home/mc/Scrivania/Tesi/toptesi/figures/_results/\"\n# my_path += provider+\"/\"\n# my_path += \"algorithms/\"\n# my_path += provider+\"_\"+column +\"_\"+ paramter\n my_path=\"/home/mc/Immagini/pres_im/pzz_\"+str(len(ppz))\n \n plt.savefig(my_path, bbox_inches = 'tight')\n# print my_path\n plt.show()\n \n \n \n\nyear = 2017\nmonth = 5\nday = 6\nstart = datetime.datetime(year, month, day, 0, 0, 0)\nend = datetime.datetime(year, month +2, day, 23, 59, 0)\ntorino = City(\"Torino\", start,end)\ntorino.set_c2g_datasets(from_pickle=True)\ntorino.set_enj_datasets(from_pickle=True)\ntorino.get_fleet(\"car2go\")\ntorino.get_fleet(\"enjoy\")\n\n\nres2 = pd.DataFrame()\nroot = \"/home/mc/Scrivania/Tesi/MyTool/pickles/\"\nmyDir = \"sym_res_corr_rnd/\"\nname = \"\"\nfor j in range(0,760):\n res2 = res2.append(pd.read_pickle(root+myDir+name+str(j)), ignore_index=True)\n \nmyDir = \"sym_res_corr_eur/\"\nname = \"\"\nfor j in range(0,6):\n res2 = res2.append(pd.read_pickle(root+myDir+name+str(j)), ignore_index=True)\n\n\n#df = res2\n#provider = 'car2go'\n#best_deaths = df[(df[\"provider\"] == provider) & (df[\"algorithm\"]==\"rnd\")]\n#best_deaths = df[(df[\"provider\"] == provider) & (df[\"algorithm\"]!=\"rnd\")]\n\n\n#best_deaths = best_deaths.sort_values(\"tot_deaths\").groupby([\"z\",\"ppz\"], as_index=False).first()\n#best_deaths = best_deaths.rename(columns={\"rnd\":\"mean_rnd\"})\n#best_deaths[\"algorithm\"] = \"best_rnd\"\n\n\n#best_deaths = best_deaths.groupby([\"z\",\"ppz\"], as_index=False).mean()\n#best_deaths = best_deaths.rename(columns={\"rnd\":\"mean_rnd\"})\n#best_deaths[\"algorithm\"] = \"mean_rnd\"\n\n#best_deaths[\"pieni\"] = best_deaths[\"pieni\"].div(len(torino.car2go))*100\n#best_deaths[\"z\"] = best_deaths[\"z\"].mul(100).div(238).astype(int)\n#best_deaths = best_deaths[[\"z\", \"ppz\", \"algorithm\", \"pieni\"]]\n#best_deaths = best_deaths[(best_deaths[\"z\"] == 25) & (best_deaths[\"ppz\"] == 6)]\n#\n#aab = aaa[\"distance\"]\n#bat = pd.DataFrame()\n#myDir =\"sym_res_bat/\"\n#for j in range(0,3):\n# bat = bat.append(pd.read_pickle(root+myDir+name+\"bat_\"+str(j)), ignore_index=True)\n\n \nplot_from_df_algorithm (res2, torino, \"car2go\", \"mean_rnd\", \"tot_deaths\", [2]) \nplot_from_df_algorithm (res2, torino, \"car2go\", \"mean_rnd\", \"tot_deaths\", [2,4]) \nplot_from_df_algorithm (res2, torino, \"car2go\", \"mean_rnd\", \"tot_deaths\", [2,4,6]) \nplot_from_df_algorithm (res2, torino, \"car2go\", \"mean_rnd\", \"tot_deaths\", [2,4,6,8]) \n#aaa = aaa[aaa[\"z\"] == 60]\n#aaa[\"tot_deaths\"] = aaa[\"tot_deaths\"].div(len(torino.car2go)).mul(100)\n#plot_from_df_algorithm (res2, torino, \"enjoy\", \"max_time\", \"tot_deaths\") \n#plot_from_df_algorithm (res2, torino, \"enjoy\", \"max_avg_time\", \"tot_deaths\") \n#plot_from_df_algorithm (res2, torino, \"car2go\", \"best_rnd\", 'avg_bat_before') \n#plot_from_df_algorithm (res2, torino, \"car2go\", \"mean_rnd\", 'avg_bat_before') \n\n#plot_from_df_algorithm (res2, torino, \"enjoy\", \"max_parking\") \n#plot_from_df_algorithm (res2, torino, \"enjoy\", \"max_time\") \n#plot_from_df_algorithm (res2, torino, \"enjoy\", \"max_avg_time\") \n#plot_from_df_algorithm (res2, torino, \"car2go\", \"best_rnd\", 
'tot_deaths') \n#plot_from_df_algorithm (res2, torino, \"car2go\", \"mean_rnd\", 'avg_bat_before') \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n" }, { "alpha_fraction": 0.5160499215126038, "alphanum_fraction": 0.5324390530586243, "avg_line_length": 38.49690628051758, "blob_id": "7ad39d52b2f7108e1736524ab63d2bec3fd75ad3", "content_id": "f6ad7bb9047f838371b52762f11bbda82f8d00db", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 19159, "license_type": "no_license", "max_line_length": 139, "num_lines": 485, "path": "/Analysis/simulator/city.py", "repo_name": "michelelt/MyTool", "src_encoding": "UTF-8", "text": "import pandas as pd\nimport geopandas as gpd\nimport numpy as np\nimport datetime\nimport time\nimport random\nimport sys\nimport os.path\nsys.path.insert(0, '/home/mc/Scrivania/Tesi/MyTool/Analysis/')\nimport paths as paths\nfrom DataBaseProxy import DataBaseProxy\nsys.path.insert(0, '/home/mc/Scrivania/Tesi/MyTool/Analysis/simulator')\nfrom util import Utility\nfrom car import Car\nfrom shapely.geometry import Point, Polygon\nfrom station import Station\nimport threading\nfrom multiprocessing import Process\nimport matplotlib.pyplot as plt\nfrom matplotlib import colors\n\n\n\nutil = Utility()\n\ndef pointfy (lon, lat):\n return pd.Series(Point(float(lon), float(lat)))\n\n\nclass City (object):\n def __init__(self, name, start, end):\n self.name = name\n self.start = start\n self.end = end\n self.days = (end-start).days + 1\n self.crs = {\"init\": \"epsg:4326\"}\n self.turin = gpd.read_file(\"../../SHAPE/grid500.dbf\").to_crs(self.crs)\n self.stations={}\n\n\n return\n \n def parkings_analysis(self, df, days=0):\n g = pd.DataFrame(df.groupby(\"zone\").count()[\"plate\"]).rename(columns={\"plate\":\"parking_per_zone\"})\n g = g[g[\"parking_per_zone\"] >= days]\n g[\"duration_per_zone\"] = df.groupby(\"zone\").sum()\n g[\"avg_duration_per_zone\"] = 1.0* g[\"duration_per_zone\"] /60/g[\"parking_per_zone\"]\n g.index = g.index.astype(int)\n g[\"geometry\"] = self.turin[\"geometry\"]\n out = gpd.GeoDataFrame(g)\n return out\n \n \n def load_parkings(self, provider):\n if provider == \"car2go\":\n if os.path.exists(paths.car2go_parkings_pickle_path_zoned):\n self.car2go_parkings = pd.read_pickle(paths.car2go_parkings_pickle_path_zoned)\n self.car2go_parkings = self.car2go_parkings.sort_values(\"init_time\").dropna()\n else :\n df1 = self.car2go\n self.car2go_parkings = pd.DataFrame(columns = [\"plate\", \"city\", \"geometry\", \"init_time\", \"final_time\", \"duration\", \"zone\"])\n row =0\n for plate in set(df1.plate):\n car_bookings = df1[df1[\"plate\"] == plate].sort_values(\"init_time\").reset_index()\n for i in range(len(car_bookings)-1) :\n b2 = car_bookings.loc[i+1]\n b1 = car_bookings.loc[i]\n s = pd.Series()\n s = b1[[\"plate\", \"city\", \"geometry\"]]\n s[\"init_time\"]= b1[\"final_time\"]\n s[\"final_time\"] = b2[\"init_time\"]\n s[\"duration\"] = float(b2[\"init_time\"] - b1[\"final_time\"]) / 60\n s[\"zone\"] = b1[\"final_zone\"]\n self.car2go_parkings.loc[row] = s\n row +=1\n self.car2go_parkings.to_pickle(paths.car2go_parkings_pickle_path_zoned).sort_values(\"init_time\").dropna()\n return\n\n\n if provider == \"enjoy\":\n if os.path.exists(paths.enjoy_parkings_pickle_path_zoned):\n self.enjoy_parkings = pd.read_pickle(paths.enjoy_parkings_pickle_path_zoned)\n self.enjoy_parkings = self.enjoy_parkings.sort_values(\"init_time\").dropna()\n else :\n df1 = self.enjoy\n self.enjoy_parkings = 
pd.DataFrame(columns = [\"plate\", \"city\", \"geometry\", \"init_time\", \"final_time\", \"duration\", \"zone\"])\n row =0\n for plate in list(set(df1.plate)):\n car_bookings = df1[df1[\"plate\"] == plate].sort_values(\"init_time\").reset_index()\n for i in range(len(car_bookings)-1) :\n b2 = car_bookings.loc[i+1]\n b1 = car_bookings.loc[i]\n s = pd.Series()\n s = b1[[\"plate\", \"city\", \"geometry\"]]\n s[\"init_time\"]= b1[\"final_time\"]\n s[\"final_time\"] = b2[\"init_time\"]\n s[\"duration\"] = float(b2[\"init_time\"] - b1[\"final_time\"]) / 60\n s[\"zone\"] = b1[\"final_zone\"]\n self.enjoy_parkings.loc[row] = s\n row +=1\n if row % 1000 ==0:\n print row\n self.enjoy_parkings.to_pickle(paths.enjoy_parkings_pickle_path_zoned).sort_values(\"init_time\").dropna()\n return\n \n\n \n def set_c2g_datasets(self, from_pickle=True):\n if from_pickle == True:\n df = pd.read_pickle(paths.car2go_bookings_pickle_path, None)\n df = df[(df[\"duration\"] <= 120) &(df[\"distance\"] >= 20)]\n self.car2go = gpd.GeoDataFrame(df, crs=self.crs)\n self.assign_zone_numbers(\"car2go\")\n else:\n dbp = DataBaseProxy()\n df = dbp.query_bookings_df(\"car2go\",self.name, self.start, self.end)\n df.to_pickle(paths.car2go_bookings_pickle_path, None)\n df = df[(df[\"duration\"] <= 120) &(df[\"distance\"] >= 20)]\n self.car2go = gpd.GeoDataFrame(df, crs=self.crs)\n self.assign_zone_numbers(\"car2go\")\n \n self.car2go = self.car2go.dropna(axis=0).sort_values(\"init_time\")\n self.load_parkings(\"car2go\")\n self.car2go_parkings_analysis = self.parkings_analysis(self.car2go_parkings, days=self.days)\n self.car2go_stations_mp = self.car2go_parkings_analysis.sort_values(\"parking_per_zone\", ascending=False)\n self.car2go_stations_mat = self.car2go_parkings_analysis.sort_values(\"avg_duration_per_zone\", ascending=False)\n\n\n\n \n def set_enj_datasets(self, from_pickle=True):\n if from_pickle == True:\n df = pd.read_pickle(paths.enjoy_bookings_pickle_path, None)\n df = df[(df[\"duration\"] <= 120) &(df[\"distance\"] >= 20)]\n self.enjoy = gpd.GeoDataFrame(df, crs=self.crs)\n self.assign_zone_numbers(\"enjoy\")\n else:\n dbp = DataBaseProxy()\n df = dbp.query_bookings_df(\"enjoy\",self.name, self.start, self.end)\n df.to_pickle(paths.enjoy_bookings_pickle_path, None)\n df = df[(df[\"duration\"] <= 120) &(df[\"distance\"] >= 20)]\n self.enjoy = gpd.GeoDataFrame(df, crs=self.crs)\n self.assign_zone_numbers(\"enjoy\")\n \n self.load_parkings(\"enjoy\")\n self.enjoy = self.enjoy.dropna(axis=0).sort_values(\"init_time\")\n self.enjoy_parkings_analysis = self.parkings_analysis(self.enjoy_parkings, days=self.days)\n self.enjoy_stations_mp = self.enjoy_parkings_analysis.sort_values(\"parking_per_zone\", ascending=False)\n self.enjoy_stations_mat = self.enjoy_parkings_analysis.sort_values(\"avg_duration_per_zone\", ascending=False)\n\n\n \n def get_fleet(self, provider):\n if provider == 'car2go':\n self.c2g_fleet = util.get_fleet(self.car2go, \n util.get_valid_days(self.car2go, self.start, self.end)[\"valid_days\"])\n self.c2g_fleet = self.c2g_fleet[self.c2g_fleet[\"bookings_per_car\"] >= self.days]\n else :\n self.enj_fleet = util.get_fleet(self.enjoy, \n util.get_valid_days(self.enjoy, self.start, self.end)[\"valid_days\"])\n self.enj_fleet = self.enj_fleet[self.enj_fleet[\"bookings_per_car\"] >= self.days]\n \n def assign_zone_numbers(self, provider):\n if provider == \"car2go\":\n if os.path.exists(paths.car2go_bookings_picke_path_zoned):\n self.car2go = pd.read_pickle(paths.car2go_bookings_picke_path_zoned, None)\n else 
:\n self.car2go['geometry'] = self.car2go.apply(\n lambda row: pointfy(row['start_lon'], row['start_lat']), axis = 1)\n self.car2go = gpd.sjoin(self.car2go, self.turin, how='inner', op='within')\n self.car2go.rename(columns={\"FID\": \"init_zone\"}, inplace=True)\n self.car2go.drop(\"index_right\", axis=1, inplace=True)\n \n self.car2go['geometry'] = self.car2go.apply(\n lambda row: pointfy(row['end_lon'], row['end_lat']), axis = 1)\n c2g2 = gpd.sjoin(self.car2go, self.turin, how='inner', op='within')\n c2g2.rename(columns={\"FID\": \"final_zone\"}, inplace=True)\n c2g2.drop(\"index_right\", axis=1, inplace=True)\n self.car2go[\"final_zone\"] = c2g2[\"final_zone\"]\n del c2g2\n \n self.car2go.to_pickle(paths.car2go_bookings_picke_path_zoned, None)\n else :\n if os.path.exists(paths.enjoy_bookings_picke_path_zoned):\n self.enjoy = pd.read_pickle(paths.enjoy_bookings_picke_path_zoned, None)\n else :\n self.enjoy['geometry'] = self.enjoy.apply(\n lambda row: pointfy(row['start_lon'], row['start_lat']), axis = 1)\n self.enjoy = gpd.sjoin(self.enjoy, self.turin, how='inner', op='within')\n self.enjoy.rename(columns={\"FID\": \"init_zone\"}, inplace=True)\n self.enjoy.drop(\"index_right\", axis=1, inplace=True)\n \n self.enjoy['geometry'] = self.enjoy.apply(\n lambda row: pointfy(row['end_lon'], row['end_lat']), axis = 1)\n c2g2 = gpd.sjoin(self.enjoy, self.turin, how='inner', op='within')\n c2g2.rename(columns={\"FID\": \"final_zone\"}, inplace=True)\n c2g2.drop(\"index_right\", axis=1, inplace=True)\n self.enjoy[\"final_zone\"] = c2g2[\"final_zone\"]\n \n self.enjoy.to_pickle(paths.enjoy_bookings_picke_path_zoned, None)\n \n\n \n def create_c2g_cars_collections(self):\n self.cars_c2g={}\n for plate in self.car2go[\"plate\"].unique():\n self.cars_c2g[plate] = Car(plate,\"car2go\",self.car2go.iloc[0]) \n return self.cars_c2g\n \n def create_enj_cars_collections(self):\n self.cars_enj={}\n for plate in self.enjoy[\"plate\"].unique():\n self.cars_enj[plate] = Car(plate,\"enjoy\",self.enjoy.iloc[0]) \n return self.cars_enj\n \n def place_stations(self, no_ps, no_ps_per_station, provider, algorithm=\"rnd\", station_type=1):\n no_ps = int(no_ps)\n no_ps_per_station = int(no_ps_per_station)\n \n if provider == \"car2go\":\n df = self.car2go_parkings_analysis\n else:\n df = self.enjoy_parkings_analysis\n \n if no_ps % no_ps_per_station == 0:\n max_stat = no_ps / no_ps_per_station\n else:\n max_stat = (no_ps/no_ps_per_station)+1\n \n stations={}\n self.stations = stations\n print algorithm\n if algorithm == \"max_parking\":\n zones = df.sort_values(\"parking_per_zone\", ascending=False).head(max_stat)\n for i in range(0,len(zones)):\n zone = zones.iloc[[i]].index.get_values()[0]\n stations[zone] = Station(zone, no_ps_per_station,0, station_type)\n# print zones.index\n \n elif algorithm == \"max_avg_time\" :\n zones = df.sort_values(\"avg_duration_per_zone\", ascending=False).head(max_stat)\n for i in range(0,len(zones)):\n zone = zones.iloc[[i]].index.get_values()[0]\n# print zone\n stations[zone] = Station(zone, no_ps_per_station,0, station_type) \n# print zones.index\n\n\n elif algorithm == \"max_time\":\n zones = df.sort_values(\"duration_per_zone\", ascending=False).head(max_stat)\n for i in range(0,len(zones)):\n zone = zones.iloc[[i]].index.get_values()[0]\n stations[zone] = Station(zone, no_ps_per_station,0, station_type)\n# print zones.index\n\n\n elif algorithm == \"rnd\": \n seed = random.randint(1, 1e6) % random.randint(1, 1e6) \n zones = df.sample(n=max_stat, random_state=seed)\n for i in 
range(0,len(zones)):\n zone = zones.iloc[[i]].index.get_values()[0]\n stations[zone] = Station(zone, no_ps_per_station,0, station_type)\n# print stations.keys()[0:10]\n else:\n print \"error\"\n return\n \n self.stations = stations\n \n return stations\n \n \n \n def run(self, provider, threshold):\n# print \"running...\"\n if provider == \"car2go\":\n df = self.car2go\n cars = self.create_c2g_cars_collections()\n corrective_factor = 1.82\n nob = len(self.car2go)\n\n \n if provider == \"enjoy\":\n df = self.enjoy\n cars = self.create_enj_cars_collections()\n corrective_factor = 1.82\n nob = len(self.enjoy)\n\n \n stations = self.stations\n \n refused_bookings = 0\n s = time.time()\n# for i in range (1, len(df)-1):\n# df.drop(\"geometry\",axis=1, inplace=True)\n# df = pd.DataFrame(df)\n# self.avg_bat_before = 0\n self.avg_bat_after = 0\n self.avg_bat_before = 0\n pieni = 0 \n for index, row in df.iterrows() :\n# if i%10000 == 0:\n# print i\n c_booking = row\n c_plate = str(c_booking[\"plate\"])\n c_car = cars[c_plate]\n old_cap = c_car.current_capacity\n \n self.avg_bat_before = self.avg_bat_before + c_car.current_capacity\n# if c_car.current_capacity > c_car.capacity:\n# print index, c_car.plate, c_car.current_capacity\n\n if c_car.in_charge == True :\n \n old_cap = c_car.current_capacity\n rech_z = c_car.last_final_zone() \n stations[rech_z].decrease_supplied_cars()\n stations[rech_z].increase_recharged_counter(c_plate)\n c_car.compute_recharge(stations[rech_z], c_booking)\n \n\n if old_cap - c_car.current_capacity < 0:\n pieni = pieni + 1\n \n \n# fz = c_booking[\"final_zone\"].astype(np.int64)\n fz = int(c_booking[\"final_zone\"])\n\n# print c_car.current_capacuty, threshold\n if c_car.current_capacity >= threshold:\n c_car.compute_consuption(c_booking[\"distance\"] * corrective_factor)\n# print fz, type(fz), stations.keys()[3], type(stations.keys()[3]), fz==stations.keys()[3] \n# if fz in stations.keys():\n# print\"o\n if fz in stations.keys() and stations[fz].cars < stations[fz].max_cars :\n stations[fz].increase_supplied_cars()\n c_car.set_in_charge()\n c_car.assign_last_booking(c_booking)\n else:\n c_car.set_not_in_charge()\n c_car.assign_last_booking(c_booking)\n\n else:\n refused_bookings +=1\n self.avg_bat_after = self.avg_bat_after + c_car.current_capacity\n\n df = pd.DataFrame.from_records([cars[c].to_dict() for c in cars])\n self.avg_bat_after = self.avg_bat_after / nob\n self.avg_bat_before = self.avg_bat_before / nob\n\n self.et = time.time()-s\n self.pieni = pieni\n self.rech_cars = pd.DataFrame(columns=[\"disticint_cars_in_zone\"])\n \n for zone_id in self.stations.keys() :\n self.rech_cars.loc[zone_id,\"disticint_cars_in_zone\"] = len(self.stations[zone_id].charged_cars)\n stats = pd.DataFrame()\n for plate in cars :\n row = cars[plate].car2df()\n stats = stats.append(row, ignore_index=True)\n stats.set_index(\"plate\", inplace =True)\n return stats\n \n \n\n\n#year = 2017\n#month = 5\n#day = 6\n#start = datetime.datetime(year, month, day, 0, 0, 0)\n#end = datetime.datetime(year, month +2, day, 23, 59, 0)\n#torino = City(\"Torino\", start,end)\n#torino.set_c2g_datasets(from_pickle=True)\n#torino.set_enj_datasets(from_pickle=True)\n#torino.get_fleet(\"car2go\")\n#torino.get_fleet(\"enjoy\")\n\n#print len(torino.car2go)/39.0/395\n#print len(torino.enjoy)/39.0/315\n#\n#print len(torino.car2go_parkings)/39.0/395\n#print len(torino.enjoy_parkings) /39.0/315\n###\n##ms = time.time()\n#print \"max_parking \",\n#torino.place_stations(90,\n# 4,\n# \"car2go\",\n# 
\"max_parking\",\n# station_type=1)\n#torino.run('car2go', 0)\n#print torino.avg_bat_after, torino.avg_bat_before\n#print\n#\n#print \"max_avg_time\"\n#print torino.place_stations(20,\n# 2,\n# \"car2go\",\n# \"max_avg_time\",\n# station_type=1).keys()\n#print\n#\n#print \"max_time \"\n#print torino.place_stations(20,\n# 2,\n# \"car2go\",\n# \"max_time\",\n# station_type=1).keys()\n#print\n#\n#zzz = torino.car2go_parkings_analysis\n#zzz2 = torino.car2go_parkings_analysis\n\n#print torino.run(\"car2go\", 0)[\"deaths\"].sum()\n#print time.time() - ms\n\n#zones = 17.0\n#ppz = 4\n#algs = 3\n#prov = 2\n#texec = 14\n#print zones* ppz* algs* prov\n\n \n#n_z = range(10,260, 50)\n#n_ppz = [3,9]\n#commands = {}\n#j=0\n#for cso in [\"car2go\", \"enjoy\"]:\n# for alg in [\"max_parking\", \"rnd\", \"max_avg_time\"] :\n# for ppz in n_ppz:\n# d = {}\n# d[\"alg\"] = alg\n# d[\"ppz\"] = ppz\n# d[\"out\"] = \"/home/mc/Scrivania/Tesi/MyTool/pickles/sym_res/sym_res_\"+str(j) \n# d[\"cso\"] = cso\n# commands[j] = d\n# j=j+1\n#def return_path(cso, alg, ppz, z):\n# string = str(cso) +\"_\"+ str(alg) + \"_\" + str(ppz) + \"_\"+ str(z)\n# return string\n#\n#\n#node_sim_list=[]\n#process_list = []\n#for i in commands.keys():\n# node_sim_list.append(commands[i])\n# \n# \n#def worker(node):\n# resutls = pd.DataFrame() \n# for z in n_z:\n# torino.place_stations(z * node[\"ppz\"],\n# node[\"ppz\"],\n# node[\"cso\"],\n# algorithm=node[\"alg\"],\n# station_type=1)\n# c2g_stats = torino.run(node[\"cso\"], threshold=0)\n# row = pd.Series()\n# row[\"z\"] = z\n# row[\"ppz\"] = node[\"ppz\"]\n# row[\"p\"] = z*node[\"ppz\"]\n# row[\"provider\"] = node[\"cso\"]\n# row[\"algorithm\"] = node[\"alg\"]\n# row[\"mean_dpc\"] = c2g_stats[\"deaths\"].mean()\n# row[\"median_dpc\"] = c2g_stats[\"deaths\"].median()\n# row[\"tot_deaths\"] = c2g_stats[\"deaths\"].sum() \n# resutls = resutls.append(row, ignore_index=True)\n# resutls.to_pickle(node[\"out\"])\n#\n#\n#init_time = time.time()\n#for node in node_sim_list:\n# p = Process(target=worker, args=(node,))\n# process_list.append(p)\n# p.start()\n#\n#for p in process_list:\n# p.join()\n# \n#print time.time() - init_time\n#\n#res = pd.DataFrame()\n#for node in node_sim_list:\n# res = res.append(pd.read_pickle(node[\"out\"]), ignore_index=True)\n\n\n\n" }, { "alpha_fraction": 0.48797595500946045, "alphanum_fraction": 0.5128256678581238, "avg_line_length": 28.449703216552734, "blob_id": "1b7bab357b24c13f4a52300effec404609aabc4b", "content_id": "184bc60fa88955dd7ca5f8c0c9c0d2a04f0567f1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4990, "license_type": "no_license", "max_line_length": 104, "num_lines": 169, "path": "/Analysis/util.py", "repo_name": "michelelt/MyTool", "src_encoding": "UTF-8", "text": "import pandas as pd\nimport datetime\nimport matplotlib.pyplot as plt\nfrom time import gmtime, strftime\nfrom fiona.crs import from_epsg\nimport pandas as pd\nimport geopandas as gpd\nfrom shapely.geometry import Point, Polygon\nfrom math import radians,degrees, cos, sin, asin, sqrt, acos\n\n\nclass Utility (object):\n\n def __init__ (self):\n return\n \n \n def get_color(self, df):\n provider = self.get_provider(df)\n if provider == \"enjoy\" :\n return \"red\"\n else:\n return \"blue\"\n \n def get_provider(self, df):\n return df.iloc[0].vendor\n \n #return a df with the valid date with entries and #parkings/#bookings\n def get_valid_days(self,df,start,end):\n year = start.year\n month = start.month\n day = start.day\n \n delta = 
end - start\n \n df = pd.DataFrame(df['init_date'])\n df['date'] = df.init_date.apply(lambda x : x.date())\n df = df.groupby('date').count()\n \n datelist = pd.date_range(pd.datetime(year,month,day), periods=delta.days).tolist()\n dfdays = pd.DataFrame(datelist)\n dfdays['count'] = [0]*len(datelist)\n dfdays.set_index(0, inplace=True)\n df2= dfdays['count'] + df['init_date']\n df2 = pd.DataFrame(df2)\n df2.fillna(0, inplace=True)\n df2 = df2.rename(columns={0:'entries'})\n \n df2[\"date\"] = df2.index\n df2[\"date\"] = df2.date.apply(lambda x: x.strftime(\"%A\"))\n \n res={}\n res[\"df\"] = df2\n res[\"valid_days\"] = len(df2[df2[\"entries\"]>0])\n tmp = df2[df2[\"entries\"] > 0]\n res[\"mean\"] = tmp.entries.mean()\n res[\"std\"] = tmp.entries.std()\n res[\"median\"] = tmp.entries.median()\n \n tmp = tmp[(tmp.entries >= res[\"median\"] - res[\"std\"]) &\n (tmp.entries <= res[\"median\"] + res[\"std\"])\n ]\n res[\"cleaned_valid_days\"] =len(tmp)\n\n return res\n \n def clean_df (self, df, column, median, std):\n tmp = df[(df[column] >= median - std) &\n (df[column] <= median + std)\n ]\n return tmp\n \n def haversine(self, lon1, lat1, lon2, lat2):\n \"\"\"\n Calculate the great circle distance between two points \n on the earth (specified in decimal degrees)\n \"\"\"\n # convert decimal degrees to radians \n lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2])\n # haversine formula \n dlon = lon2 - lon1 \n dlat = lat2 - lat1 \n a = sin(dlat/2)**2 + cos(lat1) * cos(lat2) * sin(dlon/2)**2\n c = 2 * asin(sqrt(a)) \n km = 6367 * c\n return int(km*1000)\n \n def step_x(self, meters):\n phi1 = radians(7)\n R = 6367.0\n \n a = radians(float(meters)/(2*R))\n numeratore = 2*(sin(a)*sin((a)))\n denominatore = (cos(phi1)*cos(phi1))\n fract = numeratore/denominatore\n step = acos(1-fract)\n return step\n \n def step_y(self, meters):\n return\n \n def grid_df (self, xmin, xmax, ymin, ymax, s_x, s_y):\n start_x = xmin\n final_x = xmax\n step_x = s_x \n start_y = ymin\n final_y = ymax\n step_y = s_y\n \n x = start_x\n y= start_y\n newdata = gpd.GeoDataFrame()\n newdata.crs = from_epsg(4326)\n newdata['geometry'] = None\n gdf_row = 0\n while x <= final_x:\n y = start_y\n while y <= final_y:\n p1 = (x,y)\n p2 = (x+step_x,y)\n p3 = (x+step_x, y+step_y)\n p4 = (x, y+step_y)\n q= Polygon([p1,p2,p3,p4])\n newdata.loc[gdf_row, 'geometry'] = q\n gdf_row = gdf_row + 1\n y = y + step_y\n \n x = x + step_x\n \n outfp = r\"/home/mc/Scrivania/Tesi/MyTool/SHAPE/grid.shp\"\n newdata.to_file(outfp)\n \n def get_fleet(self, df, days):\n df = pd.DataFrame(df.groupby(\"plate\").count()[\"_id\"]).rename(columns={\"_id\":\"bookings_per_car\"})\n df = df[df[\"bookings_per_car\"] > days]\n return df\n\n def get_fleet_in_date (self, df, start, end, days):\n df = df[(df[\"init_date\"]>= start) & \n (df[\"init_date\"]<= end)]\n return self.get_fleet(df) \n \n\n\n#meters = 50.0\n#phi1 = radians(7)\n#R = 6367.0*1000.0\n#\n#a = radians(meters/(2*R))\n#numeratore = 2*(sin(a)*sin((a)))\n#denominatore = (cos(phi1)*cos(phi1))\n#fract = numeratore/denominatore\n#step = acos(1-fract)\n#\n#u = Utility()\n#my_test_m = []\n#distances = []\n#for test_m in range(0,100000):\n# my_test_m.append(test_m)\n# distances.append(u.haversine(7.0, 45, 7.0+u.step_x(test_m), 45.0))\n##print u.step_x(test_m)\n##print u.haversine(7.0, 45, 7.0+u.step_x(test_m), 45.0)\n#\n#df = pd.DataFrame()\n#df[\"distances\"] = distances\n#df[\"my_test_m\"] = my_test_m\n#\n#df.plot()\n#plt.show()\n\n\n\n\n\n\n\n\n\n\n\n\n\n" }, { "alpha_fraction": 
0.5875763893127441, "alphanum_fraction": 0.6262729167938232, "avg_line_length": 26.933332443237305, "blob_id": "ab5c88659a5752aa5f7c3420ad72e88abe2b0", "content_id": "237db8b0e88e559f1db672be0433092bb2bfdafa", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2946, "license_type": "no_license", "max_line_length": 111, "num_lines": 105, "path": "/Analysis/zz_corrective_factor.py", "repo_name": "michelelt/MyTool", "src_encoding": "UTF-8", "text": "import pandas as pd\nimport warnings\nwarnings.filterwarnings('ignore')\nimport datetime\nimport matplotlib.pyplot as plt\nimport matplotlib\nfrom math import *\nimport numpy as np\nimport paths as paths\nfrom util import Utility\nimport time\n\n\nfrom DataBaseProxy import DataBaseProxy\n#dbp = DataBaseProxy()\nutil = Utility()\n\nyear = 2017\nmonth = 5\nday = 6\n\n#km per car for enjoy and car2go in one week\nstart = datetime.datetime(year, month, day, 0, 0, 0)\nend = datetime.datetime(year, month +2, day, 23, 59, 0)\n\nenj_bookings = pd.read_pickle(paths.enjoy_bookings_pickle_path, None)\nc2g_bookings = pd.read_pickle(paths.car2go_bookings_pickle_path, None)\n\nenj_parkings = pd.read_pickle(paths.enjoy_parkings_pickle_path, None)\nc2g_parkings = pd.read_pickle(paths.car2go_parkings_pickle_path, None)\n\nenj_bookings = enj_bookings[\n        (enj_bookings[\"duration\"] <= 120) & \n        (enj_bookings[\"distance\"] >=20) &\n        (enj_bookings[\"distance_dr\"] != -1)\n        ]\nenj_bookings[\"corr_fact\"] = enj_bookings[\"distance_dr\"].div(enj_bookings[\"distance\"])\n\n\nc2g_bookings = c2g_bookings[\n        (c2g_bookings[\"duration\"] <= 120) & \n        (c2g_bookings[\"distance\"] >=20) &\n        (c2g_bookings[\"distance_dr\"] != -1)\n        ]\nc2g_bookings[\"corr_fact\"] = c2g_bookings[\"distance_dr\"].div(c2g_bookings[\"distance\"])\n\n\n\n\ndef plot_gd_vs_df(df1, provider, path):\n    if provider == 'car2go':\n        color = 'blue'\n    else:\n        color = 'red'\n        \n    fig = plt.figure(figsize=(10,10))\n    ax = fig.gca()\n    ax.set_title(provider + \" - Google dist. over Eucl. 
dist.\", fontsize=36)\n ax.grid()\n\n q=0.01\n df1 = df1[\n (df1[\"corr_fact\"] <= df1[\"corr_fact\"].quantile(1-q)) & \n (df1[\"corr_fact\"] >= df1[\"corr_fact\"].quantile(q) )\n ]\n \n values = [df1[\"corr_fact\"].quantile(0.9), df1[\"corr_fact\"].quantile(0.95), df1[\"corr_fact\"].quantile(0.99)]\n percen = [0.9, 0.95, 0.99]\n print provider\n print values\n print\n ax.hist(df1[\"corr_fact\"], cumulative=True, normed=True, bins=100, color=color)\n \n for i in range(len(values)):\n ax.axhline(percen[i], color='black',linewidth=3)\n \n ax.set_xlabel(\"Google dist over Eucl.dist\", fontsize=36)\n for tick in ax.xaxis.get_major_ticks():\n tick.label.set_fontsize(20) \n y_ticks = [0.0, 0.2, 0.4, 0.6, 0.8, 0.9, 0.95, 0.99]\n ax.set_yticks(y_ticks) \n for tick in ax.yaxis.get_major_ticks():\n tick.label.set_fontsize(20) \n\n ax.set_ylabel(\"ECDF\", fontsize=36)\n\n if len(path) > 0 :\n plt.savefig(path,\n bbox_inches = 'tight')\n\n plt.show()\n \n\n\ndir_= \"04_data_analysis\"\nname= \"correctvie_factor\"\n\nprovider = 'enjoy'\npath = \"/home/mc/Scrivania/Tesi/Writing/figures/\"+dir_+\"/\"+provider+\"_\"+name\nplot_gd_vs_df(enj_bookings, \"enjoy\", path)\n\n\nprovider = 'car2go'\npath = \"/home/mc/Scrivania/Tesi/Writing/figures/\"+dir_+\"/\"+provider+\"_\"+name\nplot_gd_vs_df(c2g_bookings, \"car2go\", path)\n\n\n\n\n\n\n\n\n\n\n\n\n\n" }, { "alpha_fraction": 0.5788716077804565, "alphanum_fraction": 0.6001726984977722, "avg_line_length": 28.69230842590332, "blob_id": "8428175e2f49b426400d2566789024cccf2fa25e", "content_id": "695e5bb56179dfe115062f03bed512cffe87e6e1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3474, "license_type": "no_license", "max_line_length": 92, "num_lines": 117, "path": "/Analysis/griding.py", "repo_name": "michelelt/MyTool", "src_encoding": "UTF-8", "text": "import os, sys\nimport ogr\nfrom math import ceil, cos,sin, sqrt, degrees, radians\nfrom util import Utility\nimport paths as paths\n\n\ndef griding(outputGridfn,xmin,xmax,ymin,ymax,gridHeight,gridWidth):\n\n # convert sys.argv to float\n xmin = float(xmin)\n xmax = float(xmax)\n ymin = float(ymin)\n ymax = float(ymax)\n gridWidth = float(gridWidth)\n gridHeight = float(gridHeight)\n\n # get rows\n rows = ceil((ymax-ymin)/gridHeight)\n # get columns\n cols = ceil((xmax-xmin)/gridWidth)\n\n # start grid cell envelope\n ringXleftOrigin = xmin\n ringXrightOrigin = xmin + gridWidth\n ringYtopOrigin = ymax\n ringYbottomOrigin = ymax-gridHeight\n\n # create output file\n outDriver = ogr.GetDriverByName('ESRI Shapefile')\n if os.path.exists(outputGridfn):\n os.remove(outputGridfn)\n outDataSource = outDriver.CreateDataSource(outputGridfn)\n outLayer = outDataSource.CreateLayer(outputGridfn,geom_type=ogr.wkbPolygon )\n featureDefn = outLayer.GetLayerDefn()\n print \"cols \" + str(cols)\n print \"rows \" + str(rows)\n # create grid cells\n countcols = 0\n while countcols < cols:\n countcols += 1\n\n # reset envelope for rows\n ringYtop = ringYtopOrigin\n ringYbottom =ringYbottomOrigin\n countrows = 0\n\n while countrows < rows:\n print \"C: \" + str(countcols) + \" R: \" + str(countrows)\n countrows += 1\n ring = ogr.Geometry(ogr.wkbLinearRing)\n ring.AddPoint(ringXleftOrigin, ringYtop)\n ring.AddPoint(ringXrightOrigin, ringYtop)\n ring.AddPoint(ringXrightOrigin, ringYbottom)\n ring.AddPoint(ringXleftOrigin, ringYbottom)\n ring.AddPoint(ringXleftOrigin, ringYtop)\n poly = ogr.Geometry(ogr.wkbPolygon)\n poly.AddGeometry(ring)\n\n # add new geom to layer\n 
outFeature = ogr.Feature(featureDefn)\n outFeature.SetGeometry(poly)\n outLayer.CreateFeature(outFeature)\n outFeature.Destroy\n\n # new envelope for next poly\n ringYtop = ringYtop - gridHeight\n ringYbottom = ringYbottom - gridHeight\n\n # new envelope for next poly\n ringXleftOrigin = ringXleftOrigin + gridWidth\n ringXrightOrigin = ringXrightOrigin + gridWidth\n\n # Close DataSources\n outDataSource.Destroy()\n \n#50m in decimal degree 0.0045\ndef convert_geo2cart(lat, lon, high):\n phi = radians(lat)\n lam = radians(lon)\n a = 6378137\n alfa = 1/298.257223563\n C = a*(1-alfa)\n e2 = (a*a - C*C)/(a*a)\n fact = e2*(sin(phi)*sin(phi))\n w = sqrt(1-fact)\n res = {\n \"x\" : ((a/w)+high) * cos(phi) * cos(lam),\n \"y\" : ((a/w)+high) * cos(phi) * sin(lam),\n \"z\" : ((a/w)*(1-e2)+high) * sin(phi)\n }\n \n return res\n\nhigh_left = {\n \"lat\" : 45.20,\n \"lon\" : 7.4,\n \"h\": 300\n }\nbottom_right = {\n \"lat\" : 44.85,\n \"lon\" : 7.9,\n \"h\": 300\n }\n\nx = []\ny = []\nx.append(convert_geo2cart(high_left[\"lat\"], high_left[\"lon\"], high_left[\"h\"])[\"x\"])\ny.append(convert_geo2cart(high_left[\"lat\"], high_left[\"lon\"], high_left[\"h\"])[\"y\"])\nx.append(convert_geo2cart(bottom_right[\"lat\"], bottom_right[\"lon\"], bottom_right[\"h\"])[\"x\"])\ny.append(convert_geo2cart(bottom_right[\"lat\"], bottom_right[\"lon\"], bottom_right[\"h\"])[\"y\"])\n\ngriding(paths.grid_path, \n min(x), max(x), \n min(y), max(y), \n 40000/50, \n 40000/50 )\n" }, { "alpha_fraction": 0.4295622408390045, "alphanum_fraction": 0.4399249255657196, "avg_line_length": 39.54138946533203, "blob_id": "b95ff8159cdf636b593ce6b2a7c0c401f6178c33", "content_id": "4a5d169442d157d4434a81f052d6ce0c5c2785c3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 24511, "license_type": "no_license", "max_line_length": 144, "num_lines": 604, "path": "/Analysis/DataBaseProxy.py", "repo_name": "michelelt/MyTool", "src_encoding": "UTF-8", "text": "from math import radians, cos, sin, asin, sqrt\ndef haversine(lon1, lat1, lon2, lat2):\n \"\"\"\n Calculate the great circle distance between two points \n on the earth (specified in decimal degrees)\n \"\"\"\n # convert decimal degrees to radians \n lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2])\n # haversine formula \n dlon = lon2 - lon1 \n dlat = lat2 - lat1 \n a = sin(dlat/2)**2 + cos(lat1) * cos(lat2) * sin(dlon/2)**2\n c = 2 * asin(sqrt(a)) \n km = 6367 * c\n return int(km*1000)\n\nregex ={\n \"Torino\" : \" #10... T#\",\n \"Milano\" : \"20... # Milano\",\n \"Firenze\" : \"50... Firenze\",\n \"Roma\" : \"00... Roma\",\n \"Catania\" : \"95... 
Catania\"\n }\n\n\n\n#print haversine(7.60, 45.20, 7.85, 45.20)\n\nimport datetime\nimport pandas as pd\nimport numpy as np\n#import recordFileds\n\n#from workalendar.europe import Italy\n\nfrom pymongo import MongoClient\nfrom bson.objectid import ObjectId\n\nfrom sshtunnel import SSHTunnelForwarder\n\nMONGO_HOST = \"bigdatadb.polito.it\"#\"REMOTE_IP_ADDRESS\"\nMONGO_DB = \"carsharing\" #\"DATABASE_NAME\"\nMONGO_USER = \"ict4transport\"#\"LOGIN\"\nMONGO_PASS = \"pwd_ict4_2017\"#\"PASSWORD\"\n#server.stop()\n\nclass DataBaseProxy (object):\n \n def __init__ (self):\n server = SSHTunnelForwarder(\n MONGO_HOST,\n ssh_username=MONGO_USER,\n ssh_password=MONGO_PASS,\n remote_bind_address=('127.0.0.1', 27017)\n )\n server.start()\n client = MongoClient('127.0.0.1', server.local_bind_port)\n client.carsharing.authenticate('ictts', 'Ictts16!', mechanism= 'SCRAM-SHA-1')\n self.db = client[MONGO_DB]\n\n def query_bookings(self, vendor, city, start, end):\n # porta nuova 45.0625, 7.678889\n if (vendor == \"enjoy\") :\n return self.db[\"enjoy_PermanentBookings\"].find(\n {'init_date':\n {\n '$gt': start,\n '$lt': end\n },\n 'city' : city,\n\n }).sort([(\"_id\", 1)]) \n elif (vendor == \"car2go\") :\n return self.db[\"PermanentBookings\"].find(\n {'init_date':\n {\n '$gt': start,\n '$lt': end\n },\n 'city' : city,\n \n }).sort([(\"_id\", 1)]) \n else:\n return \"err\"\n \n def query_bookings_df(self, vendor, city, start, end):\n books_cursor = self.query_bookings(vendor, city, start, end)\n if (books_cursor == \"err from cursor\" or books_cursor.count() == 0):\n return \"err\"\n else :\n# print books_cursor.count()\n# bookings_df = pd.DataFrame(columns = pd.Series(books_cursor.next()).index)\n bookings_df = pd.DataFrame(list(books_cursor))\n \n \n \n bookings_df['duration_dr'] = bookings_df.driving.apply(lambda x: float(x['duration']/60))\n bookings_df['distance_dr'] = bookings_df.driving.apply(lambda x: x['distance'])\n bookings_df = bookings_df.drop('driving',1)\n \n bookings_df['type'] = bookings_df.origin_destination.apply(lambda x : x['type'])\n bookings_df['coordinates'] = bookings_df.origin_destination.apply(lambda x : x['coordinates'])\n bookings_df = bookings_df.drop('origin_destination',1)\n \n bookings_df['start'] = bookings_df.coordinates.apply(lambda x : x[0])\n bookings_df['end'] = bookings_df.coordinates.apply(lambda x : x[1])\n bookings_df = bookings_df.drop('coordinates',1)\n \n bookings_df['start_lon'] = bookings_df.start.apply(lambda x : float(x[0]) )\n bookings_df['start_lat'] = bookings_df.start.apply(lambda x : float(x[1]) )\n bookings_df = bookings_df.drop('start',1)\n \n bookings_df['end_lon'] = bookings_df.end.apply(lambda x : float(x[0]) )\n bookings_df['end_lat'] = bookings_df.end.apply(lambda x : float(x[1]) )\n bookings_df = bookings_df.drop('end', 1)\n \n bookings_df['distance'] = bookings_df.apply(lambda x : haversine(\n float(x['start_lon']),float(x['start_lat']),\n float(x['end_lon']), float(x['end_lat'])), axis=1\n )\n \n bookings_df['duration'] = bookings_df.final_date - bookings_df.init_date \n bookings_df['duration'] = bookings_df['duration'].apply(lambda x: x.days*24*60 + x.seconds/60)\n \n bookings_df['duration_pt'] = bookings_df.public_transport.apply(lambda x : x['duration'] )\n bookings_df['distance_pt'] = bookings_df.public_transport.apply(lambda x : x['distance'] )\n bookings_df['arrival_date_pt'] = bookings_df.public_transport.apply(lambda x : x['arrival_date'] )\n bookings_df['arrival_time_pt'] = bookings_df.public_transport.apply(lambda x : 
x['arrival_time'] )\n bookings_df = bookings_df.drop('public_transport',1)\n \n bookings_df = bookings_df[ bookings_df[\"start_lon\"] <= 7.8] \n\n return bookings_df\n \n def get_provider(self, df):\n return df.iloc[0].vendor\n\n \n def query_parkings(self, vendor, city, start, end):\n if (vendor == \"enjoy\") :\n return self.db[\"enjoy_PermanentParkings\"].find(\n {'init_date':\n {\n '$gt': start,\n '$lt': end\n },\n 'city' : city,\n# 'loc' : { \n# '$near' : {\n# '$geometry' : { 'type' : \"Point\" ,\n# 'coordinates' : [ 7.678889, 45.0625 ] } ,\n# '$maxDistance' : 50000 }\n# }\n }).sort([(\"_id\", 1)]) \n elif (vendor == \"car2go\") :\n return self.db[\"PermanentParkings\"].find(\n {'init_date':\n {\n '$gt': start,\n '$lt': end\n },\n 'city' : city,\n# 'loc' : { \n# '$near' : {\n# '$geometry' : { 'type' : \"Point\" ,\n# 'coordinates' : [ 7.678889, 45.0625 ] } ,\n# '$maxDistance' : 50000 }\n# }\n }).sort([(\"_id\", 1)]) \n else:\n return \"err\"\n \n def query_parkings_df(self, vendor, city, start, end):\n parks_cursor = self.query_parkings(vendor, city, start, end)\n if (parks_cursor == \"err\" or parks_cursor.count() == 0):\n return parks_cursor.count()\n else :\n# print books_cursor.count()\n# bookings_df = pd.DataFrame(columns = pd.Series(books_cursor.next()).index)\n parkings_df = pd.DataFrame(list(parks_cursor))\n \n parkings_df['type'] = parkings_df['loc'].apply(lambda x : x['type'])\n parkings_df['coordinates'] = parkings_df['loc'].apply(lambda x : x['coordinates'])\n parkings_df = parkings_df.drop('loc',1)\n \n parkings_df['lon'] = parkings_df.coordinates.apply(lambda x : float(x[0]))\n parkings_df['lat'] = parkings_df.coordinates.apply(lambda x : float(x[1]))\n parkings_df = parkings_df.drop('coordinates',1)\n \n parkings_df['duration'] =parkings_df.final_date - parkings_df.init_date \n parkings_df['duration'] = parkings_df['duration'].apply(lambda x: x.days*24*60 + x.seconds/60)\n parkings_df = parkings_df[parkings_df[\"lon\"] <= 7.8] \n\n return parkings_df\n \n def get_color (self, provider):\n if isinstance(provider, pd.DataFrame):\n provider = self.get_provider(provider)\n if (provider == \"enjoy\"):\n return \"red\"\n else:\n return \"blue\"\n \n if provider == 'enjoy':\n return \"red\"\n elif provider == 'car2go':\n return \"blue\"\n \n \n def query_car_per_plate_df(self, vendor, plate, start, end):\n if vendor == 'car2go' :\n cursor = self.db[\"PermanentBookings\"].find(\n {'init_date':\n {\n '$gt': start,\n '$lt': end\n },\n 'plate' : {'$in': plate}\n }).sort([(\"_id\", 1)]) \n \n if vendor == 'enjoy' :\n cursor = self.db[\"enjoy_PermanentBookings\"].find(\n {'init_date':\n {\n '$gt': start,\n '$lt': end\n },\n 'plate' : {'$in': plate}\n }) \n \n df = pd.DataFrame(list(cursor))\n return df\n \n def query_car_per_plate_active_df(self, vendor, plate, start, end):\n if vendor == 'car2go' :\n cursor = self.db[\"ActiveBookings\"].find(\n {'init_date':\n {\n '$gt': start,\n '$lt': end\n },\n 'plate' : {'$in': [plate]}\n }).sort([(\"_id\", 1)]) \n \n if vendor == 'enjoy' :\n cursor = self.db[\"enjoy_ActiveBookings\"].find(\n {'init_date':\n {\n '$gt': start,\n '$lt': end\n },\n 'plate' : {'$in': plate}\n }) \n \n df = pd.DataFrame(list(cursor))\n return df\n \n \n \n\n# def query_fleet_by_day (self, provider, city, start, end):\n# \n# return self.db['fleet'].find \\\n# ({\n# 'day':\n# {\n# '$gt': start,\n# '$lt': end\n# },\n# 'provider': provider,\n# 'city':city\n# }).sort([(\"_id\", 1)]) \n# \n# def query_raw_by_time (self, provider, city, start, end):\n# \n# return 
self.db[\"snapshots\"].find \\\n# ({\"timestamp\":\n# {\n# '$gte': start,\n# '$lt': end\n# },\n# \"provider\":provider,\n# \"city\":city\n# }).sort([(\"_id\", 1)])\n#\n# def query_fleet (self, provider, city):\n#\n# return self.db[\"fleet\"].find \\\n# ({\n# \"provider\":provider,\n# \"city\":city\n# }).sort([(\"_id\", 1)])\n# \n# def query_parks (self, provider, city, start, end):\n# \n# return self.db[\"parks\"].find \\\n# ({\"start\":\n# {\n# '$gte': start,\n# '$lt': end\n# },\n# \"provider\":provider,\n# \"city\":city\n# }).sort([(\"_id\", 1)])\n#\n# def query_books (self, provider, city, start, end):\n# \n# return self.db[\"books\"].find \\\n# ({\"start\":\n# {\n# '$gte': start,\n# '$lt': end\n# },\n# \"provider\":provider,\n# \"city\":city\n# }).sort([(\"_id\", 1)])\n#\n# def query_books_group (self, provider, city, start, end):\n# \n# return self.db[\"books_aggregated\"].find \\\n# ({\"day\":\n# {\n# '$gt': start,\n# '$lt': end\n# },\n# \"provider\":provider,\n# \"city\":city\n# }).sort([(\"_id\", 1)])\n#\n# def process_books_df (self, provider, books_df):\n#\n# def riding_time (provider, df): \n# \n# df[\"reservation_time\"] = df[\"duration\"] - df[\"duration_driving\"]\n# df.loc[df.reservation_time < 0, \"riding_time\"] = df[\"duration\"]\n# df.loc[df.reservation_time > 0, \"riding_time\"] = df[\"duration_driving\"]\n# \n# return df \n# \n# def get_bill (provider, df):\n# \n# if provider == \"car2go\":\n# free_reservation = 20\n# ticket = 0.24\n# extra_ticket = 0.24 \n# elif provider == \"enjoy\":\n# free_reservation = 15\n# ticket = 0.25\n# extra_ticket = 0.10 \n# \n# indexes = df.loc[df.reservation_time > free_reservation].index\n# extra_minutes = df.loc[indexes, 'reservation_time'] - free_reservation\n# df.loc[indexes,\"min_bill\"] = df.loc[indexes, 'riding_time'].apply(lambda x: x * ticket) + \\\n# extra_minutes.apply(lambda x: x * extra_ticket) \n# df.loc[indexes,\"max_bill\"] = df.loc[indexes, 'duration'].apply(lambda x: x * ticket)\n# \n# indexes = df.loc[(df.reservation_time <= free_reservation) & (df.reservation_time > 0)].index\n# df.loc[indexes,\"min_bill\"] = df.loc[indexes, 'riding_time'].apply(lambda x: x * ticket) \n# df.loc[indexes,\"max_bill\"] = df.loc[indexes, 'riding_time'].apply(lambda x: x * ticket)\n# \n# indexes = df.loc[df.reservation_time < 0].index\n# df.loc[indexes,\"min_bill\"] = df.loc[indexes, 'riding_time'].apply(lambda x: x * ticket)\n# df.loc[indexes,\"max_bill\"] = df.loc[indexes, 'riding_time'].apply(lambda x: x * ticket) \n# \n# return df\n# \n# books_df[\"duration\"] = \\\n# (books_df[\"end\"] - books_df[\"start\"])/np.timedelta64(1, 'm')\n# books_df[\"distance\"] = books_df.apply\\\n# (lambda row: haversine(row[\"start_lon\"], row[\"start_lat\"], \n# row[\"end_lon\"], row[\"end_lat\"]), axis=1)\n# books_df[\"fuel_consumption\"] = \\\n# books_df[\"start_fuel\"] - books_df[\"end_fuel\"]\n#\n# books_df = riding_time(provider, books_df)\n# books_df = get_bill(provider, books_df)\n#\n# return books_df \n# \n# def query_parks_df (self, provider, city, start, end):\n# \n# parks_cursor = self.query_parks(provider, city, start, end)\n# parks_df = pd.DataFrame(columns = pd.Series(parks_cursor.next()).index)\n# for doc in parks_cursor:\n# s = pd.Series(doc)\n# parks_df = pd.concat([parks_df, pd.DataFrame(s).T], ignore_index=True) \n#\n# parks_df[\"duration\"] = \\\n# (parks_df[\"end\"] - parks_df[\"start\"])/np.timedelta64(1, 'm') \n# \n# return parks_df[parks_cols]\n# \n# def query_books_df (self, provider, city, start, end):\n# \n# books_cursor = 
self.query_books(provider, city, start, end) \n# books_df = pd.DataFrame(columns = pd.Series(books_cursor.next()).index)\n# for doc in books_cursor:\n# s = pd.Series(doc)\n# books_df = pd.concat([books_df, pd.DataFrame(s).T], ignore_index=True) \n# \n# return self.process_books_df(provider, books_df)[books_cols].replace({None:np.NaN})\n#\n# def query_parks_df_intervals (self, provider, city, dates_list):\n# \n# parks_cursor = self.query_parks_intervals(provider, city, dates_list)\n# parks_df = pd.DataFrame(columns = pd.Series(parks_cursor.next()).index)\n# for doc in parks_cursor:\n# s = pd.Series(doc)\n# parks_df = pd.concat([parks_df, pd.DataFrame(s).T], ignore_index=True) \n#\n# parks_df[\"duration\"] = \\\n# (parks_df[\"end\"] - parks_df[\"start\"])/np.timedelta64(1, 'm') \n# \n# return parks_df[parks_cols]\n#\n# def query_books_df_intervals (self, provider, city, dates_list):\n# \n# books_cursor = self.query_books_intervals(provider, city, dates_list) \n# books_df = pd.DataFrame(columns = pd.Series(books_cursor.next()).index)\n# for doc in books_cursor:\n# s = pd.Series(doc)\n# books_df = pd.concat([books_df, pd.DataFrame(s).T], ignore_index=True) \n# \n# return self.process_books_df(provider, books_df)[books_cols].replace({None:np.NaN})\n#\n# def query_books_df_aggregated (self, provider, city, start, end):\n# \n# books_cursor = self.query_books_group(provider, city, start, end) \n#\n# books_df = pd.DataFrame()\n#\n# for doc in books_cursor:\n# \n# s = pd.DataFrame(doc['books'])\n# books_df = pd.concat([books_df, s], ignore_index=True) \n# \n# return self.process_books_df(provider, books_df)[books_cols].replace({None:np.NaN})\n#\n# def filter_books_df_outliers (self, df):\n#\n# df['reservations'] = df['distance'].apply(lambda w: (w == 0)) \n# df['ride'] = df['distance'].apply(lambda w: (w > 0.05))\n# df['short_trips'] = df['duration'].apply(lambda w: (w < 40))\n# df['medium_trips'] = df['duration'].apply(lambda w: (w > 40) and (w < 120))\n# df['long_trips'] = df['duration'].apply(lambda w: (w > 120) and (w < 1440))\n# \n# return df\n# \n# def filter_df_days (self, df, start, end):\n# \n# cal = Italy()\n# \n# holidays = []\n# pre_holidays = []\n# \n# #holidays collection creation\n# if start.year == end.year:\n# for h in cal.holidays(start.year):\n# holidays.append(h[0])\n# else:\n# for year in range (start.year, end.year+1):\n# for h in cal.holidays(year):\n# holidays.append(h[0])\n# \n# for d in holidays:\n# if (d - datetime.timedelta(days = 1)) not in holidays:\n# pre_holidays.append(d - datetime.timedelta(days = 1))\n# \n# df['all'] = True \n# df['week_day'] = df['start'].apply(lambda x: x.weekday())\n# df['business'] = df['week_day'].apply(lambda w: (0 <= w) and (w <= 4))\n# df['weekend'] = df['week_day'].apply(lambda w: (5 <= w) and (w <= 6))\n# df['holiday'] = df['start'].apply(lambda x: x.date()).isin(holidays) \n# df['week'] = df['start'].apply(lambda x: x.week)\n#\n# return df\n#\n# def filter_date (self, start, end, day_type):\n# \n# cal = Italy()\n# \n# holidays = []\n# holidays_ = []\n# pre_holidays = []\n# pre_holidays_ = []\n# business = []\n# weekends = []\n# \n# #holidays collection creation\n# if start.year == end.year:\n# for h in cal.holidays(start.year):\n# holidays.append(h[0])\n# else:\n# for year in range (start.year, end.year+1):\n# for h in cal.holidays(year):\n# holidays.append(h[0])\n# \n# for d in holidays:\n# if (d - datetime.timedelta(days = 1)) not in holidays:\n# pre_holidays.append(d - datetime.timedelta(days = 1))\n#\n# date_list = [end - 
datetime.timedelta(days=x) for x in range(0, (end-start).days+1)]\n#\n# if day_type == \"business\":\n# for day in date_list:\n# if (day.weekday() >= 0) & (day.weekday() <= 4) & (day not in holidays):\n# business.append(day)\n# return business\n#\n# if day_type == \"weekend\":\n# for day in date_list:\n# if (day.weekday() >= 5) & (day.weekday() <= 6) & (day not in holidays):\n# weekends.append(day)\n# return weekends\n#\n# if day_type == \"holiday\":\n# for day in date_list:\n# if (day.date() in holidays):\n# holidays_.append(day)\n# return holidays_\n#\n# if day_type == \"preholiday\":\n# for day in date_list:\n# if (day.date() in holidays):\n# pre_holidays_.append(day)\n# return pre_holidays_\n#\n# def query_books_intervals(self, provider, city, dates_list):\n#\n# query = []\n# for end_ in dates_list:\n# start_ = (end_ - datetime.timedelta(days = 1))\n# q = {'start': {\n# '$gt': start_,\n# '$lt': end_\n# }\n# }\n# query.append(q)\n#\n# return self.db['books'].find \\\n# ({ \n# '$or': query,\n# 'provider': provider,\n# 'city': city \n# })\n#\n# def query_parks_intervals(self, provider, city, dates_list):\n#\n# query = []\n# for end_ in dates_list:\n# start_ = (end_ - datetime.timedelta(days = 1))\n# q = {'start': {\n# '$gt': start_,\n# '$lt': end_\n# }\n# }\n# query.append(q)\n#\n# return self.db['parks'].find \\\n# ({ \n# '$or': query,\n# 'provider': provider,\n# 'city': city \n# })\n# \n# def query_books_df_filtered (self, provider, city, start, end):\n#\n# books_df = self.query_books_df(provider, city, start, end)\n# return self.filter_books_df_outliers(self.filter_df_days(books_df, start, end))\n#\n# def query_parks_df_filtered (self, provider, city, start, end):\n# \n# parks_df = self.query_parks_df(provider, city, start, end)\n# return self.filter_df_days(parks_df, start, end)\n#\n# def query_books_df_filtered_v2 (self, provider, city, start, end, day_type):\n#\n# if day_type == \"full\":\n# return self.query_books_df(provider, city, start, end)\n# else:\n# lista_date = self.filter_date(start, end, day_type)\n# return self.query_books_df_intervals(provider, city, lista_date)\n#\n# def query_parks_df_filtered_v2 (self, provider, city, start, end, day_type):\n# \n# if day_type == \"full\":\n# return self.query_parks_df(provider, city, start, end)\n# else:\n# lista_date = self.filter_date(start, end, day_type)\n# return self.query_parks_df_intervals(provider, city, lista_date)\n#\n# def query_books_df_filtered_v3 (self, provider, city, start, end):\n#\n# books_df = self.query_books_df_aggregated(provider, city, start, end)\n# return self.filter_books_df_outliers(self.filter_df_days(books_df, start, end))\n#\n# def query_fleetsize_series (self, provider, city):\n#\n# cursor = self.db['fleet'].aggregate([\n# {\"$match\":{\"provider\":provider}}, \n# {\"$group\":{\"_id\": \"$day\", \"daysize\": {\"$sum\": {\"$size\": \"$fleet\"}}}}\n# ])\n# return pd.Series({doc[\"_id\"]: doc[\"daysize\"] for doc in cursor})\n# " }, { "alpha_fraction": 0.6116945147514343, "alphanum_fraction": 0.6381861567497253, "avg_line_length": 29.04316520690918, "blob_id": "91afdf0cb223997ec05ac401835de6331eea6df5", "content_id": "b1f344a6a95cd7d34b89590e6b5fdf3136ffdd1f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4190, "license_type": "no_license", "max_line_length": 77, "num_lines": 139, "path": "/Analysis/zz_valid_days_plot.py", "repo_name": "michelelt/MyTool", "src_encoding": "UTF-8", "text": "import pandas as pd\nimport 
warnings\nwarnings.filterwarnings('ignore')\nimport datetime\nimport matplotlib.pyplot as plt\nimport matplotlib\nfrom math import *\nimport numpy as np\nimport paths as paths\nfrom util import Utility\nimport time\n\n\nfrom DataBaseProxy import DataBaseProxy\n#dbp = DataBaseProxy()\nutil = Utility()\n\nyear = 2017\nmonth = 5\nday = 6\n\n#car kilometers for enjoy and car2go over one week\nstart = datetime.datetime(year, month, day, 0, 0, 0)\nend = datetime.datetime(year, month +2, day, 23, 59, 0)\n\nenj_bookings = pd.read_pickle(paths.enjoy_bookings_pickle_path, None)\nc2g_bookings = pd.read_pickle(paths.car2go_bookings_pickle_path, None)\n\nenj_parkings = pd.read_pickle(paths.enjoy_parkings_pickle_path, None)\nc2g_parkings = pd.read_pickle(paths.car2go_parkings_pickle_path, None)\n\nenj_data = util.get_valid_days(enj_bookings, start,end)\nc2g_data = util.get_valid_days(c2g_bookings, start, end)\n\nenj_data_b = util.get_valid_days(enj_parkings, start,end)\nc2g_data_b = util.get_valid_days(c2g_parkings, start, end)\n\n\n\n#enj_cars = len(enj_bookings.plate.unique())\n#enj_bookings_len = len(pd.read_pickle(paths.enjoy_bookings_pickle_path))\n#enj_parkings_len = len(pd.read_pickle(paths.enjoy_parkings_pickle_path))\n#\n#enj_days = float(enj_data[\"valid_days\"])\n#\n#print \"enj B/D \" + str(enj_bookings_len/enj_days)\n#print \"enj_B/D/C \" + str(enj_bookings_len/enj_days/enj_cars)\n#print \"enj P/D \" + str(enj_parkings_len/enj_days)\n#print \"enj P/D/C \" + str(enj_parkings_len/enj_days/enj_cars)\n#print\n#\n#c2g_cars = len(c2g_bookings.plate.unique())\n#c2g_bookings_len = len(pd.read_pickle(paths.car2go_bookings_pickle_path))\n#c2g_parkings_len = len(pd.read_pickle(paths.car2go_parkings_pickle_path))\n#c2g_days = float(c2g_data[\"valid_days\"])\n#\n#print \"c2g B/D \" + str(c2g_bookings_len/c2g_days)\n#print \"c2g B/D/C \" + str(c2g_bookings_len/c2g_days/c2g_cars)\n#print \"c2g P/D \" + str(c2g_parkings_len/c2g_days)\n#print \"c2g P/D/C \" + str(c2g_parkings_len/c2g_days/c2g_cars)\n\n\n\ndef plot_valid_days(df1, provider, path):\n    if provider == 'car2go':\n        color = 'blue'\n    else:\n        color = 'red'\n    \n    months_dict = {\"mag\": \"May\", \"giu\": \"Jun\", \"lug\":\"Jul\"}\n    \n    fig = plt.figure(figsize=(30,10))\n    \n    ax = fig.gca()\n    ax.set_title(provider + \" - Valid Days\", fontsize=36)\n    ax.grid()\n    \n    width=0.5\n    ind = np.arange(len(df1.index))\n    ax.bar(ind, df1[\"entries\"], width, color=color)\n    \n    ax.set_ylabel(\"Entries per day\", fontsize=36)\n    \n    ticks = [datetime.date.today()]*len(df1.index)\n    ticks[0:len(df1.index):5] = df1.index[range(0,len(ind),5)]\n    for i in range(len(ticks)):\n        if i % 5 == 0 :\n            t = ticks[i]\n            date = t.strftime(\"%d %b %Y\").split(\" \")\n            date[1] = months_dict[date[1]]\n            ticks[i] = str(date[0]) + \" \" + str(date[1]) + \" \" + str(date[2])\n            \n        else:\n            ticks[i] = \"\"\n\n    ax.set_xticks(ind + width /32)\n    ax.set_xticklabels(ticks, rotation=30)\n#    \n    for tick in ax.xaxis.get_major_ticks():\n        tick.label.set_fontsize(20)\n        \n    for tick in ax.yaxis.get_major_ticks():\n        tick.label.set_fontsize(20) \n\n    fig.autofmt_xdate()\n    plt.savefig(path,\n                bbox_inches = 'tight')\n\n    plt.show()\n    \n#plot_valid_days(enj_data['df'], 'enjoy')\n#plot_valid_days(c2g_data['df'], 'car2go')\n\n#enj_bookings = enj_bookings[\n#        (enj_bookings[\"duration\"] <= 120) & \n#        (enj_bookings[\"distance\"] >=20)\n#        ]\n#\n#c2g_bookings = c2g_bookings[\n#        (c2g_bookings[\"duration\"] <= 120) & \n#        (c2g_bookings[\"distance\"] >=20)\n#        ]\n\n\n\ndir_= \"03_data_charact/\"\nname= \"valid_days\"\n\nprovider = 
'enjoy'\npath = \"/home/mc/Scrivania/Tesi/toptesi/figures/\" + dir_ +provider+\"_\"+name\nenj_data = util.get_valid_days(enj_bookings, start,end)\nplot_valid_days(enj_data['df'], 'enjoy', path)\nenj_data[\"filtered_fleet\"] = util.get_fleet(enj_bookings, 61)\n\nprovider = 'car2go'\npath = \"/home/mc/Scrivania/Tesi/toptesi/figures/\" + dir_ + provider+\"_\"+name\nc2g_data = util.get_valid_days(c2g_bookings, start,end)\nplot_valid_days(c2g_data['df'], 'car2go', path)\nc2g_data[\"filtered_fleet\"] = util.get_fleet(c2g_bookings, 61)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n" }, { "alpha_fraction": 0.6214970350265503, "alphanum_fraction": 0.6450321674346924, "avg_line_length": 37.330230712890625, "blob_id": "54007005d067cee9552ec99cbede53f37888ade9", "content_id": "64adc671e589ec2c959fe672c62d84f666617872", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8244, "license_type": "no_license", "max_line_length": 118, "num_lines": 215, "path": "/Analysis/parkingsAnalysis.py", "repo_name": "michelelt/MyTool", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\nimport pandas as pd\nimport datetime\nimport matplotlib.pyplot as plt\nimport paths as paths\n\nfrom DataBaseProxy import DataBaseProxy\nfrom util import Utility\nutil = Utility()\ndbp = DataBaseProxy()\n\n\nyear = 2017\nmonth = 5\nday = 6\n\n#car kilometers for enjoy and car2go over one week\nstart = datetime.datetime(year, month, day, 0, 0, 0)\nend = datetime.datetime(year, month +2, day, 23, 59, 0)\nend2 = datetime.datetime(year, month, day, 23,59,0)\n\ndef clean_durations(df):\n    df = df[df.duration < df.duration.quantile(0.99)]\n    df = df[df.duration > df.duration.quantile(0.01)]\n    return df\n\ndef duration_per_car(df) :\n    out_df= pd.DataFrame()\n    out_df[\"plate\"] = df.plate\n    out_df['duration'] = df.duration\n    dur_per_car = out_df.groupby('plate', as_index = False).sum()\n    return dur_per_car\n\ndef bookings_per_car(df):\n    df_freq = df.groupby('plate').count()\n    df_freq = df_freq[['_id']].copy()\n    df_freq = df_freq.rename(columns={'_id': 'freq'})\n    return df_freq\n\ndef parkings_per_car(df) :\n    out_df= pd.DataFrame()\n    out_df[\"plate\"] = df.plate\n    out_df['number_of_parkings'] = df.duration\n    dur_per_car = out_df.groupby('plate', as_index = False).count()\n    \n    return dur_per_car\n\ndef total_dur_per_car(df, df2):\n    provider = util.get_provider(df)\n    color = util.get_color(df)\n    df = clean_durations(df)\n    dur_per_car = duration_per_car(df)\n    freq_per_car = bookings_per_car(df2)\n    \n\n    fig, ax = plt.subplots(1, 1, figsize=(9,10))\n    my_xticks = dur_per_car.plate\n#    print len(my_xticks)\n    ax.plot(dur_per_car.index, dur_per_car.duration, linestyle='-', marker='x',color=color)\n#    ax.set_xticks(my_xticks)\n    ax.set_title(\"min per car - \" + provider)\n    ax.set_xlabel(\"Plate\")\n    ax.set_ylabel(\"Total minutes\")\n    plt.show()\n    dur_per_car.set_index('plate', inplace=True)\n    dur_per_car['freq'] = freq_per_car['freq']\n    dur_per_car.dropna()\n    return dur_per_car\n    \ndef total_dist_per_car_no_outliers (df):\n    provider = util.get_provider(df)\n    color = util.get_color(df)\n    df = clean_durations(df)\n    dur_per_car = duration_per_car(df)\n    \n    std = dur_per_car['duration'].std()\n    avg = dur_per_car['duration'].median()\n    normalized_durations = dur_per_car[(dur_per_car['duration'] >= (avg-std)) &\n                                       (dur_per_car['duration'] <= (avg+std))]\n    \n    fig, ax = plt.subplots(1, 1, figsize=(9,10))\n#    my_xticks = normalized_durations.plate\n#    print len(my_xticks)\n#    
plt.xticks(normalized_durations.index, my_xticks)\n plt.plot(normalized_durations.index, normalized_durations['duration'], linestyle='-', marker='x',color=color)\n ax.set_title(\"min per car in std - \" + provider)\n ax.set_xlabel(\"Plate\")\n ax.set_ylabel(\"Total minutes\")\n plt.show()\n \ndef hist_dur_freq(column, df, df_source, data):\n provider = util.get_provider(df_source)\n color = util.get_color(df_source)\n\n if column == \"duration\":\n xlabel = \"min\"\n else :\n xlabel = \"\"\n \n if column == \"freq\":\n df = df.dropna()\n \n fig, ax = plt.subplots(2, 4, figsize=(20,10))\n fig.suptitle(provider + ' - ' + column + ' distributions')\n \n #uncleaned data\n ax[0,0].hist(df[column], 50, facecolor=color, alpha=0.75, cumulative=True, normed=True)\n ax[0,0].set_title(\"CDF - \" + column)\n ax[0,0].set_xlabel(xlabel)\n \n ax[1,0].hist(df[column], 50, facecolor=color, alpha=0.75)\n ax[1,0].set_title(\"PDF - \" + column)\n ax[1,0].set_xlabel(xlabel)\n \n #filtering - only cars with at least 3 parkings at day\n df = df[df.freq > 30]\n ax[0,1].hist(df[column], 50, facecolor=color, alpha=0.75, cumulative=True, normed=True)\n ax[0,1].set_title(\"filtered CDF - \" + column)\n ax[0,1].set_xlabel(xlabel)\n \n ax[1,1].hist(df[column], 50, facecolor=color, alpha=0.75)\n ax[1,1].set_title(\"filtered PDF - \" + column)\n ax[1,1].set_xlabel(xlabel)\n \n #divided per number of days\n ax[0,2].hist(df[column]/data[\"valid_days\"], 50, facecolor=color, alpha=0.75, cumulative=True, normed=True)\n ax[0,2].set_title(\"filtered CDF per day - \" + column)\n ax[0,2].set_xlabel(xlabel)\n \n ax[1,2].hist(df[column]/data[\"valid_days\"], 50, facecolor=color, alpha=0.75)\n ax[1,2].set_title(\"filtered PDF per day - \" + column)\n ax[1,2].set_xlabel(xlabel)\n \n #divided per number of days in interval\n ax[0,3].hist(df[column]/data[\"cleaned_valid_days\"], 50, facecolor=color, alpha=0.75, cumulative=True, normed=True)\n ax[0,3].set_title(\"filtered CDF per day clnd - \" + column)\n ax[0,3].set_xlabel(xlabel)\n \n ax[1,3].hist(df[column]/data[\"cleaned_valid_days\"], 50, facecolor=color, alpha=0.75)\n ax[1,3].set_title(\"filtered PDF per day clnd - \" + column)\n ax[1,3].set_xlabel(xlabel)\n \n res = {\n column+\"_mean\" : df[column].mean(),\n column+\"_median\": df[column].median(),\n column+\"_std\" : df[column].std(),\n column+\"_mean_valid_days\" : (df[column]/data[\"valid_days\"]).mean(),\n column+\"_median_valid_days\": (df[column]/data[\"valid_days\"]).median(),\n column+\"_std_valid_days\" : (df[column]/data[\"valid_days\"]).std(),\n column+\"_mean_valid_days_clnd\" : (df[column]/data[\"cleaned_valid_days\"]).mean(),\n column+\"_median_valid_days_clnd\": (df[column]/data[\"cleaned_valid_days\"]).median(),\n column+\"_std_valid_days_clnd\" : (df[column]/data[\"cleaned_valid_days\"]).std()\n }\n \n fig.savefig(paths.plots_path3+\"_\"+provider+\"_\"+column+\"_parkings_tats.png\", bbox_inches='tight')\n return df,res\n#\n#enjoy_parkings = dbp.query_parkings_df('enjoy','Torino', start, end)\n#car2go_parkings = dbp.query_parkings_df('car2go','Torino', start, end)\n#enjoy_parkings.to_pickle(paths.enjoy_parkings_pickle_path, None)\n#car2go_parkings.to_pickle(paths.car2go_parkings_pickle_path, None)\n\nenjoy = pd.read_pickle(paths.enjoy_pickle_path, None)\ncar2go = pd.read_pickle(paths.car2go_pickle_path, None)\nenjoy_parkings = pd.read_pickle(paths.enjoy_parkings_pickle_path, None)\ncar2go_parkings = pd.read_pickle(paths.car2go_parkings_pickle_path, None)\n\n\n#enj_data = 
util.get_valid_days(enjoy,start,end)\n#c2g_data = util.get_valid_days(car2go,start,end)\n\n\n#enjoy_parkings_duration = duration_per_car(enjoy_parkings)\n#enj_park_duration_freq = total_dur_per_car(enjoy_parkings, enjoy)\n#total_dist_per_car_no_outliers(enjoy)\n#enj_clean, enj_data[\"park_stats_duration\"] = hist_dur_freq(\"duration\", enj_park_duration_freq, enjoy, enj_data)\n#enj_clean, enj_data[\"park_stats_freq\"] = hist_dur_freq(\"freq\", enj_park_duration_freq, enjoy, enj_data)\n#\n#car2go_parkings_duration = duration_per_car(car2go_parkings)\n#car2go_park_duration_freq = total_dur_per_car(car2go_parkings, car2go)\n#total_dist_per_car_no_outliers(car2go)\n#c2g_clean, c2g_data[\"park_stats_duration\"] = hist_dur_freq(\"duration\", car2go_park_duration_freq, car2go, c2g_data)\n#c2g_clean, c2g_data[\"park_stats_freq\"] = hist_dur_freq(\"freq\", car2go_park_duration_freq, car2go, c2g_data)\n\n\"\"\"\nAvg parking time per car (valid days)\n\"\"\"\n#enj_clean[\"duration_per_day\"] = enj_park_duration_freq[\"duration\"]/(enj_data[\"cleaned_valid_days\"])\n#enj_clean[\"freq_per_day\"] = enj_park_duration_freq[\"freq\"]/(enj_data[\"cleaned_valid_days\"])\n#c2g_clean[\"duration_per_day\"] = car2go_park_duration_freq[\"duration\"]/(c2g_data[\"cleaned_valid_days\"])\n#c2g_clean[\"freq_per_day\"] = car2go_park_duration_freq[\"freq\"]/(enj_data[\"cleaned_valid_days\"])\n#\n#\n#fig,ax =plt.subplots(1, 1, figsize=(9,10))\n#enj_clean.hist(ax=ax, color=util.get_color(enjoy))\n#fig2,ax2 = plt.subplots(1, 1, figsize=(9,10))\n#c2g_clean.hist(ax=ax2, color=util.get_color(car2go))\n\n'''\nas information we have the number of minutes the car stayed parked, and the number of bookings it\nreceived\n'''\n#total_dist_per_car_no_outliers(enjoy_parkings)\n\n#dur_per_car['index'] = dur_per_car['index'] / (dur_per_car['index'].sum())\n#dur_per_car.hist(bins=100, cumulative=True, normed=True)\n\n\n#df2 = parkings_per_car(enjoy_parkings)\n#enjoy_parkings_duration['count'] = df2['number_of_parkings']\n#\n#df = enjoy_parkings[\n#        (enjoy_parkings.plate == 'EZ049TY')\n#        ]\n\n\n" }, { "alpha_fraction": 0.7430070042610168, "alphanum_fraction": 0.748251736164093, "avg_line_length": 21.038461685180664, "blob_id": "e2e93e0e1200bc749f9973adbd74fe9817a0a868", "content_id": "f0f41da62f7d7198dfd080442e10973efe276511", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 572, "license_type": "no_license", "max_line_length": 71, "num_lines": 26, "path": "/Analysis/simulator/parallel_exectuion.py", "repo_name": "michelelt/MyTool", "src_encoding": "UTF-8", "text": "import pandas as pd\nimport geopandas as gpd\nimport numpy as np\nimport datetime\nimport time\nimport random\nimport sys\nimport os.path\nsys.path.insert(0, '/home/mc/Scrivania/Tesi/MyTool/Analysis/')\nimport paths as paths\nfrom DataBaseProxy import DataBaseProxy\nsys.path.insert(0, '/home/mc/Scrivania/Tesi/MyTool/Analysis/simulator')\nfrom util import Utility\nfrom car import Car\nfrom shapely.geometry import Point, Polygon\nfrom station import Station\nimport threading\n\ndef main(args): \n    a = int(args[0])\n    time.sleep(a)\n    print a\n    \n\nif __name__ == '__main__':\n    main(sys.argv[1:])" }, { "alpha_fraction": 0.5786713361740112, "alphanum_fraction": 0.6159673929214478, "avg_line_length": 25.091602325439453, "blob_id": "1533ac7f83202681e6f79c98796dec20eeb460ee", "content_id": "1bae506063173d65f1c3604f93201aa4db4daee5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", 
"length_bytes": 3432, "license_type": "no_license", "max_line_length": 87, "num_lines": 131, "path": "/Analysis/zz_dis_dur.py", "repo_name": "michelelt/MyTool", "src_encoding": "UTF-8", "text": "import pandas as pd\nimport warnings\nwarnings.filterwarnings('ignore')\nimport datetime\nimport matplotlib.pyplot as plt\nimport matplotlib\nfrom math import *\nimport numpy as np\nimport paths as paths\nfrom util import Utility\nimport time\n\n\nfrom DataBaseProxy import DataBaseProxy\n#dbp = DataBaseProxy()\nutil = Utility()\n\nyear = 2017\nmonth = 5\nday = 6\n\n#km macchine per enjoy e car2go in una settimana\nstart = datetime.datetime(year, month, day, 0, 0, 0)\nend = datetime.datetime(year, month +2, day, 23, 59, 0)\n\nenj_bookings = pd.read_pickle(paths.enjoy_bookings_pickle_path, None)\nc2g_bookings = pd.read_pickle(paths.car2go_bookings_pickle_path, None)\n\nenj_parkings = pd.read_pickle(paths.enjoy_parkings_pickle_path, None)\nc2g_parkings = pd.read_pickle(paths.car2go_parkings_pickle_path, None)\n\nenj_bookings = enj_bookings[\n (enj_bookings[\"duration\"] <= 120) & \n (enj_bookings[\"distance\"] >=20)\n ]\nenj_cf = 1.82/1000\nenj_bookings[\"distance\"] = enj_bookings[\"distance\"].mul(enj_cf)\n\nc2g_bookings = c2g_bookings[\n (c2g_bookings[\"duration\"] <= 120) & \n (c2g_bookings[\"distance\"] >=20) \n ]\nc2g_cf = 1.88/1000\nc2g_bookings[\"distance\"] = c2g_bookings[\"distance\"].mul(c2g_cf)\nc2g_bookings.drop(35354, inplace=True)\n\n\n\n\ndef plot_cdf(df1, provider, column, path):\n if provider == 'car2go':\n color = 'blue'\n else:\n color = 'red'\n \n if column == 'distance':\n title = provider + \" - Distance CDF.\"\n xlabel = \"Distance [km]\"\n else:\n title = provider + \" - Duration CDF.\"\n xlabel = \"Duration [min]\"\n\n \n fig = plt.figure(figsize=(10,10))\n ax = fig.gca()\n ax.set_title(title, fontsize=36)\n ax.grid()\n\n \n values = [df1[column].quantile(0.25), \n df1[column].quantile(0.50), \n df1[column].quantile(0.75), \n df1[column].quantile(0.99), \n df1[column].mean(),\n df1[column].median(),\n df1[column].std()\n ]\n print provider, column\n print values\n print\n ax.hist(df1[column], cumulative=True, normed=True, bins=100, color=color)\n \n# for i in range(len(values)):\n# ax.axhline(percen[i], color='black',linewidth=3)\n \n for tick in ax.xaxis.get_major_ticks():\n tick.label.set_fontsize(20) \n# y_ticks = [0.0, 0.2, 0.4, 0.6, 0.8, 0.9, 0.95, 0.99]\n# ax.set_yticks(y_ticks) \n for tick in ax.yaxis.get_major_ticks():\n tick.label.set_fontsize(20) \n \n ax.set_xlabel(xlabel, fontsize=36)\n ax.set_ylabel(\"ECDF\", fontsize=36)\n\n\n if len(path) > 0 :\n plt.savefig(path,\n bbox_inches = 'tight',pad_inches = 0.25)\n \n\n plt.show()\n \n\n\ndir_= \"04_data_analysis\"\nname= \"cdf\"\n\ncolumn = \"duration\"\n\nprovider = 'enjoy'\npath = \"/home/mc/Scrivania/Tesi/Writing/figures/\"+dir_+\"/\"+provider+\"_\"+column+\"_\"+name\nplot_cdf(enj_bookings, \"enjoy\", column, path)\n\n\nprovider = 'car2go'\npath = \"/home/mc/Scrivania/Tesi/Writing/figures/\"+dir_+\"/\"+provider+\"_\"+column+\"_\"+name\nplot_cdf(c2g_bookings, \"car2go\", column, path)\n\n\n\ncolumn = \"distance\"\n\nprovider = 'enjoy'\npath = \"/home/mc/Scrivania/Tesi/Writing/figures/\"+dir_+\"/\"+provider+\"_\"+column+\"_\"+name\nplot_cdf(enj_bookings, \"enjoy\", column, path)\n\n\nprovider = 'car2go'\npath = \"/home/mc/Scrivania/Tesi/Writing/figures/\"+dir_+\"/\"+provider+\"_\"+column+\"_\"+name\nplot_cdf(c2g_bookings, \"car2go\", column, path)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n" }, { "alpha_fraction": 
0.6098928451538086, "alphanum_fraction": 0.6404325366020203, "avg_line_length": 35.00724792480469, "blob_id": "a15b4993289d6764047b6f1a702dbb8a801f3707", "content_id": "a068a5d7fb33e499d94c335789580818bd133da4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9987, "license_type": "no_license", "max_line_length": 125, "num_lines": 276, "path": "/Analysis/final_analysis.py", "repo_name": "michelelt/MyTool", "src_encoding": "UTF-8", "text": "import pandas as pd\nimport warnings\nwarnings.filterwarnings('ignore')\nimport datetime\nimport matplotlib.pyplot as plt\nimport matplotlib\nfrom math import *\nimport numpy as np\nimport paths as paths\nfrom util import Utility\n\n\nfrom DataBaseProxy import DataBaseProxy\ndbp = DataBaseProxy()\nutil = Utility()\n\nyear = 2017\nmonth = 5\nday = 6\n\n#car kilometers for enjoy and car2go over one week\nstart = datetime.datetime(year, month, day, 0, 0, 0)\nend = datetime.datetime(year, month, day, 23, 59, 0)\n\n#enjoy = dbp.query_bookings_df(\"enjoy\",\"Torino\", start, end)\n#car2go = dbp.query_bookings_df(\"car2go\",\"Torino\", start, end)\n\n'''\nloading data from pickles\n'''\nenj_bookings = pd.read_pickle(paths.enjoy_bookings_pickle_path, None)\nc2g_bookings = pd.read_pickle(paths.car2go_bookings_pickle_path, None)\n\n#enj_parkings = pd.read_pickle(paths.enjoy_parkings_pickle_path, None)\n#c2g_parkings = pd.read_pickle(paths.car2go_parkings_pickle_path, None)\n\n#fig = plt.figure(figsize=(10,10))\n#ax = fig.gca()\n#c2g_bookings = c2g_bookings[c2g_bookings.start_lat > 0]\n#caselle = c2g_bookings[c2g_bookings[\"init_address\"].str.contains(\"Caselle\")]\n#c2g_bookings = c2g_bookings[c2g_bookings[\"start_lat\"] < 45.17]\n#ax.scatter(c2g_bookings[\"start_lon\"], c2g_bookings[\"start_lat\"], color='blue', s=0.1)\n#ax.scatter(caselle[\"start_lon\"], caselle[\"start_lat\"], color='blue', s=50)\n#plt.show()\n\ndef operative_area(df, provider, path):\n    df = df[df[\"start_lat\"] > 0 ]\n    df = df[df[\"start_lon\"] > 0 ]\n\n    if provider == 'car2go':\n        color= 'blue'\n    else:\n        color = 'red'\n        \n    fig = plt.figure(figsize=(10,10))\n    ax = fig.gca()\n#    ax.set_xticks([])\n#    ax.set_yticks([])\n    ax.set_title(provider + \" - Operative area\", fontsize=36)\n    ax.set_xlabel(\"Longitude [$^\\circ$]\", fontsize=36)\n    ax.set_ylabel(\"Latitude [$^\\circ$]\", fontsize=36)\n    ax.grid()\n    \n    df = df[df.start_lat > 0]\n    caselle = df[df[\"init_address\"].str.contains(\"Caselle\")]\n    df = df[df[\"start_lat\"] < 45.17]\n    ax.scatter(df[\"start_lon\"], df[\"start_lat\"], color=color, s=0.1)\n    ax.scatter(caselle[\"start_lon\"], caselle[\"start_lat\"], color=color, s=50)\n    \n    for tick in ax.xaxis.get_major_ticks():\n        tick.label.set_fontsize(24)  \n        \n    for tick in ax.yaxis.get_major_ticks():\n        tick.label.set_fontsize(24)  \n        \n    if len(path) > 0:\n        plt.savefig(\"/home/mc/Scrivania/Tesi/toptesi/figures/03_data_charact/\"+provider+\"_operative\")\n    \n    plt.show()\n    \noperative_area(c2g_bookings, 'car2go', \"yes\")\noperative_area(enj_bookings, 'enjoy',\"yes\")\n\n\n#\n\n\n\n\n\n#'''\n#statistics on bookings\n#-avg distances per car\n#-avg duration per car\n#'''\n#\n### filtering ##\n#\n#c2g_bookings_filtered = c2g_bookings[\n#        (c2g_bookings[\"duration\"] <= 120) &\n#        (c2g_bookings[\"distance\"] >= 20)\n#        ]\n#\n#enj_bookings_filtered = enj_bookings[\n#        (enj_bookings[\"duration\"] <= 120) & \n#        (enj_bookings[\"distance\"] >= 20)\n#        ]\n#\n#\n#\n### fleet size ##\n#c2g_fleet = util.get_fleet(c2g_bookings_filtered, \n#                           
util.get_valid_days(c2g_bookings_filtered, start, end)[\"valid_days\"])\n#\n#enj_fleet = util.get_fleet(enj_bookings_filtered, \n# util.get_valid_days(enj_bookings_filtered, start, end)[\"valid_days\"])\n#\n#\n### entries per day ##\n#c2g_bookings_filtered[\"day\"] = c2g_bookings_filtered[\"init_date\"].apply(lambda x : x.date())\n#c2g_bf = pd.DataFrame(c2g_bookings_filtered.groupby(\"day\").count()[\"_id\"]).rename(columns={\"_id\":\"entry_per_day\"})\n#\n#enj_bookings_filtered[\"day\"] = enj_bookings_filtered[\"init_date\"].apply(lambda x : x.date())\n#enj_bf = pd.DataFrame(enj_bookings_filtered.groupby(\"day\").count()[\"_id\"]).rename(columns={\"_id\":\"entry_per_day\"})\n#\n### avg freq per car - smaller than data in reports ##\n#c2g_avg_freq = float(\n# float(len(c2g_bookings_filtered))/\n# float(len(c2g_fleet))/\n# float(util.get_valid_days(c2g_bookings_filtered, start, end)[\"valid_days\"])\n# )\n#\n#enj_avg_freq = float(\n# float(len(enj_bookings_filtered))/\n# float(len(enj_fleet))/\n# float(util.get_valid_days(enj_bookings_filtered, start, end)[\"valid_days\"])\n# )\n#print \n#print \"c2g - freq per car \" + str(c2g_avg_freq)\n#print \"enj - freq per car \" + str(enj_avg_freq)\n#print\n#\n### duration per booking ##\n## removing the big quantiles -> outliers #\n#\n#print \"Car2go - Duration Per Trip\"\n#print \" mean[min]: \" + str(float(sum(c2g_bookings_filtered.duration))/ float(len(c2g_bookings_filtered)))\n#print \" median[min]: \" + str(c2g_bookings_filtered.duration.median())\n#print \" std[min]: \" + str(c2g_bookings_filtered.duration.std())\n#print\n#print \"Car2go - Trip duration per day\"\n#print \" mean[min]: \" + str(float(sum(c2g_bookings_filtered.duration)*c2g_avg_freq)/ float(len(c2g_bookings_filtered)))\n#print \" median[min]: \" + str(c2g_bookings_filtered.duration.median()*c2g_avg_freq)\n#print\n#print \"Enjoy - Duration Per Trip\"\n#print \" mean[min]: \" + str(float(sum(enj_bookings_filtered.duration))/ float(len(enj_bookings_filtered)))\n#print \" median[min]: \" + str(enj_bookings_filtered.duration.median())\n#print \" std[min]: \" + str(enj_bookings_filtered.duration.std())\n#print\n#print \"Enjoy - Trip duration per day\"\n#print \" mean[min]: \" + str(float(sum(c2g_bookings_filtered.duration)*enj_avg_freq)/ float(len(c2g_bookings_filtered)))\n#print \" median[min]: \" + str(c2g_bookings_filtered.duration.median()*enj_avg_freq)\n#print\n#\n### factor correction between gd/ed ##\n#c2g_dist = c2g_bookings_filtered[\n# (c2g_bookings_filtered[\"distance\"] > 0) &\n# (c2g_bookings_filtered[\"distance_dr\"] > 0)\n# ]\n#c2g_dist[\"ed_over_gd\"] = c2g_dist[\"distance_dr\"] / c2g_dist[\"distance\"]\n#c2g_dist = c2g_dist[\n# c2g_dist[\"ed_over_gd\"] < 7\n# ]\n#\n#enj_dist = enj_bookings_filtered[\n# (enj_bookings_filtered[\"distance\"] > 0) &\n# (enj_bookings_filtered[\"distance_dr\"] > 0)\n# ]\n#enj_dist[\"ed_over_gd\"] = enj_dist[\"distance_dr\"] / enj_dist[\"distance\"]\n#enj_dist = enj_dist[\n# enj_dist[\"ed_over_gd\"] < 7\n# ]\n\n#print \"Car2go - Corrective factors \"\n#print \" 0.90: \" + str(c2g_dist.ed_over_gd.quantile(0.90))\n#print \" 0.95: \" + str(c2g_dist.ed_over_gd.quantile(0.95))\n#print \" 0.99: \" + str(c2g_dist.ed_over_gd.quantile(0.99))\n#print\n#print \"Enjoy - Corrective factors \"\n#print \" 0.90: \" + str(enj_dist.ed_over_gd.quantile(0.90))\n#print \" 0.95: \" + str(enj_dist.ed_over_gd.quantile(0.95))\n#print \" 0.99: \" + str(enj_dist.ed_over_gd.quantile(0.99))\n#print\n#\n### bookings distances ##\n#print \"Car2go distances\"\n#print \" 
mean[km]: \" + str(c2g_bookings_filtered.distance.mean()/1000)\n#print \" mean google[km]: \" + str(c2g_dist.distance_dr.mean()/1000)\n#print \" mean * factor: \" + str(c2g_bookings_filtered.distance.mean()*c2g_dist.ed_over_gd.quantile(0.90))\n#\n#print \" median[km]: \" + str(c2g_bookings_filtered.distance.median()/1000)\n#print \" median google[km]: \" + str(c2g_dist.distance_dr.median()/1000)\n#print \" median * factor: \" + str(c2g_bookings_filtered.distance.median()*c2g_dist.ed_over_gd.quantile(0.90))\n#print\n#\n#print \"Enjoy distances\"\n#print \" mean[km]: \" + str(enj_bookings_filtered.distance.mean()/1000)\n#print \" mean google[km]: \" + str(enj_dist.distance_dr.mean()/1000)\n#print \" mean * factor: \" + str(enj_bookings_filtered.distance.mean()*enj_dist.ed_over_gd.quantile(0.90))\n#\n#print \" median[km]: \" + str(enj_bookings_filtered.distance.median()/1000)\n#print \" median google[km]: \" + str(enj_dist.distance_dr.median()/1000)\n#print \" median * factor: \" + str(enj_bookings_filtered.distance.median()*enj_dist.ed_over_gd.quantile(0.90))\n#print\n#\n#\n#'''\n#statistics on parkigns\n#- duration\n#'''\n#\n### Derivated analysis ##\n#print \"Car2go - parking duration from bookings\"\n#print \"mean[h]: \" + str(24-(float(sum(c2g_bookings_filtered.duration)*c2g_avg_freq)/ float(60*len(c2g_bookings_filtered))))\n#print \" median[h]: \" + str( 24 - (c2g_bookings_filtered.duration.median()*c2g_avg_freq/60))\n#print\n#print \"Enjoy - parking duration from bookings\"\n#print \"mean[h]: \" + str(24-(float(sum(enj_bookings_filtered.duration)*enj_avg_freq)/ float(60*len(enj_bookings_filtered))))\n#print \" median[h]: \" + str( 24 - (enj_bookings_filtered.duration.median()*enj_avg_freq/60))\n#print\n#\n### filtering ##\n#q=0.01\n#c2g_parkings_filtered = c2g_parkings[\n# (c2g_parkings[\"duration\"] <= c2g_parkings[\"duration\"].quantile(1-q)) & \n# (c2g_parkings[\"duration\"] >= 20 )\n# ]\n#tail = c2g_parkings_filtered[\n# c2g_parkings_filtered[\"duration\"] >= c2g_parkings[\"duration\"].quantile(1-q)\n# ]\n#len(tail)\n#\n#\n#\n#q=0.01\n#enj_parkings_filtered = enj_parkings[\n# (enj_parkings[\"duration\"] <= enj_parkings[\"duration\"].quantile(1-q)) & \n# (enj_parkings[\"duration\"] >= enj_parkings[\"duration\"].quantile(q))\n# ]\n##enj_parkings_filtered.duration.hist(cumulative=True,bins=200)\n#\n#\n#\n#c2g_avg_freq2 = float(\n# float(len(c2g_parkings_filtered.duration))/\n# float(len(c2g_fleet))/\n# float(util.get_valid_days(c2g_parkings_filtered, start, end)[\"valid_days\"])\n# )\n#\n#mean = float(\n# float(sum(c2g_parkings_filtered.duration))/\n# float(len(c2g_fleet))/\n# float(util.get_valid_days(c2g_parkings_filtered, start, end)[\"valid_days\"])\n# )\n#\n#print \"Car2go - parking duration from data\"\n#print \" mean[h]: \" + str(c2g_parkings_filtered[\"duration\"].mean()*c2g_avg_freq2/60)\n#print \" median[h]: \" + str(c2g_parkings_filtered[\"duration\"].median()*c2g_avg_freq2/60)\n#print \" std[h]: \" + str(c2g_parkings_filtered[\"duration\"].std()/60)\n#print\n#\n#print \"Car2go - parking duration from data\"\n#print \" mean[h]: \" + str(c2g_parkings_filtered[\"duration\"].mean()*c2g_avg_freq2/60)\n#print \" median[h]: \" + str(c2g_parkings_filtered[\"duration\"].median()*c2g_avg_freq2/60)\n#print \" std[h]: \" + str(c2g_parkings_filtered[\"duration\"].std()/60)\n#print\n\n\n\n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n" }, { "alpha_fraction": 0.7280163764953613, "alphanum_fraction": 0.7515337467193604, "avg_line_length": 
25.29729652404785, "blob_id": "f6dd7e4599d54f925f00b7f26fbbc58e88ab016c", "content_id": "31cafa250e7eb997a81553fb178dc2b9ba42d71b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 978, "license_type": "no_license", "max_line_length": 67, "num_lines": 37, "path": "/Analysis/loading_data.py", "repo_name": "michelelt/MyTool", "src_encoding": "UTF-8", "text": "import pandas as pd\nimport warnings\nwarnings.filterwarnings('ignore')\nimport datetime\nimport matplotlib.pyplot as plt\nimport matplotlib\nfrom math import *\nimport numpy as np\nimport paths as paths\nfrom util import Utility\n\n\nfrom DataBaseProxy import DataBaseProxy\ndbp = DataBaseProxy()\nutil = Utility()\n\nyear = 2017\nmonth = 5\nday = 6\n\nstart = datetime.datetime(year, month, day, 0, 0, 0)\nend = datetime.datetime(year, month +2 , day, 23, 59, 0)\n\n## bookings ##\nc2g_bookings = dbp.query_bookings_df(\"car2go\",\"Torino\", start, end)\nc2g_bookings.to_pickle(paths.car2go_booking_pickle_path)\n\nenj_bookings = dbp.query_bookings_df(\"enjoy\",\"Torino\", start, end)\nenj_bookings.to_pickle(paths.enjoy_booking_pickle_path)\n\n\n##parkings\nc2g_parkings = dbp.query_parkings_df(\"car2go\",\"Torino\", start, end)\nc2g_parkings.to_pickle(paths.car2go_parkings_pickle_path)\n\nenj_parkings = dbp.query_parkings_df(\"enjoy\",\"Torino\", start, end)\nenj_parkings.to_pickle(paths.enjoy_parkings_pickle_path)\n\n\n\n\n\n" }, { "alpha_fraction": 0.5078431367874146, "alphanum_fraction": 0.5283224582672119, "avg_line_length": 31.439716339111328, "blob_id": "46b8536ac11bf145fb163ddad8a9790dd03e17aa", "content_id": "66218247dbb6ec30f19ee38401de329d37ebf7db", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4590, "license_type": "no_license", "max_line_length": 87, "num_lines": 141, "path": "/Analysis/simulator/main.py", "repo_name": "michelelt/MyTool", "src_encoding": "UTF-8", "text": "import pandas as pd\nimport geopandas as gpd\nimport numpy as np\nimport datetime\nimport time\nimport random\nimport sys\nimport os.path\nsys.path.insert(0, '/home/mc/Scrivania/Tesi/MyTool/Analysis/')\nimport paths as paths\nfrom DataBaseProxy import DataBaseProxy\nsys.path.insert(0, '/home/mc/Scrivania/Tesi/MyTool/Analysis/simulator')\nfrom util import Utility\nfrom car import Car\nfrom city import City\nfrom shapely.geometry import Point, Polygon\nfrom station import Station\nimport threading\nfrom multiprocessing import Process\nimport matplotlib.pyplot as plt\nfrom matplotlib import colors\n\n## service functions\ndef worker(node):\n resutls = pd.DataFrame()\n for cso in [\"car2go\", \"enjoy\"]:\n for ppz in [2,4,6,8]:\n for z in [30,60,90,120,150,180,210,238]:\n node['ppz'] = ppz\n node[\"cso\"] = cso\n \n torino.place_stations(z*node[\"ppz\"],\n node['ppz'],\n node[\"cso\"],\n algorithm=node[\"alg\"],\n station_type=1)\n c2g_stats = torino.run(node[\"cso\"], threshold=0)\n row = pd.Series()\n row[\"z\"] = z\n row[\"ppz\"] = node[\"ppz\"]\n row[\"p\"] = z*node[\"ppz\"]\n row[\"provider\"] = node[\"cso\"]\n row[\"algorithm\"] = node[\"alg\"]\n row[\"mean_dpc\"] = c2g_stats[\"deaths\"].mean()\n row[\"median_dpc\"] = c2g_stats[\"deaths\"].median()\n row[\"tot_deaths\"] = c2g_stats[\"deaths\"].sum()\n row[\"pieni\"] = torino.pieni\n row[\"avg_bat_after\"] = torino.avg_bat_after\n row[\"avg_bat_before\"] = torino.avg_bat_before\n row[\"rech_cars\"] = torino.rech_cars\n resutls = resutls.append(row, ignore_index=True)\n \n 
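# each worker dumps its partial results to its own pickle (node[\"out\"]); __main__ joins the processes and its commented rebuild block shows how the pickles can be merged back together\n    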
resutls.to_pickle(node["out"])\n\n\ndef return_path(cso, alg, ppz, z):\n    string = str(cso) +\"_\"+ str(alg) + \"_\" + str(ppz) + \"_\"+ str(z)\n    return string\n\n    \n\nif __name__ == \"__main__\":\n    ## build the city ##\n\n    year = 2017\n    month = 5\n    day = 6\n    start = datetime.datetime(year, month, day, 0, 0, 0)\n    end = datetime.datetime(year, month +2, day, 23, 59, 0)\n    torino = City(\"Torino\", start,end)\n    torino.set_c2g_datasets(from_pickle=True)\n    torino.set_enj_datasets(from_pickle=True)\n    torino.get_fleet(\"car2go\")\n    torino.get_fleet(\"enjoy\")\n    \n    Series = torino.car2go_parkings_analysis.iloc[0]\n    DataFrame = torino.car2go_parkings_analysis\n\n    \n    # parameter for the parallel simulation ##\n#    n_z = [30,60,90,120,150,180,210,238]\n#    n_ppz = [2]\n    algorithms = ['max_parking', 'max_time', 'max_avg_time']\n#    algorithms = [\"rnd\"]\n    commands = {}\n    \n    j= 0\n    init_time = time.time()\n    print \"start at: \" + time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(init_time))\n    print j\n#    for cso in [\"car2go\"]:\n    for alg in algorithms :\n#            for ppz in n_ppz:\n        d = {}\n        d[\"alg\"] = alg\n        d[\"out\"] = paths.sym_path_bat+\"bat_\"+str(j)                \n#        d[\"cso\"] = 'car2go'\n        commands[j] = d\n        j=j+1\n                \n    \n    ## building the command lists\n    node_sim_list=[]\n    process_list = []\n    print commands.keys()\n    for i in commands.keys():\n        node_sim_list.append(commands[i])\n    \n    ## run\n    for node in node_sim_list:\n        p = Process(target=worker, args=(node,))\n        process_list.append(p)\n        p.start()\n        \n    for p in process_list:\n        p.join()\n    final_time = time.time() - init_time\n\n    print \"ends at: \" + time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))\n    print \"duration: \" + str(final_time)\n    print\n    \n    print \"END\"\n    # rebuilding the results\n#    res = pd.DataFrame()\n#    root = \"/home/mc/Scrivania/Tesi/MyTool/pickles/\"\n#    myDir = \"sym_res_rnd/\"\n#    name = \"rnd_\"\n#    for j in range(0,56):\n#        res = res.append(pd.read_pickle(root+myDir+name+str(j)), ignore_index=True)\n#        \n#    myDir = \"sym_res_rnd_c2g/\"\n#    name = \"res_rnd\"\n#    for j in range(0,200):\n#        res = res.append(pd.read_pickle(root+myDir+name+str(j)), ignore_index=True)\n#    \n#    myDir = \"sym_res_3_alg_no_rand_final/\"\n#    name = \"sym_res_3_alg_no_rand_final3_alg_fin_\"\n#    for j in range(0,24):\n#        res = res.append(pd.read_pickle(root+myDir+name+str(j)), ignore_index=True)\n#    \n    \n    \n\n\n" }, { "alpha_fraction": 0.617691159248352, "alphanum_fraction": 0.638680636882782, "avg_line_length": 19.647058486938477, "blob_id": "95257d32d300f7405296934f6b28a3779e3c787", "content_id": "6b3be18b129d195aaa3ca52c829a854d502d8009", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 667, "license_type": "no_license", "max_line_length": 59, "num_lines": 34, "path": "/Analysis/simulator/pseudo_code.py", "repo_name": "michelelt/MyTool", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Aug 24 17:48:30 2017\n\n@author: mc\n\"\"\"\n\nextract booking and plate\nc = car[plate] \nif (c.start_pos in recharging zone) :\n    #trying to decide if a car could be recharged\n    order the queue in the zone per arrival time increasing\n    if plate in first N pos:\n        compute parking time: TPc - Tfl\n        update energy\n    else:\n        pass\n    queue.pop(car)\nelse:\n    pass\n\ncar.assign_last_booking(booking)\nif c.energy > energy_min:\n    compute consumption\n    update_energy (consumption)\n    if (booking.arrival == station):\n        enqueue the booking\n    else:\n        pass\nelse:\n    pass\n\ntime++" }, { 
"alpha_fraction": 0.7531172037124634, "alphanum_fraction": 0.7830423712730408, "avg_line_length": 29.923076629638672, "blob_id": "57b5372727c9f21d5f349d18bfdf0ba553021f0e", "content_id": "eb82fead29585bfd67b6ca3925a2854272b9a663", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 401, "license_type": "no_license", "max_line_length": 158, "num_lines": 13, "path": "/Analysis/shapefile_tests.py", "repo_name": "michelelt/MyTool", "src_encoding": "UTF-8", "text": "import pandas as pd\nimport geopandas as gpd\nfrom shapely.geometry import Point, Polygon\nimport datetime\nimport matplotlib.pyplot as plt\nimport paths as p\nfrom util import Utility\nimport paths as paths\nimport thread\n\nturin_df = gpd.read_file(\"/home/mc/Scrivania/taglio_fogli_di_carta_tecnica_1000_geo/taglio_fogli_di_carta_tecnica_1000_geo.dbf\").to_crs({\"init\": \"epsg:4326\"})\nturin_df.plot()\nplt.show()" }, { "alpha_fraction": 0.5359178781509399, "alphanum_fraction": 0.5387685298919678, "avg_line_length": 17.06185531616211, "blob_id": "2434ab8fcff5a298b94dff82b3c9e0c0cbe4966d", "content_id": "125bd271bd0422cf7ce283d0a179ca64d4539140", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1754, "license_type": "no_license", "max_line_length": 82, "num_lines": 97, "path": "/Analysis/recordFileds.py", "repo_name": "michelelt/MyTool", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\n#car2go\ncar2go_bookings_cols = [\n \"_id\",\n \"init_fuel\",\n \"city\" ,\n \"walking\",\n \"vendor\",\n \"driving\",\n \"final_time\",\n \"plate\",\n \"engineType\",\n \"init_time\" ,\n \"vin\",\n \"smartPhoneRequired\",\n \"interior\",\n \"final_fuel\",\n \"exterior\",\n \"init_date\",\n \"final_date\",\n \"init_address\",\n \"final_address\",\n \"origin_destination\",\n \"public_transport\"\n ]\n\ncar2go_parkings_cols=[\n \"_id\",\n \"plate\" ,\n \"vendor\",\n \"final_time\" ,\n \"loc\",\n \"init_time\" ,\n \"vin\",\n \"smartPhoneRequired\",\n \"interior\",\n \"exterior\",\n \"address\",\n \"init_date\",\n \"final_date\",\n \"city\",\n \"fuel\",\n \"engineType\"\n]\nwalking_cols=[\"duration\", \"distance\"]\ndriving_cols=[\"duration\", \"distance\"]\norigin_destination_cols = [\"type\", \"coordinates\"]\npublic_transport_cols = [\"duration\",\"destination\", \"arrival_date\", \"arrival_time\"]\n\nenjoy_bookings_cols = [\n \"_id\",\n \"init_fuel\",\n \"virtual_rental_type_id\",\n \"walking\" ,\n \"final_time\",\n \"final_fuel\",\n \"init_date\",\n \"final_date\",\n \"final_address\",\n \"city\",\n \"driving\",\n \"carModelData\",\n \"plate\",\n \"vendor\",\n \"car_category_id\",\n \"init_time\",\n \"car_category_type_id\",\n \"car_name\",\n \"onClick_disabled\",\n \"origin_destination\",\n \"init_address\" ,\n \"virtual_rental_id\",\n \"public_transport\"\n]\n\nenjoy_parkings_col = [\n \"_id\",\n \"city\",\n \"vendor\",\n \"final_time\",\n \"plate\",\n \"car_category_id\",\n \"init_time\",\n \"car_category_type_id\",\n \"virtual_rental_type_id\",\n \"carModelData\",\n \"car_name\",\n \"init_date\",\n \"onClick_disabled\",\n \"virtual_rental_id\",\n \"fuel\",\n \"final_date\",\n \"loc\",\n \"address\"\n]\n\n\n" }, { "alpha_fraction": 0.5758286118507385, "alphanum_fraction": 0.604062020778656, "avg_line_length": 32.815093994140625, "blob_id": "6d2bcd41f7648f80f78ab605900a233fb12fa858", "content_id": "a26f534388541c74f7d036baa7446c951662d8cc", "detected_licenses": [], "is_generated": false, "is_vendor": 
false, "language": "Python", "length_bytes": 8961, "license_type": "no_license", "max_line_length": 115, "num_lines": 265, "path": "/Analysis/parkingsMap.py", "repo_name": "michelelt/MyTool", "src_encoding": "UTF-8", "text": "import pandas as pd\nimport geopandas as gpd\nfrom shapely.geometry import Point, Polygon\nimport datetime\nimport matplotlib.pyplot as plt\nimport paths as p\nfrom util import Utility\nimport paths as paths\nimport thread\n\n\nu = Utility()\n\n\nturin_df = gpd.read_file(\"../SHAPE/Zonizzazione.dbf\").to_crs({\"init\": \"epsg:4326\"})\ngrid_df = gpd.read_file(\"../SHAPE/grid.dbf\").to_crs({\"init\": \"epsg:4326\"})\n\ndef pointfy (lon, lat):\n return Point(float(lon), float(lat))\n\n\n\npa_enjoy = pd.read_pickle(p.enjoy_parkings_pickle_path_zoned, None)\npa_car2go = pd.read_pickle(p.car2go_parkings_pickle_path_zoned, None)\n\n\ndef parkings_per_zone(df_in):\n# turin = gpd.read_file(\"../SHAPE/Zonizzazione.dbf\").to_crs({\"init\": \"epsg:4326\"})\n \n turin = gpd.read_file(\"../SHAPE/grid500.dbf\").to_crs({\"init\": \"epsg:4326\"})\n \n# df_in = df_in[df_in.duration > 20]\n# df_in['geometry'] = df_in.apply(lambda row: pointfy(row['lon'], row['lat']), axis = 1)\n# df_in ['geometry'] = pointfy(df_in[\"lon\"],df_in[\"lat\"])\n\n crs = {\"init\": \"epsg:4326\"}\n df_in = gpd.GeoDataFrame(df_in, crs=crs)\n \n turin['zone_id'] = turin.index\n df = gpd.sjoin(df_in, turin, how='right', op='within')\n df2 = df.groupby('zone_id').count()\n# df3 = pd.DataFrame(df.groupby('zone_id')['duration', 'zone_name'].sum())\n df3 = pd.DataFrame(df.groupby('zone_id')['duration'].sum())\n\n turin['count'] = df2[df2.columns[0]]\n turin['duration'] = (df3['duration'])\n turin_out = turin.dropna()\n \n \n ##total duration per zone /60 -> total duration in hours (a)\n ##(a)/ #parkings per zone -> hour of stop i each zone\n turin_out['max_avg_time'] = turin_out['duration']/60.0/turin_out['count']\n turin_out[\"max_parking\"] = turin_out[\"count\"]\n turin_out[\"max_time\"] = turin_out[\"duration\"]/60.0\n del turin\n return turin_out, df\n\n\n#turin_c2g.to_csv(\"my_gdf_def.csv\")\n#turin_c2g.to_file('MyGeometries.shp', driver='ESRI Shapefile')\n\n\ndef plot_clorophlet_colorbar (gdf, column, filtered, vmin, vmax, provider, path):\n fig, ax = plt.subplots(1, 1, figsize=(10,10))\n gdf.plot(column=column, cmap='jet', ax=ax, linewidth=0.1)\n \n titles = { \"max_time\": provider + \" - Whole parking time\" + filtered,\n \"max_avg_time\" : provider + \" - Avg. parking time\"+ filtered,\n \"max_parking\" : provider + \" - Parkings number\"+ filtered\n }\n \n labels = { \"max_time\": \"Cumulative parking time sum [h]\",\n \"max_avg_time\" : \"Average parking time [h]\",\n \"max_parking\" : \"Max. 
number of parkings\"\n }\n plt.title(titles[column], fontsize = 36)\n ax.grid(linestyle='-', linewidth=1.0)\n plt.xticks([])\n plt.yticks([])\n# plt.xlabel(\"Latitude\", fontproperties=font)\n# plt.ylabel(\"Longitude\", fontproperties=font)\n \n\n cax = fig.add_axes([0.9, 0.1, 0.03, 0.8,])\n sm_ = plt.cm.ScalarMappable(cmap='jet',\n norm=plt.Normalize(vmin=vmin, vmax=vmax))\n sm_._A = []\n# fig.colorbar(sm_, cax=cax)\n cbar = plt.colorbar(sm_, cax=cax)\n cbar.ax.tick_params(labelsize=20)\n cbar.set_label(labels[column], rotation=270, fontsize=20, labelpad=30)\n# gdf.apply(lambda x: ax.annotate(s=x.N, xy=(x.geometry.centroid.x, x.geometry.centroid.y), ha='center'),axis=1)\n \n fig.savefig(path, bbox_inches='tight')\n plt.show()\n \n return\n\n\ndef gloabl_function (df_parking, provider, ub, lb, column, path):\n parkings_filtered = df_parking[\n (df_parking[\"duration\"] <= ub) & \n (df_parking[\"duration\"] >= lb )\n ]\n turin_df, parkings = parkings_per_zone(parkings_filtered) \n parkings = parkings.dropna()\n \n df = turin_df \n maxs = { \"max_time\": 4500,\n \"max_avg_time\" : 25,\n \"max_parking\" : 1506}\n my_min = 0\n my_max = maxs[column]\n if lb > 0:\n title = \"\"\n else :\n title= \"\"\n print provider, column, my_min, my_max\n if \"car2go\" in path:\n df.loc[426,'max_time'] = 4500\n plot_clorophlet_colorbar(df, column, title,my_min,my_max, provider, path)\n return df\n \ndef duration_cdf (df, provider, path):\n if provider == 'car2go':\n color = 'blue'\n else:\n color = 'red'\n \n column='duration'\n df[column] = df[column].div(60.0)\n \n fig = plt.figure(figsize=(10,10))\n ax = fig.gca()\n ax.set_title(provider + \" - Parking duration\", fontsize=36)\n ax.grid()\n \n df1=df\n column='duration'\n values = [df1[column].quantile(0.25), \n df1[column].quantile(0.50), \n df1[column].quantile(0.75), \n df1[column].quantile(0.99), \n df1[column].mean(),\n df1[column].median(),\n df1[column].std()\n ]\n print provider, column\n print values\n print\n \n ax.hist(df[\"duration\"], bins=100, cumulative=True, normed=True, color=color) \n \n ax.set_ylabel(\"ECDF\", fontsize=36)\n ax.set_xlabel(\"Duration [h]\", fontsize=36)\n \n for tick in ax.xaxis.get_major_ticks():\n tick.label.set_fontsize(27) \n \n for tick in ax.yaxis.get_major_ticks():\n tick.label.set_fontsize(27) \n \n if len(path) > 0:\n plt.savefig(path,bbox_inches = 'tight',pad_inches = 0.25)\n print \"salva\"\n\n \n'''\nc2g - avg time per park in zone\n'''\n\n#q=0.01\n#ub_c2g = pa_car2go[\"duration\"].quantile(1-q/10)\n#lb_c2g = pa_car2go[\"duration\"].quantile(q*9)\n#\n#q=0.01\n#ub_enj = pa_car2go[\"duration\"].quantile(1-q/10)\n#lb_enj = pa_car2go[\"duration\"].quantile(q*9)\n\n#provider = 'car2go'\n#path = \"/home/mc/Scrivania/Tesi/Writing/figures/04_data_analysis/parkings/\"+provider\n#column = 'max_time'\n#gloabl_function(pa_car2go, provider, ub_c2g, lb_c2g, column, path+\"_\"+column+\"_f\")\n#zzz = gloabl_function(pa_car2go, provider, max(pa_car2go[\"duration\"]), 0, column, path+\"_\"+column)\n\n#column = 'max_avg_time'\n#gloabl_function(pa_car2go, provider, ub_c2g, lb_c2g, column, path+\"_\"+column+\"_f\") \n#gloabl_function(pa_car2go, provider, max(pa_car2go[\"duration\"]), 0, column, path+\"_\"+column)\n\ncolumn = 'max_parking'\n#gloabl_function(pa_car2go, provider, pa_car2go[\"duration\"].quantile(0.95), 20, column, path+\"_\"+column+\"_f\")\n#gloabl_function(pa_car2go, provider, max(pa_car2go[\"duration\"]), 0, column, path+\"_\"+column)\n\n#provider='enjoy'\n#path = 
\"/home/mc/Scrivania/Tesi/Writing/figures/04_data_analysis/parkings/\"+provider\ncolumn = 'max_time'\n#gloabl_function(pa_enjoy, provider, ub_enj, lb_enj, column, path+\"_\"+column+\"_f\")\n#zzz2 = gloabl_function(pa_enjoy, provider, max(pa_enjoy[\"duration\"]), 0, column, path+\"_\"+column)\n\n#column = 'max_avg_time'\n#gloabl_function(pa_enjoy, provider, ub_enj, lb_enj, column, path+\"_\"+column+\"\") ### THE OBLY ONE FILTERED! ####\n#gloabl_function(pa_enjoy, provider, max(pa_enjoy[\"duration\"]), 0, column, path+\"_\"+column)\n\n#column = 'max_parking'\n#gloabl_function(pa_enjoy, provider, lb_enj, 20, column, path+\"_\"+column+\"_f\")\n#gloabl_function(pa_enjoy, provider, max(pa_enjoy[\"duration\"]), 0, column, path+\"_\"+column)\n\n\nprovider = 'car2go'\npath = \"/home/mc/Scrivania/Tesi/toptesi/figures/04_data_analysis/parkings/\"+provider\nq=0.01\nub = pa_car2go[\"duration\"].quantile(1-q/10)\nlb = pa_car2go[\"duration\"].quantile(q*9)\ndf1 = pa_car2go[\n (pa_car2go[\"duration\"] <= ub) & \n (pa_car2go[\"duration\"] >= lb )\n ]\nduration_cdf(df1, 'car2go', path+\"_pd_cdf\")\n\nprovider = 'enjoy'\npath = \"/home/mc/Scrivania/Tesi/toptesi/figures/04_data_analysis/parkings/\"+provider\nq=0.01\nup = pa_car2go[\"duration\"].quantile(1-q/10)\nlb = pa_car2go[\"duration\"].quantile(q*9)\ndf2 = pa_enjoy[\n (pa_enjoy[\"duration\"] <= ub) & \n (pa_enjoy[\"duration\"] >= lb)\n ]\nduration_cdf(df2, 'enjoy', path+\"_pd_cdf\")\n\n\n\n\ndef cdf_pdf_parking_time_per_zone(df_parkings,bins):\n\n color = u.get_color(df_parkings)\n provider = u.get_provider(df_parkings)\n \n check_big_zones = df_parkings.groupby(\"zone_id\").count()\n zones = []\n zones = check_big_zones.index\n# path = paths.plots_path4+\"/\"+provider+\"_zones/\"+ str(zones[0])\n# print path\n for i in range (1, len(zones)) :\n df = df_parkings[df_parkings[\"zone_id\"] == zones[i]]\n \n title1 = \"zone \"+str(zones[i]) + \" - cdf, total element \" + str(len(df))\n title2 = \"zone \"+str(zones[i]) + \" - pdf, total element \" + str(len(df))\n path = paths.plots_path4+\"/\"+provider+\"_zones/\"+ str(zones[i])\n print path\n \n fig, ax= plt.subplots(1,2, figsize=(20,10))\n \n ax1 = ax[0]\n ax2 = ax[1]\n df[\"duration\"].hist(ax=ax1, bins=bins, cumulative=True, normed=True, color=color)\n ax1.set_title(title1)\n ax1.set_xlabel(\"minutes\")\n \n df[\"duration\"].hist(ax=ax2, bins=bins, normed=True , color=color)\n ax2.set_title(title2)\n ax2.set_xlabel(\"minutes\")\n plt.ioff()\n# fig.savefig(paths.plots_path4+\"/\"+provider+\"_zones50/\"+ str(zones[i]), bbox_inches='tight')\n plt.close(fig)\n" }, { "alpha_fraction": 0.5009369254112244, "alphanum_fraction": 0.5153029561042786, "avg_line_length": 28.55555534362793, "blob_id": "e0bcf66421362d5f69a03858d265de7139ec9a49", "content_id": "5e75f98665b9eefc4401290a4fd7e5bbcc88bb23", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1601, "license_type": "no_license", "max_line_length": 99, "num_lines": 54, "path": "/Analysis/simulator/station.py", "repo_name": "michelelt/MyTool", "src_encoding": "UTF-8", "text": "import pandas as pd\nimport geopandas as gpd\n\n\n\nclass Station (object):\n \n def __init__ (self, s_id, max_cars, cars, s_type=1):\n self.s_id = s_id\n self.max_cars = max_cars\n self.cars = cars\n self.set_station_profile(s_type)\n self.charged_cars = pd.DataFrame(columns=[\"recharge\"])\n return\n \n def __repr__(self):\n string = \"id: \" + str(self.s_id) + \"\\n\" \n string += \"max cars:\" + str(self.max_cars) + \"\\n\"\n 
string += \"cars: \" + str(self.cars) + \"\\n\"\n string += \"s_type: \" + str(self.s_type) +\"\\n\\n\"\n return string\n \n def increase_supplied_cars(self):\n if self.cars <= self.max_cars :\n self.cars = self.cars + 1\n return\n \n def decrease_supplied_cars(self):\n if self.cars >= 1 :\n self.cars = self.cars -1\n return\n \n def compute_centroid (self, geometry):\n self.lat = geometry.centroid.y\n self.lon = geometry.centroid.x\n return\n \n def set_station_profile(self, s_type):\n #data from paper A\n if s_type == 1:\n self.s_type = 1\n self.kw = 1.92\n elif s_type == 2:\n self.s_type = 2\n self.kw = 19.2 #or 2.5\n else :\n self.s_type =3\n self.kw = 240.0\n \n def increase_recharged_counter(self, plate):\n if plate not in self.charged_cars.index:\n self.charged_cars.loc[plate, \"recharge\"] = 1\n else:\n self.charged_cars.loc[plate, \"recharge\"] = self.charged_cars.loc[plate, \"recharge\"] + 1\n \n" }, { "alpha_fraction": 0.551369845867157, "alphanum_fraction": 0.621004581451416, "avg_line_length": 22.078947067260742, "blob_id": "a75ba4d26f2e02389ad2af5a8c5308e1fc40a777", "content_id": "7be3d7956ee0e4975480f09da31232a0bf9c0fb1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 876, "license_type": "no_license", "max_line_length": 59, "num_lines": 38, "path": "/Analysis/greeding2.py", "repo_name": "michelelt/MyTool", "src_encoding": "UTF-8", "text": "from fiona.crs import from_epsg\nimport pandas as pd\nimport geopandas as gpd\nimport matplotlib.pyplot as plt\nfrom shapely.geometry import Point, Polygon\nimport paths as paths\n#grid creation\n\n#coordinates of turin (all area)\nstart_x = 7.58257\nfinal_x = 7.7320933\nstep_x = 0.00064 * 5 #50m\nstart_y = 45.009132\nfinal_y = 45.20\nstep_y = 0.00045 * 5 #50m\n\nx = start_x\ny= start_y\nnewdata = gpd.GeoDataFrame()\nnewdata.crs = from_epsg(4326)\nnewdata['geometry'] = None\ngdf_row = 0\nwhile x <= final_x:\n y = start_y\n while y <= final_y:\n p1 = (x,y)\n p2 = (x+step_x,y)\n p3 = (x+step_x, y+step_y)\n p4 = (x, y+step_y)\n q= Polygon([p1,p2,p3,p4])\n newdata.loc[gdf_row, 'geometry'] = q\n gdf_row = gdf_row + 1\n y = y + step_y\n \n x = x + step_x\n\noutfp = r\"/home/mc/Scrivania/Tesi/MyTool/SHAPE/grid250.shp\"\nnewdata.to_file(outfp)" }, { "alpha_fraction": 0.5430359244346619, "alphanum_fraction": 0.5645115971565247, "avg_line_length": 45.17448043823242, "blob_id": "7aa1b91720e1ed442f51fbc8c947102bc2781f89", "content_id": "8339d56fdbeba1859270d9eb386b13609e772079", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 17741, "license_type": "no_license", "max_line_length": 196, "num_lines": 384, "path": "/Analysis/simulator/plot.py", "repo_name": "michelelt/MyTool", "src_encoding": "UTF-8", "text": "import pandas as pd\nimport geopandas as gpd\nimport numpy as np\nimport datetime\nimport time\nimport random\nimport sys\nimport os.path\nsys.path.insert(0, '/home/mc/Scrivania/Tesi/MyTool/Analysis/')\nimport paths as paths\nfrom DataBaseProxy import DataBaseProxy\nsys.path.insert(0, '/home/mc/Scrivania/Tesi/MyTool/Analysis/simulator')\nfrom util import Utility\nfrom car import Car\nfrom city import City\nfrom shapely.geometry import Point, Polygon\nfrom station import Station\nimport threading\nfrom multiprocessing import Process\nimport matplotlib.pyplot as plt\nfrom matplotlib import colors\n\n\ndef plot_from_df (df, torino, provider, ppz, column, inputs):\n fig = plt.figure(figsize=(30,10))\n \n if provider == \"car2go\":\n nob 
= float(len(torino.car2go))\n noz = float(len(torino.car2go_parkings_analysis))\n cap = 17.6\n else:\n nob = len(torino.enjoy)\n noz = float(len(torino.enjoy_parkings_analysis))\n cap = 25.2\n\n colors = {\"max_avg_time\":\"red\", \"max_parking\":\"blue\", \"max_time\": \"black\", \"best_rnd\": \"gray\", \"mean_rnd\":\"green\"}\n markers = {\"max_avg_time\":\"o\", \"max_parking\":\"x\", \"max_time\": \"^\", \"best_rnd\": \"d\", \"mean_rnd\":\"+\"}\n labels = {\"max_avg_time\":\"max average parking time\", \"max_parking\":\"max number of parking\", \"max_time\": \"max parking time\", \"best_rnd\": \"best random\", \"mean_rnd\":\"average random(190 run)\"}\n div_facts = {\"tot_deaths\":nob, \"avg_bat_before\": cap, \"avg_bat_after\": cap, \"pieni\": nob}\n \n titles = {\"tot_deaths\": \" - Bat. discharge vs Zone coverage - ppz=\",\n \"avg_bat_before\": \" - Avg. SoC vs Zone coverage - ppz=\", \n \"avg_bat_after\": \" - Avg. after charnging SoC vs Zone coverage - ppz=\", \n \"pieni\": \" - Charging prob. vs Zone Coverage - ppz=\"}\n \n y_labels = {\"tot_deaths\": \"Battery discharge prob.\",\n \"avg_bat_before\": \"Avg. SoC prob.\", \n \"avg_bat_after\": \"Avg. SoC - After charging prob.\", \n \"pieni\": \"Charging prob.\"}\n \n saving_name = {\"tot_deaths\": \"bat_exaust_\",\n \"avg_bat_before\": \"SoC_Before_\", \n \"avg_bat_after\": \"SoC_After_\", \n \"pieni\": \"charging_\"}\n \n dir_name = {\"tot_deaths\": \"bat_exaust/\",\n \"avg_bat_before\": \"soc_before/\", \n \"avg_bat_after\": \"soc_after/\", \n \"pieni\": \"charging_prob/\"}\n\n# res = df[(df[\"z\"]>=80) & (df[\"z\"]<=100)]\n\n res = df\n mean_c2g = res[(res[\"provider\"] == provider) & (res[\"algorithm\"]==\"rnd\")]\n mean_c2g = mean_c2g.groupby([\"z\",\"ppz\"], as_index=False).mean()\n mean_c2g = mean_c2g[mean_c2g[\"ppz\"] == ppz]\n \n\n \n best_deaths = res[(res[\"provider\"] == provider) & (res[\"algorithm\"]==\"rnd\")]\n best_deaths = best_deaths.sort_values(\"tot_deaths\").groupby([\"z\",\"ppz\"], as_index=False).first()\n best_deaths = best_deaths[best_deaths[\"ppz\"] == ppz]\n \n \n det_alg = res[(res[\"provider\"] == provider) & (res[\"ppz\"] == ppz) & (res[\"algorithm\"]!=\"rnd\")]\n max_parking = det_alg[det_alg[\"algorithm\"] == \"max_parking\"]\n max_avg_time = det_alg[det_alg[\"algorithm\"] == \"max_avg_time\"]\n max_time = det_alg[det_alg[\"algorithm\"] == \"max_time\"]\n \n ax = fig.gca()\n \n ax.set_title(provider + titles[column]+str(ppz), fontsize=48)\n \n df_dict={\n \"mean_rnd\": mean_c2g,\n \"best_rnd\":best_deaths,\n \"max_parking\":max_parking,\n \"max_avg_time\":max_avg_time,\n \"max_time\": max_time\n }\n\n ax.grid()\n if len(res['algorithm'].unique()) > 3 and column not in [\"avg_bat_before\", 'avg_bat-after'] and provider == 'car2go':\n# ax.plot(mean_c2g[\"z\"], mean_c2g[column].div(div_facts[column]), color=colors[\"mean_rnd\"], marker=markers[\"mean_rnd\"], label=labels[\"mean_rnd\"])\n# ax.plot(best_deaths[\"z\"], best_deaths[column].div(div_facts[column]), color=colors[\"best_rnd\"], marker=markers[\"best_rnd\"], label=labels[\"best_rnd\"])\n# ax.plot(max_parking[\"z\"], max_parking[column].div(div_facts[column]), color=colors[\"max_parking\"], marker=markers[\"max_parking\"], label=labels[\"max_parking\"])\n# ax.plot(max_avg_time[\"z\"], max_avg_time[column].div(div_facts[column]), color=colors[\"max_avg_time\"], marker=markers[\"max_avg_time\"], label=labels[\"max_avg_time\"])\n# ax.plot(max_time[\"z\"], max_time[column].div(div_facts[column]), color=colors[\"max_time\"], 
marker=markers[\"max_time\"], label=labels[\"max_time\"])\n print \"ok\"\n for alg in inputs:\n df_dict[alg][\"z\"]\n df_dict[alg][column]\n div_facts[column]\n colors[alg]\n markers[alg]\n labels[alg]\n ax.plot(df_dict[alg][\"z\"], df_dict[alg][column].div(div_facts[column]), color=colors[alg], marker=markers[alg], label=labels[alg])\n\n my_t = range( 10, 175, 10)\n my_ticks = [str((\"{0:.2f}\".format(x/noz))) for x in my_t ]\n labels = [\"\"] * len(my_t)\n for i in range(0,len(labels)):\n labels[i] = int(float(my_ticks[i])*100)\n \n plt.xticks(my_t, labels)\n plt.yticks([0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8])\n\n \n elif len(res['algorithm'].unique()) > 3 and column not in [\"avg_bat_before\", 'avg_bat-after'] and provider == 'enjoy':\n ax.plot(max_parking[\"z\"], max_parking[column].div(div_facts[column]), color=colors[\"max_parking\"], marker=markers[\"max_parking\"], label=labels[\"max_parking\"])\n ax.plot(max_avg_time[\"z\"], max_avg_time[column].div(div_facts[column]), color=colors[\"max_avg_time\"], marker=markers[\"max_avg_time\"], label=labels[\"max_avg_time\"])\n ax.plot(max_time[\"z\"], max_time[column].div(div_facts[column]), color=colors[\"max_time\"], marker=markers[\"max_time\"], label=labels[\"max_time\"])\n my_t = range( 10, 175, 10)\n my_ticks = [str((\"{0:.2f}\".format(x/noz))) for x in my_t ]\n labels = [\"\"] * len(my_t)\n for i in range(0,len(labels)):\n labels[i] = int(float(my_ticks[i])*100)\n \n plt.xticks(my_t, labels)\n\n \n elif len(res['algorithm'].unique()) == 3 and column not in [\"avg_bat_before\", 'avg_bat-after']: \n print \"2\"\n ax.plot(max_parking[\"z\"], max_parking[column].div(div_facts[column]), color=colors[\"max_parking\"], marker=markers[\"max_parking\"], label=labels[\"max_parking\"])\n ax.plot(max_avg_time[\"z\"], max_avg_time[column].div(div_facts[column]), color=colors[\"max_avg_time\"], marker=markers[\"max_avg_time\"], label=labels[\"max_avg_time\"])\n ax.plot(max_time[\"z\"], max_time[column].div(div_facts[column]), color=colors[\"max_time\"], marker=markers[\"max_time\"], label=labels[\"max_time\"])\n my_t = range( 10, 175, 10)\n my_ticks = [str((\"{0:.2f}\".format(x/noz))) for x in my_t ]\n labels = [\"\"] * len(my_t)\n for i in range(0,len(labels)):\n# if i%2 == 0:\n# labels[i] == \"\"\n# else:\n labels[i] = int(float(my_ticks[i])*100)\n \n plt.xticks(my_t, labels)\n\n elif column in [\"avg_bat_before\", \"avg_bat_after\"]:\n print column\n# ax.plot(mean_c2g[\"z\"], mean_c2g[column].div(div_facts[column]), color=colors[\"mean_rnd\"], marker=markers[\"mean_rnd\"], label=labels[\"mean_rnd\"])\n# \n# ax.plot(best_deaths[\"z\"], best_deaths[column].div(div_facts[column]), color=colors[\"best_rnd\"], marker=markers[\"best_rnd\"], label=labels[\"best_rnd\"])\n \n ax.plot(max_parking[\"z\"], max_parking[column].div(div_facts[column]), color=colors[\"max_parking\"], marker=markers[\"max_parking\"], label=labels[\"max_parking\"])\n \n ax.plot(max_avg_time[\"z\"], max_avg_time[column].div(div_facts[column]), color=colors[\"max_avg_time\"], marker=markers[\"max_avg_time\"], label=labels[\"max_avg_time\"])\n \n ax.plot(max_time[\"z\"], max_time[column].div(div_facts[column]), color=colors[\"max_time\"], marker=markers[\"max_time\"], label=labels[\"max_time\"])\n# cstm_t = [30,60,90,120,150,180,210,238]\n my_t = [30,60,90,120,150,180,210]\n if provider == 'car2go':\n my_t.append(238)\n# print noz, my_t\n my_ticks = [str((\"{0:.2f}\".format(x/noz))) for x in my_t ]\n# print my_ticks\n labels = [\"\"] * len(my_t)\n for i in 
range(0,len(labels)):\n# if i%2 == 0:\n# labels[i] == \"\"\n# else:\n labels[i] = int(float(my_ticks[i])*100)\n plt.xticks(my_t, labels)\n\n \n else:\n print \"error\"\n return\n \n plt.xticks(my_t, labels)\n\n for tick in ax.xaxis.get_major_ticks():\n tick.label.set_fontsize(36) \n \n for tick in ax.yaxis.get_major_ticks():\n tick.label.set_fontsize(36) \n \n ax.set_xlabel(\"Zones coverage(%)\", fontsize=48)\n ax.set_ylabel(y_labels[column], fontsize=48)\n plt.legend(fontsize=36)\n \n# column = 'tot_deaths'\n# ppz = 6\n# provider = 'car2go'\n# dir_name = {\"tot_deaths\": \"bat_exaust/\",\n# \"avg_bat_before\": \"soc_before/\", \n# \"avg_bat_after\": \"soc_after/\", \n# \"pieni\": \"charging_prob/\"}\n# saving_name = {\"tot_deaths\": \"bat_exaust_\",\n# \"avg_bat_before\": \"SoC_Before_\", \n# \"avg_bat_after\": \"SoC_After_\", \n# \"pieni\": \"charging_\"}\n my_path=\"/home/mc/Immagini/pres_im/pzz_alg_\"+str(len(inputs))\n plt.savefig(my_path, bbox_inches = 'tight')\n\n# plt.show()\n \n \n \n \n\n\nif __name__ == \"__main__\":\n ## build the city ##\n\n \n year = 2017\n month = 5\n day = 6\n start = datetime.datetime(year, month, day, 0, 0, 0)\n end = datetime.datetime(year, month +2, day, 23, 59, 0)\n torino = City(\"Torino\", start,end)\n torino.set_c2g_datasets(from_pickle=True)\n torino.set_enj_datasets(from_pickle=True)\n torino.get_fleet(\"car2go\")\n torino.get_fleet(\"enjoy\")\n \n# print \"END\"\n ## rebuilding the resutls\n res2 = pd.DataFrame()\n root = \"/home/mc/Scrivania/Tesi/MyTool/pickles/\"\n myDir = \"sym_res_corr_rnd/\"\n name = \"\"\n for j in range(0,760):\n res2 = res2.append(pd.read_pickle(root+myDir+name+str(j)), ignore_index=True)\n \n myDir = \"sym_res_corr_eur/\"\n name = \"\"\n for j in range(0,6):\n res2 = res2.append(pd.read_pickle(root+myDir+name+str(j)), ignore_index=True)\n \n bat = pd.DataFrame()\n myDir =\"sym_res_bat/\"\n for j in range(0,3):\n bat = bat.append(pd.read_pickle(root+myDir+name+\"bat_\"+str(j)), ignore_index=True)\n \n \n\n plot_from_df(res2, torino, \"car2go\", 6, \"tot_deaths\", [\"mean_rnd\"])\n plot_from_df(res2, torino, \"car2go\", 6, \"tot_deaths\", [\"mean_rnd\",\"best_rnd\"])\n plot_from_df(res2, torino, \"car2go\", 6, \"tot_deaths\", [\"mean_rnd\",\"best_rnd\",\"max_avg_time\"])\n plot_from_df(res2, torino, \"car2go\", 6, \"tot_deaths\", [\"mean_rnd\",\"best_rnd\",\"max_avg_time\",\"max_parking\"])\n plot_from_df(res2, torino, \"car2go\", 6, \"tot_deaths\", [\"max_time\", \"mean_rnd\",\"best_rnd\",\"max_avg_time\",\"max_parking\"])\n \n# plot_from_df(res2, torino, \"car2go\", 4, \"tot_deaths\")\n# plot_from_df(res2, torino, \"car2go\", 6, \"tot_deaths\")\n# plot_from_df(res2, torino, \"car2go\", 8, \"tot_deaths\")\n\n# plot_from_df(res2, torino, \"enjoy\", 2, \"tot_deaths\")\n# plot_from_df(res2, torino, \"enjoy\", 4, \"tot_deaths\")\n# plot_from_df(res2, torino, \"enjoy\", 6, \"tot_deaths\")\n# plot_from_df(res2, torino, \"enjoy\", 8, \"tot_deaths\")\n\n# plot_from_df(bat, torino, \"car2go\", 2, \"avg_bat_before\")\n# plot_from_df(bat, torino, \"car2go\", 4, \"avg_bat_before\")\n# plot_from_df(bat, torino, \"car2go\", 6, \"avg_bat_before\")\n# plot_from_df(bat, torino, \"car2go\", 6, \"avg_bat_before\")\n# plot_from_df(bat, torino, \"car2go\", 8, \"avg_bat_before\")\n\n# plot_from_df(bat, torino, \"car2go\", 2, \"avg_bat_before\")\n# plot_from_df(bat, torino, \"car2go\", 4, \"avg_bat_before\")\n# plot_from_df(bat, torino, \"car2go\", 6, \"avg_bat_before\")\n# plot_from_df(bat, torino, \"car2go\", 8, \"avg_bat_before\")\n\n# 
plot_from_df(bat, torino, \"enjoy\", 2, \"avg_bat_before\")\n# plot_from_df(bat, torino, \"enjoy\", 4, \"avg_bat_before\")\n# plot_from_df(bat, torino, \"enjoy\", 6, \"avg_bat_before\")\n# plot_from_df(bat, torino, \"enjoy\", 8, \"avg_bat_before\")\n#\n# plot_from_df(res2, torino, \"car2go\", 2, \"avg_bat_after\")\n# plot_from_df(res2, torino, \"car2go\", 4, \"avg_bat_after\")\n# plot_from_df(res2, torino, \"car2go\", 6, \"avg_bat_after\")\n# plot_from_df(res2, torino, \"car2go\", 8, \"avg_bat_after\")\n\n# plot_from_df(res2, torino, \"enjoy\", 2, \"avg_bat_after\")\n# plot_from_df(res2, torino, \"enjoy\", 4, \"avg_bat_after\")\n# plot_from_df(res2, torino, \"enjoy\", 6, \"avg_bat_after\")\n# plot_from_df(res2, torino, \"enjoy\", 8, \"avg_bat_after\")\n\n# plot_from_df(res2, torino, \"car2go\", 2, \"pieni\")\n# plot_from_df(res2, torino, \"car2go\", 4, \"pieni\")\n# plot_from_df(res2, torino, \"car2go\", 6, \"pieni\")\n# plot_from_df(res2, torino, \"car2go\", 8, \"pieni\")\n\n# plot_from_df(res2, torino, \"enjoy\", 2, \"pieni\")\n# plot_from_df(res2, torino, \"enjoy\", 4, \"pieni\")\n# plot_from_df(res2, torino, \"enjoy\", 6, \"pieni\")\n# plot_from_df(res2, torino, \"enjoy\", 8, \"pieni\")\n\n \n \n# fig = plt.figure(figsize=(30,10))\n# ax = fig.gca()\n# ax.set_title(\"car2go\" + \" - Solution Improving\", fontsize=36)\n# ax.grid()\n## ax.plot(df_to_plot.index, df_to_plot.min_no_deaths.div(len(torino.car2go)), color='blue', label=\"Min number of deaths\")\n# ax.plot(df_to_plot.index, df_to_plot.min_no_deaths.div(len(torino.car2go)), color='blue', label=\"Min number of deaths\")\n#\n# ax.legend()\n# for tick in ax.xaxis.get_major_ticks():\n# tick.label.set_fontsize(27) \n# \n# for tick in ax.yaxis.get_major_ticks():\n# tick.label.set_fontsize(27) \n# \n# ax.set_xlabel(\"No. 
of run\", fontsize=36)\n# ax.set_ylabel(\"Deaths (%)\", fontsize=36)\n \n \n\n \n# zzz = res[res[\"algorithm\"] == \"duration_per_zone\"]\n# plot_from_df(res, torino, \"car2go\", ['max_parking', 'max_avg_time' ,'max_time'], 4, \"tot\" )\n# plot_from_df(res, torino, \"enjoy\", [\"max_parking\"], 4, \"tot\" )\n# \n# plot_from_df(res, \"car2go\", [\"max_avg_time\", \"rnd\", \"max_parking\"], 10, \"tot\" )\n# plot_from_df(res, \"enjoy\", [\"max_avg_time\", \"rnd\", \"max_parking\"], 10, \"tot\" )\n\n# bar_plot_parkings_stats(torino.car2go_parkings_analysis, \"car2go\", \"parking_per_zone\")\n# bar_plot_parkings_stats(torino.car2go_parkings_analysis, \"car2go\", \"duration_per_zone\")\n# bar_plot_parkings_stats(torino.car2go_parkings_analysis, \"car2go\", \"avg_duration_per_zone\")\n# \n# bar_plot_parkings_stats(torino.enjoy_parkings_analysis, \"enjoy\", \"parking_per_zone\")\n# bar_plot_parkings_stats(torino.enjoy_parkings_analysis, \"enjoy\", \"duration_per_zone\")\n# bar_plot_parkings_stats(torino.enjoy_parkings_analysis, \"enjoy\", \"avg_duration_per_zone\")\n \n# torino.car2go_parkings_analysis[\"taken\"] = 0\n# k=torino.place_stations(50 * 10,\n# 10,\n# \"car2go\",\n# algorithm=\"max_parking\",\n# station_type=1)\n# k=set(k.keys())\n# \n# torino.car2go_parkings_analysis.loc[k, \"taken\"] = 1000\n# plot_clorophlet_colorbar_solutions(torino, \"car2go\", \"max_parking\",\"parking_per_zone\", 50, 10)\n# plot_clorophlet_colorbar_solutions(torino, \"enjoy\",\"max_parking\", \"parking_per_zone\", 50, 10)\n \n \n# plot_clorophlet_colorbar_solutions(torino.car2go_parkings_analysis, \"car2go\", \"avg_duration_per_zone\")\n# \n# z = 60\n# ppz = 6\n# res = res2\n# provider = 'car2go'\n# \n# mean_c2g = res[(res[\"provider\"] == provider) & (res[\"algorithm\"]==\"rnd\")]\n#\n# mean_c2g = res[(res[\"provider\"] == provider) & (res[\"algorithm\"]==\"rnd\")]\n# mean_c2g = mean_c2g.groupby([\"z\",\"ppz\"], as_index=False).mean()\n# mean_c2g['tot_deaths'] = mean_c2g['tot_deaths'].div(len(torino.car2go)).mul(100)\n# mean_c2g = mean_c2g[mean_c2g['ppz']==6]\n#\n## mean_c2g = mean_c2g[mean_c2g['z']==z]\n#\n# mean_c2g['tot_deaths'] = mean_c2g['tot_deaths'].div(len(torino.car2go)).mul(100)\n# \n# best_deaths = res[(res[\"provider\"] == provider) & (res[\"algorithm\"]==\"rnd\")]\n# best_deaths = best_deaths.sort_values(\"tot_deaths\").groupby([\"z\",\"ppz\"], as_index=False).first()\n# best_deaths['tot_deaths'] = best_deaths['tot_deaths'].div(len(torino.car2go)).mul(100)\n# best_deaths = best_deaths[best_deaths['ppz']==6]\n#\n# best_deaths = best_deaths[best_deaths['z']==z]\n\n\n# best_deaths = best_deaths[best_deaths[\"ppz\"] == ppz]\n# plot_clorophlet_colorbar_solutions(torino, \"car2go\", \"max_parking\", z, ppz)\n# plot_clorophlet_colorbar_solutions(torino, \"car2go\", \"max_time\", z, ppz)\n# plot_clorophlet_colorbar_solutions(torino, \"car2go\", \"max_avg_time\", z, ppz)\n# plot_clorophlet_colorbar_solutions(torino, \"car2go\", \"rnd\", z, ppz) \n \n# plot_clorophlet_colorbar_solutions(torino, \"enjoy\", \"max_parking\", z, ppz)\n# plot_clorophlet_colorbar_solutions(torino, \"enjoy\", \"max_time\", z, ppz)\n# plot_clorophlet_colorbar_solutions(torino, \"enjoy\", \"max_avg_time\", z, ppz)\n# plot_clorophlet_colorbar_solutions(torino, \"enjoy\", \"rnd\", z, ppz) \n\n\n \n\n\n" }, { "alpha_fraction": 0.5372191071510315, "alphanum_fraction": 0.5495084524154663, "avg_line_length": 29.17021369934082, "blob_id": "e422cad797cb6451a504738ae844c7dc4e7cbc6d", "content_id": "268794075e91ec446c37a85c89d71f3dba487710", 
"detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2848, "license_type": "no_license", "max_line_length": 85, "num_lines": 94, "path": "/Analysis/simulator/car.py", "repo_name": "michelelt/MyTool", "src_encoding": "UTF-8", "text": "import random\nimport pandas as pd\n\nclass Car (object):\n\n def __init__ (self, plate, provider,last_booking):\n self.plate = plate\n self.last_booking = last_booking\n self.capacity = self.parameter(provider)[\"capacity\"]\n self.consumption = self.parameter(provider)[\"cons\"]\n self.current_capacity = self.capacity\n self.in_charge = False\n self.deaths = 0 \n return \n \n def __repr__(self):\n string = \"plate: \" + self.plate + \"\\n\"\n string += \"capacity: \" + str(self.capacity) + \"\\n\"\n string += \"curr_cap: \" + str(self.current_capacity) + \"\\n\"\n string += \"charging: \" + str(self.in_charge) + \"\\n\"\n string += \"deaths: \" + str(self.deaths) + \"\\n\"\n return string\n \n def parameter(self, provider):\n if provider == 'car2go':\n capacity = 17.6\n kwh_km = 0.13\n else:\n capacity = 25.2\n kwh_km = 0.188\n \n res = {'capacity': capacity, 'cons': kwh_km}\n return res\n \n def compute_consuption(self, distance):\n dist_km = self.m2km(distance)\n dc = dist_km * self.consumption\n self.current_capacity = self.current_capacity - dc\n if self.current_capacity <= 0:\n self.deaths = self.deaths + 1\n self.current_capacity = 0\n return dc\n \n def compute_recharge(self, station, cb):\n duration = (cb[\"init_time\"] - self.last_booking[\"final_time\"])/60/60 #in hour\n delta_c = duration * station.kw\n if (self.current_capacity + delta_c <= self.capacity):\n self.current_capacity = self.current_capacity + delta_c\n else:\n self.current_capacity = self.capacity\n return delta_c\n \n \n def last_final_zone(self):\n return int(self.last_booking[\"final_zone\"])\n\n \n def assign_last_booking(self, new_booking):\n self.last_booking = new_booking\n return\n \n def m2km(self, distance):\n return distance/1000\n \n def random_refil (self):\n self.current_capacity = random.random()\n \n def set_in_charge(self):\n self.in_charge=True\n \n def set_not_in_charge(self):\n self.in_charge=False\n \n def car2df(self):\n df = pd.Series()\n df[\"plate\"] = self.plate\n df[\"capacity\"] = self.capacity\n df[\"cc\"] = self.current_capacity\n df[\"deaths\"] = self.deaths\n return df\n \n def to_dict(self):\n d = {}\n d[\"capacity\"] = self.capacity\n d[\"current_capacity\"] = self.current_capacity\n d[\"charging\"] = self.in_charge\n d[\"deaths\"] = self.deaths\n return d\n \n \n \n \n\"http://it.smart.com/it/it/index/smart-fortwo-electric-drive-453/technical-data.html\"\n\"https://www.fiatusa.com/en/500e/\" \n " } ]
24
johvnguyen/Grad-ML
https://github.com/johvnguyen/Grad-ML
b959df39d29b8b5ec625462fd054a0c7166dba9f
73616c09f1de70fa547c332cbc8f365ed2eac41b
19962868a65671526564a8e03c2c3e69b1fde448
refs/heads/main
2023-04-09T09:59:35.404548
2021-04-26T21:17:02
2021-04-26T21:17:02
361,892,548
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6086246371269226, "alphanum_fraction": 0.6252858638763428, "avg_line_length": 28.108373641967773, "blob_id": "29e7110b449c18e569df4c04c66cd4ab24bd2211", "content_id": "a82462d908ca01a387c33f3224ddbd49f88c4942", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6122, "license_type": "no_license", "max_line_length": 129, "num_lines": 203, "path": "/Homework 2/hw2/p7/hw2_multi_svm.py", "repo_name": "johvnguyen/Grad-ML", "src_encoding": "UTF-8", "text": "import numpy as np\r\nimport pandas as pd\r\nfrom cvxopt import matrix as cvxopt_matrix\r\nfrom cvxopt import solvers as cvxopt_solvers\r\nimport MyMultiLogReg\r\n\r\ndef rbf_kernel(x1, x2, sigma):\r\n distance = np.linalg.norm(x1 - x2) ** 2\r\n divisor = 2 *(sigma ** 2)\r\n return np.exp(-distance/divisor)\r\n\r\ndef rbf_sim_train(X, y, c, sigma):\r\n # Setting up values\r\n n_rows, n_cols = X.shape\r\n X = X.values\r\n n_entries, n_col = X.shape\r\n y = y.values.reshape(-1, 1) * 1.\r\n \r\n \r\n # Getting Gram matrix\r\n K = np.zeros((n_rows, n_rows))\r\n for i in range(n_rows):\r\n for j in range(n_rows):\r\n K[i, j] = rbf_kernel(X[i], X[j], sigma)\r\n \r\n H = np.outer(y, y) * K\r\n \r\n # Putting in cvxopt format\r\n P = cvxopt_matrix(H, tc='d')\r\n q = cvxopt_matrix(-np.ones((n_entries, 1)))\r\n A = cvxopt_matrix(y, (1, n_entries), tc='d')\r\n b = cvxopt_matrix(np.zeros(1))\r\n \r\n G = cvxopt_matrix(np.vstack((np.identity(n_entries) * -1, np.identity(n_entries))))\r\n h = cvxopt_matrix(np.hstack((np.zeros(n_entries), np.ones(n_entries) * c)))\r\n\r\n \r\n # Solving the dual\r\n sol = cvxopt_solvers.qp(P, q, G, h, A, b)\r\n \r\n # Getting lagrange multipliers\r\n lambdas = np.ravel(sol['x'])\r\n \r\n # Get nonzero lagrange multipliers\r\n alphas = (lambdas > 1e-8).flatten()\r\n \r\n # We return the non-negative lagrange mult. 
since we pass in the training X and y into the predict function to get support vecs\r\n return lambdas\r\n \r\ndef predict(test_X, train_X, train_y, alpha, sigma):\r\n train_X = train_X.values\r\n train_y = train_y.values\r\n \r\n test_X = test_X.values\r\n \r\n support_vector_idx = alpha > 1e-4\r\n lagr_mult = alpha[support_vector_idx]\r\n \r\n support_vectors = train_X[support_vector_idx]\r\n support_vector_labels = train_y[support_vector_idx]\r\n \r\n predictions = []\r\n \r\n n_sv, _ = support_vectors.shape\r\n n_samples, n_features = test_X.shape\r\n \r\n for i in range(n_samples):\r\n current_prediction = 0\r\n \r\n for j in range(n_sv):\r\n kernel_distance = rbf_kernel(support_vectors[j], test_X[i], sigma)\r\n current_prediction += support_vector_labels[j] * lagr_mult[j] * kernel_distance\r\n \r\n predictions.append(current_prediction)\r\n \r\n return predictions\r\n\r\n# No need to pass in y because we are splitting based on indices; not segmenting the dataframes\r\ndef kfold(X, k):\r\n #Create array to return\r\n ret = []\r\n \r\n #Get range of indices of X\r\n n = len(X.index)\r\n all_indices = range(n)\r\n \r\n #Partition indices of X into k approximately evenly sized chunks\r\n partitions = np.array_split(all_indices, k)\r\n \r\n #Create the train/test split for each fold\r\n for i in range(k):\r\n test = partitions[i]\r\n train = np.setdiff1d(all_indices, test)\r\n \r\n ret.append([train, test])\r\n \r\n return ret\r\n\r\nif __name__ == '__main__':\r\n training_data = pd.read_csv(\"../HW2-data/mfeat_train.csv\")\r\n testing_data = pd.read_csv(\"../HW2-data/mfeat_test.csv\")\r\n \r\n train_y = training_data[training_data.columns[-1]]\r\n train_X = training_data.drop('y', 1)\r\n train_X = train_X.drop('Unnamed: 0', 1)\r\n \r\n test_y = testing_data[testing_data.columns[-1]]\r\n test_X = testing_data.drop('y', 1)\r\n test_X = test_X.drop('Unnamed: 0', 1)\r\n \r\n k = 10\r\n c = 100\r\n sigma = 100\r\n \r\n alphas_array = []\r\n train_bin_y_array = []\r\n \r\n # Training begins here\r\n # For some reason, the MNIST dataset maps 0 to 10; so we iterate the classes from 1 through 10, inclusive\r\n for i in range(1, 11):\r\n # Convert target vector to 1s and -1s here; 1 vs ALL strategy\r\n class_indices = train_y == i\r\n train_bin_y = class_indices.astype(int)\r\n train_bin_y = train_bin_y.replace(0, -1)\r\n \r\n \r\n alphas_i = rbf_sim_train(train_X, train_bin_y, c, sigma)\r\n \r\n alphas_array.append(alphas_i)\r\n train_bin_y_array.append(train_bin_y)\r\n \r\n # Testing begins here\r\n predictions_array = []\r\n \r\n # range 1 to 11 to remember that I have an off-by-one error; the 0 index holds the 1 predictor and the 9 index holds the 10 pred\r\n for i in range(1, 11):\r\n alphas = alphas_array[i-1]\r\n train_bin_y = train_bin_y_array[i-1]\r\n \r\n predictions = predict(test_X, train_X, train_bin_y, alphas, sigma)\r\n \r\n predictions_array.append(predictions)\r\n \r\n \r\n n_samples, _ = test_X.shape\r\n \r\n correct = 0\r\n incorrect = 0\r\n confusion_matrix = np.zeros((10, 10))\r\n \r\n for j in range(n_samples):\r\n true_y = test_y.iloc[j]\r\n \r\n model_predictions = []\r\n \r\n for i in range(1, 11):\r\n model_i_prediction = predictions_array[i-1]\r\n model_predictions.append(model_i_prediction[j])\r\n \r\n # Due to off-by-one error, in the 0th index is the 1's prediction and in the 9th index, I have the 10 prediction\r\n prediction = np.argmax(model_predictions)+1\r\n \r\n if (prediction == true_y):\r\n correct += 1\r\n else:\r\n incorrect += 1\r\n \r\n # The '-1' due to the 
off-by-one described above\r\n confusion_matrix[prediction-1, true_y-1] += 1\r\n \r\n print(\"Correct: \", correct)\r\n print(\"Incorrect: \", incorrect)\r\n print(\"Confusion Matrix: \\n\", confusion_matrix)\r\n \r\n # Now we begin our analysis using Multiclass Logistic Regression\r\n n_features = 64\r\n n_classes = 10\r\n batch_size = n_samples//5\r\n model = MyMultiLogReg.MyMultiLogReg(n_features, n_classes, batch_size)\r\n \r\n lr_train_y = pd.get_dummies(train_y)\r\n \r\n weights = model.train(train_X, lr_train_y)\r\n predictions = model.predict(test_X)\r\n \r\n lr_confusion_matrix = np.zeros((10, 10))\r\n \r\n for i in range(n_samples):\r\n prediction_vec = predictions[i]\r\n pred = np.argmax(prediction_vec)+1\r\n \r\n true_y = test_y.iloc[i]\r\n \r\n lr_confusion_matrix[pred-1, true_y-1] += 1\r\n \r\n print(\"Correct: \", np.trace(lr_confusion_matrix))\r\n print(\"Incorrect: \", n_samples - np.trace(lr_confusion_matrix))\r\n print(\"LogReg Confusion Matrix: \\n\", lr_confusion_matrix)\r\n \r\n np.savetxt('data/log_reg_confusion_matrix.csv', lr_confusion_matrix, delimiter=',')\r\n np.savetxt('data/svm_confusion_matrix.csv', confusion_matrix, delimiter=',')\r\n np.savetxt('data/svm_weights.csv', weights[0],delimiter=',')\r\n np.savetxt('data/svm_intercept.csv', weights[1],delimiter=',')\r\n \r\n \r\n " }, { "alpha_fraction": 0.6644274592399597, "alphanum_fraction": 0.6845801472663879, "avg_line_length": 27.52252197265625, "blob_id": "ad34c6116eb7b7ad964e25bae25e75d3a571c6b0", "content_id": "ff5b045850931fa56a5a14727cae9f166eec1cfd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3275, "license_type": "no_license", "max_line_length": 91, "num_lines": 111, "path": "/Homework 4/hw4/p6/hw4_random_forest.py", "repo_name": "johvnguyen/Grad-ML", "src_encoding": "UTF-8", "text": "import numpy as np\r\nimport pandas as pd\r\nimport matplotlib.pyplot as plt\r\n\r\nimport RandomForest\r\n\r\n# loading train and test data\r\ntrain_data = pd.read_csv(\"../data/health_train.csv\")\r\ntest_data = pd.read_csv(\"../data/health_test.csv\")\r\n\r\n# separate target from training data\r\ntrain_target = train_data[['y']]\r\ntrain_data = train_data.drop(columns = 'y')\r\n\r\n# separate target from testing data\r\ntest_target = test_data[['y']]\r\ntest_data = test_data.drop(columns = 'y')\r\n\r\n# Initialize arrays\r\nfeature_set_size = [50, 100, 150, 200, 250]\r\ntest_accuracy = []\r\ntrain_accuracy = []\r\n\r\n# iterate through feature set size\r\nfor i in range(5):\r\n # create and train random forest\r\n model = RandomForest.RandomForest(feature_set_size[i], 100, 1000)\r\n model.train(train_data, train_target)\r\n \r\n # predict on test and train data\r\n test_prediction = model.predict(test_data)\r\n train_prediction = model.predict(train_data)\r\n \r\n # Calculating error rate\r\n test_correct = 0\r\n train_correct = 0\r\n \r\n # calculate error rate for train and test data\r\n for j in range(len(test_prediction)):\r\n if test_target.values[j] == test_prediction[j]:\r\n test_correct += 1\r\n \r\n for j in range(len(train_prediction)):\r\n if train_target.values[j] == train_prediction[j]:\r\n train_correct += 1\r\n \r\n test_acc = (test_correct/len(test_target.values))\r\n train_acc = (train_correct/len(train_target.values))\r\n \r\n # save accuracies\r\n train_accuracy.append(train_acc)\r\n test_accuracy.append(test_acc)\r\n\r\n# plotting train/test accuracy vs feature set size\r\nplt.plot(feature_set_size, test_accuracy, label=\"Test 
Accuracy\")\r\nplt.plot(feature_set_size, train_accuracy, label=\"Train Accuracy\")\r\n\r\nplt.legend()\r\nplt.xlabel(\"Feature Set Size\")\r\nplt.ylabel(\"Accuracy\")\r\nplt.title(\"Feature Set Size vs Accuracy (with 100 trees and bag size = 1000)\")\r\nplt.xticks(feature_set_size)\r\n\r\nplt.savefig(\"figures/feature_set_size_plot.png\")\r\nplt.close()\r\n\r\n\r\n# initialize arrays for (c)\r\nn_trees = [50, 100, 150, 200, 250]\r\ntest_accuracy = []\r\ntrain_accuracy = []\r\n\r\n# iterate over number of trees\r\nfor i in range(5):\r\n # create random forests\r\n model = RandomForest.RandomForest(250, n_trees[i], 1000)\r\n model.train(train_data, train_target)\r\n \r\n # predict on train/test data\r\n test_prediction = model.predict(test_data)\r\n train_prediction = model.predict(train_data)\r\n \r\n # Calculating error rate\r\n test_correct = 0\r\n train_correct = 0\r\n \r\n # calculate accuracies\r\n for j in range(len(test_prediction)):\r\n if (test_target.values[j] == test_prediction[j]):\r\n test_correct += 1\r\n \r\n if (train_target.values[j] == train_prediction[j]):\r\n train_correct += 1\r\n \r\n test_acc = (test_correct/len(test_target.values))\r\n train_acc = (train_correct/len(test_target.values))\r\n \r\n # save train/test accuracies\r\n test_accuracy.append(test_acc)\r\n train_accuracy.append(train_acc)\r\n\r\n# plot accuracies vs number of trees\r\nplt.plot(n_trees, test_accuracy, label=\"Test Accuracy\")\r\nplt.plot(n_trees, train_accuracy, label=\"Train Accuracy\")\r\n\r\nplt.xlabel(\"Number of Trees in Random Forest\")\r\nplt.ylabel(\"Accuracy\")\r\nplt.title(\"Random Forest Size vs Accuracy (with 250 feature set size and bag size = 1000)\")\r\nplt.xticks(n_trees)\r\n\r\nplt.savefig(\"figures/forest_size_plot.png\")" }, { "alpha_fraction": 0.6629334688186646, "alphanum_fraction": 0.6700409054756165, "avg_line_length": 37.68376159667969, "blob_id": "9ee49a692e00d6acbd8316f775a03eae489abeeb", "content_id": "20f26c81310fb92801e012093e8b0d954c9afb03", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4643, "license_type": "no_license", "max_line_length": 739, "num_lines": 117, "path": "/Homework 1/hw1/p7/hw1-logistic.py", "repo_name": "johvnguyen/Grad-ML", "src_encoding": "UTF-8", "text": "import pandas as pd\r\nimport numpy as np\r\nimport matplotlib\r\nimport matplotlib.pyplot as plt\r\nimport MyLogisticReg\r\n\r\n'''\r\nb). In this code, implement and run logarithmic regression using the given data. In our code, we use the log likelihood of the logairthmic regression objective function. This substitution works because the log function is an increasing function of its input, so the objective function and the log likelihood of the objective function are minimized at the exact same values. We optimize the log likelihood function using gradient descent. So we are calculating its gradient with respect to the model weights. In our implementation, we encode the intercept term with the other model weights so that calculating the gradient becomes simpler.\r\n\r\nThe log likelihood function we optimize is: y ln(sigmoid(wx)) + (1 - y) ln(1 - sigmoid(wx))\r\n (x, y) is a features and target pairs\r\n sigmoid(z) refers to the sigmoid function\r\n ln(z) refers to the log base exp function\r\n w refers to the model weights, including the intercept\r\n \r\nThe derivative of the log likelihood function with respect to w is: -y ln(sigmoid(wx)) - (1 - y)ln(1 - sigmoid(wx))\r\n\r\nc). 
c). My choice of learning rate came from running my code a few times. I chose a learning rate which did not cause the program to run for longer than 1 minute. This is so that I can test my code in a timely manner. The learning rate is in MyLogisticReg.py. When my learning rate was too small, I would get very good results but the model would take almost 5-10 minutes to converge. On the other hand, if my learning rate was too big, logistic regression would converge instantly but have terrible results. When I chose the wrong learning rate and convergence criteria, the model would do well on the training set, but classify all samples in the validation set as 0 or 1. Thus I had to check which parameters would lead to the best results.\r\n\r\nNOTE: Program takes about 4 minutes to complete.\r\n\r\n'''\r\n\r\n\r\ndef get_next_train_valid(X, y, itr):\r\n k = 5\r\n n = len(X.index)\r\n all_indices = range(n)\r\n #all_indices = X.index\r\n \r\n partitions = np.array_split(all_indices, k)\r\n \r\n valid_indices = partitions[itr]\r\n train_indices = np.setdiff1d(all_indices, valid_indices)\r\n \r\n X_valid = X.iloc[valid_indices]\r\n X_train = X.iloc[train_indices]\r\n \r\n y_valid = y.iloc[valid_indices]\r\n y_train = y.iloc[train_indices]\r\n \r\n return X_train, y_train, X_valid, y_valid\r\n \r\n# Loading the data\r\nX = pd.read_csv(\"../HW1-data/IRISFeat.csv\", header=None)\r\ny = pd.read_csv(\"../HW1-data/IRISlabel.csv\", header=None)\r\n\r\n\r\n# Shuffling data; need to put y as column in X so we do not lose feature, label pairs when shuffling\r\nX['target'] = y.values\r\n\r\n\r\nX = X.sample(frac=1).reset_index(drop=True)\r\n\r\n\r\ny = X['target']\r\nX = X.drop(columns=['target'])\r\n\r\ndim = len(X.columns)\r\n\r\n# Making my logistic regression model\r\nmodel = MyLogisticReg.MyLogisticReg(dim)\r\nk = 5\r\n\r\nfor i in range(k):\r\n # Get training and validation sets\r\n X_train, y_train, X_valid, y_valid = get_next_train_valid(X, y, i)\r\n \r\n # Train the logistic regression model\r\n model_weights, model_intercept = model.train(X_train, y_train)\r\n \r\n # Get results for validation set\r\n y_predicted_class = model.predict(X_valid, model_weights, model_intercept)\r\n \r\n true_positives = 0\r\n true_negatives = 0\r\n false_positives = 0\r\n false_negatives = 0\r\n \r\n # Calculating confusion matrix\r\n for j in range(len(y_predicted_class)):\r\n if (y_valid.iloc[j] == y_predicted_class[j]):\r\n if (y_predicted_class[j] == 1):\r\n true_positives += 1\r\n else:\r\n true_negatives += 1\r\n else:\r\n if (y_predicted_class[j] == 1):\r\n false_positives += 1\r\n else:\r\n false_negatives += 1\r\n \r\n error_rate = 1 - (true_positives + true_negatives) / len(y_predicted_class)\r\n \r\n print(\"\\n\\nFOLD: \", i+1, \"\\n\")\r\n \r\n print(\"CONFUSION MATRIX\")\r\n print(\"True Positives: \", true_positives)\r\n print(\"True Negatives: \", true_negatives)\r\n print(\"False Positives: \", false_positives)\r\n print(\"False Negatives: \", false_negatives)\r\n print(\"ERROR RATE: \", error_rate)\r\n print(\"\\n\\n\")\r\n \r\n loss_history = model.get_loss_history()\r\n iterations = range(1, len(loss_history)+1)\r\n \r\n plt.plot(iterations, loss_history)\r\n fig = plt.gcf()\r\n plt.title(\"Convergence in Fold \"+str(i+1))\r\n plt.xlabel(\"Iterations\")\r\n plt.ylabel(\"Loss\")\r\n \r\n fname = \"Convergence_Plot_FOLD=\"+str(i+1)+\".png\"\r\n fig.savefig(\"figures/\"+fname)\r\n plt.show()\r\n plt.clf()\r\n" }, { "alpha_fraction": 0.5891625881195068, "alphanum_fraction": 0.6561576128005981, 
"avg_line_length": 22.16666603088379, "blob_id": "b24cd4640cd0fe760e11786f2acad135e0b08797", "content_id": "29e7f4c241932ada7dfef1ea106800c0eeab3ffe", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1015, "license_type": "no_license", "max_line_length": 61, "num_lines": 42, "path": "/Homework 2/hw2/p4/plotting.py", "repo_name": "johvnguyen/Grad-ML", "src_encoding": "UTF-8", "text": "import numpy as np\r\nimport pandas as pd\r\nimport matplotlib.pyplot as plt\r\n\r\nX = pd.read_csv(\"../HW2-data/hw2data.csv\", header=None)\r\n'''\r\ny = X[X.columns[-1]]\r\n\r\nX = X.drop(columns=[2])\r\nX1 = X[X.columns[0]]\r\nX2 = X[X.columns[1]]\r\n'''\r\n\r\n# Plot the data; something weird is going on with my accuracy\r\n\r\nclass1_indices = X[X.columns[-1]] == -1\r\nclass1 = X[class1_indices]\r\ny_class1 = class1[class1.columns[-1]]\r\nclass1 = class1.drop(columns=[2])\r\n\r\nclass2_indices = X[X.columns[-1]] == 1\r\nclass2 = X[class2_indices]\r\ny_class2 = class2[class2.columns[-1]]\r\nclass2 = class2.drop(columns=[2])\r\n\r\nclass3 = \r\n\r\nclass1_x1 = class1[class1.columns[0]].values\r\nclass1_x2 = class1[class1.columns[1]].values\r\n\r\nclass2_x1 = class2[class2.columns[0]].values\r\nclass2_x2 = class2[class2.columns[1]].values\r\n\r\nfig = plt.figure()\r\nax=fig.add_axes([0,0,1,1])\r\nax.scatter(class1_x1, class1_x2, color='r')\r\nax.scatter(class2_x1, class2_x2, color='b')\r\nax.set_xlabel('x1')\r\nax.set_ylabel('x2')\r\n\r\nplt.show()\r\nfig.savefig('P4_data.png')\r\n" }, { "alpha_fraction": 0.5821138024330139, "alphanum_fraction": 0.5849593281745911, "avg_line_length": 25.33333396911621, "blob_id": "5b85366d5a55fda7d018cb8bb6a914fc39ada3cc", "content_id": "ecb54039348afbb28e94322db696b9de74bc0f7f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2460, "license_type": "no_license", "max_line_length": 99, "num_lines": 90, "path": "/Homework 4/hw4/p5/AdaboostDT.py", "repo_name": "johvnguyen/Grad-ML", "src_encoding": "UTF-8", "text": "import numpy as np\r\nimport pandas as pd\r\nfrom sklearn.tree import DecisionTreeClassifier\r\n\r\n\r\nclass AdaboostDT:\r\n def __init__(self, t = 100):\r\n self.t = t\r\n \r\n self.trees = np.array([])\r\n self.alphas = np.array([])\r\n self.margin_distribution = np.array([])\r\n \r\n def train(self, X, y):\r\n n_samples, n_features = X.shape\r\n \r\n X = X.values\r\n \r\n y = y.values.flatten()\r\n \r\n # initialize uniform distribution\r\n curr_distribution = np.ones(n_samples) / n_samples\r\n \r\n # create and train the trees in the forests\r\n for i in range(self.t):\r\n # create a tree\r\n tree = DecisionTreeClassifier(criterion = 'gini', max_depth = 1, max_leaf_nodes = 2)\r\n \r\n # train a tree\r\n tree.fit(X, y, sample_weight = curr_distribution)\r\n \r\n # predict on training data\r\n prediction = tree.predict(X)\r\n \r\n # create new weights\r\n alpha = self.get_alpha(y, prediction, curr_distribution)\r\n \r\n # using a temp variable because I found a strange bug causing my code to run non-sequentially\r\n temp = (curr_distribution * np.exp(-alpha * y * prediction))\r\n \r\n # update weights\r\n curr_distribution = temp / np.sum(temp)\r\n \r\n # saving trees and weights\r\n self.trees = np.append(self.trees, tree)\r\n self.alphas = np.append(self.alphas, alpha)\r\n \r\n return\r\n \r\n # calculating alphas\r\n def get_alpha(self, y, predictions, D):\r\n indicator = y != predictions\r\n \r\n indicator = np.array(indicator)\r\n \r\n epsilon = 
epsilon = D.dot(indicator)\r\n \r\n alpha = (np.log(1 - epsilon) - np.log(epsilon))/2\r\n \r\n return alpha\r\n \r\n # predict with the ensemble\r\n def predict(self, X):\r\n n_samples, n_features = X.shape\r\n predictions = np.array([])\r\n \r\n X = X.values\r\n \r\n curr_prediction = np.zeros(n_samples)\r\n \r\n for j in range(len(self.trees)):\r\n curr_prediction += (self.alphas[j] * self.trees[j].predict(X))\r\n \r\n \r\n return np.sign(curr_prediction)\r\n \r\n # calculate margin distribution\r\n def get_margin_dist(self, X, y):\r\n X = X.values\r\n \r\n n_samples, n_features = X.shape\r\n curr_prediction = np.zeros(n_samples)\r\n \r\n for j in range(len(self.trees)):\r\n tree_prediction = self.trees[j].predict(X)\r\n alpha = self.alphas[j] / np.sum(self.alphas)\r\n curr_prediction += (alpha * tree_prediction)\r\n \r\n \r\n return y.values.flatten() * curr_prediction\r\n" }, { "alpha_fraction": 0.7432432174682617, "alphanum_fraction": 0.7674252986907959, "avg_line_length": 115.33333587646484, "blob_id": "495e25ba76b447b83ff7600d366a186e98def6e1", "content_id": "a51a635f91cffc459366a5daa0c463a471ea4ad4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 1406, "license_type": "no_license", "max_line_length": 506, "num_lines": 12, "path": "/Homework 3/README.txt", "repo_name": "johvnguyen/Grad-ML", "src_encoding": "UTF-8", "text": "In this folder, you will find:\r\n\r\nREADME.txt - This file, which explains what everything is\r\nhw3_john_nguyen.pdf - The written solutions to Problems 1 - 5\r\np6/ - Folder which contains all the code and figures associated with Problem 6\r\n\r\nComments:\r\n    For Problem 6, I had to use Google Colab because the CSE Machines I usually use for programming homework do not have Pytorch installed. Therefore, I used Google Colab for Problem 6 and downloaded the notebooks as .py files. For this reason, I was not able to run the code to make sure it works as a .py file. In case it does not work, here are links to the Google Colab notebooks I used to obtain my data and figures:\r\n        hw3_mnistfc.ipynb : https://colab.research.google.com/drive/1PgTt7_i6Cti6SL9xRUoEqyMZLRV6SiL8\r\n        hw3_mnistcnn.ipynb : https://colab.research.google.com/drive/12AgxMqbOOC_Zoxk_gnjnZ0QzIjfN0o2I\r\n        \r\n    Due to the vagueness of Problem 6, I ran all my code using absolute loss difference as my stopping condition. Since Problem 6 did not state a stopping condition for training, I used my own. Even though the Pytorch tutorial used a max epoch number as the stopping condition, this did not make sense in the context of Problem 6.4, since there would barely be a difference between running 30 epochs through training data with batch size 32 versus running 30 epochs through training data with batch size 128."
}, { "alpha_fraction": 0.718875527381897, "alphanum_fraction": 0.763052225112915, "avg_line_length": 48.20000076293945, "blob_id": "f5bb641646d572752b6b7c5bbf16684493c9197a", "content_id": "c839e4e8582c2993a6b8e2f58402922997aac7c3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 249, "license_type": "no_license", "max_line_length": 96, "num_lines": 5, "path": "/Homework 2/README.txt", "repo_name": "johvnguyen/Grad-ML", "src_encoding": "UTF-8", "text": "In the /hw2/ directory, there is the data and code for Problems 4-7.\r\n\r\nNote that the code for Problem 5 takes several hours to run, and prints results to the terminal.\r\n\r\nCSCI_5525_HW_2.pdf contains the written solutions and plots for Problems 1-7." }, { "alpha_fraction": 0.6623656153678894, "alphanum_fraction": 0.6731182932853699, "avg_line_length": 25.899999618530273, "blob_id": "0a689ab9283817be3e49b924bf7055f5e6d784fc", "content_id": "5ea8d6b0340a350550e8ac16a6bfc7369da42bb3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2790, "license_type": "no_license", "max_line_length": 77, "num_lines": 100, "path": "/Homework 4/hw4/p5/hw4_adaboost.py", "repo_name": "johvnguyen/Grad-ML", "src_encoding": "UTF-8", "text": "import numpy as np\r\nimport pandas as pd\r\nimport matplotlib.pyplot as plt\r\n\r\nimport AdaboostDT\r\n\r\n# loading the training data\r\ntrain_data = pd.read_csv(\"../data/cancer_train.csv\", sep=',')\r\ntest_data = pd.read_csv(\"../data/cancer_test.csv\", sep=',')\r\n\r\n# seperating target and features\r\ntrain_target = train_data[['y']]\r\ntrain_target = train_target.replace(0, -1)\r\ntrain_data = train_data.drop(columns = \"y\")\r\n\r\n# seperating target and features\r\ntest_target = test_data[['y']]\r\ntest_target = test_target.replace(0, -1)\r\ntest_data = test_data.drop(columns = \"y\")\r\n\r\n# reformatting targets\r\ny_train = train_target.values.flatten()\r\ny_test = test_target.values.flatten()\r\n\r\n# initialize arrays\r\nweak_learners = [25, 50, 75, 100]\r\ntrain_mc = []\r\ntest_mc = []\r\n\r\ncorrect = 0\r\n\r\n# loop through the number of weak learners\r\nfor t in weak_learners:\r\n # initialize the model\r\n model = AdaboostDT.AdaboostDT(t = t)\r\n \r\n # train the model\r\n model.train(train_data, train_target)\r\n \r\n # predict on train set\r\n train_prediction = model.predict(train_data)\r\n \r\n # count the number of misclassifications\r\n train_mc_error = 0\r\n \r\n for i in range(len(train_prediction)):\r\n if train_prediction[i] != y_train[i]:\r\n train_mc_error += 1\r\n \r\n # get training misclassification error\r\n train_mc_error = train_mc_error / len(y_train)\r\n \r\n # predict on test set\r\n test_prediction = model.predict(test_data)\r\n \r\n \r\n # counting testing misclassification error\r\n test_mc_error = 0\r\n \r\n for i in range(len(test_prediction)):\r\n if test_prediction[i] != y_test[i]:\r\n test_mc_error += 1\r\n \r\n test_mc_error = test_mc_error / len(y_test)\r\n \r\n train_mc_error = train_mc_error / len(y_train)\r\n \r\n # save the misclassification errors\r\n train_mc.append(train_mc_error)\r\n test_mc.append(test_mc_error)\r\n \r\n # get margin distribution\r\n margin_distribution = model.get_margin_dist(train_data, train_target)\r\n \r\n margin_distribution = np.sort(margin_distribution)\r\n ecd = np.arange(1, len(margin_distribution) + 1) / len(margin_distribution)\r\n\r\n ecd = np.arange(-1, 1, 0.01)\r\n \r\n # plot margin distribution\r\n 
plt.hist(margin_distribution, bins=ecd, histtype = 'step', cumulative=True, density=True)\r\n    plt.xlabel(\"Margin Distribution\")\r\n    plt.ylabel(\"Empirical Cumulative Distribution\")\r\n\r\n    plt.margins(0.02)\r\n\r\n    fig = plt.gcf()\r\n    fig.savefig((\"figures/t=\" + str(t) + \"_margin_distribution.png\"))\r\n    plt.close()\r\n\r\n# plot misclassification errors\r\nplt.plot(weak_learners, train_mc, label = \"Training Error\")\r\nplt.plot(weak_learners, test_mc, label = \"Test Error\")\r\nplt.title(\"Misclassification Error versus Number of Weak Learners\")\r\nplt.xlabel(\"Number of Weak Learners\")\r\nplt.ylabel(\"Misclassification Error\")\r\nplt.yticks([0, 1])\r\nplt.legend()\r\n\r\nplt.savefig(\"figures/misclassification_error.png\")\r\n" }, { "alpha_fraction": 0.5677187442779541, "alphanum_fraction": 0.5864962339401245, "avg_line_length": 26.77011489868164, "blob_id": "b75e5241ed7ad59be46ea67a6a94b0c9e29bad6e", "content_id": "d670f548e6f19f0fa3e4ff1d34d5ea607b97b8af", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2503, "license_type": "no_license", "max_line_length": 123, "num_lines": 87, "path": "/Homework 1/hw1/p7/MyLogisticReg.py", "repo_name": "johvnguyen/Grad-ML", "src_encoding": "UTF-8", "text": "import pandas as pd\r\nimport numpy as np\r\n\r\nclass MyLogisticReg:\r\n    def __init__(self, d):\r\n        # Regression params; adjust dimension for extra column\r\n        # This allows me to encode the intercept as another dimension of the weights\r\n        self.dim = d+1\r\n        self.w_1 = np.random.uniform(-0.1, 0.1, d+1) # Dummy initial params\r\n\r\n        # Analysis/Bookkeeping Vars\r\n        self.loss_history = []\r\n\r\n        #GD params\r\n        self.learning_rate = 1e-6\r\n    \r\n    def train(self, X, y):\r\n        # initial params suggested by textbook\r\n        self.w_1 = np.random.uniform(-0.01, 0.01, self.dim)\r\n        self.loss_history = []\r\n        \r\n        # Use gradient descent to optimize w_1 and w_0 by adding an extra column so we only have to optimize one parameter: w_1\r\n        self.add_col(X)\r\n        \r\n        # Dummy initial values\r\n        old_loss = -20\r\n        loss = -10\r\n        \r\n        n_data, n_features = X.shape\r\n        \r\n        while(abs(old_loss - loss) > 1e-7):\r\n            old_loss = loss\r\n            \r\n            # Calculating the gradient\r\n            gradient = np.zeros(self.dim)\r\n            z = np.dot(X.values, self.w_1)\r\n            predict = self.sigmoid(z)\r\n            gradient = np.dot(X.values.T, (predict - y.values)) / n_features\r\n\r\n            # Updating the vector\r\n            self.w_1 = self.w_1 - (self.learning_rate)*(gradient)\r\n\r\n            # Getting the new loss\r\n            loss = self.loss(predict, y)\r\n            \r\n            self.loss_history.append(loss)\r\n        \r\n        # Separating intercept from the weights\r\n        model_weights = self.w_1[1:]\r\n        model_intercept = self.w_1[0]\r\n        return model_weights, model_intercept\r\n    \r\n    def predict_helper(self, X):\r\n        # Returns predictions for input data X\r\n        z = np.dot(X.values, self.w_1)\r\n        prob = self.sigmoid(z)\r\n        \r\n        # Labeling all the points\r\n        ret = np.where(prob >= 0.5, 1, 0)\r\n        return ret\r\n    \r\n    def predict(self, X, weights, intercept):\r\n        self.w_1 = np.append(intercept, weights)\r\n        self.add_col(X)\r\n        predictions = self.predict_helper(X)\r\n        return predictions\r\n\r\n\r\n    def sigmoid(self, z):\r\n        return 1.0 / (1 + np.exp(-z))\r\n    \r\n    def add_col(self, X):\r\n        # Adds a column of 1's so we can encode a constant term in our vector w\r\n        new_col = np.ones((X.shape[0], 1))\r\n        X.insert(0, \"adj\", new_col, True)\r\n        return\r\n    \r\n    def loss(self, h, y):\r\n        loss_val = ((-y.values * np.log(h)) - ((1 - y.values) * np.log(1 - h)))\r\n        length = len(loss_val)\r\n        loss_val = 
loss_val.sum()\r\n loss_val = loss_val / length\r\n \r\n return loss_val\r\n \r\n def get_loss_history(self):\r\n return self.loss_history\r\n" }, { "alpha_fraction": 0.7283236980438232, "alphanum_fraction": 0.7341040372848511, "avg_line_length": 46.57143020629883, "blob_id": "482503ad368e042e0e3595b4950196db7cc97373", "content_id": "c4c9e2d761017cc93e303b590c79e5abb20b1a57", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 346, "license_type": "no_license", "max_line_length": 187, "num_lines": 7, "path": "/Homework 4/README.txt", "repo_name": "johvnguyen/Grad-ML", "src_encoding": "UTF-8", "text": "Files:\r\n\r\nhw4/: Folder which contains all data, code and figures requested in assignment.\r\njn_solution.pdf: Written solutions.\r\n\r\nNotes:\r\n Code associated with Problem 4 was not run because any machine I tried to run it on had memory errors. This is likely due to initializing massive arrays. This includes CSELabs machines.\r\n \r\n " }, { "alpha_fraction": 0.5445154905319214, "alphanum_fraction": 0.5553369522094727, "avg_line_length": 25.876712799072266, "blob_id": "ff5491e5b98e57e3406e6e4c1289d9998316c7b9", "content_id": "e5503b7e4e710689b3c73990e0cc07ecec78f607", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2033, "license_type": "no_license", "max_line_length": 119, "num_lines": 73, "path": "/Homework 2/hw2/p7/MyMultiLogReg.py", "repo_name": "johvnguyen/Grad-ML", "src_encoding": "UTF-8", "text": "import numpy as np\r\nimport pandas as pd\r\n\r\nclass MyMultiLogReg:\r\n def __init__(self, n_features, n_classes, batch_size):\r\n self.W = np.zeros((n_features, n_classes))\r\n self.b = np.zeros(n_classes)\r\n \r\n \r\n self.batch_size = batch_size\r\n self.learning_rate = 1e-5\r\n \r\n def train(self, X, y):\r\n n_samples, _ = X.shape\r\n X = X.values\r\n y = y.values\r\n \r\n # initialize dummy values for loss\r\n loss = 10\r\n old_loss = -20\r\n counter = 0\r\n itr = 0\r\n batch_itr = 0\r\n \r\n # figure out new convergence condition\r\n while(abs(old_loss - loss) > 1e-5):\r\n old_loss = loss\r\n batch = X[counter:(counter+self.batch_size % n_samples)]\r\n batch_y = y[counter:(counter+self.batch_size % n_samples)]\r\n counter = counter + self.batch_size % n_samples\r\n \r\n # Getting predictions\r\n z = np.dot(batch, self.W) + self.b\r\n predictions = self.softmax(z)\r\n d_y = batch_y - predictions\r\n \r\n # Applying the gradient\r\n self.W += self.learning_rate * np.dot(batch.T, d_y)\r\n self.b += self.learning_rate * np.mean(d_y, axis=0)\r\n \r\n loss = self.negative_log_likelihood(batch, batch_y)\r\n \r\n itr+=1\r\n \r\n if (counter+self.batch_size >= n_samples):\r\n counter = 0\r\n \r\n return (self.W, self.b)\r\n \r\n \r\n def negative_log_likelihood(self, X, y):\r\n z = np.dot(X, self.W) + self.b\r\n sigmoid_activation = self.softmax(z)\r\n \r\n cross_entropy = -np.mean(np.sum(y * np.log(sigmoid_activation) + (1 - y) * np.log(1 - sigmoid_activation), axis=1))\r\n \r\n return cross_entropy\r\n \r\n def softmax(self, z):\r\n e = np.exp(z - np.max(z))\r\n return e / np.array([np.sum(e, axis=1)]).T\r\n \r\n def predict(self, X):\r\n X = X.values\r\n \r\n z = np.dot(X, self.W) + self.b\r\n return self.softmax(z)\r\n \r\n def add_col(self, X):\r\n # Adds a column of 1's so we can encode a constant term in our vector w\r\n new_col = np.ones((X.shape[0], 1))\r\n X.insert(0, \"adj\", new_col, True)\r\n return" }, { "alpha_fraction": 0.5752546191215515, 
"alphanum_fraction": 0.5778951048851013, "avg_line_length": 27.131868362426758, "blob_id": "a77ab7be234867316de5ec33e0c4275576a0d157", "content_id": "90a19576f99f2570423647586a236e264e89ce8e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2651, "license_type": "no_license", "max_line_length": 85, "num_lines": 91, "path": "/Homework 4/hw4/p6/RandomForest.py", "repo_name": "johvnguyen/Grad-ML", "src_encoding": "UTF-8", "text": "import numpy as np\r\nimport pandas as pd\r\nfrom sklearn.tree import DecisionTreeClassifier\r\n\r\nclass RandomForest:\r\n def __init__(self, feature_subset_size, n_trees, bag_size):\r\n self.feature_subset_size = feature_subset_size\r\n self.n_trees = n_trees\r\n self.bag_size = bag_size\r\n self.tree_features = []\r\n \r\n self.forest = np.array([])\r\n \r\n def train(self, X, y):\r\n y = y.values\r\n \r\n cols = X.columns.values\r\n n_features = len(cols)\r\n n_samples, _ = X.shape\r\n \r\n \r\n # create trees in forest\r\n for i in range(self.n_trees):\r\n # create a tree\r\n tree = DecisionTreeClassifier(criterion = 'gini')\r\n \r\n # getting the subset of features\r\n choice = np.random.choice(n_features, self.feature_subset_size)\r\n tree_features = cols[choice]\r\n \r\n # take subset of features\r\n chosen_data = X[tree_features]\r\n \r\n chosen_data = chosen_data.values\r\n \r\n # Bagging here\r\n bag = []\r\n y_bag = np.array([])\r\n \r\n # fill up bag\r\n for j in range(self.bag_size):\r\n bag_choice = np.random.choice(n_samples, 1)[0] # to allow for repeating terms\r\n bag.append(chosen_data[bag_choice])\r\n y_bag = np.append(y_bag, y[bag_choice])\r\n \r\n \r\n bag = np.array(bag)\r\n \r\n # train tree on bag\r\n tree.fit(bag, y_bag)\r\n \r\n # append tree to forest\r\n self.forest = np.append(self.forest, tree)\r\n \r\n # save features\r\n self.tree_features.append([tree_features])\r\n \r\n return\r\n \r\n def predict(self, X):\r\n n_samples, n_features = X.shape\r\n prediction = []\r\n cols = X.columns.values\r\n \r\n # iterate over samples\r\n for i in range(n_samples):\r\n # get current sample\r\n sample = X.iloc[[i]]\r\n \r\n forest_prediction = np.array([])\r\n \r\n # iterate over trees in the forest\r\n for j in range(self.n_trees):\r\n # get the features the current tree gets\r\n tree_features = self.tree_features[j][0]\r\n \r\n # use of temp variables becaues code was somehow running non-sequentially\r\n temp1 = sample[tree_features]\r\n temp2 = temp1.values\r\n \r\n # current tree predicts on current sample\r\n tree_prediction = self.forest[j].predict(temp2)\r\n \r\n # save this tree's prediction\r\n forest_prediction = np.append(forest_prediction, tree_prediction)\r\n \r\n # make final prediction on sample based on majority vote of trees\r\n common_pred = np.bincount(forest_prediction.astype(int)).argmax()\r\n prediction.append(common_pred)\r\n \r\n return prediction\r\n" }, { "alpha_fraction": 0.8108108043670654, "alphanum_fraction": 0.8108108043670654, "avg_line_length": 43.599998474121094, "blob_id": "6057250c107b44001794012d008e9bcc079dd3de", "content_id": "0cd982031ab993fac5b9c62ee99d11cbb3708b64", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 222, "license_type": "no_license", "max_line_length": 159, "num_lines": 5, "path": "/README.md", "repo_name": "johvnguyen/Grad-ML", "src_encoding": "UTF-8", "text": "# Grad-ML\n\nMaterials from my graduate machine learning class.\n\nSome data directories have been compressed to 
allow me to upload the entirety of my homework materials to Github. To run the code, decompress the data directories." }, { "alpha_fraction": 0.7548918724060059, "alphanum_fraction": 0.7796086668968201, "avg_line_length": 106, "blob_id": "85536d220813a4f43e9e623044934bdffc09d368", "content_id": "778f902237e403ae8fda246b3ade553996fc8698", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 971, "license_type": "no_license", "max_line_length": 305, "num_lines": 9, "path": "/Homework 5/README.txt", "repo_name": "johvnguyen/Grad-ML", "src_encoding": "UTF-8", "text": "This directory contains files associated with CSCI 5525 Homework 5\r\nThe subdirectories are:\r\n\r\n figures/ - This directory contains all figures used in hw5_solution_jn.pdf\r\n p4/ - This directory contains the code associated with Problem 4. Note that I used Google Colab since Pytorch is not installed on CSELab machines. If there are any issues running the code, a working copy is available here: https://colab.research.google.com/drive/185HDvCJugZElo8Kner0Hd5HXhAQFUGqh\r\n p5/ - This directory contains the code associated with Problem 5. Note that I used Google Colab since Pytorch is not installed on CSELab machines. If there are any issues running the code, a working copy is available here: https://colab.research.google.com/drive/1XGxWS3UAE0qLliG-uxSkHCtXAw2PvJ_8\r\n saved_nns/ - This directory contains the saved neural networks used in my run of Problem 4 and Problem 5.\r\n \r\nhw5_solution_jn.pdf contains my written solutions to all problems. " }, { "alpha_fraction": 0.6207685470581055, "alphanum_fraction": 0.6306038498878479, "avg_line_length": 27.958904266357422, "blob_id": "8a0358176e875bbe420c8bd22cb662d5c71597eb", "content_id": "023a45c6fc180677ea31de17e8ac135a6c4459da", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4372, "license_type": "no_license", "max_line_length": 108, "num_lines": 146, "path": "/Homework 2/hw2/p4/hw2_svm.py", "repo_name": "johvnguyen/Grad-ML", "src_encoding": "UTF-8", "text": "import numpy as np\r\nimport pandas as pd\r\nimport cvxopt\r\n\r\ncvxopt.solvers.options['show_progress'] = False\r\ndef svmfit(X, y, c):\r\n # Setting up values\r\n X = X.values\r\n n_entries, n_col = X.shape\r\n \r\n # reshape y into an n x 1, where -1 denotes an arbitrary dimension; specify I want a 1 dimension col vector\r\n y = y.values.reshape(-1, 1) * 1.\r\n H_helper = y * X\r\n H = np.dot(H_helper, H_helper.T) * 1.\r\n \r\n # Putting in cvxopt format\r\n P = cvxopt.matrix(H)\r\n q = cvxopt.matrix(-np.ones((n_entries, 1)))\r\n \r\n G = cvxopt.matrix(np.vstack((np.identity(n_entries) * -1, np.identity(n_entries))))\r\n h = cvxopt.matrix(np.hstack((np.zeros(n_entries), np.ones(n_entries) * c)))\r\n \r\n A = cvxopt.matrix(y.reshape(1, -1))\r\n b = cvxopt.matrix(np.zeros(1))\r\n \r\n # Solving the dual\r\n sol = cvxopt.solvers.qp(P, q, G, h, A, b)\r\n \r\n # Lagrange multipliers\r\n lambdas = np.array(sol['x'])\r\n \r\n # Recovering weight vector\r\n w = ((y * lambdas).T @ X).reshape(-1, 1)\r\n \r\n # Get index of nonzero lagrange multipliers\r\n S = (lambdas > 1e-4).flatten()\r\n \r\n support_vectors = X[S]\r\n support_vector_labels = y[S]\r\n \r\n # Getting the intercept; is the data already centered? Yes... 
based on the graph...\r\n b = support_vector_labels - np.dot(support_vectors, w)\r\n \r\n return w\r\n \r\ndef predict(X, w):\r\n return np.dot(X.values, w)\r\n\r\n\r\n\r\ndef k_fold_cv(train_data, test_data, k, c):\r\n train_accuracy = []\r\n cv_accuracy = []\r\n test_accuracy = []\r\n \r\n # in this problem, k = 10\r\n y_train = train_data[train_data.columns[-1]]\r\n X_train = train_data.drop(columns=[2])\r\n \r\n y_test = test_data[test_data.columns[-1]]\r\n X_test = test_data.drop(columns=[2])\r\n \r\n kf = kfold(X_train, k)\r\n \r\n for train_index, validation_index in kf:\r\n valid_train_X = X_train.iloc[train_index]\r\n valid_train_y = y_train.iloc[train_index]\r\n \r\n valid_X = X_train.iloc[validation_index]\r\n valid_y = y_train.iloc[validation_index]\r\n \r\n train_correct = 0\r\n cv_correct = 0\r\n test_correct = 0\r\n \r\n weight = svmfit(valid_train_X, valid_train_y, c)\r\n \r\n fold_train_labels = predict(valid_train_X, weight)\r\n fold_validation_labels = predict(valid_X, weight)\r\n label = predict(X_test, weight)\r\n \r\n for i in range(len(valid_train_y.index)):\r\n if (valid_train_y.iloc[i] == np.sign(fold_train_labels[i])):\r\n train_correct += 1\r\n \r\n for i in range(len(valid_y.index)):\r\n if (valid_y.iloc[i] == np.sign(fold_validation_labels[i])):\r\n cv_correct += 1\r\n \r\n for i in range(len(y_test.index)):\r\n if (y_test.iloc[i] == np.sign(label[i])):\r\n test_correct += 1\r\n \r\n fold_train_accuracy = train_correct / len(valid_train_y.index)\r\n fold_validation_accuracy = cv_correct / len(valid_y.index)\r\n fold_test_accuracy = test_correct / len(y_test.index)\r\n \r\n train_accuracy.append(fold_train_accuracy)\r\n cv_accuracy.append(fold_validation_accuracy)\r\n test_accuracy.append(fold_test_accuracy)\r\n \r\n return train_accuracy, cv_accuracy, test_accuracy\r\n \r\n \r\n\r\n# No need to pass in y because we are splitting based on indices; not segmenting the dataframes\r\ndef kfold(X, k):\r\n #Create array to return\r\n ret = []\r\n \r\n #Get range of indices of X\r\n n = len(X.index)\r\n all_indices = range(n)\r\n \r\n #Partition indices of X into k approximately evenly sized chunks\r\n partitions = np.array_split(all_indices, k)\r\n \r\n #Create the train/test split for each fold\r\n for i in range(k):\r\n test = partitions[i]\r\n train = np.setdiff1d(all_indices, test)\r\n \r\n ret.append([train, test])\r\n \r\n return ret\r\n \r\n\r\nX = pd.read_csv(\"../HW2-data/hw2data.csv\", header=None)\r\n# X = X.sample(frac=1).reset_index(drop=True) # Shuffling data here\r\nk = 10\r\nc = 1000 # Subject to change for further iterations\r\nn = len(X.index)\r\ntrain_percentile = int(np.ceil(n * .80))\r\n\r\n# Splitting training and test data\r\ntraining_data = X[:train_percentile]\r\ntraining_data = training_data.reset_index(drop=True)\r\n\r\ntesting_data = X[train_percentile:]\r\ntesting_data = testing_data.reset_index(drop=True)\r\n\r\ntrain_accuracy, cv_accuracy, test_accuracy = k_fold_cv(training_data, testing_data, k, c)\r\n\r\nprint(\"Training Accuracy: \", np.around(np.mean(train_accuracy), 3))\r\nprint(\"Validation Accuracy: \", np.around(np.mean(cv_accuracy), 3))\r\nprint(\"Test Accuracy: \", np.around(np.mean(test_accuracy), 3))" }, { "alpha_fraction": 0.6622251868247986, "alphanum_fraction": 0.6775482892990112, "avg_line_length": 27.320755004882812, "blob_id": "19a4385ae61d0e1168c0a1e36523f12e2555508d", "content_id": "8215cf1073e2dd1261ce9349ef9faec0b94464d9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", 
"length_bytes": 1501, "license_type": "no_license", "max_line_length": 110, "num_lines": 53, "path": "/Homework 1/hw1/p6/hw1_lsq_iter.py", "repo_name": "johvnguyen/Grad-ML", "src_encoding": "UTF-8", "text": "import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n'''\nAnalysis of Plot\n\nAbove, we plot the L2 norm of w(k) - w^, which we call loss. The plot is monontonically decreasing.\nw(k) is the vector w on the kth iteration (0 <= k <= 500), where k(0) is a randomly initialized value. \nw^ is the value of w(500), when iteration ends. Notice the exponential decrease in loss almost immediately. \nThis tells us that earlier iterations of the Richardson Algorithm decreases loss. Also notice that later \niterations barely decrease the loss at all. Our plot suggests that the Richardson Algorithm is very efficient \nfor iteratively solving the LSQ problem. \n'''\n\ndef lsq_iter(A, b):\n mu = 1/np.linalg.norm(A) ** 2\n w = np.random.rand(10)\n w_list = [w]\n \n for i in range(500):\n w = w - mu * np.transpose(A) @ (A @ w - b)\n w_list.append(w)\n \n return w, w_list\n\ndef lsq(A, b):\n return np.linalg.inv(np.transpose(A) @ A) @ np.transpose(A) @ b\n\n# Initialize A and b\nA = np.random.rand(20, 10)\nb = np.random.rand(20)\n\n# Get iterative solution and closed form solution\nw_iter, w_list = lsq_iter(A, b)\nw_closed = lsq(A, b)\n\n# Calculate convergence of loss\nloss_list = []\niters = range(len(w_list))\n\nfor i in iters:\n w_k = w_list[i]\n loss = np.linalg.norm(w_closed - w_k) ** 2\n loss_list.append(loss)\n\n# Plotting\nplt.plot(iters, loss_list)\nfig = plt.gcf()\nplt.xlabel(\"Iterations\")\nplt.ylabel(\"Loss\")\nfig.savefig(\"figures/convergence_plot.png\")\nplt.show()\n" }, { "alpha_fraction": 0.697752833366394, "alphanum_fraction": 0.7089887857437134, "avg_line_length": 29.689655303955078, "blob_id": "ccfb303174f72a076fc33df4a78d53c812cd88ba", "content_id": "8e11136eda7444e6a7c8f04e06a3501f39179c13", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 890, "license_type": "no_license", "max_line_length": 73, "num_lines": 29, "path": "/Homework 4/hw4/p4/test_error_rate.py", "repo_name": "johvnguyen/Grad-ML", "src_encoding": "UTF-8", "text": "\"\"\"Test Error Calculator\"\"\"\nimport numpy as np\n\n\n# true values loaded (ASSUME THIS IS HIDDEN TO YOU)\ntrue_values = np.loadtxt('true_values_classification.txt', delimiter=',')\ntrue_values = np.expand_dims(true_values, axis=1)\nprint(type(true_values))\nprint(true_values.shape)\n\n# sample predicted values for TA testing\n# sample_preds = np.genfromtxt('sample.csv', delimiter=',')\n# sample_preds = np.expand_dims(sample_preds, axis=1)\n# print(sample_preds)\n# print(sample_preds.shape)\n\n\ndef error_rate(pred_vals):\n \"\"\"Function returning the error of model\n ASSUME THIS IS HIDDEN TO YOU\"\"\"\n num_preds = len(pred_vals)\n num_true_vals = len(true_values)\n val = np.sum(np.abs(pred_vals - true_values)) / num_true_vals\n return round(val, ndigits=5)\n\n\n# sample predicted values for TA testing\n##sample_preds = np.random.randint(2, size=(21283, 1))\n##print(error_rate(sample_preds))\n" }, { "alpha_fraction": 0.663779079914093, "alphanum_fraction": 0.6729832291603088, "avg_line_length": 27.317461013793945, "blob_id": "f749eda519b5f62a488002b47472e02bca1c75e4", "content_id": "8137f94d89fcd1a53ea9738841598e70cc68ed41", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1847, "license_type": "no_license", 
"max_line_length": 95, "num_lines": 63, "path": "/Homework 2/hw2/p6/hw2_multi_logistic.py", "repo_name": "johvnguyen/Grad-ML", "src_encoding": "UTF-8", "text": "import numpy as np\r\nimport pandas as pd\r\nimport MyMultiLogReg\r\n\r\n# No need to pass in y because we are splitting based on indices; not segmenting the dataframes\r\ndef kfold(X, k):\r\n #Create array to return\r\n ret = []\r\n \r\n #Get range of indices of X\r\n n = len(X.index)\r\n all_indices = range(n)\r\n \r\n #Partition indices of X into k approximately evenly sized chunks\r\n partitions = np.array_split(all_indices, k)\r\n \r\n #Create the train/test split for each fold\r\n for i in range(k):\r\n test = partitions[i]\r\n train = np.setdiff1d(all_indices, test)\r\n \r\n ret.append([train, test])\r\n \r\n return ret\r\n\r\nX_train = pd.read_csv(\"../HW2-data/mnist_train.csv\", header=None)\r\ny_train = X_train[X_train.columns[0]]\r\nX_train = X_train.drop(columns=[0])\r\ny_train = pd.get_dummies(y_train)\r\n\r\n\r\nX_test = pd.read_csv(\"../HW2-data/mnist_test.csv\", header=None)\r\ny_test = X_test[X_test.columns[0]]\r\nX_test = X_test.drop(columns=[0])\r\n\r\n\r\nbatch_size = 10000\r\nn_samples, n_features = X_train.shape\r\n_, n_classes = y_train.shape\r\n\r\n\r\nmodel = MyMultiLogReg.MyMultiLogReg(n_features, n_classes, batch_size)\r\nweights = model.train(X_train, y_train)\r\npredictions = model.predict(X_test)\r\n\r\ntest_samples, _ = X_test.shape\r\nconfusion_matrix = np.zeros((n_classes, n_classes))\r\ny_test = y_test.values\r\n\r\nfor i in range(test_samples):\r\n prediction_vec = predictions[i]\r\n pred = np.argmax(prediction_vec)\r\n \r\n confusion_matrix[pred, y_test[i]] += 1\r\n \r\nprint(\"Confusion Matrix: \\n\", confusion_matrix.astype(int))\r\nprint(\"Accuracy: \", np.trace(confusion_matrix) / test_samples)\r\nprint(\"Weights: \", weights[0])\r\nprint(\"Intercept: \", weights[1])\r\n\r\nnp.savetxt('data/weights.csv', weights[0], delimiter=',')\r\nnp.savetxt('data/intercept.csv', weights[1], delimiter=',')\r\nnp.savetxt('data/confusion_matrix.csv', confusion_matrix.astype(int), delimiter=',')\r\n" }, { "alpha_fraction": 0.7364587783813477, "alphanum_fraction": 0.7464724779129028, "avg_line_length": 82.46154022216797, "blob_id": "d4b8134016d895158e26d8a37f089c70f9307b95", "content_id": "3a6f91bffc1ba3a7d1d193d1bcbc3d99d707fd59", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 2197, "license_type": "no_license", "max_line_length": 370, "num_lines": 26, "path": "/Homework 1/hw1/README.txt", "repo_name": "johvnguyen/Grad-ML", "src_encoding": "UTF-8", "text": "In this folder, you will find:\r\n\r\nREADME.txt - This file, which explains what everything is\r\nhw1_john_nguyen.pdf - The written solutions to Problems 1 - 5\r\np6/ - Folder which contains all the code and figures associated with Problem 6\r\np7/ - Folder which contains all the code and figures associated with Problem 7\r\nHW1-data/ - Folder which contains any data used in the various coding problems\r\n\r\nComments:\r\n The provided code in p6/ and p7/ were written to run CSELabs machines in Keller Hall. 
Any bugs or errors you may have in running the code on your own machine should be alleviated when running on a CSELabs machine.\r\n \r\n In p6/ you will find the following files and folders:\r\n figures/ - A folder which contains the plot generated when running hw1-lsq_iter.py\r\n hw1-lsq_iter.py - The script which will generate the desired plots for Problem 6.\r\n \r\n In p7/ you will find the following files and folders:\r\n figures/ - Folder which contains all the plots associated with running hw1-logistic.py\r\n hw1-logistic.py - The script which will generate the desired plots and outputs for Problem 7.\r\n MyLogisticReg.py - Script which contains my logistic regression model. I chose to separate the model and the script into 2 separate files to increase readability and so that developing the code was easier.\r\n \r\n In p7/, a number of assumptions were made due to the vagueness of the problem statement:\r\n The log likelihood function of logistic loss was used. This is because it is easier to calculate the derivative of the log likelihood function, which we did in Problem 2. It is appropriate to use the log likelihood function because the log function is an increasing function, so the log likelihood of logistic loss is minimized at the same weights and intercept.\r\n \r\n Since no stopping condition was specified for gradient descent, I implemented a stopping condition based on the change in loss between iterations. This created another hyperparameter: minimum loss differential before convergence. I discuss my choice of this hyperparameter at the top of hw1-logistic.py\r\n \r\n The confusion matrix is printed to the terminal. \r\n" }, { "alpha_fraction": 0.6863207817077637, "alphanum_fraction": 0.7036163806915283, "avg_line_length": 29.850000381469727, "blob_id": "46d69682a233c2f8eabcfaa7fbc4143bf62fb0fc", "content_id": "a2e7f4d7756f0f9bcb0e6f2844c32e11f18db986", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1272, "license_type": "no_license", "max_line_length": 79, "num_lines": 40, "path": "/Homework 4/hw4/p4/hw4_boosting_regression.py", "repo_name": "johvnguyen/Grad-ML", "src_encoding": "UTF-8", "text": "import numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\nimport test_score\r\n\r\n# make initial guess\r\nprediction = np.ones(21283) * 10\r\n\r\ncurr_score = test_score.score(prediction)\r\n\r\nalpha = 1e-4\r\n\r\nscores = [curr_score]\r\n\r\nfor i in range(1000):\r\n # guess the gradient\r\n gradient_guess = np.random.uniform(-1, 1, 21283)\r\n \r\n # see score of prediction plus gradient \r\n # if it does better than current prediction, permanently add gradient\r\n while (test_score.score(prediction + (alpha * gradient_guess)) < curr_score):\r\n prediction += (alpha * gradient_guess)\r\n curr_score = test_score.score(prediction)\r\n scores.append(curr_score)\r\n \r\n # see score of prediction minus gradient\r\n # if it does better than current prediction, permanently subtract gradient\r\n while (test_score.score(prediction - (alpha * gradient_guess)) < curr_score):\r\n prediction -= (alpha * gradient_guess)\r\n curr_score = test_score.score(prediction)\r\n scores.append(curr_score)\r\n \r\nprint(\"Final Prediction: \", prediction)\r\nprint(\"With score: \", test_score.score(prediction))\r\n\r\nplt.plot(range(1, len(scores)+1), scores)\r\nplt.xlabel(\"Number of weak learners\")\r\nplt.ylabel(\"Score\")\r\nplt.title(\"Blind Boosting - Regression\")\r\nplt.savefig(\"figures/blind_boosting_regression.png\")" }, { "alpha_fraction": 0.6678966879844666, 
"alphanum_fraction": 0.6795818209648132, "avg_line_length": 24.68852424621582, "blob_id": "fa85a0b771368e0e74ff68b60550a5bad3ecb1dc", "content_id": "1b5d5c0974a6f288cad5148d201f0b10fb8eeaae", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1626, "license_type": "no_license", "max_line_length": 94, "num_lines": 61, "path": "/Homework 4/hw4/p4/hw4_boosting_classification.py", "repo_name": "johvnguyen/Grad-ML", "src_encoding": "UTF-8", "text": "import numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\nimport test_error_rate\r\n\r\n# function to obtain majority vote\r\ndef get_majority_vote(prediction):\r\n n_rows, n_cols = prediction.shape\r\n \r\n pred = []\r\n \r\n for i in range(n_rows):\r\n \r\n vote = []\r\n \r\n for j in range(n_cols):\r\n vote.append(prediction[i][j])\r\n \r\n vote = np.array(vote)\r\n counts = np.bincount(vote)\r\n pred.append(np.argmax(counts))\r\n \r\n pred = np.array(pred)\r\n \r\n return pred\r\n\r\n# randomly initialize first prediction\r\nprediction = np.array([np.random.randint(2, size=21283)])\r\n\r\n# get initial error\r\ncurr_er = test_error_rate.error_rate(get_majority_vote(prediction))\r\n\r\nscores = [curr_er]\r\n\r\nfor i in range(1000):\r\n # randomly guess a prediction\r\n gradient_guess = np.random.randint(2, size=21283)\r\n \r\n # temporarily include the new random weak predictor\r\n temp = np.append(prediction, gradient_guess, axis = 1)\r\n temp_vote = get_majority_vote(temp)\r\n \r\n temp_score = test_error_rate.error_rate(temp_vote)\r\n \r\n # if including the new random weak predictor improves our score, then permanently include it\r\n if (temp_score < curr_er):\r\n prediction = temp\r\n curr_er = temp_score\r\n scores.append(curr_er)\r\n\r\nfinal_prediction = get_majority_vote(prediction)\r\n\r\nprint(\"Final Prediction: \", final_prediction)\r\nprint(\"With Error Rate: \", test_error_rate.error_rate(final_prediction))\r\n\r\n# plotting\r\nplt.plot(range(1, len(scores)+1), scores)\r\nplt.xlabel(\"Number of weak learners\")\r\nplt.ylabel(\"Error Rate\")\r\nplt.title(\"Blind Boosting - Classification\")\r\nplt.savefig(\"figures/blind_boosting_classification.png\")" } ]
21
CMU-IDS-2020/a3-giesa-erdolu
https://github.com/CMU-IDS-2020/a3-giesa-erdolu
6d947f3a740ea77d2dd4e16bd9dcdc5b9102b248
1b1fc1739f3d5b86beb8eda39b32c727d48f25d4
0d757970f22f1ac301a112f1c90a055c84126c43
refs/heads/master
2023-01-13T19:50:20.226824
2020-10-23T03:30:42
2020-10-23T03:30:42
301,747,950
0
1
BSD-3-Clause
2020-10-06T14:07:05
2020-10-23T03:30:45
2020-10-23T03:30:42
Python
[ { "alpha_fraction": 0.7442089915275574, "alphanum_fraction": 0.7471690773963928, "avg_line_length": 78.41417694091797, "blob_id": "8dc46c932067291d5a6f7533d92473b5f6b642d6", "content_id": "a59813b598cbbc7faa70ee4264e7935840dbb02a", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 21467, "license_type": "permissive", "max_line_length": 351, "num_lines": 268, "path": "/writeup.md", "repo_name": "CMU-IDS-2020/a3-giesa-erdolu", "src_encoding": "UTF-8", "text": "# Tracing the Presidential Debate 2020\n\n![A screenshot of your application. Could be a GIF.](ScreenShot2.PNG)\n\n\n1- Goals of the project\n\n1.1 Aim\n\nThe interest in our project was to trace and reveal the dynamics of a conversation through data visualization. We explored the potential of visualization to capture and \nrepresent conversational dynamics.\n \n1.2 Site and Data \n\nOur site was the first โ€œPresidential Debate of the 2020 Electionsโ€ that took place on September 19th 2020 between President Donald J. Trump and former Vice President and \nPresidential candidate Joe Biden. The debate was moderated by journalist and television anchor Chris Wallace. We refer to them as โ€œactorsโ€ in this project. We worked with a \ntranscript of the debate that allowed us to do text processing on a corpus of the debate conversation. Text processing allowed us to extract and visualize various dimensions of \nthe conversation.\n\n1.3 Question \n\nThe question that we explored in the project, and that the prototype we built in response, was twofold.\n\nPart 1- First, we were interested in tracing the trajectory of the conversation between the actors. We were interested in the weight and speech attitude of the actors \nthroughout the conversation. The questions we looked at were:\n - What was the weight of each actor in terms of their presence in the conversation? What were the moments in the debate where the conversation was between the candidates? \n How long were these moments? What were the moments of intervention by the moderator?\n - Which actor spoke the most? If we take that as part of โ€œweightโ€, how did that weight develop for each actor throughout the conversation? Which actor interrupted the \n others the most? If we take that as part of โ€œattitudeโ€, how did that attitude follow throughout the conversation? Which actorโ€™s speech was broken by crosstalks the \n most?\n \nPart 2- Secondly, we were interested in tracing the weight of themes / topics, and their distribution throughout the conversation. We were interested in investigating the \nweight that each actor put on the themes / topics in the conversation. We were also interested in the weights of actors in pointing to themselves and pointing fingers to \nothers. The questions we looked at were:\n - What were the weights of themes / topics in the conversation? What were the windows in the conversation in which they were discussed? How long were those windows?\n - What were the weights that each candidate put on the themes? Which thematic words were emphasized more within each theme / topic and across the themes / topics? What were \n the thematic words emphasized more by each candidate and what was the weight of that emphasis by a candidate compared to the other?\n - What were the weights that each candidate put in pointing to themselves and pointing fingers to others? 
\n \nWe believe these questions point to important dynamics in a presidential debate that are significant matters of discussion within the political realm and media, and that \nessentially affect mass perception. So, we aim to shed light on these dimensions by visualizing the conversation. \n \nTo investigate these questions, we identified a list of dimensions to search and visualize within the debate conversation. These dimensions are described in the โ€œDesign \nProcessโ€ section. Essentially, this required extracting our own data -related to these dimensions- from the transcript of the debate. So, the prototype we built included text \nprocessing, data extraction and framing, and visualization. \n \n1.4 Framework\n\nFor text processing, data extraction, and data framing, we used NLTK (a language processing library for Python) and pandas (a data analysis library for Python). For data \nvisualization, we used Streamlit (an open source app framework) and Altair (a data visualization library for Python).\n\n2- Design Process\n\n2.1 Qualitative Evaluation of the Conversation \n\nThe initial step was going over the transcript. A careful reading and qualitative evaluation of the text allowed us to identify the dimensions of the conversation we wanted to \nsearch in investigating the questions. This step also involved sketching and brainstorming on the preliminary design ideas on the ways these dimensions could potentially be \nvisualized.\n \nWorkflow\n - (Emek + Katherine)\n\nThis step was a collaborative effort by both members. Combined with step 2, this step took about a quarter of the work time.\n \n2.2 Identifying the Dimensions of Conversation \n\nTo investigate the questions in Part 1, we defined:\n - Actor-Time: This dimension shows who was speaking at a particular moment in the conversation and patterns of interaction between the moderator and candidates (moments of \n intervention) as well as between the candidates (moments of debate). It also shows the length of these patterns. \n - Number of Words-Time: This dimension shows an incremental count of words by each actor on a particular moment in the conversation. It shows who had spoken the most up to a \n certain moment in the conversation. It also shows how the weight of each actor changed throughout the conversation.\n - Number of Words-Order of Speech: This dimension shows an incremental count of words by each actor over the course of the conversation. It shows who had spoken the most up \n to a certain order of speech (order count of turns of speech e.g. fifty fourth turn) in the conversation. It also shows how the weight of each actor changed throughout the \n conversation.\n - Number of Interruptions-Time: This dimension shows an incremental count of interruptions* made by each actor over the course of the conversation. It also shows the \n moments of interruptions by each actor. It provides hints about this particular speech attitude of each actor within a period or throughout the entire conversation. \n *Interruptions were marked with โ€˜โ€”โ€™ in the transcript.\n - Number of Interruptions-Order of Speech: This dimension shows an incremental count of interruptions by each actor over the course of the conversation. It shows who had \n interrupted the most up to a certain order of speech in the conversation. 
It also shows how this number changed for each actor throughout the conversation.\n - Number of Crosstalks-Time: This dimension shows an incremental count of crosstalks* during an actor’s speech over the course of the conversation. It also shows the moments \n of crosstalk. It provides hints about how this particular speech attitude occurred within a period or throughout the entire conversation. \n *Crosstalks were marked as “[crosstalk hh:mm:ss]” in the transcript.\n - Number of Crosstalks-Order of Speech: This dimension shows an incremental count of crosstalks during an actor’s speech over the course of the conversation. It shows \n whose speeches were broken by the crosstalks the most up to a certain order of speech in the conversation. It also shows how this number changed for each actor throughout \n the conversation.\n \nAll these dimensions involve a temporality to show the aspect (e.g. actor, number of words, etc.) within a moment or period in the conversation as well as through the overall \nconversation. Based on that, more refined ideas were discussed and produced for the visualizations of these dimensions. \n \nTo investigate the questions in Part 2, we outlined:\n - the 6 themes that the debate conversation included: healthcare, covid, economy, environment, race, and election. These themes were generally set by Chris Wallace. Using \n these themes, we qualitatively selected frequently used, or key thematic words associated with the broad theme. For example, 'mask' and 'vaccine' were two of the key \n words related to covid. \n - a list of thematic words under each theme that are used in the conversation*\n *This was based on our qualitative evaluation of the debate transcript.\n \nBased on the themes, we defined:\n - Number of Healthcare-Time: This dimension shows an incremental count of words related to healthcare by each actor on a particular moment in the conversation. It shows the \n weight that actors put on healthcare up to a certain moment in the conversation. It also shows how the weight that each actor put on healthcare changed throughout the \n conversation.\n - Number of Healthcare-Order of Speech: This dimension shows an incremental count of words related to healthcare by each actor over the course of the conversation. It shows \n the weight that actors put on healthcare up to a certain order of speech in the conversation. It also shows how the weight that each actor put on healthcare changed \n throughout the conversation. It also shows the windows in the conversation in which healthcare was discussed as well as the length of that window.\n \nThe dimensions below show the same information described for healthcare for the other 5 themes.\n - Number of Covid-Time\n - Number of Covid-Order of Speech\n - Number of Economy-Time\n - Number of Economy-Order of Speech\n - Number of Environment-Time\n - Number of Environment-Order of Speech\n - Number of Race-Time\n - Number of Race-Order of Speech\n - Number of Election-Time\n - Number of Election-Order of Speech\n \n - Word Count-Theme: This dimension shows a total count of words related to a particular theme by each actor. It shows the weight that actors put on each theme in the \n conversation and the weight of the theme in the conversation overall.\n - Word Count-Actor-Word: This dimension shows a total count of each thematic word used by each actor. It shows the emphasis that actors put on each thematic word and thereby \n the theme in the conversation. 
\n - Word Count-Actor-Pointing to Self and Fingers to Others: This dimension shows a total count of โ€œPointing to Self and Fingers to Othersโ€ (PSFO) words (โ€œyouโ€, โ€œheโ€, and โ€œIโ€) \n used by each actor. It shows the weight of pointing to self and pointing fingers to others by each actor in the conversation. \n \nas the dimensions to be extracted as data by processing the conversation text and to be visualized for insights. All these dimensions involve the notion of โ€œthematic weightโ€ \nwithin a period in the conversation as well as through the overall conversation. More refined ideas were discussed and produced for the visualizations of these dimensions.\n \nWorkflow\n - (Emek + Katherine) \n \nThis step was a collaborative effort by both members. Combined with step 1, this step took about a quarter of total work time.\n \n2.3 Text Processing, Data Extraction, and Data Framing\n\nFrom here, we began processing the conversation text to extract the identified dimensions. These dimensions became the categories in the data that constituted the two data \nframes that were used in creating the visualizations. These data frames were carefully discussed between the team members and created according to the ways the intended \nvisualizations required. \n \nThe first data frame contains the data ordered by Order of Speech and Time. It was structured as follows: \n - df1 = {Order of Speech: [], Actor: [], Time: [], Number of Words: [], Number of Interruptions: [], Number of Crosstalks: [], Number of Number Use: [], Number of \n Healthcare: [], Number of Covid: [], Number of Environment: [], Number of Election: [], Number of Economy: [], Number of Race: []} \n \nThe second data frame is ordered by the Theme Words. It was structured as follows: \n - df2 = {Theme Word: [], Broad Theme: [], Actor: [], Word Count: []}\n \nWorkflow\n - (Emek) Part of the code that extracts the data on the dimensions in Part 1.\n - (Emek + Katherine) Part of the code that extracts the data on themes over time (in Part 2). Combined with the first part of the code, this part created the first data frame.\n - (Katherine) Part of the code that extracts the data on the rest of dimensions in Part 2. This part of the code created the second data frame.\n \nOverall, this step involved coordinative and divided work. It took approximately half of the total work time.\n\n2.4 Creating Interactive Data Visualizations \n\nBased on the dimensions and two data frames, we produced two sets of visualizations for the application: โ€œThrough the debateโ€ set and โ€œThematic weights within the debateโ€ set.\n \na- Through the debate\n \nVisualizations:\n - Mini-map bar chart โ†’ Mini-map + (Actor-Number of Words-Time) \n - Zoomable line chart โ†’ (Number of Words-Order of Speech) \n \n - Mini-map bar chart โ†’ Mini-map + (Actor-Number of Interruptions-Time) \n - Zoomable line chart โ†’ (Number of Interruptions-Order of Speech)\n \n - Mini-map bar chart โ†’ Mini-map + (Actor-Number of Crosstalks-Time)\n - Zoomable line chart โ†’ (Number of Crosstalks-Order of Speech)\n \nMinimap bar charts were designed to allow a user to traverse the debate to see the dimensions in Part 1. The user can do it in three ways: (1) By using the horizontal scroll at \nthe bottom (2) By brushing over a mini-map of the debate to create a time frame and moving it forward (3) By using the โ€œOrder of Speechโ€ slider and sliding between the n(th) \norders of speech. 
\n \nEach minimap bar chart was made of a set of charts:\n - A mini-map: A map of the debate that allows the user to create a time frame by brushing and travel through the matrix chart and bar chart by moving the frame.\n - A matrix chart: A chart that maps Actor (on Y axis) on Time (on X axis hidden*) with color coded squares that represent Actor. It shows patterns of intervention by the \n moderator, and patterns and lengths of debate between the candidates. The matrix chart can be navigated through the mini-map or slider which is in sync with the bar chart.\n - A bar chart: A chart that maps Number of โ€œsomethingโ€ (e.g. Number of Words, Number of Interruptions, etc.) (on Y axis) on Time (on X axis) with color coded bars that \n represent Number of โ€œsomethingโ€ and Actor. It shows the incremental counts and allows to compare them across the actors on a moment, within an interval, or throughout the conversation. The bar chart can also be navigated through the mini-map or slider which is in sync with the matrix chart. It also has a tooltip interaction that shows the \n Actor and Number of Words on each bar.\n *The matrix chart and bar chart were aligned and they both use the bar chartโ€™s X axis that shows time. \n \nWe chose these interactions because we wanted the user to be able to:\n - navigate through the timeline of the conversation (by mini-map or slider)\n - focus on a moment or a timeframe (by brushing on the mini-map or slider)\n - make comparisons by looking at the positions (such as Actor squares) or size of things (such as bars next to each other)\n - check details (counts by tooltips, if necessary)\n \nEach minimap bar chart was followed by:\n - A zoomable line chart: A line chart that maps Number of โ€œsomethingโ€ (e.g. Number of Words, Number of Interruptions, etc.) (on Y axis) on Order of Speech (on X axis) with \n color coded lines that represent Number of โ€œsomethingโ€ and Actor. It shows trajectories of counts by each actor in relation to each other and throughout the conversation. \n It also allows the user to compare them on a moment, within an interval, or throughout the conversation. The line chart can be navigated by zooming in / out and panning to \n move forward or backward along the Order of Speech*. It can also be navigated by the slider. It also has a tooltip interaction that shows the Number of Words along the \n line by each Order of Speech.\n *Order of Speech is essentially linked to Time as each Order of Speech is time stamped. However, different than Time, Order of Speech indicates how many turns of speech \n preceded the current. For example, Order of Speech = 335 means the debate has passed 334 turns of speech by different actors. The 335th turn was by Joe Biden and it \n started on 41:41. 
\n \nWe chose these interactions because we wanted the user to be able to:\n - see an overall trajectory at a glance \n - make comparisons by seeing the trajectory lines next to each other\n - check details (by zooming in, or counts by tooltips, if necessary)\n - navigate through the conversation (by panning or slider)\n - focus on a particular turn or a range of turns\n \nb- Thematic weights within the debate\n \nVisualizations:\n - Mini-map bar chart โ†’ Minimap + (Actor-Number of Healthcare-Time)\n - Zoomable line chart โ†’ (Number of Healthcare-Order of Speech) \n \n - Mini-map bar chart โ†’ Minimap + (Actor-Number of Covid-Time) \n - Zoomable line chart โ†’ (Number of Covid-Order of Speech)\n \n - Mini-map bar chart โ†’ Minimap + (Actor-Number of Economy-Time) \n - Zoomable line chart โ†’ (Number of Economy-Order of Speech)\n \n - Mini-map bar chart โ†’ Minimap + (Actor-Number of Environment-Time) \n - Zoomable line chart โ†’ (Number of Environment-Order of Speech)\n \n - Mini-map bar chart โ†’ Minimap + (Actor-Number of Race-Time) \n - Zoomable line chart โ†’ (Number of Race-Order of Speech)\n \n - Mini-map bar chart โ†’ Minimap + (Actor-Number of Election-Time) \n - Zoomable line chart โ†’ (Number of Election-Order of Speech)\n \n - Linked bar chart โ†’ Word Count-Theme\n - Linked bar chart โ†’ Word Count-Actor-Theme Words\n \n - Bar chart โ†’ Word Count-Actor-Pointing to self and others\n \nEach minimap bar chart and zoomable line chart was designed in exactly the same ways described above. We chose these interactions for the same reasons described above.\n \nThe linked bar charts we designed was made of:\n - A bar chart: A chart that maps Word Count (on Y axis) on Theme (on X axis) with color coded bars that represent Word Count and Actor. It shows the total counts of theme \n words for each theme (both by Biden and Trump) by stacking the bars. It allows the user to compare the weight of themes in the overall conversation. It also allows the \n user to compare the weight of themes between the actors by comparing the size of stacked bars. The chart also has a tooltip interaction that shows the Actor on each bar. \n Each bar that is mapped to a theme can also be selected by clicking. This selection makes the second linked bar show a breakdown of the counts into theme words by each \n actor.\n - A bar chart: A chart that is linked to the first one, and that maps Word Count (on Y axis) on Actor (on X axis) and Theme Words (on column) with color coded bars that \n represent Word Count and Actor under each Theme Word. It shows the total count of each theme word by Actor. It also shows the weight of theme words within each theme. It \n also allows the user to compare the emphasis of theme words across the actors by comparing the size of bars. The chart also has a tooltip interaction that shows the Word \n Count on each bar. \n \n For example, clicking on the bar that indicates the โ€œCovidโ€ theme Word Count, will allow the user to see a side by side comparison of the โ€œCovidโ€ theme word (โ€œvaccineโ€, \n โ€œmaskโ€, โ€œdeathโ€, โ€œdyingโ€, โ€œcovidโ€) counts by each actor. 
\n \nWe chose these interactions because we wanted the user to be able to:\n - see various layers of information in two charts\n - see overall by default and breakdowns by selection\n - make comparisons of counts, weights, and emphasis by counts and size of things (such as bars stacked or next to each other)\n - check details (counts by tooltips, if necessary)\n \nThe bar chart we designed:\n - A bar chart: A chart that maps Word Count (on Y axis) on Actor (on X axis) and โ€œPointing to Self and Fingers to Othersโ€ (PSFO) words (โ€œYouโ€, โ€œHeโ€, โ€œIโ€) (on column) with \n color coded bars that represent Word Count and Actor under each PSFO word. It shows the total count of each PSFO word by Actor. It also allows the user to compare the \n emphasis of PSFO words across the actors by comparing the size of bars. The chart has a selection option with click that highlights PSFO Word Count by actor. The chart \n also has a tooltip interaction that shows the Word Count on each bar. \n \nWe chose these interactions because we wanted the user to be able to:\n - see overall by default and breakdowns by selection\n - make comparisons of counts and emphasis by counts and size of things (such as bars next to each other)\n - check details (counts by tooltips, if necessary)\n \nWorkflow\n - (Emek) Part of the code that creates the visualizations of the โ€œThrough the debateโ€ set. \n - (Emek + Katherine) Part of the code that creates a part of the visualizations of the โ€œThematic weights within the debateโ€ set (mini-map - bar chart - line chart \n combinations). \n - (Katherine) Part of the code that creates the rest of the visualizations of the โ€œThematic weights within the debateโ€ set (linked bar chart - bar chart combinations). \n \nOverall, this step involved coordinative and divided work. 
It took about a quarter of the total work time.\n" }, { "alpha_fraction": 0.5456705093383789, "alphanum_fraction": 0.5615077018737793, "avg_line_length": 33.10354232788086, "blob_id": "77614d5f67741026c50ec99a4a274f869d624854", "content_id": "7e90a815a3d65ed8a5a99dc0569c11fbda62385a", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 50514, "license_type": "permissive", "max_line_length": 106, "num_lines": 1439, "path": "/presidentVisCombine_V3.py", "repo_name": "CMU-IDS-2020/a3-giesa-erdolu", "src_encoding": "UTF-8", "text": "import string\r\nimport copy\r\nimport nltk\r\nfrom nltk.tokenize import word_tokenize\r\nfrom nltk.tokenize import TreebankWordTokenizer\r\nimport re\r\nimport pandas as pd\r\nimport streamlit as st\r\nimport altair as alt\r\n\r\n##################################################################\r\n## Text processing and extracting data: Part 1 \r\n##################################################################\r\n\r\nf = open(\"Debate_Data_Final.txt\", encoding=\"utf8\") \r\nraw = f.read()\r\n\r\nactor1 = \"Mod_Wallace\"\r\nactor2 = \"VP_Biden\"\r\nactor3 = \"P_Trump\"\r\n\r\n## List of words with no punctuation but with stamps\r\ntokenizer = nltk.RegexpTokenizer(r\"\\w+\")\r\nwordsNoPunct = tokenizer.tokenize(raw)\r\n# print(wordsNoPunct, len(wordsNoPunct))\r\n# print(\"BREAK\")\r\n\r\n## List of words with punctuation and stamps\r\nwordsWithPunct = TreebankWordTokenizer().tokenize(raw)\r\n#print(wordsWithPunct, len(wordsWithPunct))\r\n#print(\"BREAK\")\r\n\r\n\r\n## Function that gives List of words with no punctuation and no stamps\r\ndef removeStamps(raw):\r\n rawSplit = raw.split()\r\n \r\n for word in rawSplit:\r\n matched1 = re.match(r\"\\((\\d{2}:\\d{2})\\)\", word)\r\n matched2 = re.match(r\"\\((\\d{2}:\\d{2}:\\d{2})\\)\", word)\r\n matched3 = re.match(r\"\\[crosstalk\", word)\r\n is_match1 = bool(matched1)\r\n is_match2 = bool(matched2)\r\n is_match3 = bool(matched3)\r\n if is_match1 == True or is_match2 == True or is_match3 == True: \r\n rawSplit.remove(word)\r\n \r\n for word in rawSplit:\r\n matched4 = re.match(r\"\\d{2}:\\d{2}:\\d{2}\\]\", word)\r\n is_match4 = bool(matched4)\r\n if is_match4 == True:\r\n rawSplit.remove(word)\r\n\r\n rawJoined = \" \".join(rawSplit)\r\n wordsNoPunctNoStamp = tokenizer.tokenize(rawJoined)\r\n \r\n return wordsNoPunctNoStamp #, len(wordsNoPunctNoStamp)\r\n\r\n\r\n## Function that creates an actorInfo dictionary and adds actors' names\r\n## --- { index: [actorName]\r\ndef addActors(words): ## wordsNoPunctNoStamp\r\n actorInfo = {}\r\n index = -1\r\n\r\n for i in range(len(words) - 1):\r\n if words[i] == actor1:\r\n index += 1\r\n actorInfoList = []\r\n actorInfoList.append(actor1)\r\n actorInfo[index] = actorInfoList\r\n elif words[i] == actor2:\r\n index += 1\r\n actorInfoList = []\r\n actorInfoList.append(actor2)\r\n actorInfo[index] = actorInfoList\r\n elif words[i] == actor3:\r\n index += 1\r\n actorInfoList = []\r\n actorInfoList.append(actor3)\r\n actorInfo[index] = actorInfoList\r\n \r\n return actorInfo\r\n\r\n\r\n## Function that adds timeStamps to the dictionary \r\n## --- { index: [actorName, timeStamps]\r\ndef addTimeStamps(raw, dic): ## actorInfo\r\n rawSplit = raw.split()\r\n index = -1\r\n\r\n for word in rawSplit:\r\n matched1 = re.match(r\"\\((\\d{2}:\\d{2})\\)\", word)\r\n matched2 = re.match(r\"\\((\\d{2}:\\d{2}:\\d{2})\\)\", word)\r\n is_match1 = bool(matched1)\r\n is_match2 = bool(matched2)\r\n if is_match1 == True or 
is_match2 == True:\r\n index += 1\r\n timeStampNoParenth = word.replace(\"(\", \"\").replace(\")\", \"\")\r\n dic[index].append(timeStampNoParenth)\r\n \r\n return dic\r\n\r\n\r\n## Function that adds numWords to the dictionary \r\n## --- { index: [actorName, timeStamps, numWords]\r\ndef addNumWordsByActor(words, dic):\r\n index = -1 \r\n countActor1 = 0\r\n countActor2 = 0\r\n countActor3 = 0\r\n actor1_On = False\r\n actor2_On = False\r\n actor3_On = False\r\n \r\n for word in words:\r\n ## Capturing actors' names in the corpus\r\n if word == actor1:\r\n index += 1\r\n actor1_On = True\r\n actor2_On = False\r\n actor3_On = False\r\n elif word == actor2:\r\n index += 1\r\n actor1_On = False\r\n actor2_On = True\r\n actor3_On = False\r\n elif word == actor3:\r\n index += 1\r\n actor1_On = False\r\n actor2_On = False\r\n actor3_On = True\r\n\r\n ## Counting words in actors' phrases and add numWords in the dictionary\r\n if actor1_On == True and word != actor1:\r\n countActor1 += 1\r\n if len(dic[index]) == 2:\r\n dic[index].append(countActor1)\r\n elif len(dic[index]) == 3: \r\n dic[index][2] = countActor1 \r\n elif actor2_On == True and word != actor2:\r\n countActor2 += 1\r\n if len(dic[index]) == 2:\r\n dic[index].append(countActor2)\r\n elif len(dic[index]) == 3: \r\n dic[index][2] = countActor2 \r\n elif actor3_On == True:\r\n if word != actor3:\r\n countActor3 += 1\r\n if len(dic[index]) == 2:\r\n dic[index].append(countActor3)\r\n if len(dic[index]) == 3: \r\n dic[index][2] = countActor3\r\n\r\n return dic\r\n\r\n\r\n## Function that adds numInterruptions to the dictionary \r\n## --- { index: [actorName, timeStamp, numWords, \\\r\n# numInterruptions] }\r\ndef addNumInterruptionsByActor(words, dic): \r\n index = -1 \r\n countActor1 = 0\r\n countActor2 = 0\r\n countActor3 = 0\r\n\r\n for i in range(len(words) - 1):\r\n ## Capturing actors' name in the corpus, incrementing the index, adding the interruption count\r\n if words[i] == actor1:\r\n index += 1\r\n if len(dic[index]) == 3:\r\n dic[index].append(countActor1)\r\n elif len(dic[index]) == 4: \r\n dic[index][3] = countActor1\r\n \r\n elif words[i] == actor2:\r\n index += 1\r\n if len(dic[index]) == 3: \r\n dic[index].append(countActor2)\r\n elif len(dic[index]) == 4: \r\n dic[index][3] = countActor2\r\n \r\n elif words[i] == actor3:\r\n index += 1\r\n if len(dic[index]) == 3: \r\n dic[index].append(countActor3)\r\n elif len(dic[index]) == 4: \r\n dic[index][3] = countActor3\r\n \r\n ## If there is interruption (at the end of the sentence)\r\n elif words[i] == \"-\" or words[i][-1] == \"-\":\r\n ## Finding the following actorName (the interrupter)\r\n if words[i + 1] == actor1:\r\n countActor1 += 1\r\n dic[index + 1].append(countActor1)\r\n elif words[i + 1] == actor2:\r\n countActor2 += 1\r\n dic[index + 1].append(countActor2)\r\n elif words[i + 1] == actor3:\r\n countActor3 += 1\r\n dic[index + 1].append(countActor3)\r\n\r\n return dic\r\n\r\n\r\n## Function that adds numCrossTalks to the dictionary \r\n## --- { index: [actorName, timeStamp, numWords, \\\r\n# numInterruptions, numCrossTalks] }\r\ndef addNumCrossTalksByActor(raw, dic):\r\n rawSplit = raw.split()\r\n index = -1\r\n countActor1 = 0\r\n countActor2 = 0\r\n countActor3 = 0\r\n actor1_On = False\r\n actor2_On = False\r\n actor3_On = False\r\n\r\n for word in rawSplit:\r\n ## Capturing actors' name in the corpus, incrementing the index\r\n if word == actor1 + \":\":\r\n index += 1\r\n actor1_On = True\r\n actor2_On = False\r\n actor3_On = False\r\n if len(dic[index]) == 
4:\r\n dic[index].append(countActor1)\r\n elif word == actor2 + \":\":\r\n index += 1\r\n actor1_On = False\r\n actor2_On = True\r\n actor3_On = False\r\n if len(dic[index]) == 4:\r\n dic[index].append(countActor2)\r\n elif word == actor3 + \":\":\r\n index += 1\r\n actor1_On = False\r\n actor2_On = False\r\n actor3_On = True\r\n if len(dic[index]) == 4:\r\n dic[index].append(countActor3)\r\n \r\n ## Finding the following crossTalk instance:\r\n matched = re.match(r\"\\[crosstalk\", word)\r\n is_match = bool(matched)\r\n if is_match == True and actor1_On == True:\r\n countActor1 += 1\r\n dic[index][4] = countActor1\r\n elif is_match == True and actor2_On == True:\r\n countActor2 += 1\r\n dic[index][4] = countActor2\r\n elif is_match == True and actor3_On == True:\r\n countActor3 += 1\r\n dic[index][4] = countActor3\r\n\r\n return dic\r\n\r\n\r\n## Function that adds numNumberUse to the dictionary \r\n## --- { index: [actorName, timeStamp, numWords, \\\r\n# numInterruptions, numCrossTalks, numNumberUse] }\r\ndef addNumNumberUseByActor(words, dic):\r\n index = -1 \r\n countActor1 = 0\r\n countActor2 = 0\r\n countActor3 = 0\r\n actor1_On = False\r\n actor2_On = False\r\n actor3_On = False\r\n\r\n for word in words:\r\n ## Capturing actors' names in the corpus\r\n if word == actor1:\r\n index += 1\r\n actor1_On = True\r\n actor2_On = False\r\n actor3_On = False\r\n if len(dic[index]) == 5:\r\n dic[index].append(countActor1)\r\n elif word == actor2:\r\n index += 1\r\n actor1_On = False\r\n actor2_On = True\r\n actor3_On = False\r\n if len(dic[index]) == 5:\r\n dic[index].append(countActor2)\r\n elif word == actor3:\r\n index += 1\r\n actor1_On = False\r\n actor2_On = False\r\n actor3_On = True\r\n if len(dic[index]) == 5:\r\n dic[index].append(countActor3)\r\n \r\n elif actor1_On == True and word.isnumeric() == True:\r\n countActor1 += 1\r\n dic[index][5] = countActor1\r\n \r\n elif actor2_On == True and word.isnumeric() == True:\r\n countActor2 += 1\r\n dic[index][5] = countActor2\r\n \r\n elif actor3_On == True and word.isnumeric() == True:\r\n countActor3 += 1\r\n dic[index][5] = countActor3\r\n \r\n return dic\r\n\r\n\r\n## Function that adds numOfHealthcare words to the dictionary \r\n## --- { index: [actorName, timeStamp, numWords, \\\r\n# numInterruptions, numCrossTalks, numNumberUse, \\\r\n# numOfHealthcare] }\r\ndef numOfHealthcare(words, dic):\r\n healthCareList = [\"Obamacare\", \"Affordable Care\", \"insurance\", \"cost\"]\r\n index = -1 \r\n countActor1 = 0\r\n countActor2 = 0\r\n countActor3 = 0\r\n actor1_On = False\r\n actor2_On = False\r\n actor3_On = False\r\n\r\n for word in words:\r\n ## Capturing actors' names in the corpus\r\n if word == actor1:\r\n index += 1\r\n actor1_On = True\r\n actor2_On = False\r\n actor3_On = False\r\n if len(dic[index]) == 6:\r\n dic[index].append(countActor1)\r\n elif word == actor2:\r\n index += 1\r\n actor1_On = False\r\n actor2_On = True\r\n actor3_On = False\r\n if len(dic[index]) == 6:\r\n dic[index].append(countActor2)\r\n elif word == actor3:\r\n index += 1\r\n actor1_On = False\r\n actor2_On = False\r\n actor3_On = True\r\n if len(dic[index]) == 6:\r\n dic[index].append(countActor3)\r\n \r\n elif actor1_On == True:\r\n if word == healthCareList[0] or word == healthCareList[1] or \\\r\n word == healthCareList[2] or word == healthCareList[3]:\r\n countActor1 += 1\r\n dic[index][6] = countActor1\r\n \r\n elif actor2_On == True:\r\n if word == healthCareList[0] or word == healthCareList[1] or \\\r\n word == healthCareList[2] or word == 
healthCareList[3]:\r\n countActor2 += 1\r\n dic[index][6] = countActor2\r\n \r\n elif actor3_On == True:\r\n if word == healthCareList[0] or word == healthCareList[1] or \\\r\n word == healthCareList[2] or word == healthCareList[3]:\r\n countActor3 += 1\r\n dic[index][6] = countActor3\r\n\r\n return dic\r\n\r\n\r\n## Function that adds numOfCovid words to the dictionary \r\n## --- { index: [actorName, timeStamp, numWords, \\\r\n# numInterruptions, numCrossTalks, numNumberUse, \\\r\n# numOfHealthcare, numOfCovid] }\r\ndef numOfCovid(words, dic):\r\n covidList = [\"covid\", \"vaccine\", \"mask\", \"death\", \"dying\"]\r\n index = -1 \r\n countActor1 = 0\r\n countActor2 = 0\r\n countActor3 = 0\r\n actor1_On = False\r\n actor2_On = False\r\n actor3_On = False\r\n\r\n for word in words:\r\n ## Capturing actors' names in the corpus\r\n if word == actor1:\r\n index += 1\r\n actor1_On = True\r\n actor2_On = False\r\n actor3_On = False\r\n if len(dic[index]) == 7:\r\n dic[index].append(countActor1)\r\n elif word == actor2:\r\n index += 1\r\n actor1_On = False\r\n actor2_On = True\r\n actor3_On = False\r\n if len(dic[index]) == 7:\r\n dic[index].append(countActor2)\r\n elif word == actor3:\r\n index += 1\r\n actor1_On = False\r\n actor2_On = False\r\n actor3_On = True\r\n if len(dic[index]) == 7:\r\n dic[index].append(countActor3)\r\n \r\n elif actor1_On == True:\r\n if word == covidList[0] or word == covidList[1] or \\\r\n word == covidList[2] or word == covidList[3] or \\\r\n word == covidList[4]:\r\n countActor1 += 1\r\n dic[index][7] = countActor1\r\n \r\n elif actor2_On == True:\r\n if word == covidList[0] or word == covidList[1] or \\\r\n word == covidList[2] or word == covidList[3] or \\\r\n word == covidList[4]:\r\n countActor2 += 1\r\n dic[index][7] = countActor2\r\n \r\n elif actor3_On == True:\r\n if word == covidList[0] or word == covidList[1] or \\\r\n word == covidList[2] or word == covidList[3] or \\\r\n word == covidList[4]:\r\n countActor3 += 1\r\n dic[index][7] = countActor3\r\n\r\n return dic\r\n\r\n\r\n## Function that adds numOfEnvironment words to the dictionary \r\n## --- { index: [actorName, timeStamp, numWords, \\\r\n# numInterruptions, numCrossTalks, numNumberUse, \\\r\n# numOfHealthcare, numOfCovid, numOfEnv] }\r\ndef numOfEnvironment(words, dic):\r\n envList = [\"environment\", \"fire\", \"jobs\", \"energy\", \"green\"]\r\n index = -1 \r\n countActor1 = 0\r\n countActor2 = 0\r\n countActor3 = 0\r\n actor1_On = False\r\n actor2_On = False\r\n actor3_On = False\r\n\r\n for word in words:\r\n ## Capturing actors' names in the corpus\r\n if word == actor1:\r\n index += 1\r\n actor1_On = True\r\n actor2_On = False\r\n actor3_On = False\r\n if len(dic[index]) == 8:\r\n dic[index].append(countActor1)\r\n elif word == actor2:\r\n index += 1\r\n actor1_On = False\r\n actor2_On = True\r\n actor3_On = False\r\n if len(dic[index]) == 8:\r\n dic[index].append(countActor2)\r\n elif word == actor3:\r\n index += 1\r\n actor1_On = False\r\n actor2_On = False\r\n actor3_On = True\r\n if len(dic[index]) == 8:\r\n dic[index].append(countActor3)\r\n \r\n elif actor1_On == True:\r\n if word == envList[0] or word == envList[1] or \\\r\n word == envList[2] or word == envList[3] or \\\r\n word == envList[4]:\r\n countActor1 += 1\r\n dic[index][8] = countActor1\r\n \r\n elif actor2_On == True:\r\n if word == envList[0] or word == envList[1] or \\\r\n word == envList[2] or word == envList[3] or \\\r\n word == envList[4]:\r\n countActor2 += 1\r\n dic[index][8] = countActor2\r\n \r\n elif actor3_On 
== True:\r\n if word == envList[0] or word == envList[1] or \\\r\n word == envList[2] or word == envList[3] or \\\r\n word == envList[4]:\r\n countActor3 += 1\r\n dic[index][8] = countActor3\r\n\r\n return dic\r\n\r\n\r\n## Function that adds numOfElection words to the dictionary \r\n## --- { index: [actorName, timeStamp, numWords, \\\r\n# numInterruptions, numCrossTalks, numNumberUse, \\\r\n# numOfHealthcare, numOfCovid, numOfEnv, numOfElection] }\r\ndef numOfElection(words, dic):\r\n electionList = [\"fraud\", \"mail\", \"rigged\", \"transition\"]\r\n index = -1 \r\n countActor1 = 0\r\n countActor2 = 0\r\n countActor3 = 0\r\n actor1_On = False\r\n actor2_On = False\r\n actor3_On = False\r\n\r\n for word in words:\r\n ## Capturing actors' names in the corpus\r\n if word == actor1:\r\n index += 1\r\n actor1_On = True\r\n actor2_On = False\r\n actor3_On = False\r\n if len(dic[index]) == 9:\r\n dic[index].append(countActor1)\r\n elif word == actor2:\r\n index += 1\r\n actor1_On = False\r\n actor2_On = True\r\n actor3_On = False\r\n if len(dic[index]) == 9:\r\n dic[index].append(countActor2)\r\n elif word == actor3:\r\n index += 1\r\n actor1_On = False\r\n actor2_On = False\r\n actor3_On = True\r\n if len(dic[index]) == 9:\r\n dic[index].append(countActor3)\r\n \r\n elif actor1_On == True:\r\n if word == electionList[0] or word == electionList[1] or \\\r\n word == electionList[2] or word == electionList[3]:\r\n countActor1 += 1\r\n dic[index][9] = countActor1\r\n \r\n elif actor2_On == True:\r\n if word == electionList[0] or word == electionList[1] or \\\r\n word == electionList[2] or word == electionList[3]:\r\n countActor2 += 1\r\n dic[index][9] = countActor2\r\n \r\n elif actor3_On == True:\r\n if word == electionList[0] or word == electionList[1] or \\\r\n word == electionList[2] or word == electionList[3]:\r\n countActor3 += 1\r\n dic[index][9] = countActor3\r\n\r\n return dic\r\n\r\n\r\n## Function that adds numOfEconomy words to the dictionary \r\n## --- { index: [actorName, timeStamp, numWords, \\\r\n# numInterruptions, numCrossTalks, numNumberUse, \\\r\n# numOfHealthcare, numOfCovid, numOfEnv, numOfElection, \\\r\n# numOfEcononmy] }\r\ndef numOfEconomy(words, dic):\r\n econList = [\"jobs\", \"unemployment\", \"taxes\", \"manufacturing\", \"inequality\"]\r\n index = -1 \r\n countActor1 = 0\r\n countActor2 = 0\r\n countActor3 = 0\r\n actor1_On = False\r\n actor2_On = False\r\n actor3_On = False\r\n\r\n for word in words:\r\n ## Capturing actors' names in the corpus\r\n if word == actor1:\r\n index += 1\r\n actor1_On = True\r\n actor2_On = False\r\n actor3_On = False\r\n if len(dic[index]) == 10:\r\n dic[index].append(countActor1)\r\n elif word == actor2:\r\n index += 1\r\n actor1_On = False\r\n actor2_On = True\r\n actor3_On = False\r\n if len(dic[index]) == 10:\r\n dic[index].append(countActor2)\r\n elif word == actor3:\r\n index += 1\r\n actor1_On = False\r\n actor2_On = False\r\n actor3_On = True\r\n if len(dic[index]) == 10:\r\n dic[index].append(countActor3)\r\n \r\n elif actor1_On == True:\r\n if word == econList[0] or word == econList[1] or \\\r\n word == econList[2] or word == econList[3] or \\\r\n word == econList[4]:\r\n countActor1 += 1\r\n dic[index][10] = countActor1\r\n \r\n elif actor2_On == True:\r\n if word == econList[0] or word == econList[1] or \\\r\n word == econList[2] or word == econList[3] or \\\r\n word == econList[4]:\r\n countActor2 += 1\r\n dic[index][10] = countActor2\r\n \r\n elif actor3_On == True:\r\n if word == econList[0] or word == econList[1] or 
\\\r\n word == econList[2] or word == econList[3] or \\\r\n word == econList[4]:\r\n countActor3 += 1\r\n dic[index][10] = countActor3\r\n\r\n return dic\r\n\r\n\r\n## Function that adds numOfRace words to the dictionary \r\n## --- { index: [actorName, timeStamp, numWords, \\\r\n# numInterruptions, numCrossTalks, numNumberUse, \\\r\n# numOfHealthcare, numOfCovid, numOfEnv, numOfElection, \\\r\n# numOfEcononmy, numOfRace] }\r\ndef numOfRace(words, dic):\r\n raceList = [\"violence\", \"law\", \"order\", \"peace\"]\r\n index = -1 \r\n countActor1 = 0\r\n countActor2 = 0\r\n countActor3 = 0\r\n actor1_On = False\r\n actor2_On = False\r\n actor3_On = False\r\n\r\n for word in words:\r\n ## Capturing actors' names in the corpus\r\n if word == actor1:\r\n index += 1\r\n actor1_On = True\r\n actor2_On = False\r\n actor3_On = False\r\n if len(dic[index]) == 11:\r\n dic[index].append(countActor1)\r\n elif word == actor2:\r\n index += 1\r\n actor1_On = False\r\n actor2_On = True\r\n actor3_On = False\r\n if len(dic[index]) == 11:\r\n dic[index].append(countActor2)\r\n elif word == actor3:\r\n index += 1\r\n actor1_On = False\r\n actor2_On = False\r\n actor3_On = True\r\n if len(dic[index]) == 11:\r\n dic[index].append(countActor3)\r\n \r\n elif actor1_On == True:\r\n if word == raceList[0] or word == raceList[1] or \\\r\n word == raceList[2] or word == raceList[3]:\r\n countActor1 += 1\r\n dic[index][11] = countActor1\r\n \r\n elif actor2_On == True:\r\n if word == raceList[0] or word == raceList[1] or \\\r\n word == raceList[2] or word == raceList[3]:\r\n countActor2 += 1\r\n dic[index][11] = countActor2\r\n \r\n elif actor3_On == True:\r\n if word == raceList[0] or word == raceList[1] or \\\r\n word == raceList[2] or word == raceList[3]:\r\n countActor3 += 1\r\n dic[index][11] = countActor3\r\n\r\n return dic\r\n\r\n\r\n## Converts the dictionary for dataframe, adds a column of indeces (for slider)\r\ndef dictConverter(dic):\r\n dicNew = {}\r\n indexList = []\r\n actorNameList = []\r\n timeStampList = []\r\n numWordsList = []\r\n numInterruptionsList = []\r\n numCrosstalksList = []\r\n numNumberUseList = []\r\n numOfHealthcareList = []\r\n numOfCovidList = []\r\n numOfEnvList = []\r\n numOfElectionList = []\r\n numOfEconList = []\r\n numOfRaceList = []\r\n\r\n for i in range(len(dic)):\r\n index = i\r\n indexList.append(index)\r\n dicNew[\"Order of Speech\"] = indexList\r\n\r\n actorName = dic[i][0]\r\n if actorName == actor1:\r\n actorName = \"Chris Wallace\"\r\n actorNameList.append(actorName)\r\n dicNew[\"Actor\"] = actorNameList\r\n elif actorName == actor2:\r\n actorName = \"Joe Biden\"\r\n actorNameList.append(actorName)\r\n dicNew[\"Actor\"] = actorNameList\r\n elif actorName == actor3:\r\n actorName = \"Donald J. 
Trump\"\r\n actorNameList.append(actorName)\r\n dicNew[\"Actor\"] = actorNameList\r\n\r\n timeStamp = dic[i][1]\r\n timeStampList.append(timeStamp)\r\n dicNew[\"Time\"] = timeStampList \r\n\r\n numWordsByActor = dic[i][2]\r\n numWordsList.append(numWordsByActor)\r\n dicNew[\"Number of Words\"] = numWordsList \r\n\r\n numInterruptionsByActor = dic[i][3]\r\n numInterruptionsList.append(numInterruptionsByActor)\r\n dicNew[\"Number of Interruptions\"] = numInterruptionsList \r\n\r\n numCrosstalksByActor = dic[i][4]\r\n numCrosstalksList.append(numCrosstalksByActor)\r\n dicNew[\"Number of Crosstalks\"] = numCrosstalksList \r\n\r\n numNumberUseByActor = dic[i][5]\r\n numNumberUseList.append(numNumberUseByActor)\r\n dicNew[\"Number of Number Use\"] = numNumberUseList \r\n\r\n numOfHealthCare = dic[i][6]\r\n numOfHealthcareList.append(numOfHealthCare)\r\n dicNew[\"Number of Healthcare\"] = numOfHealthcareList\r\n\r\n numOfCovid = dic[i][7]\r\n numOfCovidList.append(numOfCovid)\r\n dicNew[\"Number of Covid\"] = numOfCovidList \r\n\r\n numOfEnv = dic[i][8]\r\n numOfEnvList.append(numOfEnv)\r\n dicNew[\"Number of Environment\"] = numOfEnvList \r\n\r\n numOfElection = dic[i][9]\r\n numOfElectionList.append(numOfElection)\r\n dicNew[\"Number of Election\"] = numOfElectionList \r\n\r\n numOfEcon = dic[i][10]\r\n numOfEconList.append(numOfEcon)\r\n dicNew[\"Number of Economy\"] = numOfEconList \r\n\r\n numOfRace = dic[i][11]\r\n numOfRaceList.append(numOfRace)\r\n dicNew[\"Number of Race\"] = numOfRaceList \r\n\r\n return dicNew\r\n\r\n\r\n\r\n\r\n##################################################################\r\n## Calling the functions that create the dictionary \r\n##################################################################\r\n\r\n## Returns list of words (wordsNoPunctNoStamp)\r\nwordsNoPunctNoStamp = removeStamps(raw) \r\n# print(wordsNoPunctNoStamp) \r\n# print(\"BREAK\")\r\n\r\n## Returns actorInfo = {index: [actorName], ...}\r\nactorInfo = addActors(wordsNoPunctNoStamp) \r\n# print(actorInfo) \r\n# print(\"BREAK\")\r\n\r\n## Returns actorInfo = {index: [actorName, timeStamp], ...}\r\ndic_V1 = addTimeStamps(raw, actorInfo) \r\n# print(dic_V1) \r\n# print(\"BREAK\")\r\n\r\n## Returns actorInfo = {index: [actorName, timeStamp, numWords], ...}\r\ndic_V2 = addNumWordsByActor(wordsNoPunctNoStamp, dic_V1)\r\n# print(dic_V2)\r\n# print(\"BREAK\")\r\n\r\n## Returns actorInfo = {index: [actorName, timeStamp, numWords, \\\r\n# numInterruptions], ...}\r\ndic_V3 = addNumInterruptionsByActor(wordsWithPunct, dic_V2)\r\n# print(dic_V3)\r\n# print(\"BREAK\")\r\n\r\n## Returns actorInfo = {index: [actorName, timeStamp, numWords, \\\r\n# numInterruptions, numCrossTalks], ...}\r\ndic_V4 = addNumCrossTalksByActor(raw, dic_V3)\r\n# print(dic_V4)\r\n# print(\"BREAK\")\r\n\r\n## Returns actorInfo = {index: [actorName, timeStamp, numWords, \\\r\n# numInterruptions, numCrossTalks, numNumberUse], ...}\r\ndic_V5 = addNumNumberUseByActor(wordsNoPunctNoStamp, dic_V4)\r\n# print(dic_V5)\r\n# print(\"BREAK\")\r\n\r\n## Returns actorInfo = {index: [actorName, timeStamp, numWords, \\\r\n# numInterruptions, numCrossTalks, numNumberUse, numOfHealthcare], ...}\r\ndic_V6 = numOfHealthcare(wordsNoPunctNoStamp, dic_V5)\r\n# print(dic_V6)\r\n# print(\"BREAK\")\r\n\r\n## Returns actorInfo = {index: [actorName, timeStamp, numWords, \\\r\n# numInterruptions, numCrossTalks, numNumberUse, numOfHealthcare, \\\r\n# numOfCovid], ...}\r\ndic_V7 = numOfCovid(wordsNoPunctNoStamp, dic_V6)\r\n# print(dic_V7)\r\n# print(\"BREAK\")\r\n\r\n## 
Returns actorInfo = {index: [actorName, timeStamp, numWords, \\\r\n# numInterruptions, numCrossTalks, numNumberUse, numOfHealthcare, \\\r\n# numOfCovid, numOfEnvironment], ...}\r\ndic_V8 = numOfEnvironment(wordsNoPunctNoStamp, dic_V7)\r\n# print(dic_V8)\r\n# print(\"BREAK\")\r\n\r\n## Returns actorInfo = {index: [actorName, timeStamp, numWords, \\\r\n# numInterruptions, numCrossTalks, numNumberUse, numOfHealthcare, \\\r\n# numOfCovid, numOfEnvironment, numOfElection], ...}\r\ndic_V9 = numOfElection(wordsNoPunctNoStamp, dic_V8)\r\n# print(dic_V9)\r\n# print(\"BREAK\")\r\n\r\n## Returns actorInfo = {index: [actorName, timeStamp, numWords, \\\r\n# numInterruptions, numCrossTalks, numNumberUse, numOfHealthcare, \\\r\n# numOfCovid, numOfEnvironment, numOfElection, numOfEconomy], ...}\r\ndic_V10 = numOfEconomy(wordsNoPunctNoStamp, dic_V9)\r\n# print(dic_V10)\r\n# print(\"BREAK\")\r\n\r\n## Returns actorInfo = {index: [actorName, timeStamp, numWords, \\\r\n# numInterruptions, numCrossTalks, numNumberUse, numOfHealthcare, \\\r\n# numOfCovid, numOfEnvironment, numOfElection, numOfEconomy, numOfRace], ...}\r\ndic_V11 = numOfRace(wordsNoPunctNoStamp, dic_V10)\r\n# print(dic_V11)\r\n# print(\"BREAK\")\r\n\r\n\r\n\r\n\r\n##################################################################\r\n## Creating the dataframe 1 (from the dictionary)\r\n##################################################################\r\n\r\n## dictConverter also adds the column of indeces (important!)\r\ndic_Final = dictConverter(dic_V11)\r\n# print(dic_Final)\r\n# print(\"BREAK\")\r\n\r\ndf1 = pd.DataFrame(dic_Final)\r\nprint(df1)\r\n\r\n\r\n\r\n\r\n##################################################################\r\n## Text processing and extracting data: Part 2\r\n##################################################################\r\n\r\ndef readMeIn(file):\r\n with open(file, 'r', encoding = 'utf8', \r\n errors = 'ignore') as readMe:\r\n entry = readMe.readlines()\r\n entries = [e.strip().split('\\n\\n') for e in entry \r\n if e.strip().split('\\n')!=['']\r\n and e.strip().split('\\n') != [\"Part 2\"]]\r\n \r\n return entries\r\n\r\n\r\n## Function that makes dictionaries of speaker: statement and speaker: word: word count\r\ndef makeDictionaries(entries):\r\n wordCountDict = {\"Biden\": {}, \"Trump\": {}, \"Chris\": {}}\r\n presDi = {}\r\n\r\n for e in (range(0, len(entries) - 2, 2)):\r\n keyL = entries[e]\r\n key = ' '.join([word for word in keyL]) \r\n statement = entries[e + 1]\r\n listOfWordsStr = ''.join([word for word in statement]);\r\n listOfWords = listOfWordsStr.split(\" \")\r\n if (\"Biden\" in key): \r\n for word in listOfWords : \r\n wordCount = wordCountDict.get(\"Biden\").get(word, 0) + 1 \r\n wordCountDict[\"Biden\"][word] = wordCount\r\n elif (\"Trump\" in key):\r\n for word in listOfWords: \r\n wordCount = wordCountDict.get(\"Trump\").get(word, 0) + 1\r\n wordCountDict[\"Trump\"][word] = wordCount\r\n elif (\"Chris\" in key):\r\n for word in listOfWords : \r\n wordCount = wordCountDict.get(\"Chris\").get(word, 0) + 1\r\n wordCountDict[\"Chris\"][word] = wordCount \r\n presDi[key] = statement\r\n \r\n return presDi, wordCountDict\r\n\r\n\r\ndef makeDataFrames(presDi, wordCountDict):\r\n dfDict = {}\r\n for speaker in [\"Biden\", \"Trump\", \"Chris\"]:\r\n speakerDict = wordCountDict.get(speaker)\r\n dfDict[speaker] = pd.DataFrame.from_dict(speakerDict, \r\n orient = 'index', columns = [\"wordCounted\"])\r\n dfDict[speaker].index.name = \"word\"\r\n # merge data frames, by word # \r\n dfWords = 
dfDict[\"Biden\"].merge(dfDict[\"Trump\"], \r\n how = \"outer\", on = \"word\", \r\n suffixes = (\"Biden\", \"Trump\"))\r\n dfWords = dfWords.merge(dfDict[\"Chris\"], how = \"outer\", \r\n on = \"word\", suffixes = (\"\", \"Chris\"))\r\n dfPres = pd.DataFrame.from_dict(presDi, orient = 'index', \r\n columns = [\"statement\"])\r\n dfPres.index.name = \"speaker\"\r\n dfPres[\"tempDup\"] = dfPres.index\r\n dfPres[[\"name\", \"time\"]] = dfPres[\"tempDup\"].str.split(\": \", expand = True)\r\n \r\n return dfPres, dfWords\r\n\r\n\r\ndef findThemes(masterList, dfPres):\r\n for theme, words in masterList.items(): \r\n for word in words:\r\n dfPres[word] = dfPres.statement.str.contains(word, case = False)\r\n #START WORD COUNT SUMS#\r\n themeWordCount = dfPres.groupby([\"name\"]).\\\r\n agg({'Obamacare': 'sum'}).reset_index()\r\n for theme, words in masterList.items():\r\n for word in words:\r\n if word == 'Obamacare': pass \r\n else:\r\n countTemp = dfPres.groupby([\"name\"])\\\r\n .agg({word: 'sum'}).reset_index()\r\n themeWordCount = themeWordCount.merge(countTemp, \r\n how = \"outer\", on = \"name\")\r\n \r\n return dfPres, themeWordCount\r\n\r\n\r\n## Function that makes dictionaries of speaker: statement and speaker: word: word count\r\n## wordCountDict = {\"Biden\": {}, \"Trump\": {}, \"Chris\": {}}\r\ndef youHeIDict(trans):\r\n punct = set(string.punctuation)\r\n youMe = [\"you\", \"he\", \"i\"]\r\n youMeDict = {\"Joe Biden\": {}, \"Donald J. Trump\": {}}\r\n \r\n for e in (range(0, len(trans) - 2, 2)):\r\n keyL = trans[e]\r\n key = ' '.join([word for word in keyL]) \r\n statement = trans[e + 1]\r\n listOfWordsStr = ''.join([word for word in statement if \r\n word not in punct])\r\n listOfWords = listOfWordsStr.split(\" \")\r\n if (\"Biden\" in key): \r\n for word in listOfWords : \r\n word = str.lower(word)\r\n if word in youMe:\r\n wordCount = youMeDict.get(\"Joe Biden\").get(word, 0) + 1 \r\n youMeDict[\"Joe Biden\"][word] = wordCount\r\n elif (\"Trump\" in key):\r\n for word in listOfWords: \r\n word = str.lower(word)\r\n if word in youMe:\r\n wordCount = youMeDict.get(\"Donald J. Trump\").get(word, 0) + 1\r\n youMeDict[\"Donald J. Trump\"][word] = wordCount\r\n else: pass\r\n \r\n dfYouMe = pd.DataFrame.from_dict(youMeDict)\r\n dfYouMe.index.name = \"youHeI\"\r\n dfYouMe[\"youHeI\"] = dfYouMe.index\r\n \r\n return dfYouMe \r\n\r\n\r\ndef themeCount(themeWordCount):\r\n themeWordCount = themeWordCount.set_index('name').T \r\n themeWordCount['themeWord'] = themeWordCount.index\r\n for theme, listWords in masterList.items():\r\n for index, row in themeWordCount.iterrows(): \r\n if row.themeWord in listWords:\r\n themeWordCount.loc[index, \"broadTheme\"] = theme\r\n else : \r\n pass\r\n \r\n return themeWordCount\r\n\r\n\r\ndef broadCount(themeWordCount):\r\n broadThemeData = themeWordCount.groupby([\"broadTheme\"])\\\r\n .agg({'President Donald J. 
Trump': 'sum', \r\n          'Vice President Joe Biden': 'sum'}).reset_index()\r\n    \r\n    return broadThemeData\r\n\r\n\r\n## Thematic Groupings\r\nhealthCareList = [\"Obamacare\", \"Affordable Care\", \"Insurance\", \"Cost\"]\r\ncovidList = [\"covid\", \"vaccine\", \"mask\", \"death\", \"dying\"]\r\nenviorList = [\"environment\", \"fire\", \"energy\", \"green\"]\r\nelectionList = [\"fraud\", \"mail\", \"rigged\", \"transition\", \"vot\"]\r\neconomyList = [\"job\", \"unemployment\", \"taxes\", \"manufacturing\"]\r\nraceList = [\"violence\", \"law\", \"order\", \"peace\", \"fund\"]\r\nmasterList ={\"healthcare\": healthCareList, \r\n             \"covid\": covidList, \r\n             \"environment\": enviorList, \r\n             \"election\": electionList, \r\n             \"economy\": economyList, \r\n             \"race\": raceList}\r\n\r\n\r\n\r\n\r\n##################################################################\r\n## Creating the dataframe 2 \r\n##################################################################\r\n\r\ntrans = readMeIn(\"presidentialTranscript.txt\")\r\npresDi, wordCountDict = makeDictionaries(trans)\r\ndfYouMe = youHeIDict(trans)\r\ndfPres, dfWords = makeDataFrames(presDi, wordCountDict)\r\ndfPres, themeWordCount = findThemes(masterList, dfPres)\r\nthemeWordCount = themeCount(themeWordCount)\r\nbroadThemeData = broadCount(themeWordCount)\r\n\r\nthemeWordCount = themeWordCount.melt(id_vars = [\"themeWord\", \"broadTheme\"], \r\n    var_name = \"name\", value_name = \"wordCount\")\r\n\r\n## Function that changes the name strings of actors \r\ndef renameActors(themeWordCount):\r\n    themeWordCount = themeWordCount[themeWordCount.name != \"Chris Wallace\"]\r\n    themeWordCount = themeWordCount[themeWordCount.name != \"Chris Wallace:\"]\r\n    for index, row in themeWordCount.iterrows():\r\n        if row['name'] == \"President Donald J. Trump\":\r\n            themeWordCount.loc[index, 'name'] = \"Donald J. 
Trump\"\r\n elif row['name'] == \"Vice President Joe Biden\":\r\n themeWordCount.loc[index, 'name'] = \"Joe Biden\"\r\n \r\n return themeWordCount\r\n\r\nthemeWordCount = renameActors(themeWordCount)\r\n# print(themeWordCount)\r\n# print(\"BREAK\")\r\n\r\ndfYouMe = dfYouMe.melt(id_vars = [\"youHeI\"], var_name = \"name\", value_name = \"wordCountHeI\")\r\n\r\n\r\n\r\n\r\n##################################################################\r\n## Visualization \r\n##################################################################\r\n\r\n## Title\r\nst.write(\"## Tracing the Presidential Debate 2020\")\r\nst.write(\"###### Choose 'Show app in wide mode' in the 'Settings' on the top right corner.\")\r\nst.write(\"# \\n\")\r\n\r\n## Youtube video\r\nst.video('https://www.youtube.com/watch?v=ofkPfm3tFxo')\r\nst.write(\"\\n\")\r\n\r\n## Data table\r\nst.write(df1)\r\nst.write(\"\\n\")\r\n\r\n## Creating a filter slider\r\norder = st.sidebar.slider(\"Order of Speech\", 0, 789) ## min and max values\r\n\r\n\r\n## Bar graphs\r\n## Drawing ActorTime (bar) minimap\r\nviz0_ActorTimeSmall = alt.Chart(df1).mark_bar().transform_filter(\r\n alt.datum[\"Order of Speech\"] >= order\r\n).encode(\r\n alt.X(\"Time\", sort = None, axis = None, title = None),\r\n alt.Y(\"Actor\", sort = None, axis = None, title = None),\r\n alt.Color(\"Actor\", legend = None),\r\n).properties(\r\n width = 300\r\n )\r\n\r\n## Drawing ActorTime (bar) (change via minimap and slider) \r\nviz0_ActorTime = alt.Chart(df1).mark_bar().transform_filter(\r\n alt.datum[\"Order of Speech\"] >= order\r\n).encode(\r\n alt.X(\"Time\", sort = None, axis = None, title = None),\r\n alt.Y(\"Actor\", sort = None),\r\n alt.Color(\"Actor\"),\r\n)\r\n\r\n## Drawing NumWordsTime (bar) (change via minimap and slider)\r\nviz0_NumWordsTime = alt.Chart(df1).mark_bar().transform_filter(\r\n alt.datum[\"Order of Speech\"] >= order\r\n).encode(\r\n alt.X(\"Time\", sort = None),\r\n alt.Y(\"Number of Words\", sort = None),\r\n alt.Color(\"Actor\"),\r\n tooltip = [alt.Tooltip('Actor'), alt.Tooltip('Number of Words')]\r\n)\r\n\r\n## Drawing NumInterruptionTime (bar) (change via minimap and slider) \r\nviz0_NumInterruptionsTime = alt.Chart(df1).mark_bar().transform_filter(\r\n alt.datum[\"Order of Speech\"] >= order\r\n).encode(\r\n alt.X(\"Time\", sort = None),\r\n alt.Y(\"Number of Interruptions\", sort = None),\r\n alt.Color(\"Actor\"),\r\n tooltip = [alt.Tooltip('Actor'), alt.Tooltip('Number of Interruptions')]\r\n)\r\n\r\n## Drawing NumCrosstalksTime (bar) (change via minimap and slider) \r\nviz0_NumCrosstalksTime = alt.Chart(df1).mark_bar().transform_filter(\r\n alt.datum[\"Order of Speech\"] >= order\r\n).encode(\r\n alt.X(\"Time\", sort = None),\r\n alt.Y(\"Number of Crosstalks\", sort = None),\r\n alt.Color(\"Actor\"),\r\n tooltip = [alt.Tooltip('Actor'), alt.Tooltip('Number of Crosstalks')]\r\n)\r\n\r\n## Drawing NumHealthcareTime (bar) (change via minimap and slider)\r\nviz0_NumHealthcareTime = alt.Chart(df1).mark_bar().transform_filter(\r\n alt.datum[\"Order of Speech\"] >= order\r\n).encode(\r\n alt.X(\"Time\", sort = None),\r\n alt.Y(\"Number of Healthcare\", sort = None),\r\n alt.Color(\"Actor\"),\r\n tooltip = [alt.Tooltip('Actor'), alt.Tooltip('Number of Healthcare')]\r\n)\r\n\r\n## Drawing NumCovidTime (bar) (change via minimap and slider)\r\nviz0_NumCovidTime = alt.Chart(df1).mark_bar().transform_filter(\r\n alt.datum[\"Order of Speech\"] >= order\r\n).encode(\r\n alt.X(\"Time\", sort = None),\r\n alt.Y(\"Number of Covid\", sort = None),\r\n 
alt.Color(\"Actor\"),\r\n tooltip = [alt.Tooltip('Actor'), alt.Tooltip('Number of Covid')]\r\n)\r\n\r\n## Drawing NumEnvTime (bar) (change via minimap and slider)\r\nviz0_NumEnvTime = alt.Chart(df1).mark_bar().transform_filter(\r\n alt.datum[\"Order of Speech\"] >= order\r\n).encode(\r\n alt.X(\"Time\", sort = None),\r\n alt.Y(\"Number of Environment\", sort = None),\r\n alt.Color(\"Actor\"),\r\n tooltip = [alt.Tooltip('Actor'), alt.Tooltip('Number of Environment')]\r\n)\r\n\r\n## Drawing NumElectionTime (bar) (change via minimap and slider)\r\nviz0_NumElectionTime = alt.Chart(df1).mark_bar().transform_filter(\r\n alt.datum[\"Order of Speech\"] >= order\r\n).encode(\r\n alt.X(\"Time\", sort = None),\r\n alt.Y(\"Number of Election\", sort = None),\r\n alt.Color(\"Actor\"),\r\n tooltip = [alt.Tooltip('Actor'), alt.Tooltip('Number of Election')]\r\n)\r\n\r\n## Drawing NumEconomyTime (bar) (change via minimap and slider)\r\nviz0_NumEconomyTime = alt.Chart(df1).mark_bar().transform_filter(\r\n alt.datum[\"Order of Speech\"] >= order\r\n).encode(\r\n alt.X(\"Time\", sort = None),\r\n alt.Y(\"Number of Economy\", sort = None),\r\n alt.Color(\"Actor\"),\r\n tooltip = [alt.Tooltip('Actor'), alt.Tooltip('Number of Economy')]\r\n)\r\n\r\n## Drawing NumRaceTime (bar) (change via minimap and slider)\r\nviz0_NumRaceTime = alt.Chart(df1).mark_bar().transform_filter(\r\n alt.datum[\"Order of Speech\"] >= order\r\n).encode(\r\n alt.X(\"Time\", sort = None),\r\n alt.Y(\"Number of Race\", sort = None),\r\n alt.Color(\"Actor\"),\r\n tooltip = [alt.Tooltip('Actor'), alt.Tooltip('Number of Race')]\r\n)\r\n\r\n\r\n## Line graphs\r\n## Drawing NumWordsTime (line graph) (zoom and slider)\r\nviz1_NumWordsTime = alt.Chart(df1).mark_line().transform_filter(\r\n alt.datum[\"Order of Speech\"] >= order\r\n).encode(\r\n alt.X(\"Order of Speech\", sort = None),\r\n alt.Y(\"Number of Words\", sort = None),\r\n alt.Color(\"Actor\"),\r\n tooltip = [alt.Tooltip('Number of Words')]\r\n)\r\n\r\n## Drawing NumInterruptionsTime (line graph) (zoom and slider)\r\nviz1_NumInterruptionsTime = alt.Chart(df1).mark_line().transform_filter(\r\n alt.datum[\"Order of Speech\"] >= order\r\n).encode(\r\n alt.X(\"Order of Speech\", sort = None),\r\n alt.Y(\"Number of Interruptions\", sort = None),\r\n alt.Color(\"Actor\"),\r\n tooltip = [alt.Tooltip('Number of Interruptions')]\r\n)\r\n\r\n## Drawing NumCrosstalksTime (line graph) (zoom and slider)\r\nviz1_NumCrosstalksTime = alt.Chart(df1).mark_line().transform_filter(\r\n alt.datum[\"Order of Speech\"] >= order\r\n).encode(\r\n alt.X(\"Order of Speech\", sort = None),\r\n alt.Y(\"Number of Crosstalks\", sort = None),\r\n alt.Color(\"Actor\"),\r\n tooltip = [alt.Tooltip('Number of Crosstalks')]\r\n)\r\n\r\n## Drawing NumHealthcareTime (line graph) (zoom and slider)\r\nviz1_NumHealthcareTime = alt.Chart(df1).mark_line().transform_filter(\r\n alt.datum[\"Order of Speech\"] >= order\r\n).encode(\r\n alt.X(\"Order of Speech\", sort = None),\r\n alt.Y(\"Number of Healthcare\", sort = None),\r\n alt.Color(\"Actor\"),\r\n tooltip = [alt.Tooltip('Number of Healthcare')]\r\n)\r\n\r\n## Drawing NumCovidTime (line graph) (zoom and slider)\r\nviz1_NumCovidTime = alt.Chart(df1).mark_line().transform_filter(\r\n alt.datum[\"Order of Speech\"] >= order\r\n).encode(\r\n alt.X(\"Order of Speech\", sort = None),\r\n alt.Y(\"Number of Covid\", sort = None),\r\n alt.Color(\"Actor\"),\r\n tooltip = [alt.Tooltip('Number of Covid')]\r\n)\r\n\r\n## Drawing NumEnvTime (line graph) (zoom and 
slider)\r\nviz1_NumEnvTime = alt.Chart(df1).mark_line().transform_filter(\r\n alt.datum[\"Order of Speech\"] >= order\r\n).encode(\r\n alt.X(\"Order of Speech\", sort = None),\r\n alt.Y(\"Number of Environment\", sort = None),\r\n alt.Color(\"Actor\"),\r\n tooltip = [alt.Tooltip('Number of Environment')]\r\n)\r\n\r\n## Drawing NumElectionTime (line graph) (zoom and slider)\r\nviz1_NumElectionTime = alt.Chart(df1).mark_line().transform_filter(\r\n alt.datum[\"Order of Speech\"] >= order\r\n).encode(\r\n alt.X(\"Order of Speech\", sort = None),\r\n alt.Y(\"Number of Election\", sort = None),\r\n alt.Color(\"Actor\"),\r\n tooltip = [alt.Tooltip('Number of Election')]\r\n)\r\n\r\n## Drawing NumEconomyTime (line graph) (zoom and slider)\r\nviz1_NumEconomyTime = alt.Chart(df1).mark_line().transform_filter(\r\n alt.datum[\"Order of Speech\"] >= order\r\n).encode(\r\n alt.X(\"Order of Speech\", sort = None),\r\n alt.Y(\"Number of Economy\", sort = None),\r\n alt.Color(\"Actor\"),\r\n tooltip = [alt.Tooltip('Number of Economy')]\r\n)\r\n\r\n## Drawing NumRaceTime (line graph) (zoom and slider)\r\nviz1_NumRaceTime = alt.Chart(df1).mark_line().transform_filter(\r\n alt.datum[\"Order of Speech\"] >= order\r\n).encode(\r\n alt.X(\"Order of Speech\", sort = None),\r\n alt.Y(\"Number of Race\", sort = None),\r\n alt.Color(\"Actor\"),\r\n tooltip = [alt.Tooltip('Number of Race')]\r\n)\r\n\r\n\r\n## Writing the graphs and interface\r\n\r\n## Mouseover interaction to highlight \r\npicked = alt.selection_single(on = \"mouseover\")\r\n## Brush interaction to draw partially\r\nbrush = alt.selection_interval(encodings = [\"x\"])\r\n## Scale interaction\r\nscales = alt.selection_interval(bind = \"scales\", encodings = [\"x\"])\r\n\r\nst.write(\"### Through the debate\")\r\nst.write(\"###### Create a window on the rectangle with your mouse. Move it to travel through the debate.\")\r\n## Number of words\r\nst.write(\"# \\n\")\r\nst.write(viz0_ActorTimeSmall.add_selection(brush) & viz0_ActorTime.transform_filter(brush) \\\r\n & viz0_NumWordsTime.transform_filter(brush)) \r\n\r\nst.write(\"###### Keep your mouse on the chart to zoom in / out and travel through the debate.\")\r\nst.write(\"# \\n\")\r\nst.write(viz1_NumWordsTime.add_selection(scales))\r\n\r\n## Number of interruptions\r\nst.write(\"# \\n\")\r\nst.write(\"###### Create a window on the rectangle with your mouse. Move it to travel through the debate.\")\r\nst.write(\"# \\n\")\r\nst.write(viz0_ActorTimeSmall.add_selection(brush) & viz0_ActorTime.transform_filter(brush) \\\r\n & viz0_NumInterruptionsTime.transform_filter(brush))\r\n\r\nst.write(\"###### Keep your mouse on the chart to zoom in / out and travel through the debate.\")\r\nst.write(\"# \\n\")\r\nst.write(viz1_NumInterruptionsTime.add_selection(scales))\r\n\r\n## Number of interruptions\r\nst.write(\"# \\n\")\r\nst.write(\"###### Create a window on the rectangle with your mouse. Move it to travel through the debate.\")\r\nst.write(\"# \\n\")\r\nst.write(viz0_ActorTimeSmall.add_selection(brush) & viz0_ActorTime.transform_filter(brush) \\\r\n & viz0_NumCrosstalksTime.transform_filter(brush))\r\n\r\nst.write(\"###### Keep your mouse on the chart to zoom in / out and travel through the debate.\")\r\nst.write(\"# \\n\")\r\nst.write(viz1_NumCrosstalksTime.add_selection(scales))\r\n\r\nst.write(\"# \\n\")\r\nst.write(\"### Weights within the debate\")\r\nst.write(\"###### Create a window on the rectangle with your mouse. 
Move it to travel through the debate.\")\r\n## Healthcare\r\nst.write(\"# \\n\")\r\nst.write(viz0_ActorTimeSmall.add_selection(brush) & viz0_ActorTime.transform_filter(brush) \\\r\n & viz0_NumHealthcareTime.transform_filter(brush))\r\n\r\nst.write(\"###### Keep your mouse on the chart to zoom in / out and travel through the debate.\")\r\nst.write(\"# \\n\")\r\nst.write(viz1_NumHealthcareTime.add_selection(scales))\r\n\r\n## Covid\r\nst.write(\"# \\n\")\r\nst.write(\"###### Create a window on the rectangle with your mouse. Move it to travel through the debate.\")\r\nst.write(\"# \\n\")\r\nst.write(viz0_ActorTimeSmall.add_selection(brush) & viz0_ActorTime.transform_filter(brush) \\\r\n & viz0_NumCovidTime.transform_filter(brush)) \r\n\r\nst.write(\"###### Keep your mouse on the chart to zoom in / out and travel through the debate.\")\r\nst.write(\"# \\n\")\r\nst.write(viz1_NumCovidTime.add_selection(scales))\r\n\r\n## Economy\r\nst.write(\"# \\n\")\r\nst.write(\"###### Create a window on the rectangle with your mouse. Move it to travel through the debate.\")\r\nst.write(\"# \\n\")\r\nst.write(viz0_ActorTimeSmall.add_selection(brush) & viz0_ActorTime.transform_filter(brush) \\\r\n & viz0_NumEconomyTime.transform_filter(brush))\r\n\r\nst.write(\"###### Keep your mouse on the chart to zoom in / out and travel through the debate.\")\r\nst.write(\"# \\n\")\r\nst.write(viz1_NumEconomyTime.add_selection(scales))\r\n\r\n## Environment \r\nst.write(\"# \\n\")\r\nst.write(\"###### Create a window on the rectangle with your mouse. Move it to travel through the debate.\")\r\nst.write(\"# \\n\")\r\nst.write(viz0_ActorTimeSmall.add_selection(brush) & viz0_ActorTime.transform_filter(brush) \\\r\n & viz0_NumEnvTime.transform_filter(brush))\r\n\r\nst.write(\"###### Keep your mouse on the chart to zoom in / out and travel through the debate.\")\r\nst.write(\"# \\n\")\r\nst.write(viz1_NumEnvTime.add_selection(scales))\r\n\r\n## Race\r\nst.write(\"# \\n\")\r\nst.write(\"###### Create a window on the rectangle with your mouse. Move it to travel through the debate.\")\r\nst.write(\"# \\n\")\r\nst.write(viz0_ActorTimeSmall.add_selection(brush) & viz0_ActorTime.transform_filter(brush) \\\r\n & viz0_NumRaceTime .transform_filter(brush))\r\n\r\nst.write(\"###### Keep your mouse on the chart to zoom in / out and travel through the debate.\")\r\nst.write(\"# \\n\")\r\nst.write(viz1_NumRaceTime.add_selection(scales))\r\n\r\n## Election\r\nst.write(\"# \\n\")\r\nst.write(\"###### Create a window on the rectangle with your mouse. Move it to travel through the debate.\")\r\nst.write(\"# \\n\")\r\nst.write(viz0_ActorTimeSmall.add_selection(brush) & viz0_ActorTime.transform_filter(brush) \\\r\n & viz0_NumElectionTime.transform_filter(brush))\r\n\r\nst.write(\"###### Keep your mouse on the chart to zoom in / out and travel through the debate.\")\r\nst.write(\"# \\n\")\r\nst.write(viz1_NumElectionTime.add_selection(scales))\r\n\r\n\r\n## Theme word charts\r\nbrush = alt.selection_single(fields = [\"broadTheme\"])\r\ndomain = [\"Joe Biden\", \"Donald J. 
Trump\"]\r\nrange_ = [\"#e45756\", \"#f58518\"]\r\ncolor = alt.condition(brush, \r\n alt.Color('name:N', \r\n legend = None,\r\n scale = alt.Scale(domain = domain, range = range_ )), \r\n alt.value('lightgray'))\r\n\r\nbroadTheme1 = alt.Chart(themeWordCount).mark_bar().encode(\r\n x = alt.X(\"broadTheme:N\", title = \"Theme\", \r\n sort = [\"healthcare\", \"covid\", \"economy\", \"environment\", \"race\", \"election\"]),\r\n y = alt.Y(\"wordCount:Q\", title = \"Word Count\"),\r\n color = color,\r\n tooltip = [alt.Tooltip('name', title = \"Actor\")]\r\n)\r\n\r\nspecificWord1 = alt.Chart(themeWordCount).mark_bar().encode(\r\n x = alt.X(\"name:N\", title = None),\r\n y = alt.Y(\"wordCount:Q\", title = \"Word Count\"),\r\n color = color,\r\n column = alt.Column(\"themeWord:N\", title = \"Theme Words\"),\r\n tooltip = [alt.Tooltip('wordCount', title = \"Word Count\")]\r\n)\r\n\r\npickedName = alt.selection_single(fields = [\"name\"])\r\ncolorYouMe = alt.condition(pickedName, \r\n alt.Color(\"name:N\", \r\n legend = alt.Legend(title = \"Actor\"),\r\n scale = alt.Scale(domain = domain, range = range_)),\r\n alt.value('lightgray'))\r\n\r\nyouMe = alt.Chart(dfYouMe).mark_bar().encode(\r\n x = alt.X(\"name:N\", title = None),\r\n y = alt.Y(\"wordCountHeI:Q\", title = \"Word Count\"), \r\n tooltip = [alt.Tooltip(\"wordCountHeI\", title = \"Word Count\")],\r\n color = colorYouMe,\r\n column = alt.Column(\"youHeI:N\", sort = [\"you\", \"he\", \"i\"], \r\n title = \"Pointing to self and others\")\r\n)\r\n\r\n\r\nst.write(\"# \\n\")\r\nst.write(\"###### Click on the bars to see the words by theme.\")\r\nst.write(\"# \\n\")\r\nst.write(broadTheme1.add_selection(brush) & specificWord1.transform_filter(brush))\r\nst.write(\"# \\n\")\r\nst.write(\"###### Click on the bars to see the word count by actor.\")\r\nst.write(\"# \\n\")\r\nst.write(youMe.add_selection(pickedName))\r\n" }, { "alpha_fraction": 0.8620689511299133, "alphanum_fraction": 0.8620689511299133, "avg_line_length": 6.25, "blob_id": "d88ce06062030b3da35e97739edef223a591b04b", "content_id": "65f3f157bb14b8d09da197227b64e2d083b5908a", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 29, "license_type": "permissive", "max_line_length": 9, "num_lines": 4, "path": "/requirements.txt", "repo_name": "CMU-IDS-2020/a3-giesa-erdolu", "src_encoding": "UTF-8", "text": "streamlit\npandas\naltair\nnltk\n" } ]
3
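The Streamlit section of the same file likewise defines nine viz0_* bar charts and nine viz1_* line charts that differ only in the field mapped to the y-axis. A small factory function would remove that duplication; this is a sketch only (themed_bar is not a name from the repository), reusing the df1 dataframe and the order slider value the app already defines.

import altair as alt

def themed_bar(df, order_value, y_field):
    # Same spec as the viz0_* charts: slider filter, Time on x,
    # the chosen count on y, colored by speaker.
    return alt.Chart(df).mark_bar().transform_filter(
        alt.datum["Order of Speech"] >= order_value
    ).encode(
        alt.X("Time", sort=None),
        alt.Y(y_field, sort=None),
        alt.Color("Actor"),
        tooltip=[alt.Tooltip("Actor"), alt.Tooltip(y_field)],
    )

# e.g. viz0_NumCovidTime = themed_bar(df1, order, "Number of Covid")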
McSlow/mp3pi
https://github.com/McSlow/mp3pi
1177c3c6b5557be50d54a403a01d10a804af77f1
9a6dda11db350017ae07966075fe7e5def989641
010b3650484a7996a988c94140fda2bd473c9837
refs/heads/master
2020-12-24T19:51:19.036514
2017-03-27T18:30:12
2017-03-27T18:30:12
86,218,167
0
0
null
2017-03-26T08:24:55
2017-03-14T15:23:37
2017-02-19T20:59:30
null
[ { "alpha_fraction": 0.607430100440979, "alphanum_fraction": 0.6162550449371338, "avg_line_length": 27.84599494934082, "blob_id": "eee3b6c079c6e50f11197f8094330b69225f59d2", "content_id": "ed0c756977f6cf2a3711c478173b0869252d42f2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 14051, "license_type": "no_license", "max_line_length": 163, "num_lines": 487, "path": "/mp3.py", "repo_name": "McSlow/mp3pi", "src_encoding": "UTF-8", "text": "\nfrom kivy.app import App\n\n\nfrom kivy.uix.scatter import Scatter\nfrom kivy.uix.label import Label\nfrom kivy.uix.floatlayout import FloatLayout\nfrom kivy.uix.textinput import TextInput\nfrom kivy.uix.boxlayout import BoxLayout\nfrom kivy.uix.button import Button\nfrom kivy.uix.scrollview import ScrollView\nfrom kivy.uix.widget import Widget\nfrom kivy.properties import NumericProperty\nfrom kivy.graphics import Color\nfrom kivy.uix.screenmanager import ScreenManager, Screen, SwapTransition, FadeTransition\nfrom kivy.uix.settings import SettingsWithTabbedPanel\n\nfrom kivy.config import Config\nimport pdb\n\nimport threading\nimport time\nimport os\nimport subprocess\nimport sys\nimport json\nimport pprint\nimport signal\nimport re\n\n#GPIO Stuff\nimport RPi.GPIO as GPIO\n\n#from networking import NetworkManagerWrapper\nfrom nmcli import nmcli\nfrom radiostations import RadioStations\nfrom audio import AlsaInterface\nfrom screensaver import Rpi_ScreenSaver\n\nreload(sys)\nsys.setdefaultencoding('utf-8')\n\nimport select\n\nimport markup\n\nfrom kivy.logger import Logger\nfrom signal import SIGTSTP, SIGTERM, SIGABRT\n\nimport string,cgi,time\nfrom os import curdir, sep\nfrom BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer\n\nRootApp = \"init\"\nConfigObject = None\n\nclass Mp3PiAppLayout(Screen):\n\n global RootApp, last_activity_time, ConfigObject\n \n isPlaying = False\n proc = None\n\n stop = threading.Event()\n mythread = None\n\n statusthread_stop = threading.Event()\n statusthread = None\n\n\n def args_converter(self, row_index, an_obj):\n if row_index % 2:\n background = [1, 1, 1, 0]\n else:\n background = [1, 1, 1, .5]\n\n return {'text': an_obj['name'],\n 'size_hint_y': None,\n 'deselected_color': background}\n\n def __init__(self, **kwargs):\n global RootApp\n super(Mp3PiAppLayout, self).__init__(**kwargs)\n \n RootApp = self\n\n self.ids['search_results_list'].adapter.bind(on_selection_change=self.change_selection)\n\n self.ids.volume_slider.value = Alsa.get_mixer(\"\", {})\n\n # XXX validate!!\n #self.ids.volume_slider.value = 0# int(subprocess.check_output([\"pulseaudio-ctl\", \"full-status\"]).split(\" \")[0])\n\n\n self.statusthread = threading.Thread(target=self.status_thread)\n self.statusthread.daemon = True\n self.statusthread.start()\n\n\n def change_volume(self, args):\n #os.system(\"amixer set Master %s%%\" % int(args))\n #os.system(\"pactl set-sink-volume bluez_sink.0C_A6_94_E3_76_DA %s%%\" % int(args))\n Alsa.set_mixer(\"\", int(args), {})\n #os.system(\"pulseaudio-ctl set %s%%\" % int(args))\n\n def change_selection(self, args):\n if args.selection:\n self.change_image(args.selection[0].text)\n self.stop_second_thread()\n self.start_second_thread(Stations.getStreamURLbyName(args.selection[0].text))\n else:\n self.stop_second_thread()\n\n def stop_second_thread(self):\n if self.isPlaying == True: # stop playing\n if self.proc is not None:\n if self.mythread.isAlive(): \n print(\"set stop\")\n self.stop.set() \n #self.proc.kill() ??\n Logger.info(\"mpg123: killing 
%s\" % self.proc.pid)\n os.kill(self.proc.pid, SIGTERM)\n self.proc = None\n self.isPlaying = False\n\n def start_second_thread(self, l_text):\n if self.isPlaying == False:\n Logger.info(\"Player: starting player \" + l_text)\n \n self.isPlaying = True\n self.mythread = threading.Thread(target=self.infinite_loop, args=(l_text,))\n self.mythread.daemon = True\n self.mythread.start()\n \n else:\n Logger.info(\"Player: already playing\")\n \n def infinite_loop(self, url):\n iteration = 0\n\n self.proc = subprocess.Popen([\"mpg123\",\"-o\", \"alsa\", \"-@\", url], stderr=subprocess.PIPE, bufsize = 0)\n \n line = []\n while True:\n if self.stop.is_set():\n Logger.info(\"Player: stopping thread\")\n self.stop.clear()\n return\n \n while (select.select([self.proc.stderr], [], [], 0)[0]):\n\n # check if mpg123 is died\n #print(self.proc.returncode)\n #print(self.proc.pid)\n if self.proc.returncode is not None:\n print(\"died\")\n return\n\n if self.stop.is_set():\n Logger.info(\"Player: stopping thread\")\n self.stop.clear()\n return\n\n\n char = self.proc.stderr.read(1)\n if char != '\\n':\n line.append(char)\n else:\n line_joined = \"\".join(line)\n\n Logger.info(\"MPG123: says %s \" % line_joined)\n \n if \"ICY-META: StreamTitle=\" in line_joined:\n pairs = {}\n elements = line_joined.split(\";\")\n for element in elements:\n if element:\n res = re.search(r\"([A-Za-z]*)='(.*)'\", element)\n pairs[res.group(1)] = res.group(2)\n\n self.ids.icytags.text = pairs['StreamTitle']\n\n \n if \"ICY-NAME: \" in line_joined:\n Logger.debug(\"ICYTAGS: ICY name found: %s \" % line_joined.replace(\"ICY-NAME: \", \"\"))\n\n if \"ICY-URL: \" in line_joined:\n Logger.debug(\"ICYTAGS: ICY url found: %s \" % line_joined.replace(\"ICY-URL: \", \"\"))\n\n if \"ICY-META: StreamTitle=\" in line_joined:\n Logger.debug(\"ICYTAGS: ICY StreamTitle found: %s \" % line_joined.replace(\"ICY-META: StreamTitle=\", \"\"))\n\n line = []\n\n iteration += 1\n #print('Infinite loop, iteration {}.'.format(iteration))\n time.sleep(.1)\n \n def status_thread(self):\n global ConfigObject\n \n connection = NMCLI.current_connection() \n\n while True:\n if self.statusthread_stop.is_set():\n self.statusthread_stop.clear()\n return\n\n if not int(time.time()) % 5:\n connection = NMCLI.current_connection() \n \n ip = NMCLI.get_ip()\n\n if ip is None: \n self.ids.wlanstatus.text = \"No network connection\"\n else:\n self.ids.wlanstatus.text = \"%s %s%%\\n%s\\n%s\" % (connection.get('SSID', None), connection.get('SIGNAL', None), ip, time.strftime(\"%H:%M\", time.localtime()))\n\n #self.ids.wlanstatus.text = \"%s %s%%\\n%s\" % (\"myNetwork\", Network.strength, \"192.168.47.11\")\n \n # wlan symbol\n lines = []\n for i in self.ids.wlanstatus.canvas.get_group(None)[1:]:\n if type(i) is Color:\n lines.append(i)\n i.a = 1\n \n if connection is not None:\n if connection['SIGNAL'] < 50:\n for i in lines[0:3]:\n i.a = .5\n\n if connection['SIGNAL'] < 60:\n for i in lines[0:2]:\n i.a = .5\n\n if connection['SIGNAL'] < 70:\n for i in lines[0:1]:\n i.a = .5\n \n\n if Stations.no_data == True:\n print(\"no data\")\n if ConfigObject.get('General', 'playlist') == \"radio.de\":\n Stations.update()\n if Stations.no_data == False:\n del self.search_results.adapter.data[:]\n self.search_results.adapter.data.extend((Stations.data))\n if ConfigObject.get('General', 'playlist') == \"custom\":\n Stations.load_playlist(\"custom\")\n if Stations.no_data == False:\n del self.search_results.adapter.data[:]\n self.search_results.adapter.data.extend((Stations.data))\n \n 
# screensaver\n timeout = ConfigObject.get('General', 'screensaver')\n if timeout < 60:\n timeout = 60\n\n if (time.time() - last_activity_time) > int(timeout):\n if ScreenSaver.display_state is True:\n Logger.info(\"ScreenSaver: enabling screensaver\")\n ScreenSaver.display_off()\n else:\n if ScreenSaver.display_state is False:\n Logger.info(\"ScreenSaver: disabling screensaver\")\n ScreenSaver.display_on()\n \n time.sleep(.5)\n \n def change_image(self, station_name):\n imageUrl = Stations.getImageUrl(Stations.getIdByName(station_name)) \n Logger.info(\"ImageLoader: Loading Image from %s\" % (imageUrl))\n self.ids.imageid.source = imageUrl \n\n def pause(self):\n self.stop.set()\n self.search_results.adapter.deselect_list(self.search_results.adapter.selection)\n\n def next(self):\n self.stop.set()\n #browse(self.search_results.adapter)\n if self.search_results.adapter.selection:\n index = self.search_results.adapter.selection[0].index\n if index < len(self.search_results.adapter.data):\n self.search_results.adapter.get_view(index+1).trigger_action(duration=0)\n\n def prev(self):\n self.stop.set()\n if self.search_results.adapter.selection:\n index = self.search_results.adapter.selection[0].index\n if index >= 1:\n self.search_results.adapter.get_view(index-1).trigger_action(duration=0)\n\n def poweroff(self):\n print(\"poweroff\")\n os.system(\"poweroff\")\n\n def reboot(self):\n print(\"reboot\")\n os.system(\"reboot\")\n\nclass Mp3PiApp(App):\n global last_activity_time, ConfigObject\n # initialize GPIO stuff\n GPIO.setmode(GPIO.BOARD)\n GPIO_PIR = 7\n GPIO.setup(GPIO_PIR,GPIO.IN)\n def my_callback(channel):\n Logger.debug(\"Presence detector triggered!\")\n global last_activity_time\n last_activity_time = time.time()\n\n GPIO.add_event_detect(GPIO_PIR, GPIO.RISING, callback=my_callback, bouncetime=300)\n\n def build_config(self, config):\n config.setdefaults('General', {'screensaver': \"60\"})\n config.setdefaults('General', {'name': \"name\"})\n config.setdefaults('General', {'playlist': \"radio.de\"})\n\n def build_settings(self, settings):\n settings.add_json_panel(\"General\", self.config, data=\"\"\"\n [\n {\"type\": \"numeric\",\n \"title\": \"Screensaver Timeout\",\n \"section\": \"General\",\n \"key\": \"screensaver\"\n },\n {\"type\": \"string\",\n \"title\": \"String\",\n \"section\": \"General\",\n \"key\": \"name\"\n },\n {\"type\": \"options\",\n \"title\": \"Playlist\",\n \"section\": \"General\",\n \"options\": [\"radio.de\", \"custom\"],\n \"key\": \"playlist\"\n }\n ]\"\"\"\n )\n\n def on_stop(self):\n # The Kivy event loop is about to stop, set a stop signal;\n # otherwise the app window will close, but the Python process will\n # keep running until all secondary threads exit.\n \n #layout.clear_widgets()\n #browse(self)\n True\n\n #main = self.root.manager.get_screen('main').layout\n #main.stop.set()\n #self.root.stop.set()\n\n #self.root.statusthread_stop.set()\n\n def build(self):\n global last_activity_time, ConfigObject\n #sm = ScreenManager(transition=FadeTransition())\n \n self.settings_cls = MySettingsWithTabbedPanel\n\n from kivy.core.window import Window\n# Window.size = (800, 480)\n \n def on_motion(self, etype, motionevent):\n global last_activity_time\n last_activity_time = time.time()\n Window.bind(on_motion=on_motion)\n\n ConfigObject = self.config\n\n sm = ScreenManager()\n sm.add_widget(Mp3PiAppLayout())\n sm.add_widget(SettingsScreen())\n return sm\n #return Mp3PiAppLayout()\n\nclass SettingsScreen(Screen):\n def __init__(self, **kwargs):\n 
super(SettingsScreen, self).__init__(**kwargs)\n networklist = []\n# for net in Network.visible_aps:\n# networklist.append(net['ssid'])\n# if net['ssid'] is Network.ssid:\n# self.ids['wlan_list'].text = net[Network.ssid]\n\n# self.ids['wlan_list'].values = networklist\n# self.ids['wlan_list'].bind(text=self.change_wlan_selection)\n\n def change_wlan_selection(self, spinner, args):\n Logger.info(\"WLAN: user selection %s\" % args)\n# Logger.info(\"WLAN: current WLAN %s\" % Network.ssid)\n\n# if args != Network.ssid:\n# Logger.info(\"WLAN: changing WLAN to %s\" % args)\n# Network.activate([args])\n\n\ndef signal_handler(signal, frame):\n print(\"exit\");\n sys.exit(0);\n\nclass HTTPHandler(BaseHTTPRequestHandler):\n global RootApp\n\n #print(Mp3PiAppClass)\n def do_GET(self):\n if self.path == \"/\":\n \n self.page = markup.page()\n self.page.init(title=\"Title\")\n \n self.page.table(border=\"true\")\n\n firstline = True\n for row in RootApp.search_results.adapter.data:\n if firstline is True:\n self.page.tr()\n for column in row:\n #pdb.set_trace()\n string1 = column\n if type(column) == 'float':\n string1 = str(column)\n if type(column) == 'str':\n string1 = unicode(column, \"utf8\")\n self.page.th(string1, align=\"left\")\n self.page.tr.close()\n firstline = False\n continue\n\n self.page.tr()\n for column in row:\n #pdb.set_trace()\n string1 = row[column]\n if type(row[column]) == 'float':\n string1 = str(row[column])\n if type(row[column]) == 'str':\n string1 = unicode(row[column], \"utf8\")\n self.page.td(string1)\n self.page.tr.close()\n\n self.page.p(time.time())\n \n \n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n self.wfile.write(RootApp.isPlaying)\n self.wfile.write(self.page)\n #self.wfile.write(json.dumps(RootApp.search_results.adapter.data, indent=4, separators=('.', ': ')))\n else:\n print(self.path)\n\n\nclass MySettingsWithTabbedPanel(SettingsWithTabbedPanel):\n def on_close(self):\n Logger.info(\"main.py: MySettingsWithTabbedPanel.on_close\")\n\n def on_config_change(self, config, section, key, value):\n if key == \"playlist\":\n Stations.no_data = True\n Logger.info(\n \"main.py: MySettingsWithTabbedPanel.on_config_change: \"\n \"{0}, {1}, {2}, {3}\".format(config, section, key, value))\n\n\nif __name__ == \"__main__\":\n signal.signal(signal.SIGINT, signal_handler)\n\n #Network = NetworkManagerWrapper()\n NMCLI = nmcli() \n Alsa = AlsaInterface()\n Stations = RadioStations()\n ScreenSaver = Rpi_ScreenSaver()\n\n ScreenSaver.display_on()\n \n\n httpd = HTTPServer(('', 8080), HTTPHandler)\n httpd_thread = threading.Thread(target=httpd.serve_forever)\n httpd_thread.daemon = True\n httpd_thread.start()\n\n last_activity_time = time.time()\n\n Mp3PiApp().run()\n\n\n" }, { "alpha_fraction": 0.6089743375778198, "alphanum_fraction": 0.6121794581413269, "avg_line_length": 20.44827651977539, "blob_id": "be78e8e501d76927fe34db168b05239c647f1ad3", "content_id": "d483a1c7ab373b1fb455c4faccef3e597ce955e3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 624, "license_type": "no_license", "max_line_length": 61, "num_lines": 29, "path": "/screensaver.py", "repo_name": "McSlow/mp3pi", "src_encoding": "UTF-8", "text": "import time\nimport os\n\nclass Rpi_ScreenSaver:\n rpi_display = \"/sys/class/backlight/rpi_backlight/bl_power\"\n \n running_on_rpi = False\n\n display_state = True\n\n def __init__(self):\n if os.path.exists(self.rpi_display) is True:\n self.running_on_rpi = 
True\n\n self.display_on()\n \n def display_on(self):\n if self.running_on_rpi is True:\n f = open(self.rpi_display, \"w\")\n f.write(\"0\")\n f.close()\n self.display_state = True\n\n def display_off(self):\n if self.running_on_rpi is True:\n f = open(self.rpi_display, \"w\")\n f.write(\"1\")\n f.close()\n self.display_state = False\n\n\n" }, { "alpha_fraction": 0.6217557191848755, "alphanum_fraction": 0.6240457892417908, "avg_line_length": 24.920791625976562, "blob_id": "2ffea040950217e26e5dbc58f9df899715be9a4b", "content_id": "1b06ab8e79b8d6101f1d5f11cc509af4aec9bfdf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2620, "license_type": "no_license", "max_line_length": 84, "num_lines": 101, "path": "/radiostations.py", "repo_name": "McSlow/mp3pi", "src_encoding": "UTF-8", "text": "\nimport requests\nimport os\nimport json\n\n#from objbrowser import browse\n\n##\n#\n##\nclass RadioStations():\n\n user_agent = {'User-agent': 'User-Agent: XBMC Addon Radio'}\n\n data = []\n no_data = True\n\n #def __init__(self):\n # self.update()\n \n def load_playlist(self, playlist):\n if playlist == \"custom\":\n filename = playlist + \".json\"\n if os.path.exists(filename) is True:\n fd = open(filename, \"r\")\n self.data = json.load(fd)\n self.no_data = False\n else:\n print(\"filename missing %s\" % filename)\n else:\n self.update()\n\n\n def update(self):\n url = \"http://radio.de/info/menu/broadcastsofcategory?category=_top\"\n #url = \"http://radio.de/info/account/getmostwantedbroadcastlists?sizeoflists=20\"\n #url = \"http://radio.de/info/broadcast/editorialreccomendationsembedded\"\n\n try:\n response = requests.get(url, headers = self.user_agent)\n #print(response.status_code)\n self.data = response.json()\n self.no_data = False\n except requests.HTTPError, e:\n print(\"HTTP error %s\", e.code)\n self.no_data = False\n except requests.ConnectionError, e:\n self.data.append({'name': 'no station data'}) \n self.no_data = True\n print(\"Connection error %s\", e)\n\n\n def getStations(self):\n return(self.data)\n\n# for item in self.data:\n# print(item['pictureBaseURL'])\n# print(item['picture1TransName'])\n# print(item['name'])\n# print(item['subdomain'])\n# print(item['bitrate'])\n# print(item['id'])\n\n def getStation(self, id):\n url = \"http://radio.de/info/broadcast/getbroadcastembedded?broadcast=\" + id\n\n response = requests.get(url, headers = self.user_agent)\n #print(response.status_code)\n station_data = response.json()\n\n if \"errorCode\" in station_data.keys():\n print(\"no such entry\")\n return(0)\n\n return(station_data)\n\n def getImageUrl(self, id):\n for item in self.data:\n if str(item['id']) == str(id):\n return(item['pictureBaseURL'] + item['picture1Name'])\n\n def getIdByName(self, name):\n for item in self.data:\n #if str(item['name']) == name:\n if item['name'] == name:\n return(item['id'])\n\n def getStreamURLbyName(self, name):\n\n id = self.getIdByName(name)\n station_data = self.getStation(str(id))\n\n return(station_data['streamURL'])\n \n# print(station_data['link'])\n# print(station_data['name'])\n# print(station_data['streamURL'])\n\n# if \"StreamURLs\" in station_data.keys():\n# for item in station_data['streamURLs']:\n# print(station_item['streamURL'])\n #print(data['streamURLs'][0]['streamURL'])\n\n" }, { "alpha_fraction": 0.5530063509941101, "alphanum_fraction": 0.5617088675498962, "avg_line_length": 22.616823196411133, "blob_id": "98882f1064e87dbe458dc7deb2ab4caf7ee474fb", "content_id": 
"35d789c23feaf78ed0771d056e5d18d08a674f29", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2528, "license_type": "no_license", "max_line_length": 84, "num_lines": 107, "path": "/nmcli.py", "repo_name": "McSlow/mp3pi", "src_encoding": "UTF-8", "text": "\nimport subprocess\nimport os\nimport re\n\nclass nmcli:\n def shell(self, args):\n os.environ[\"LANG\"] = \"C\"\n process = subprocess.Popen(args, stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n stdout, stderr = process.communicate()\n retcode = process.returncode\n\n return retcode, stdout, stderr\n\n def get_values(self, fields, args):\n retcode, stdout, stderr = self.shell(args)\n \n data = []\n if retcode == 0:\n for line in stdout.split(\"\\n\"):\n values = line.split(\":\", len(fields)-1)\n row = dict(zip(fields, values))\n data.append(row)\n return data\n\n def list_ap(self):\n fields = ['ACTIVE', 'NAME', 'SSID', 'SIGNAL', 'SECURITY', 'BSSID']\n args = ['nmcli', '--terse', '--fields', \",\".join(fields), \"dev\", \"wifi\", \"list\"]\n \n data = self.get_values(fields, args)\n\n return(data)\n\n def list_connections(self):\n fields = ['ACTIVE', 'NAME', 'UUID', 'TYPE']\n args = ['nmcli', '--terse', '--fields', \",\".join(fields), \"con\", \"show\"]\n \n data = self.get_values(fields, args)\n\n for d in data:\n if d.get('TYPE', False) != \"802-11-wireless\":\n data.remove(d)\n \n return(data)\n \n def activate_connection(self, name):\n args = ['nmcli', \"con\", \"up\", \"id\", name]\n retcode, stdout, stderr = self.shell(args)\n \n if retcode != 0:\n return(False)\n else:\n return(True)\n\n def current_connection(self):\n data = self.list_ap()\n for d in data:\n if d.get('ACTIVE', False) == \"yes\":\n return(d)\n\n def connection_detail(self):\n data = self.list_connections()\n \n connection = None\n for d in data:\n if d.get('ACTIVE', False) == \"yes\":\n connection = d\n break\n \n if connection is None:\n return(False)\n\n args = ['nmcli', \"--terse\", \"con\", \"show\", connection['UUID']]\n retcode, stdout, stderr = self.shell(args) \n\n if retcode != 0:\n return(False)\n\n data = {}\n if retcode == 0:\n for line in stdout.split(\"\\n\"):\n row = {}\n values = line.split(\":\", 2)\n \n if len(values) == 2:\n data[values[0]] = values[1]\n \n return(data)\n\n def get_ip(self):\n data = self.connection_detail()\n if data is not False:\n ip = re.findall( r'[0-9]+(?:\\.[0-9]+){3}', data.get('IP4.ADDRESS[1]', None) )\n return(ip[0])\n\n\n\n\n\nNMCLI = nmcli()\n#for i in NMCLI.list_ap():\n# print(i)\n#for i in NMCLI.list_connections():\n# print(i)\n#print(NMCLI.current_connection())\n#print(NMCLI.connection_detail())\nprint(NMCLI.get_ip())\n" } ]
4
shaung/wrongsize
https://github.com/shaung/wrongsize
5c2d1dcabb0f35991cfa6db5c15bf35e8469c7a9
1918ad5410c4d647465fe567fe25c0ffc91c1c01
6e618c666d925b78bd0685b9fc24f325d5e9deeb
refs/heads/master
2021-01-01T05:42:24.607559
2013-06-16T12:57:58
2013-06-16T12:57:58
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.4950000047683716, "alphanum_fraction": 0.5099999904632568, "avg_line_length": 17.18181800842285, "blob_id": "342bf99c0ea8cc0101e07ee76cce3317b083d4cf", "content_id": "1204bff3fefc0ab52dd575321d214647c4cda499", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 464, "license_type": "no_license", "max_line_length": 47, "num_lines": 22, "path": "/wrongsize/string.py", "repo_name": "shaung/wrongsize", "src_encoding": "UTF-8", "text": "import re\n\n\ndef un_camel(s):\n \"\"\"CamelCaseใฎๆ–‡ๅญ—ๅˆ—ใ‚’under_lineๅฝขๅผใซๅค‰ๆ›ใ™ใ‚‹ใ€‚\"\"\"\n\n rslt = s[0].lower() + s[1:]\n def func(m):\n return '_' + m.group(0).lower()\n return re.sub(r'[A-Z]', func, rslt)\n\n\ndef split_camel(s, sep=' '):\n \"\"\"CamelCaseใฎๆ–‡ๅญ—ๅˆ—ใ‚’ๅˆ†ๅ‰ฒใ™ใ‚‹ใ€‚\n\n * *s* ๆ–‡ๅญ—ๅˆ—\n * *sep* ใ‚ปใƒ‘ใƒฌใƒผใ‚ฟใƒผ\n \"\"\"\n\n def func(m):\n return sep + m.group(0)\n return s[0] + re.sub(r'[A-Z]', func, s[1:])\n" }, { "alpha_fraction": 0.5601277947425842, "alphanum_fraction": 0.5689457058906555, "avg_line_length": 34.24324417114258, "blob_id": "18733e651f1efdcf516e6e1ccfb7f262989d9595", "content_id": "be443505c2d314859c3ef09d1fb0b07f7079e17d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7839, "license_type": "no_license", "max_line_length": 122, "num_lines": 222, "path": "/wrongsize/xl.py", "repo_name": "shaung/wrongsize", "src_encoding": "UTF-8", "text": "# coding: utf-8\n\nimport openpyxl\nfrom openpyxl import Workbook\nfrom openpyxl.reader.excel import load_workbook\nfrom openpyxl.style import Color, Fill\nfrom openpyxl.cell import get_column_letter\n\nimport sys\nimport sqlalchemy\n\nfrom attrlist import attrs\n\n\ndef fmt(s):\n if s == 'NULL':\n return None\n return s\n\n\ndef encode(v, coding='utf8'):\n if isinstance(v, unicode):\n try:\n return v.encode(coding)\n except:\n return v.encode('cp932')\n elif isinstance(v, str):\n return v.decode('cp932').encode('utf8')\n elif v is None:\n return 'NULL'\n else:\n return str(v)\n\n\ndef create_data(sht, table, cond_case=None):\n with_col_desc = (sht.cell(row=0, column=0).value is None)\n rowpos = with_col_desc and 2 or 0\n head = [sht.cell(row=rowpos, column=c).value for c in range(254)]\n head = [x for x in head if x is not None]\n sht.title = table.get_short_name() + sht.title\n\n for r in range(rowpos + 1, 10000):\n if sht.cell(row=r, column=3).value is None:\n break\n if sht.cell(row=r, column=0).value is None:\n continue\n row = [fmt(sht.cell(row=r, column=c).value) for c in range(len(head))]\n data = dict(zip(head, row))\n if cond_case is not None and not cond_case(data):\n continue\n\n columns = table.get_colname_list()\n for col in data.keys():\n if not col in columns:\n del data[col]\n\n try:\n table.new(data)\n except sqlalchemy.exc.IntegrityError, e:\n print >> sys.stderr, e\n print >> sys.stderr, data\n pass\n \n\ndef write_data_sheet(sht, table, cond=None, with_col_desc=False):\n result = table.select(cond).execute()\n coltypes = dict(((c.name, c.type) for c in table.columns))\n\n rowpos = with_col_desc and 2 or 0\n if with_col_desc:\n for c, colname in enumerate(result.keys()):\n coldesc = attrs.get(colname, '')\n cell = sht.cell(row=0, column=c)\n cell.value = encode(coldesc)\n cell.style.fill.fill_type = Fill.FILL_SOLID\n cell.style.fill.start_color.index = 'EFEFEF'\n\n coltype = coltypes[colname]\n cell = sht.cell(row=1, column=c)\n cell.value = encode(coltype).lower()\n cell.style.fill.fill_type = Fill.FILL_SOLID\n 
            cell.style.fill.start_color.index = 'EFEFEF'\n\n    for c, colname in enumerate(result.keys()):\n        cell = sht.cell(row=rowpos, column=c)\n        cell.value = encode(colname if not table.prefix else colname[7:])\n        cell.style.fill.fill_type = Fill.FILL_SOLID\n        cell.style.fill.start_color.index = 'EFEFEF'\n\n    for r, row in enumerate(result.fetchall(), start=rowpos+1):\n        for c, (k, v) in enumerate(row.items()):\n            cell = sht.cell(row=r, column=c)\n            #cell.value = encode(v)\n            cell.set_value_explicit(value=encode(v))\n            #cell.style.fill.fill_type = Fill.FILL_SOLID\n            #cell.style.fill.start_color.index = Color.YELLOW\n\ndef compare_all(wb, with_desc=False):\n    shtnames = wb.get_sheet_names()\n    rslts = []\n    for name in shtnames:\n        name_new = name + u' (後)'\n        if name_new not in shtnames:\n            #print 'not table', name.encode('cp932')\n            continue\n        sht_old = wb.get_sheet_by_name(name)\n        sht_new = wb.get_sheet_by_name(name_new)\n        short_name = name[:4].lower()\n        table = globals().get(short_name)\n        if not table:\n            #print 'not found', short_name.encode('cp932')\n            continue\n        rslt = compare_sheets(table, sht_old, sht_new, with_desc)\n        rslts.append((name, rslt))\n    return rslts\n\n\ndef compare_sheets(table, sht_old, sht_new, with_desc=False):\n    rslt = {\n        'tablename' : table.table_name,\n        'newdata' : [],\n        'moddata' : [],\n    }\n    cols = table.get_colname_list()\n    pks = insp.get_pk_constraint(table.table_name)['constrained_columns']\n    pks = [x[len(table.prefix):] for x in pks]\n    header, data_old = get_all_sheet_data(sht_old, pks, with_desc)\n    _, data_new = get_all_sheet_data(sht_new, pks, with_desc)\n    # new data\n    new_keys = [k for k in data_new if k not in data_old]\n    for key in new_keys:\n        row, data = data_new[key]\n        rslt['newdata'].append((key, dict((k + ' ' + attrs.get(table.prefix + k, ''), v) for (k, v) in data.iteritems())))\n        for colname in data:\n            col = header[colname]\n            cell = sht_new.cell(row=row, column=col)\n            cell.style.fill.fill_type = Fill.FILL_SOLID\n            cell.style.fill.start_color.index = Color.YELLOW\n    # deleted data\n    # changed data\n    mod_keys = [k for k in data_new if k in data_old]\n    for key in mod_keys:\n        rold, dold = data_old[key]\n        rnew, dnew = data_new[key]\n        \"\"\"\n        print table.table_name\n        print 'added', set(dnew.keys()) - set(dold.keys())\n        print 'removed', set(dold.keys()) - set(dnew.keys())\n        \"\"\"\n        moddata = {}\n        for colname in (k for k in dnew if dnew.get(k) != dold.get(k)):\n            col = header[colname]\n\n            cell = sht_old.cell(row=rold, column=col)\n            cell.style.fill.fill_type = Fill.FILL_SOLID\n            cell.style.fill.start_color.index = Color.YELLOW\n\n            cell = sht_new.cell(row=rnew, column=col)\n            cell.style.fill.fill_type = Fill.FILL_SOLID\n            cell.style.fill.start_color.index = Color.YELLOW\n            coldesc = attrs.get(table.prefix + colname, '')\n            moddata[colname + ' ' + coldesc] = {\n                'old' : dold.get(colname),\n                'new' : dnew.get(colname),\n            }\n        if moddata:\n            rslt['moddata'].append((key, moddata))\n    return rslt\n\n\ndef get_all_sheet_data(sht, pks, with_desc=False):\n    with_col_desc = with_desc or (sht.cell(row=0, column=0).value is None)\n    rowpos = with_col_desc and 2 or 0\n\n    result = {}\n\n    header = [(c, sht.cell(row=rowpos, column=c).value) for c in range(254)]\n    header = [x for x in header if x[-1] is not None]\n    head = [x for (c, x) in header]\n    header = dict(((name, col) for col, name in header))\n\n    for r in xrange(rowpos + 1, 10000):\n        if sht.cell(row=r, column=3).value is None:\n            break\n        if sht.cell(row=r, column=0).value is None:\n            continue\n        row = [fmt(sht.cell(row=r, column=c).value) for c in range(len(head))]\n        data = dict(zip(head, row))\n        try:\n            key = '__and__'.join(map(str, ['%s=%s' % (k, data[k]) for k in pks]))\n        except:\n            print >> sys.stderr, pks, data\n            raise\n        result[key] = (r, data)\n    return header, result\n\ndef prettify_report(wb):\n    if len(wb.get_sheet_names()) > 1:\n        dummy_sheet = wb.get_sheet_by_name('Sheet')\n        if dummy_sheet is not None:\n            wb.remove_sheet(dummy_sheet)\n\n    for shtname in wb.get_sheet_names():\n        sht = wb.get_sheet_by_name(shtname)\n\n        header = [(c, sht.cell(row=0, column=c).value) for c in range(254)]\n        header = [x for x in header if x[-1] is not None]\n        column_widths = [unilen(x) for x in header]\n        for r in xrange(0, 65532):\n            if sht.cell(row=r, column=0).value is None:\n                break\n            for c in range(len(header)):\n                cell = sht.cell(row=r, column=c)\n                cell.style.font.name = u'MS ゴシック'\n                value = cell.value\n                value = value if isinstance(value, basestring) else str(value)\n                column_widths[c] = max(column_widths[c], unilen(value))\n\n        for i, column_width in enumerate(column_widths):\n            sht.column_dimensions[get_column_letter(i+1)].width = column_width + 0.3\n\n        sht.garbage_collect()\n\n" }, { "alpha_fraction": 0.44736841320991516, "alphanum_fraction": 0.46052631735801697, "avg_line_length": 16.20754623413086, "blob_id": "9c2d5df1d3d3238f47d25a3413e306736f18b23a", "content_id": "25e20940c1b3777723e0c6d32280a2c22897df70", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 912, "license_type": "no_license", "max_line_length": 78, "num_lines": 53, "path": "/tests/test_basic.py", "repo_name": "shaung/wrongsize", "src_encoding": "UTF-8", "text": "# coding: utf-8\n\nfrom nose.tools import eq_\n\nimport wrongsize as ws\n\n\ndef inspect_db():\n    from wrongsize import db\n    db.inspect()\n\n\ndef test_groupby():\n    from wrongsize.util import groupby\n    li = [\n        ('A', 1),\n        ('B', 4),\n        ('A', 2),\n        ('A', 3),\n        ('B', 5),\n    ]\n    rslt = {\n        'A' : (\n            ('A', 1),\n            ('A', 2),\n            ('A', 3),\n        ),\n        'B' : (\n            ('B', 4),\n            ('B', 5),\n        ),\n    }\n    eq_(rslt, groupby(li, lambda x: x[0]))\n\n\ndef test_iterparse():\n    from wrongsize.xml import XMLParser\n\n    parser = XMLParser('books.xml')\n\n    titles = []\n\n    def parse_title(elem):\n        title = elem.find('title').text\n        titles.append(title)\n\n    parser.iterparse('book', parse_title)\n\n    eq_(titles, [\"XML Developer's Guide\", \"Midnight Rain\", \"Maeve Ascendant\"])\n\n\nif __name__ == '__main__':\n    inspect_db()\n" }, { "alpha_fraction": 0.5533112287521362, "alphanum_fraction": 0.5665562748908997, "avg_line_length": 25.491228103637695, "blob_id": "669b6be2b2ce0553f931ab384541bd1f21c89d13", "content_id": "df740754cd5fc551cfa6db5c15bf35e8469c7a9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3020, "license_type": "no_license", "max_line_length": 104, "num_lines": 114, "path": "/wrongsize/util.py", "repo_name": "shaung/wrongsize", "src_encoding": "UTF-8", "text": "# coding: utf-8\n\nimport os\nimport sys\nimport time\nfrom functools import wraps\nfrom StringIO import StringIO\nfrom datetime import datetime, date, timedelta\nimport itertools\n\ntry:\n    from collections import OrderedDict\nexcept:\n    from ordereddict import OrderedDict\n\n\ndef timeit(func):\n    @wraps(func)\n    def f(*args, **kws):\n        start = time.time()\n        rslt = func(*args, **kws)\n        end = time.time()\n        print end - start\n        return rslt\n    return f\n\ndef to_cp932(s):\n    if isinstance(s, unicode):\n        return s.encode('cp932')\n    elif isinstance(s, str):\n        try:\n            us = s.decode('cp932')\n        except:\n            return s.decode('utf8').encode('cp932')\n        else:\n            return s\n    
else:\n return s\n\ndef unilen(s):\n if isinstance(s, unicode):\n try:\n return len(s.encode('cp932'))\n except:\n return len(s.encode('utf8'))\n else:\n return len(s)\n\n\nclass DATE:\n YMD = datetime.strftime(datetime.today(), '%Y%m%d')\n HMS = datetime.strftime(datetime.today(), '%H%M%S')\n NOW = datetime.strftime(datetime.now(), '%Y-%m-%d %H:%M:%S')\n TODAY = datetime.strftime(datetime.today(), '%Y-%m-%d')\n\n\ndef get_dates_between(start, end):\n days = (end + timedelta(days=1) - start).days\n return [start + timedelta(days=i) for i in range(days)]\n\ndef date_range(start, end):\n fromstr = lambda dt: datetime.strptime(dt, '%Y/%m/%d')\n return [datetime.strftime(dt, '%Y/%m/%d') for dt in get_dates_between(fromstr(start), fromstr(end))]\n\ndef time_range(count, dt, start, step):\n strd = '%s %s' % (dt, start)\n d = datetime.strptime(strd, '%Y/%m/%d %H:%M:%S')\n for i in range(count):\n d += timedelta(minutes=step)\n yield datetime.strftime(d, '%Y/%m/%d %H:%M:%S')\n\n\nclass Console(object):\n \"\"\" Redirect stdout to file\"\"\"\n def __init__(self, fpath=None):\n self.fpath = fpath\n if not self.fpath:\n self.output = StringIO()\n else:\n basedir = os.path.dirname(fpath)\n if not os.path.exists(basedir):\n try:\n os.makedirs(basedir)\n except:\n pass\n self.output = open(fpath, 'wb')\n self.old_stdout = sys.stdout\n sys.stdout = self.output\n\n def get(self):\n result = self.output.getvalue()\n return result\n\n def __enter__(self):\n return self\n\n def __exit__(self, exception_type, exception_value, traceback):\n sys.stdout = self.old_stdout\n self.output.close()\n\n\ndef groupby(iterable, key):\n groups = OrderedDict({})\n iterable = sorted(iterable, key=key)\n for k, g in itertools.groupby(iterable, key):\n groups[k] = tuple(g)\n return groups\n\n\ndef cmp_lists(li1, li2, f1=None, f2=None):\n \"\"\"Compare two lists.\n\n Returns a 3-tuple of items [in both lists], [only in left], [only in right]\n \"\"\"\n\n s1, s2 = set(map(f1, li1) if f1 else li1), set(map(f2, li2) if f2 else li2)\n both, left, right = s1 & s2, s1 - s2, s2 - s1\n return map(list, (both, left, right))\n" }, { "alpha_fraction": 0.4517766535282135, "alphanum_fraction": 0.6649746298789978, "avg_line_length": 13.071428298950195, "blob_id": "08f911a7b03c38518eb07eada2243995ae1c1112", "content_id": "51d878d1b713a1610ee065677e95134db4732054", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 197, "license_type": "no_license", "max_line_length": 19, "num_lines": 14, "path": "/requirements.txt", "repo_name": "shaung/wrongsize", "src_encoding": "UTF-8", "text": "Fabric==1.5.3\nJinja2==2.6\nMako==0.7.3\nMarkupSafe==0.15\nSQLAlchemy==0.8.0b2\nlxml==3.1.0\nnose==1.2.1\nopenpyxl==1.6.1\nparamiko==1.9.0\npycrypto==2.1.0\npyodbc==3.0.6\nsuds==0.4\nweb.py==0.37\nPyYAML==3.10\n" }, { "alpha_fraction": 0.6012598276138306, "alphanum_fraction": 0.6022047400474548, "avg_line_length": 29.82524299621582, "blob_id": "8bdc7743a1b7cfcd2d5c7ded86824b32dff0c34e", "content_id": "a5b19fb5cabad7812ba19718d2c46341ba7749f9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3175, "license_type": "no_license", "max_line_length": 129, "num_lines": 103, "path": "/wrongsize/db.py", "repo_name": "shaung/wrongsize", "src_encoding": "UTF-8", "text": "# coding: utf-8\n\nimport sys\nimport os\nimport pickle\nfrom datetime import datetime, date, time\n\nfrom sqlalchemy import create_engine, MetaData, Table as DBTable\nfrom sqlalchemy.engine import 
reflection\n\nimport settings\n\n\ndbengine = None\ndbmeta = None\ninspector = None\n\ndef inspect():\n global dbengine, inspector, dbmeta\n dbengine = create_engine(settings.DB_URI)\n inspector = reflection.Inspector.from_engine(dbengine)\n\n if os.path.exists(settings.DB_META_FILE):\n with open(settings.DB_META_FILE, 'rb') as f:\n dbmeta = pickle.load(f)\n else:\n dbmeta = MetaData()\n for table_name in inspector.get_table_names():\n print table_name\n table = DBTable(table_name, dbmeta, autoload=True, autoload_with=dbengine)\n for table_name in inspector.get_view_names():\n print table_name\n table = DBTable(table_name, dbmeta, autoload=True, autoload_with=dbengine)\n with open(settings.DB_META_FILE, 'wb') as f:\n pickle.dump(dbmeta, f)\n\n dbmeta.bind = dbengine\n\n\nclass TableBase(object):\n \"\"\" Table\"\"\"\n\n _cache = {}\n\n def __init__(self, table_name):\n self.table_name = table_name\n self._parse_prefix()\n self.table = DBTable(table_name, dbmeta, autoload=True, autoload_with=dbengine)\n\n def _parse_prefix(self):\n self.prefix = ''\n\n def get_short_name(self):\n return self.table_name\n\n def __getattr__(self, name):\n try:\n return getattr(self.table, name)\n except:\n if not name.startswith(self.prefix):\n name = '%s%s' % (self.prefix, name) if self.prefix else name\n return getattr(self.table.c, name)\n\n @classmethod\n def find(cls, table_name):\n if table_name in cls._cache:\n self = cls._cache[table_name]\n else:\n self = cls(table_name)\n cls._cache[table_name] = self\n return self\n\n def get_colname_list(self, with_prefix=False):\n cols = [str(x).split('.')[-1] for x in self.table.c]\n if not with_prefix and self.prefix:\n cols = [x[len(self.prefix):] for x in cols]\n return cols\n\n def cnt(self, cond=None):\n stmt = self.count() if cond is None else self.count(cond)\n return stmt.execute().fetchone()[0]\n\n def new(self, kws, conn=None):\n if self.prefix:\n values = dict((k.upper() if k.startswith(self.prefix) else (self.prefix + k.upper()), v) for k, v in kws.iteritems())\n else:\n values = kws\n stmt = self.insert().values(**values)\n return conn.execute(stmt) if conn is not None else stmt.execute()\n\n def getone(self, *args):\n pks = inspector.get_pk_constraint(self.table_name)['constrained_columns']\n cond = reduce((lambda x, y: x & y), ((x == y) for (x, y) in zip((getattr(self.table.c, x) for x in pks) , args)))\n return self.table.select(cond).execute().fetchone()\n\n def fetchall(self, cond=None):\n stmt = self.select() if cond is None else self.select(cond)\n for x in stmt.execute().fetchall():\n yield x\n\n\nif __name__ == '__main__':\n inspect()\n" }, { "alpha_fraction": 0.5215439796447754, "alphanum_fraction": 0.5233393311500549, "avg_line_length": 21.73469352722168, "blob_id": "e669faa44f98f408bd29f258546cd774a69130be", "content_id": "7296cb7ee15b3d1ed04eb98c51804d960efbd4c8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1114, "license_type": "no_license", "max_line_length": 82, "num_lines": 49, "path": "/wrongsize/xml.py", "repo_name": "shaung/wrongsize", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\n\"\"\"\n wrongsize.xml\n ~~~~~~~~~~~~~\n\n Xml parsing helpers.\n\"\"\"\n\nimport os, sys\nfrom lxml import etree\n\nfrom StringIO import StringIO\n\nimport re\n\n\nclass XMLParser(object):\n \"\"\" Parser. 
\"\"\"\n\n def __init__(self, filepath):\n self.filepath = filepath\n\n\n def iterparse(self, tag, func, func_check=None):\n \"\"\" iterparsing the xml file.\n\n *tag: XML element tag\n *func: call back function\n *func_check: the node will get parsed only when this function returns True\n\n \"\"\"\n context = etree.iterparse(self.filepath, events=(u'start', u'end'))\n event, root = context.next()\n for event, elem in context:\n if elem.tag != tag:\n continue\n\n if event == 'end' and (not func_check or func_check(elem)):\n func(elem)\n elem.clear()\n while elem.getprevious() is not None:\n del elem.getparent()[0]\n root.clear()\n del event\n del elem\n del event\n del root\n del context\n" }, { "alpha_fraction": 0.6259258985519409, "alphanum_fraction": 0.6296296119689941, "avg_line_length": 18.285715103149414, "blob_id": "4b4cb99e5536ace50051b674bf0cd010e5212a35", "content_id": "62a6fd86068e456efa91ef7c412d9b2a1b544ffa", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 270, "license_type": "no_license", "max_line_length": 88, "num_lines": 14, "path": "/wrongsize/settings.py", "repo_name": "shaung/wrongsize", "src_encoding": "UTF-8", "text": "# coding: utf-8\n\nimport os\nimport yaml\n\n__CONF_PATH = os.getenv('WRONGSIZE_SETTINGS_PATH') or os.path.expanduser('~/_wrongsize')\n\ndef load(f):\n conf = yaml.load(f)\n globals().update(conf)\n\ndef load_default():\n with open(__CONF_PATH, 'rb') as f:\n load(f)\n" }, { "alpha_fraction": 0.5435435175895691, "alphanum_fraction": 0.5516945719718933, "avg_line_length": 26.0930233001709, "blob_id": "c817175b940949934f312b0331b608b67c956ee6", "content_id": "169d379831f9627ce9b8771a39df5838d6a491db", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2331, "license_type": "no_license", "max_line_length": 76, "num_lines": 86, "path": "/wrongsize/soap.py", "repo_name": "shaung/wrongsize", "src_encoding": "UTF-8", "text": "# coding: utf-8\n\nimport sys\nimport logging\nlogging.basicConfig(level=logging.INFO)\n#logging.getLogger('suds.client').setLevel(logging.DEBUG)\n#logging.getLogger('suds.transport').setLevel(logging.DEBUG)\n#logging.getLogger('suds.xsd.schema').setLevel(logging.DEBUG)\n#logging.getLogger('suds.wsdl').setLevel(logging.DEBUG)\n\nfrom datetime import datetime\nfrom suds.client import Client\n\nimport settings\n\n\ndef make_request(name, *args):\n client = get_client(name)\n return client.service.Execute(*args)\n\ndef get_url(endpoint, prefix=None):\n return '%s%s' % (prefix or settings.SOAP_URL_PREFIX, endpoint)\n\ndef get_client(name, prefix=None):\n url = '%s?wsdl' % get_url(name, prefix=prefix)\n # Disable caching\n client = Client(url, faults=False, cache=None)\n return client\n\n\nclass SoapBase(object):\n def __init__(self):\n self.cookie = None\n self.prefix = None\n\n def set_cookie(self, cookie):\n self.cookie = cookie\n\n def __getattr__(self, name):\n def func(*args, **kws):\n client = get_client(name, self.prefix)\n silent = kws.pop('silent', False)\n if not silent:\n pass\n\n headers = kws.pop('headers', {})\n if 'Cookie' not in headers and self.cookie:\n headers.update({'Cookie': self.cookie})\n client.options.headers.update(headers)\n\n if not silent:\n print name\n print datetime.strftime(datetime.now(), '%Y/%m/%d %H:%M:%S')\n print client.wsdl.url\n\n rslt = None\n try:\n rslt = client.service.Execute(*args, **kws)\n assert rslt[0] in (200, 201, 202, 204)\n except AssertionError:\n print 'Error: %s' % rslt[0]\n 
return rslt\n except:\n print >> sys.stderr, u'Error'\n finally:\n if not silent:\n print 'Request\\n', '-' * 80\n print client.last_sent()\n\n if not silent:\n print 'Response\\n', '-' * 80\n print client.last_received()\n print '\\n'\n # print rslt\n\n return rslt\n return func\n\n\nclass Soap(SoapBase):\n pass\n\n\nif __name__ == '__main__':\n ws = Soap()\n ws.dosth()\n\n" }, { "alpha_fraction": 0.6097561120986938, "alphanum_fraction": 0.6097561120986938, "avg_line_length": 9.5, "blob_id": "81154d9d859e1e62a90e462be53f7b0b9e14f7bc", "content_id": "037dab721167ef7ac97a04b7ef66998313fbf8b5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 41, "license_type": "no_license", "max_line_length": 20, "num_lines": 4, "path": "/README.md", "repo_name": "shaung/wrongsize", "src_encoding": "UTF-8", "text": "wrongsize\n=========\n\nquick, dirty, flimsy" }, { "alpha_fraction": 0.6818181872367859, "alphanum_fraction": 0.689393937587738, "avg_line_length": 13.666666984558105, "blob_id": "17a18beea85c52a234b53a16419057e87a548678", "content_id": "012a27d011d168b056f8e6f15c0b727f8dc1b669", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 132, "license_type": "no_license", "max_line_length": 45, "num_lines": 9, "path": "/wrongsize/__init__.py", "repo_name": "shaung/wrongsize", "src_encoding": "UTF-8", "text": "# coding: utf-8\n\nimport sys\nimport settings\n\ntry:\n settings.load_default()\nexcept:\n print >> sys.stderr, 'Settings not found'\n" }, { "alpha_fraction": 0.6709346771240234, "alphanum_fraction": 0.6837387681007385, "avg_line_length": 26.89285659790039, "blob_id": "f8865844a87377bcf58a136dc32e0abe76c090a4", "content_id": "8b521fb9d1b557fbe913639c9f8cc734aa91e663", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 781, "license_type": "no_license", "max_line_length": 89, "num_lines": 28, "path": "/wrongsize/template.py", "repo_name": "shaung/wrongsize", "src_encoding": "UTF-8", "text": "# coding: utf-8\n\nimport os\nimport tempfile\n\nfrom mako.template import Template\nfrom mako.lookup import TemplateLookup\n\nmod_dir = os.path.normpath(os.path.join(tempfile.gettempdir(), 'mako_modules/')) \n\nlook = TemplateLookup(directories=[os.path.normpath('.')], module_directory=mod_dir,\n output_encoding='cp932', input_encoding='cp932', default_filters=['decode.cp932'])\n\n\nclass Template(object):\n def __init__(self, template):\n self.template = template\n\n def render_to_file(self, file_out, paras):\n rslt = self.template.render(**paras)\n f = open(file_out, 'wb') if isinstance(file_out, basestring) else file_out\n f.write(rslt)\n f.close()\n\n\ndef get_template(name):\n template = look.get_template(name)\n return Template(template)\n" } ]
12
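A minimal usage sketch for two of the helpers recorded above in wrongsize/util.py (`groupby` and `cmp_lists`). This is an illustration, not part of the recorded repository files: the sample data is invented, the package is assumed to be importable as `wrongsize`, and the expected results mirror the recorded tests in tests/test_basic.py.

```python
# Hypothetical example; behavior matches the assertions in tests/test_basic.py.
from wrongsize.util import groupby, cmp_lists

pairs = [('A', 1), ('B', 4), ('A', 2)]
# groupby sorts by key, then groups, returning an OrderedDict of tuples.
grouped = groupby(pairs, lambda x: x[0])
# grouped == OrderedDict([('A', (('A', 1), ('A', 2))), ('B', (('B', 4),))])

# cmp_lists returns [in both], [only in left], [only in right].
both, left_only, right_only = cmp_lists([1, 2, 3], [2, 3, 4])
# both == [2, 3], left_only == [1], right_only == [4]
```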
al-chen/spam-filter
https://github.com/al-chen/spam-filter
3d7de6bf8682871f11c795f2bfacc473d54998e2
39b1f94d037a1b3f08b278c14f2f9cc4cb42012f
398c65523f547f7dbaddc8ebaeead6f86689f1fb
refs/heads/master
2016-09-10T18:41:42.486541
2015-03-11T09:45:00
2015-03-11T09:45:00
28,910,216
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6087406277656555, "alphanum_fraction": 0.6255494952201843, "avg_line_length": 29.09727668762207, "blob_id": "86a37a39db72feb50fbcd3348da0ada1407adeeb", "content_id": "4d4d6ece46c9803571d9fd7b6c4ddcb416b5951c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7734, "license_type": "no_license", "max_line_length": 144, "num_lines": 257, "path": "/randForest.py", "repo_name": "al-chen/spam-filter", "src_encoding": "UTF-8", "text": "import csv\nimport numpy as np\nimport math\nfrom random import randint, sample\nfrom collections import Counter\n\nclass Tree(object):\n\tdef __init__(self, left=None, right=None, feature=None, threshold=None, label=None):\n\t\tself.left = left\n\t\tself.right = right\n\t\tself.feature = feature\n\t\tself.threshold = threshold\n\t\tself.label = label\n\tdef toString(self):\n\t\tif self.label != None:\n\t\t\treturn \"Label: \" + str(self.label) + \" \"\n\t\treturn \"Feature: \" + str(self.feature) + \" Threshold: \" + str(self.threshold) + \" \" + self.left.toString() + \" \" + self.right.toString() + \" \"\n\nwith open('trainLabels.csv', 'rb') as csvTrainLabels:\n\ttrainLabels = csv.reader(csvTrainLabels, delimiter=',')\n\tlabels = []\n\tfor row in trainLabels:\n\t\tlabels.append(int(row[0]))\n\t# print labels\n\n# with open('trainFeatures.csv', 'rb') as csvTrainFeatures:\n# \ttrainFeatures = csv.reader(csvTrainFeatures, delimiter=',')\n# \ti = 0\n# \tdic = {}\n# \tfor row in trainFeatures:\n# \t\tdic[tuple(row)] = labels[i]\n# \t\ti += 1\n\nwith open('trainFeatures.csv', 'rb') as csvTrainFeatures:\n\ttrainFeatures = csv.reader(csvTrainFeatures, delimiter=',')\n\tS = []\n\tdic = {}\n\ti = 0\n\tfor trainRow in trainFeatures:\n\t\ttemp = tuple(map(float, trainRow))\n\t\tS.append(temp)\n\t\tdic[temp] = labels[i]\n\t\ti += 1\n\ndef bag(lst):\n\tl = len(lst)\n\tnewList = []\n\tfor i in range(l):\n\t\tr = randint(0,l-1)\n\t\tnewList.append(lst[r])\n\treturn list(set(newList))\n\ndef entropy(lst):\n\tcount1 = 0\n\tcount0 = 0\n\tfor row in lst:\n\t\tif dic[row] == 1:\n\t\t\tcount1 += 1\n\t\telse:\n\t\t\tcount0 += 1\n\tl = len(lst)\n\tif count1 == 0 or count0 == 0 or l == 0:\n\t\treturn 0.0\n\treturn -(float(count1)/l * math.log(float(count1)/l,2)) - (float(count0)/l * math.log(float(count0)/l,2))\n\ndef majority(lst):\n\tlist_of_classes = []\n\tfor entry in lst:\n\t\tlist_of_classes.append(dic[entry])\n\tc = Counter(list_of_classes)\n\tprint c.most_common(1)\n\tlabel = c.most_common(1)[0][0]\n\treturn label\n\ndef buildDecTree(lst, level=0):\n\t# print \"Level \" + str(level)\n\tSbag = bag(lst)\n\tif Sbag == []:\n\t\tprint \"----------hello----------\"\n\t\treturn Tree(None, None, None, None, 0)\n\tfirst_class = dic[Sbag[0]]\n\t# print(Sbag)\n\t# print first_class\n\tboo = True\n\tlist_of_classes = []\n\tfor entry in Sbag:\n\t\tlist_of_classes.append(dic[entry])\n\t\tif dic[entry] != first_class:\n\t\t\tboo = False\n\tif boo or level >= 15: # stopping conditions: Label leaf with class that appears most frequently in Sbag. 
Return tree.\n\t\t# print len(list_of_classes)\n\t\tc = Counter(list_of_classes)\n\t\tlabel = c.most_common(1)[0][0]\n\t\t# print label\n\t\t# print c.most_common(1)\n\t\treturn Tree(None, None, None, None, label)\n\tfeatures = [i for i in range(0,57)]\n\tfeatures = sorted(sample(features, 8))\n\t# print features\n\tY = entropy(Sbag)\n\tbest_goodness = 0.0\n\tbest_sl = []\n\tbest_sr = []\n\tbest_feature = -1\n\tbest_threshold = 0.0\n\tfor feature in features:\n\t\tvalues_of_feature = []\n\t\tfor entry in Sbag:\n\t\t\tvalue = entry[feature]\n\t\t\tif value in values_of_feature:\n\t\t\t\tcontinue\n\t\t\tvalues_of_feature.append(value)\n\t\tvalues_of_feature = sorted(values_of_feature)\n\t\t# print values_of_feature\n\t\tif len(values_of_feature) == 1:\n\t\t\tpossible_threshold = values_of_feature[0]\n\t\t\tsl = []\n\t\t\tsr = []\n\t\t\tfor entry in Sbag:\n\t\t\t\tsl.append(entry)\n\t\t\tyl = entropy(sl)\n\t\t\tgoodness = Y - yl\n\t\t\tif float(goodness) >= float(best_goodness):\n\t\t\t\t# print \"GOODNESS: \" + str(goodness)\n\t\t\t\tbest_goodness = goodness\n\t\t\t\tbest_sl = sl\n\t\t\t\tbest_sr = sr\n\t\t\t\tbest_feature = feature\n\t\t\t\tbest_threshold = possible_threshold\n\t\telse:\n\t\t\tfor i in range(1, len(values_of_feature)):\n\t\t\t\tpossible_threshold = (float(values_of_feature[i-1]) + float(values_of_feature[i])) / float(2)\n\t\t\t\tsl = []\n\t\t\t\tsr = []\n\t\t\t\tfor entry in Sbag:\n\t\t\t\t\tif entry[feature] <= possible_threshold:\n\t\t\t\t\t\tsl.append(entry)\n\t\t\t\t\telse:\n\t\t\t\t\t\tsr.append(entry)\n\t\t\t\tyl = entropy(sl)\n\t\t\t\tyr = entropy(sr)\n\t\t\t\tgoodness = Y - ((float(len(sl)) / float(len(Sbag)) * yl) + (float(len(sr)) / float(len(Sbag)) * yr))\n\t\t\t\tif float(goodness) >= float(best_goodness):\n\t\t\t\t\t# print \"GOODNESS: \" + str(goodness)\n\t\t\t\t\tbest_goodness = goodness\n\t\t\t\t\tbest_sl = sl\n\t\t\t\t\tbest_sr = sr\n\t\t\t\t\tbest_feature = feature\n\t\t\t\t\tbest_threshold = possible_threshold\n\n\t\n\tif best_sl == []:\n\t\treturn Tree(buildDecTree(best_sr, level+1), buildDecTree(best_sr, level+1), best_feature, best_threshold, None)\n\telif best_sr == []:\n\t\treturn Tree(buildDecTree(best_sl, level+1), buildDecTree(best_sl, level+1), best_feature, best_threshold, None)\n\treturn Tree(buildDecTree(best_sl, level+1), buildDecTree(best_sr, level+1), best_feature, best_threshold, None)\n\nT = 25\ndecTrees = []\nfor i in range(T):\n\tprint(\"Tree \" + str(i+1))\n\tdecTrees.append(buildDecTree(S))\n\n\nwith open('emailOutput1.csv', 'wb') as csv1:\n\twriter1 = csv.writer(csv1, delimiter=' ', quotechar='|', quoting=csv.QUOTE_MINIMAL)\n\twith open('emailOutput2.csv', 'wb') as csv2:\n\t\twriter2 = csv.writer(csv2, delimiter=' ', quotechar='|', quoting=csv.QUOTE_MINIMAL)\n\t\twith open('emailOutput5.csv', 'wb') as csv5:\n\t\t\twriter5 = csv.writer(csv5, delimiter=' ', quotechar='|', quoting=csv.QUOTE_MINIMAL)\n\t\t\twith open('emailOutput10.csv', 'wb') as csv10:\n\t\t\t\twriter10 = csv.writer(csv10, delimiter=' ', quotechar='|', quoting=csv.QUOTE_MINIMAL)\n\t\t\t\twith open('emailOutput25.csv', 'wb') as csv25:\n\t\t\t\t\twriter25 = csv.writer(csv25, delimiter=' ', quotechar='|', quoting=csv.QUOTE_MINIMAL)\n\t\t\t\t\twith open('valFeatures.csv', 'rb') as csvVal:\n\t\t\t\t\t\tvalFeatures = csv.reader(csvVal, delimiter=',')\n\t\t\t\t\t\tfor valRow in valFeatures:\n\t\t\t\t\t\t\tvotes = []\n\t\t\t\t\t\t\tforest = decTrees[:]\n\t\t\t\t\t\t\tfor tree in forest:\n\t\t\t\t\t\t\t\twhile True:\n\t\t\t\t\t\t\t\t\tif tree.label != 
None:\n\t\t\t\t\t\t\t\t\t\tvotes.append(tree.label)\n\t\t\t\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t\t\tif float(valRow[tree.feature]) <= float(tree.threshold):\n\t\t\t\t\t\t\t\t\t\t\ttree = tree.left\n\t\t\t\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t\t\t\ttree = tree.right\n\t\t\t\t\t\t\t# T=1\n\t\t\t\t\t\t\tguess = votes[0]\n\t\t\t\t\t\t\twriter1.writerow([guess])\n\n\t\t\t\t\t\t\t# T=2\n\t\t\t\t\t\t\tvotes2 = votes[:2]\n\t\t\t\t\t\t\tguess = max(set(votes2), key=votes2.count)\n\t\t\t\t\t\t\twriter2.writerow([guess])\n\n\t\t\t\t\t\t\t# T=5\n\t\t\t\t\t\t\tvotes5 = votes[:5]\n\t\t\t\t\t\t\tguess = max(set(votes5), key=votes5.count)\n\t\t\t\t\t\t\twriter5.writerow([guess])\n\n\t\t\t\t\t\t\t# T=10\n\t\t\t\t\t\t\tvotes10 = votes[:10]\n\t\t\t\t\t\t\tguess = max(set(votes10), key=votes10.count)\n\t\t\t\t\t\t\twriter10.writerow([guess])\n\n\t\t\t\t\t\t\t# T=25\n\t\t\t\t\t\t\tvotes25 = votes\n\t\t\t\t\t\t\tguess = max(set(votes25), key=votes25.count)\n\t\t\t\t\t\t\twriter25.writerow([guess])\n\n# with open('emailOutput.csv', 'wb') as csvTest:\n# \twriterTest = csv.writer(csvTest, delimiter=' ', quotechar='|', quoting=csv.QUOTE_MINIMAL)\t\t\t\t\n# \twith open('testFeatures.csv', 'rb') as csvTestFeatures:\n# \t\ttestFeatures = csv.reader(csvTestFeatures, delimiter=',')\n# \t\tfor testRow in testFeatures:\n# \t\t\tvotes = []\n# \t\t\tforest = decTrees[:]\n# \t\t\tfor tree in forest:\n# \t\t\t\twhile True:\n# \t\t\t\t\tif tree.label != None:\n# \t\t\t\t\t\tvotes.append(tree.label)\n# \t\t\t\t\t\tbreak\n# \t\t\t\t\telse:\n# \t\t\t\t\t\tif float(testRow[tree.feature]) <= float(tree.threshold):\n# \t\t\t\t\t\t\ttree = tree.left\n# \t\t\t\t\t\telse:\n# \t\t\t\t\t\t\ttree = tree.right\n# \t\t\tguess = max(set(votes), key=votes.count)\n# \t\t\tprint guess\n# \t\t\twriterTest.writerow([guess])\n\n\n\n# with open('emailOutput' + str(T) + '.csv', 'wb') as csvfile:\n# with open('emailOutput.csv', 'wb') as csvfile:\n# \twriter = csv.writer(csvfile, delimiter=' ', quotechar='|', quoting=csv.QUOTE_MINIMAL)\n# \twith open('testFeatures.csv', 'rb') as csvVal:\n# \t\tvalFeatures = csv.reader(csvVal, delimiter=',')\n# \t\tfor valRow in valFeatures:\n# \t\t\tvotes = []\n# \t\t\tfor tree in forest:\n# \t\t\t\twhile True:\n# \t\t\t\t\tif tree.label != None:\n# \t\t\t\t\t\t# print(tree.label)\n# \t\t\t\t\t\tvotes.append(tree.label)\n# \t\t\t\t\t\tbreak\n# \t\t\t\t\telse:\n# \t\t\t\t\t\tif float(valRow[tree.feature]) <= float(tree.threshold):\n# \t\t\t\t\t\t\ttree = tree.left\n# \t\t\t\t\t\telse:\n# \t\t\t\t\t\t\ttree = tree.right\n# \t\t\tguess = max(set(votes), key=votes.count)\n# \t\t\tprint guess\n# \t\t\twriter.writerow([guess])" }, { "alpha_fraction": 0.7928286790847778, "alphanum_fraction": 0.8007968068122864, "avg_line_length": 82.33333587646484, "blob_id": "df5945b9ec9866090d7f6cfc21f0b2e791cebb6b", "content_id": "691926eae5bb54bdaaf39f349862a5dfc9a31645", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 251, "license_type": "no_license", "max_line_length": 220, "num_lines": 3, "path": "/README.md", "repo_name": "al-chen/spam-filter", "src_encoding": "UTF-8", "text": "# spam-filter\n## What is it?\nA machine learning algorithm that uses a random forests classifier to determine if incoming mail is spam or not. The program constructs 25 decision trees from a training set using information gain as an impurity measure. 
\n" }, { "alpha_fraction": 0.5503685474395752, "alphanum_fraction": 0.5995085835456848, "avg_line_length": 30.384614944458008, "blob_id": "042c018fc025f0f55297a9e94efcf20911e5a4b7", "content_id": "74419cefda7d0b25915cf57ef84c68c490a6151b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 407, "license_type": "no_license", "max_line_length": 49, "num_lines": 13, "path": "/compare.py", "repo_name": "al-chen/spam-filter", "src_encoding": "UTF-8", "text": "import csv\nfile1 = \"valLabels.csv\"\nfor j in [1,2,5,10,25]:\n\tfile2 = \"emailOutput\" + str(j) + \".csv\"\n\twith open(file1, 'rb') as csv1:\n\t\twith open(file2, 'rb') as csv2:\n\t\t\tvalLabels = csv.reader(csv1, delimiter=',')\n\t\t\toutputLabels = csv.reader(csv2, delimiter=',')\n\t\t\tcount = 0\n\t\t\tfor i in range(500):\n\t\t\t\tif valLabels.next() != outputLabels.next():\n\t\t\t\t\tcount += 1\n\t\t\tprint \"k=\" + str(j) + \": \" + str(count)" } ]
3
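The spam-filter README above names information gain as the impurity measure for its 25-tree random forest. Below is a standalone sketch of that criterion for binary 0/1 labels; it mirrors the entropy and goodness-of-split logic in the recorded randForest.py, but the function names and example labels here are invented for illustration, not taken from the recorded code.

```python
import math

def entropy(labels):
    # Shannon entropy of a list of 0/1 class labels.
    n = len(labels)
    if n == 0:
        return 0.0
    p = sum(labels) / float(n)
    if p in (0.0, 1.0):
        return 0.0
    return -(p * math.log(p, 2)) - ((1 - p) * math.log(1 - p, 2))

def information_gain(parent, left, right):
    # Goodness of a split: parent entropy minus the size-weighted
    # entropies of the two child partitions.
    n = float(len(parent))
    return (entropy(parent)
            - (len(left) / n) * entropy(left)
            - (len(right) / n) * entropy(right))

print(information_gain([0, 0, 1, 1], [0, 0], [1, 1]))  # 1.0 for a perfect split
```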
luke14free/keanu
https://github.com/luke14free/keanu
0d98c70d88a5299a69bfa2c9ee6e8816b324c58d
c9d239f6154ac867e180a5802e6ea242bb6beb2a
21a35f6b9f73f8bfb3d86b021f1d761806ce6f6e
refs/heads/master
2023-06-04T15:44:33.440915
2018-12-18T10:55:26
2018-12-18T10:55:26
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6031106114387512, "alphanum_fraction": 0.6207757592201233, "avg_line_length": 25.984455108642578, "blob_id": "948528a0a28d5322e878342c8474ca9b33512bd9", "content_id": "6ac816a31ace91389aa51bbe4ab1bf43df54af8c", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Gradle", "length_bytes": 5208, "license_type": "permissive", "max_line_length": 97, "num_lines": 193, "path": "/keanu-project/build.gradle", "repo_name": "luke14free/keanu", "src_encoding": "UTF-8", "text": "buildscript {\n ext.kotlin_version = '1.2.31'\n repositories {\n mavenCentral()\n }\n\n dependencies {\n classpath \"org.jetbrains.kotlin:kotlin-gradle-plugin:$kotlin_version\"\n classpath 'com.google.protobuf:protobuf-gradle-plugin:0.8.6'\n }\n}\n\nplugins {\n id 'io.franzbecker.gradle-lombok' version '1.14'\n id 'maven'\n id 'jacoco'\n}\n\napply plugin: 'com.google.protobuf'\n\narchivesBaseName = \"keanu\"\n\ndependencies {\n\n compile 'org.apache.commons:commons-math3:3.6.1'\n compile 'org.nd4j:nd4j-native-platform:1.0.0-beta2'\n compile \"org.jetbrains.kotlin:kotlin-stdlib-jdk8:$kotlin_version\"\n compile 'com.google.guava:guava:20.0'\n compile 'com.google.protobuf:protobuf-java:3.6.1'\n compile 'com.google.protobuf:protobuf-java-util:3.6.1'\n compile group: 'com.opencsv', name: 'opencsv', version: '4.2'\n compile group: 'org.slf4j', name: 'slf4j-api', version: '1.8.0-beta2'\n compile group: 'org.slf4j', name: 'slf4j-log4j12', version: '1.8.0-beta2'\n\n //testing\n testCompile 'junit:junit:4.12'\n testCompile 'org.mockito:mockito-core:2.19.1'\n testCompile 'org.jfree:jfreechart:1.0.19'\n testCompile group: 'org.mockito', name: 'mockito-core', version: '2.15.0'\n testCompile 'org.hamcrest:hamcrest-library:1.3'\n testCompile 'ca.umontreal.iro.simul:ssj:3.2.1'\n testCompile 'org.reflections:reflections:0.9.11'\n\n}\n\nprotobuf {\n protoc {\n // The artifact spec for the Protobuf Compiler\n artifact = 'com.google.protobuf:protoc:3.6.1'\n }\n}\n\ntest {\n systemProperty \"io.improbable.keanu.defaultRandom.seed\", '1'\n systemProperty \"dtype\", 'double'\n systemProperty \"io.improbable.keanu.util.ProgressBar.disableProgressBar\", 'true'\n useJUnit {\n excludeCategories 'io.improbable.keanu.testcategory.Slow'\n }\n}\n\ntask testSlowOnes(type: Test) {\n useJUnit {\n includeCategories 'io.improbable.keanu.testcategory.Slow'\n }\n}\n\ncheck.dependsOn(testSlowOnes)\n\n// Add default user/pass so it's not needed if only building and not publishing to Nexus\nif (!hasProperty('nexusUser')) {\n ext.nexusUser = ''\n}\n\nif (!hasProperty('nexusPassword')) {\n ext.nexusPassword = ''\n}\n\nuploadArchives {\n repositories {\n mavenDeployer {\n beforeDeployment { MavenDeployment deployment -> signing.signPom(deployment) }\n\n repository(url: \"https://oss.sonatype.org/service/local/staging/deploy/maven2/\") {\n authentication(userName: nexusUser, password: nexusPassword)\n }\n\n snapshotRepository(url: \"https://oss.sonatype.org/content/repositories/snapshots/\") {\n authentication(userName: nexusUser, password: nexusPassword)\n }\n\n pom.project {\n name 'Keanu'\n packaging 'jar'\n description 'A probabilistic approach from an Improbabilistic company'\n url 'https://github.com/improbable-research/keanu'\n\n scm {\n connection 'scm:git@github.com:improbable-research/keanu.git'\n developerConnection 'scm:git@github.com:improbable-research/keanu.git'\n url 'https://github.com/improbable-research/keanu.git'\n }\n\n licenses {\n license {\n name 'MIT'\n url 
'http://www.opensource.org/licenses/mit-license.php'\n }\n }\n\n developers {\n developer {\n id 'keanu-engineering'\n name 'Keanu Improbable'\n email 'keanu-engineering@improbable.io'\n }\n }\n }\n }\n }\n}\n\napply plugin: 'kotlin'\n\ncompileKotlin {\n kotlinOptions {\n jvmTarget = \"1.8\"\n }\n}\ncompileTestKotlin {\n kotlinOptions {\n jvmTarget = \"1.8\"\n }\n}\n\nif (project.hasProperty(\"signing.keyId\")) {\n apply plugin: 'signing'\n signing {\n sign configurations.archives\n }\n}\n\ntask javadocJar(type: Jar) {\n classifier = 'javadoc'\n from javadoc\n}\n\ntask sourcesJar(type: Jar) {\n classifier = 'sources'\n from sourceSets.main.allSource\n}\n\ntask deletePythonClasspath(type: Delete) {\n delete fileTree(\"../keanu-python/keanu/classpath/\")\n}\n\ntask copyJarsIntoPythonClasspath(type: Copy) {\n dependsOn(build)\n dependsOn(deletePythonClasspath)\n into \"$rootDir/keanu-python/keanu/classpath/\"\n from jar\n from configurations.compile\n}\n\ntask deleteUnneededJarsFromPythonClasspath(type: Delete) {\n mustRunAfter(copyJarsIntoPythonClasspath)\n delete fileTree(\"../keanu-python/keanu/classpath/\") {\n include \"*-android-*\"\n include \"*-ios-*\"\n include \"mkl-*\"\n include \"*-x86.jar\"\n include \"*-ppc64le.jar\"\n include \"*-armhf.jar\"\n }\n}\n\ntask preparePythonClasspath {\n dependsOn deleteUnneededJarsFromPythonClasspath\n dependsOn copyJarsIntoPythonClasspath\n}\n//Break build on javadoc warnings\ntasks.withType(Javadoc) {\n options.addStringOption('Xwerror', '-quiet')\n}\n\nartifacts {\n archives javadocJar, sourcesJar\n}\n\nlombok { // optional: values below are the defaults\n version = \"1.16.20\"\n sha256 = \"\"\n}\n" }, { "alpha_fraction": 0.6388837695121765, "alphanum_fraction": 0.6429226994514465, "avg_line_length": 41.5546875, "blob_id": "a392c56fb713f64cc9667d6ae9f5d243b1fd684e", "content_id": "a87bb840c0ff498c40e40675ffaee02769ed7cf9", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5447, "license_type": "permissive", "max_line_length": 119, "num_lines": 128, "path": "/keanu-python/keanu/algorithm/sampling.py", "repo_name": "luke14free/keanu", "src_encoding": "UTF-8", "text": "from py4j.java_gateway import java_import, JavaObject\nfrom py4j.java_collections import JavaList\n\nfrom keanu.algorithm._proposal_distribution import ProposalDistribution\nfrom keanu.algorithm.proposal_listeners import proposal_listener_types\nfrom keanu.context import KeanuContext\nfrom keanu.tensor import Tensor\nfrom keanu.vertex.base import Vertex\nfrom keanu.net import BayesNet\nfrom typing import Any, Iterable, Dict, List, Tuple, Generator, Optional\nfrom keanu.vartypes import sample_types, sample_generator_types, numpy_types\nfrom keanu.plots import traceplot\n\nk = KeanuContext()\n\njava_import(k.jvm_view(), \"io.improbable.keanu.algorithms.mcmc.MetropolisHastings\")\njava_import(k.jvm_view(), \"io.improbable.keanu.algorithms.mcmc.NUTS\")\njava_import(k.jvm_view(), \"io.improbable.keanu.algorithms.mcmc.Hamiltonian\")\n\nalgorithms = {\n 'metropolis': k.jvm_view().MetropolisHastings,\n 'NUTS': k.jvm_view().NUTS,\n 'hamiltonian': k.jvm_view().Hamiltonian\n}\n\n\ndef sample(net: BayesNet,\n sample_from: Iterable[Vertex],\n algo: str = 'metropolis',\n proposal_distribution: str = None,\n proposal_distribution_sigma: numpy_types = None,\n proposal_listeners=[],\n draws: int = 500,\n drop: int = 0,\n down_sample_interval: int = 1,\n plot: bool = False,\n ax: Any = None) -> sample_types:\n\n sampling_algorithm: 
JavaObject = build_sampling_algorithm(algo, proposal_distribution, proposal_distribution_sigma,\n proposal_listeners)\n\n vertices_unwrapped: JavaList = k.to_java_object_list(sample_from)\n\n network_samples: JavaObject = sampling_algorithm.getPosteriorSamples(\n net.unwrap(), vertices_unwrapped, draws).drop(drop).downSample(down_sample_interval)\n\n vertex_samples = {\n Vertex._get_python_label(vertex_unwrapped): list(\n map(Tensor._to_ndarray,\n network_samples.get(vertex_unwrapped).asList())) for vertex_unwrapped in vertices_unwrapped\n }\n\n if plot:\n traceplot(vertex_samples, ax=ax)\n\n return vertex_samples\n\n\ndef generate_samples(net: BayesNet,\n sample_from: Iterable[Vertex],\n algo: str = 'metropolis',\n proposal_distribution: str = None,\n proposal_distribution_sigma: numpy_types = None,\n proposal_listeners: List[proposal_listener_types] = [],\n drop: int = 0,\n down_sample_interval: int = 1,\n live_plot: bool = False,\n refresh_every: int = 100,\n ax: Any = None) -> sample_generator_types:\n\n sampling_algorithm: JavaObject = build_sampling_algorithm(algo, proposal_distribution, proposal_distribution_sigma,\n proposal_listeners)\n\n vertices_unwrapped: JavaList = k.to_java_object_list(sample_from)\n\n samples: JavaObject = sampling_algorithm.generatePosteriorSamples(net.unwrap(), vertices_unwrapped)\n samples = samples.dropCount(drop).downSampleInterval(down_sample_interval)\n sample_iterator: JavaObject = samples.stream().iterator()\n\n return _samples_generator(\n sample_iterator, vertices_unwrapped, live_plot=live_plot, refresh_every=refresh_every, ax=ax)\n\n\ndef build_sampling_algorithm(algo, proposal_distribution: Optional[str],\n proposal_distribution_sigma: Optional[numpy_types],\n proposal_listeners: List[proposal_listener_types]):\n if algo != \"metropolis\":\n if proposal_distribution is not None:\n raise TypeError(\"Only Metropolis Hastings supports the proposal_distribution parameter\")\n if len(proposal_listeners) > 0:\n raise TypeError(\"Only Metropolis Hastings supports the proposal_listeners parameter\")\n\n if (proposal_distribution is None and len(proposal_listeners) > 0):\n raise TypeError(\"If you pass in proposal_listeners you must also specify proposal_distribution\")\n\n builder: JavaObject = algorithms[algo].builder()\n\n if proposal_distribution is not None:\n proposal_distribution_object = ProposalDistribution(\n type_=proposal_distribution, sigma=proposal_distribution_sigma, listeners=proposal_listeners)\n builder = builder.proposalDistribution(proposal_distribution_object.unwrap())\n sampling_algorithm: JavaObject = builder.build()\n return sampling_algorithm\n\n\ndef _samples_generator(sample_iterator: JavaObject, vertices_unwrapped: JavaList, live_plot: bool, refresh_every: int,\n ax: Any) -> sample_generator_types:\n traces = []\n x0 = 0\n while (True):\n network_sample = sample_iterator.next()\n sample = {\n Vertex._get_python_label(vertex_unwrapped): Tensor._to_ndarray(network_sample.get(vertex_unwrapped))\n for vertex_unwrapped in vertices_unwrapped\n }\n\n if live_plot:\n traces.append(sample)\n if len(traces) % refresh_every == 0:\n joined_trace = {k: [t[k] for t in traces] for k in sample.keys()}\n if ax is None:\n ax = traceplot(joined_trace, x0=x0)\n else:\n traceplot(joined_trace, ax=ax, x0=x0)\n x0 += refresh_every\n traces = []\n\n yield sample\n" }, { "alpha_fraction": 0.8639053106307983, "alphanum_fraction": 0.8639053106307983, "avg_line_length": 55.33333206176758, "blob_id": "040ebd9e113341c631c4160ab467fbdc94120804", 
"content_id": "19eb79049af2b7015de975b2a15804bb1930c862", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 169, "license_type": "permissive", "max_line_length": 67, "num_lines": 3, "path": "/keanu-python/keanu/algorithm/__init__.py", "repo_name": "luke14free/keanu", "src_encoding": "UTF-8", "text": "from .optimization import (GradientOptimizer, NonGradientOptimizer)\nfrom .sampling import sample, generate_samples\nfrom .proposal_listeners import AcceptanceRateTracker\n" } ]
3
amirunpri2018/jupyter_notebooks
https://github.com/amirunpri2018/jupyter_notebooks
449a65eb655580ec7455601d19dff3269f58811e
5d7e36b49fd16497298b8c2d9dccc27f4e518702
40df37b9bf8eb11aed21b497c82e382302dbfcd8
refs/heads/master
2020-11-28T09:13:44.318416
2019-12-21T22:10:25
2019-12-21T22:10:25
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6263012886047363, "alphanum_fraction": 0.6370177865028381, "avg_line_length": 28.556560516357422, "blob_id": "bd5a6f2b24f6767d7203696590affcdf0489b5b6", "content_id": "d312a547b6d73ad8258d82dbcc4772f0de340e26", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6532, "license_type": "no_license", "max_line_length": 164, "num_lines": 221, "path": "/ETL/papermill/rvms/2_Comparing_Adjusted_GraphCat_CPUs_against_Original_GraphCat_CPUs-Access.py", "repo_name": "amirunpri2018/jupyter_notebooks", "src_encoding": "UTF-8", "text": "# ---\n# jupyter:\n# jupytext:\n# formats: ipynb,py:percent\n# text_representation:\n# extension: .py\n# format_name: percent\n# format_version: '1.2'\n# jupytext_version: 1.1.6\n# kernelspec:\n# display_name: Python 3\n# language: python\n# name: python3\n# ---\n\n# %% [markdown]\n# # Comparing Before Budgeted CPU versus After Budgeted CPU\n\n# %% [markdown]\n# ### This creates the raw \"Red and Green\" sheet master data\n\n# %% [markdown]\n# ### Overall Process\n\n# %% [markdown]\n# - Get current or \"After\" budgeted CPU file\n# - Get \"Before\" budgeted CPU file\n# - Merge between the 2 using full OUTER JOIN\n# - Create 2 new columns:\n# 1. After Budgeted CPU minus Before Budgeted CPU (at group-subgroup level)\n# 2. After Budgeted CPU minus Before Budgeted CPU (at GraphCat level)\n# - Export to Excel and save in ```\\\\207.130.185.67\\aqgbudget2\\Cost\\Reserve Adjustments\\Reports\\Normal Reserve Balance Verification\\RVMS_Before_After_Checks``` folder\n\n# %%\nfrom datetime import datetime\nfrom dateutil import relativedelta\nfrom pathlib import Path\nimport os\nimport pandas as pd\nimport pyodbc\nimport scrapbook as sb\nimport time\nfrom win10toast import ToastNotifier\npd.options.display.max_rows=1000\npd.options.display.max_columns=100\n\n# %% [markdown]\n# ### Enter CLA Claim Month:\n\n# %%\n# If using papermill, have to comment this out. 
It doesn't support getting input from the user\n# CLA_MONTH = input(\"Enter CLA Claim Month ('YYYYMM'): \")\n\n# %% {\"tags\": [\"parameters\"]}\nCLA_MONTH = '201903'\n\n# %% [markdown]\n# ### Define where to save the Red and Green raw data file based on CLA claim month:\n\n# %%\nbase_dir = \"//207.130.185.67/aqgbudget2/Cost/Reserve Adjustments/Reports/Normal Reserve Balance Verification/RVMS_Before_After_Checks\"\np = Path(base_dir)\nsave_dir = p / CLA_MONTH\nif not os.path.exists(save_dir):\n os.makedirs(save_dir)\n\n# %% [markdown]\n# ### Define where to retrieve the current budgeted CPUs (\"after\" CPUs):\n\n# %%\ncurrent_cpu_dir = Path(\"//207.130.185.67/aqgbudget2/Cost/Reserve Adjustments/Reports/Normal Reserve Balance Verification/RVMS_Current_Budgeted_CPUs/\" \n + CLA_MONTH + \"/All_Plants_Budgeted_CPU_By_Group_SubGroup.xlsx\")\n\n# %% [markdown]\n# #### Now fetch the \"after\" CPUs:\n\n# %%\ncpu_after = pd.read_excel(current_cpu_dir)\n\n# %%\ncpu_after.shape\n\n# %%\ncpu_after['Group-SubGroup'] = cpu_after['GRP_NM'].map(str) + ' - ' + cpu_after['SUBGRP_NM'].map(str)\n\n# %%\ncpu_after.head()\n\n# %%\nafter_column_names = [col + '_After' for col in cpu_after.columns]\n\n# %%\nafter_column_names\n\n# %%\ncpu_after.columns = after_column_names\n\n# %%\ncpu_after.head()\n\n# %% [markdown]\n# #### Get \"Before\" budgeted CPU file and rename columns:\n\n# %%\nconn_str = (\n r'DRIVER={Microsoft Access Driver (*.mdb, *.accdb)};'\n r'DBQ=\\\\207.130.185.67\\aqgbudget2\\Cost\\Reserve Adjustments\\Reports\\databases\\RVMS.accdb;'\n )\ncnxn = pyodbc.connect(conn_str)\ncursor = cnxn.cursor()\n\nsql = \"\"\"\nSELECT\n *\n \nFROM tbl_Historical_Budgeted_CPU\n\nWHERE\n RVMS_Claim_Month = (SELECT max(tbl_Historical_Budgeted_CPU.[RVMS_Claim_Month]) FROM tbl_Historical_Budgeted_CPU)\n\"\"\"\n\ntry:\n cpu_before = pd.read_sql(sql, cnxn)\n #cpu_before = pd.read_sql(sql, cnxn)\n \n # Close connections\n cursor.close()\n cnxn.close()\nexcept:\n print(\"Error connecting to database\")\n cursor.close()\n cnxn.close()\n\n# %%\ncpu_before.shape\n\n# %%\ncpu_before.head()\n\n# %%\nbefore_column_names = [col + '_Before' for col in cpu_before.columns]\n\n# %%\ncpu_before.columns = before_column_names\n\n# %%\ncpu_before.head()\n\n# %%\ntry:\n assert cpu_before.shape[0] == cpu_after.shape[0]\nexcept:\n toaster = ToastNotifier()\n toaster.show_toast(\"### ERROR ###\",\n \"Number of rows don't match between CPU after and CPU before data sets\",\n icon_path=None,\n duration=5)\n print('ERROR!!! 
- Number of rows do not match between CPU after and CPU before data sets')\n\n# %% [markdown]\n# ### Merge the after CPU data set with the before CPU data set\n\n# %%\ncpu_before_after_merge = pd.merge(cpu_after, cpu_before, how='outer', \n left_on=['GraphCatID_After','GRP_NM_After','SUBGRP_NM_After'], \n right_on=['GraphCatID_Before','GRP_NM_Before','SUBGRP_NM_Before']\n )\n\n# %%\ncpu_before_after_merge.shape\n\n# %%\ncpu_before_after_merge.head()\n\n# %% [markdown]\n# ### Create columns that represent the before and after CPUs at GraphCat level, subgroup level, and total adjustment costs\n\n# %%\ncpu_before_after_merge['Budgeted_CPU_SubGroup_Level_After_Minus_Before'] = cpu_before_after_merge['Budgeted_CPU_SubGroup_Level_After'] \\\n - cpu_before_after_merge['Budgeted_CPU_SubGroup_Level_Before']\ncpu_before_after_merge['Budgeted_CPU_GC_Level_After_Minus_Before'] = cpu_before_after_merge['Budgeted_CPU_GC_Level_After'] \\\n - cpu_before_after_merge['Budgeted_CPU_GC_Level_Before']\ncpu_before_after_merge['CPU_DIFF_SubGroup_Level_x_SALES'] = cpu_before_after_merge['Budgeted_CPU_SubGroup_Level_After_Minus_Before'] \\\n * cpu_before_after_merge['Planned_Sales_RVMS_After']\ncpu_before_after_merge['CPU_DIFF_GC_LEVEL_x_SALES'] = cpu_before_after_merge['Budgeted_CPU_GC_Level_After_Minus_Before'] \\\n * cpu_before_after_merge['Planned_Sales_RVMS_After']\n\n# %%\ncpu_before_after_merge.head()\n\n# %% [markdown]\n# ### Define file name format:\n\n# %%\ndate_hour_stamp = time.strftime('%Y-%m-%d_%H_%M')\nfile_name = 'All_Plants_Before_After_Budgeted_CPUs_' + date_hour_stamp + '.xlsx'\n\n# %% [markdown]\n# ### Write/save file to designated network share drive location:\n\n# %%\ncpu_before_after_merge.to_excel(save_dir / file_name, index=False)\n\n# %% [markdown]\n# ### Now, we need to \"glue\" the location of the saved file location to this notebook so that another notebook can retrieve/reference from it:\n\n# %%\nstr(save_dir / file_name)\n\n# %%\nsb.glue(\"path_to_red_green_sheet_excel_file\", str(save_dir / file_name))\n\n# %% [markdown]\n# ### Send Windows Toast notification when script completes\n\n# %%\ntoaster = ToastNotifier()\ntoaster.show_toast(\"### Before vs After CPU Status ###\",\n \"Successfuly compared before CPUs with after CPU adjustments\",\n icon_path=\"images/honda_logo.ico\",\n duration=5)\n" }, { "alpha_fraction": 0.6061514019966125, "alphanum_fraction": 0.6218454241752625, "avg_line_length": 33.36314392089844, "blob_id": "0582aeda7a38d6045f73e1afd0ee456ff39052a8", "content_id": "16f5cf71c8903a41d9af6e851433140b0e11eb95", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 12680, "license_type": "no_license", "max_line_length": 187, "num_lines": 369, "path": "/ETL/papermill/rvms/3_Get_Top3_Bottom3_Group_SubGroup_Adjustments_Native_Currency.py", "repo_name": "amirunpri2018/jupyter_notebooks", "src_encoding": "UTF-8", "text": "# ---\n# jupyter:\n# jupytext:\n# formats: ipynb,py:percent\n# text_representation:\n# extension: .py\n# format_name: percent\n# format_version: '1.2'\n# jupytext_version: 1.1.6\n# kernelspec:\n# display_name: Python 3\n# language: python\n# name: python3\n# ---\n\n# %% [markdown]\n# # PURPOSE:\n\n# %% [markdown]\n# ### To obtain the top 3 and bottom 3 GraphCat adjustments in native currency\n\n# %% [markdown]\n# **Procedure:** User just has to copy their graphcat data into \"clipboard\" and then execute all cells in this notebook\n\n# %%\nfrom pathlib import Path\nimport os\nimport pandas as pd\nimport 
numpy as np\nimport scrapbook as sb\nfrom win10toast import ToastNotifier\npd.options.display.max_rows=1000\npd.options.display.max_columns=100\npd.options.display.float_format = '{:20,.2f}'.format\n\n# %% [markdown]\n# ### Parameters that will be used by the papermill library\n\n# %% {\"tags\": [\"parameters\"]}\nstart_year = 2010\nend_year = 2019\nCLA_MONTH = '201903'\n\n# %%\nnb = sb.read_notebook('D:\\\\jupyter\\\\rvms\\\\production\\\\output\\\\RedAndGreenSheet.ipynb')\n\n# For testing\n# nb = sb.read_notebook('D:\\\\jupyter\\\\rvms\\\\production\\\\2_Comparing_Adjusted_GraphCat_CPUs_against_Original_GraphCat_CPUs-Access.ipynb')\n\n# %%\nnb.scraps['path_to_red_green_sheet_excel_file'].data\n\n# %% [markdown]\n# ### Define where to save the Red and Green raw data file based on CLA claim month:\n\n# %%\nbase_dir = \"//207.130.185.67/aqgbudget2/Cost/Reserve Adjustments/Reports/Normal Reserve Balance Verification/RVMS_Before_After_Checks\"\np = Path(base_dir)\nsave_dir = p / CLA_MONTH\nif not os.path.exists(save_dir):\n os.makedirs(save_dir)\n\n# %% [markdown]\n# ### Now retrieve the Excel file containing the raw \"Red and Green Sheet\" data:\n\n# %%\ndf = pd.read_excel(nb.scraps['path_to_red_green_sheet_excel_file'].data)\n\n# %%\ndf.shape\n\n# %%\ndf.head()\n\n# %% [markdown]\n# ### But we need to limit our data to just the model years that are under RVMS adjustments:\n\n# %%\nrvms_years = list(range(start_year, end_year + 1))\n\n# %%\nrvms_years\n\n# %%\ndf = df.query(\"ModelYear_After in(@rvms_years)\")\n\n# %%\ndf.shape\n\n# %% [markdown]\n# ### Create unique list of GraphCat descriptions:\n\n# %%\ngc_list = df[['GraphCatID_After','GraphCatDesc_After', 'Planned_Sales_RVMS_After','Budgeted_CPU_GC_Level_After_Minus_Before',\n 'Budgeted_CPU_GC_Level_After', 'Budgeted_CPU_GC_Level_Before',\n 'Orig_Saturation_CPU_GC_Level_After', 'Cum_Actual_CPU_GC_Level_After']].drop_duplicates()\n\n# %%\ngc_list.shape\n\n# %%\ngc_list.head()\n\n\n# %% [markdown]\n# ### Create helper functions to Add Model Year, Factory, and Model Name columns:\n\n# %%\ndef getModelYear(row) -> str:\n # executing strip() also because someone can make a graphcat description with a trailing whitespace\n word_token = row['GraphCatDesc_After'].strip().split()\n \n model_year = word_token[3]\n \n if model_year.isdigit():\n return model_year\n else:\n return word_token[4]\n\ndef getFactoryCode(row) -> str:\n # executing strip() also because someone can make a graphcat description with a trailing whitespace\n word_token = row['GraphCatDesc_After'].strip().split()\n factory_code = word_token[1]\n \n return factory_code.upper()\n\ndef getModelName(row) -> str:\n # executing strip() also because someone can make a graphcat description with a trailing whitespace\n word_token = row['GraphCatDesc_After'].strip().split()\n model_name = word_token[2]\n \n return model_name.upper()\n\ndef getDestCode(row) -> str:\n # executing strip() also because someone can make a graphcat description with a trailing whitespace\n word_token = row['GraphCatDesc_After'].strip().split()\n destination_code = word_token[-1]\n \n return destination_code.upper()\n\n\n# %% [markdown]\n# ### Apply the above functions to create the model year, factory, and model name columns\n\n# %%\ngc_list['ModelYear'] = gc_list.apply(getModelYear, axis='columns')\ngc_list['Factory'] = gc_list.apply(getFactoryCode, axis='columns')\ngc_list['ModelName'] = gc_list.apply(getModelName, axis='columns')\ngc_list['DestCodeCustom'] = gc_list.apply(getDestCode, axis='columns')\n\n# %% 
[markdown]\n# #### Let's confirm the new columns were added:\n\n# %%\ngc_list.head()\n\n# %% [markdown]\n# ### DEPRECATED Create pivot table where ```graphcat description``` is rows and sum of ```CPU_DIFF_SubGroup_Level_x_SALES``` column:\n\n# %% [markdown]\n# Basically, this is our list of top total cost adjustments by GraphCat description.\n\n# %%\n#total_adj_gc_level = df.pivot_table(values=['CPU_DIFF_SubGroup_Level_x_SALES'], index=['GraphCatDesc_After'], \n #aggfunc='sum').sort_values(by=['CPU_DIFF_SubGroup_Level_x_SALES'], ascending=False)\n\n# %% [markdown]\n# ### Create list of total adjustment costs at the GraphCat level:\n\n# %% [markdown]\n# Basically, this is our list of top total cost adjustments by GraphCat description.\n\n# %%\ntotal_adj_gc_level = df[['GraphCatDesc_After', 'CPU_DIFF_GC_LEVEL_x_SALES']].drop_duplicates()\n\n# %%\ntotal_adj_gc_level\n\n# %% [markdown]\n# ### Create list of CPU differences at the GraphCat level:\n\n# %%\n### DEPRECATED ###\n# cpu_diff_gc_level = df[['GraphCatDesc_After', 'Budgeted_CPU_GC_Level_After_Minus_Before']].drop_duplicates()\n\n# %%\n# cpu_diff_gc_level\n\n# %% [markdown]\n# ### Create pivot table where GraphCat-SubGroup is the rows and sum the SubGroup level graphcat CPU adjustments:\n\n# %%\npivot = df.pivot_table(values=['Budgeted_CPU_SubGroup_Level_After_Minus_Before'], index=['GraphCatDesc_After','Group-SubGroup_After'], aggfunc='sum')\n\n# %%\npivot\n\n# %% [markdown]\n# ### But...how do we obtain the top 3 and bottom 3 adjustments at the subgroup level??!! Google search to the rescue!!!\n\n# %% [markdown]\n# ### Found this StackOverflow [example](https://stackoverflow.com/questions/45365923/how-to-use-nlargest-on-multilevel-pivot-table-in-pandas)\n\n# %%\ntop3 = pivot.groupby(level='GraphCatDesc_After')['Budgeted_CPU_SubGroup_Level_After_Minus_Before'].nlargest(3).reset_index(level=0, drop=True).reset_index()\nbottom3 = pivot.groupby(level='GraphCatDesc_After')['Budgeted_CPU_SubGroup_Level_After_Minus_Before'].nsmallest(3).reset_index(level=0, drop=True).reset_index()\n\n# %% [markdown]\n# ### Now merge or concatenate the 2 data sets together along the row axis direction:\n\n# %%\ntop3_bottom3 = pd.concat([top3, bottom3], axis='rows')\n\n# %% [markdown]\n# #### Sort by GraphCat and SubGroup CPU column in descending order:\n\n# %%\ntop3_bottom3.sort_values(by=['GraphCatDesc_After','Budgeted_CPU_SubGroup_Level_After_Minus_Before'], ascending=[False, False], inplace=True)\n\n# %%\ntop3_bottom3.head(12)\n\n# %% [markdown]\n# **From above, we can see that for each GraphCat, we have the top 3 subgroup CPU adjustment and bottom 3 subgroup adjustment!**\n\n# %% [markdown]\n# ### Merge with the previously created data sets to obtain additional columns:\n\n# %%\n### DEPRECATED ###\n# top3_bottom3 = pd.merge(top3_bottom3, total_adj_gc_level, how='left', left_on=['GraphCatDesc_After'], right_index=True)\n# top3_bottom3 = pd.merge(top3_bottom3, cpu_diff_gc_level, how='left', left_on=['GraphCatDesc_After'], right_on=['GraphCatDesc_After'])\n# top3_bottom3 = pd.merge(top3_bottom3, gc_list, how='left', left_on=['GraphCatDesc_After'], right_on=['GraphCatDesc_After'])\n\n# %%\ntop3_bottom3 = pd.merge(top3_bottom3, total_adj_gc_level, how='left', left_on=['GraphCatDesc_After'], right_on=['GraphCatDesc_After'])\ntop3_bottom3 = pd.merge(top3_bottom3, gc_list, how='left', left_on=['GraphCatDesc_After'], right_on=['GraphCatDesc_After'])\n\n# %%\ntop3_bottom3.head(12)\n\n# %% [markdown]\n# ### Confirm our data set is sorted by GraphCat total adjustment 
amount, then GraphCat, and then SubGroup CPU amount:\n\n# %%\ntop3_bottom3.sort_values(by=['CPU_DIFF_GC_LEVEL_x_SALES','GraphCatDesc_After','Budgeted_CPU_SubGroup_Level_After_Minus_Before'], \n                         ascending=[False, False, False], inplace=True)\n\n# %%\ntop3_bottom3.head(12)\n\n# %% [markdown]\n# #### We need a way to \"blank out\" / \"zero out\" repeating values in the ```CPU_DIFF_SubGroup_Level_x_SALES``` column and ```Budgeted_CPU_GC_Level_After_Minus_Before``` column. But how?!\n\n# %% [markdown]\n# ### SOLUTION: Create \"ROW_NUM\" column and then identify rows using the ROW_NUM value.\n\n# %%\ntop3_bottom3['ROW_NUM'] = top3_bottom3.groupby(['GraphCatDesc_After']).cumcount() + 1\n\n# %%\ntop3_bottom3.head(6)\n\n# %% [markdown]\n# ### Perform IF-ELSE logic to \"blank out\" / \"zero out\" repeating values in the 2 columns:\n\n# %%\n# If ROW_NUM == 1, then keep the original value; otherwise, make it zero/0\ntop3_bottom3['CPU_DIFF_GC_LEVEL_x_SALES'] = np.where(top3_bottom3['ROW_NUM'] == 1, \n                                                     top3_bottom3['CPU_DIFF_GC_LEVEL_x_SALES'], 0)\ntop3_bottom3['Budgeted_CPU_GC_Level_After_Minus_Before'] = np.where(top3_bottom3['ROW_NUM'] == 1, \n                                                                    top3_bottom3['Budgeted_CPU_GC_Level_After_Minus_Before'], 0)\ntop3_bottom3['Budgeted_CPU_GC_Level_After'] = np.where(top3_bottom3['ROW_NUM'] == 1, \n                                                       top3_bottom3['Budgeted_CPU_GC_Level_After'], 0)\ntop3_bottom3['Budgeted_CPU_GC_Level_Before'] = np.where(top3_bottom3['ROW_NUM'] == 1, \n                                                        top3_bottom3['Budgeted_CPU_GC_Level_Before'], 0)\ntop3_bottom3['Orig_Saturation_CPU_GC_Level_After'] = np.where(top3_bottom3['ROW_NUM'] == 1, \n                                                              top3_bottom3['Orig_Saturation_CPU_GC_Level_After'], 0)\ntop3_bottom3['Cum_Actual_CPU_GC_Level_After'] = np.where(top3_bottom3['ROW_NUM'] == 1, \n                                                         top3_bottom3['Cum_Actual_CPU_GC_Level_After'], 0)\n\n# %% [markdown]\n# ### Let's see if that worked:\n\n# %%\ntop3_bottom3.head(12)\n\n# %% [markdown]\n# #### Nice, it worked!\n\n# %% [markdown]\n# ### Rename columns by creating a Python dictionary data structure:\n\n# %%\nrename_columns_mapper = {'GraphCatDesc_After': 'GraphCatDesc', \n                         'Group-SubGroup_After': 'Group-SubGroup',\n                         'Budgeted_CPU_SubGroup_Level_After_Minus_Before': 'Total_CPU_Adj_at_SubGroup_Level',\n                         'CPU_DIFF_GC_LEVEL_x_SALES': 'Total_Adjustment_Cost_Native',\n                         'Budgeted_CPU_GC_Level_After_Minus_Before': 'Total_CPU_Adj_at_GraphCat_Level',\n                         'GraphCatID_After': 'GraphCatID',\n                         'Planned_Sales_RVMS_After': 'Planned_Sales',\n                         'Orig_Saturation_CPU_GC_Level_After': 'Orig_Saturation_CPU_GC_Level',\n                         'Cum_Actual_CPU_GC_Level_After': 'Cum_Actual_CPU_GC_Level'\n                        }\n\n# %% [markdown]\n# #### Then apply pandas' ```rename()``` function:\n\n# %%\ntop3_bottom3.rename(rename_columns_mapper, axis='columns', inplace=True)\n\n# %%\ntop3_bottom3.head(6)\n\n# %% [markdown]\n# ### I want to now re-order columns\n\n# %% [markdown]\n# #### Let's get a printout of the column names:\n\n# %%\ntop3_bottom3.columns\n\n# %% [markdown]\n# #### Now, re-order the column names:\n\n# %%\ntop3_bottom3 = top3_bottom3[['GraphCatID',\n                             'GraphCatDesc',\n                             'ModelYear',\n                             'Factory',\n                             'ModelName',\n                             'DestCodeCustom',\n                             'Total_Adjustment_Cost_Native',\n                             'Total_CPU_Adj_at_GraphCat_Level',\n                             'Budgeted_CPU_GC_Level_After',\n                             'Budgeted_CPU_GC_Level_Before',\n                             'Group-SubGroup',\n                             'Total_CPU_Adj_at_SubGroup_Level',\n                             'ROW_NUM',\n                             'Planned_Sales',\n                             'Orig_Saturation_CPU_GC_Level',\n                             'Cum_Actual_CPU_GC_Level'\n                            ]]\n\n# %%\ntop3_bottom3.head(6)\n\n# %% [markdown]\n# ### We're done! 
Now we can export to Excel, to clipboard, etc\n\n# %%\n# top3_bottom3.to_excel(r'D:\\temp\\top3_bottom3.xlsx', index=False)\n\n# %%\nfile_name = 'top3_bottom3_native.xlsx'\n\n# %%\ntop3_bottom3.to_excel(save_dir / file_name, index=False)\n\n# %% [markdown]\n# ### If the script made it this far, send out a Windows 10 toast notification:\n\n# %%\ntoaster = ToastNotifier()\ntoaster.show_toast(\"### Job Status ###\",\n                   \"Successfully Summarized Red and Green Sheet Data to Native Currency\",\n                   icon_path=\"images/honda_logo.ico\",\n                   duration=5)\n" }, { "alpha_fraction": 0.5884785652160645, "alphanum_fraction": 0.5966917276382446, "avg_line_length": 28.360876083374023, "blob_id": "d9f69b3f3a4ad17db624bab3660d37c3b550934c", "content_id": "1281773e40d993ccefc8d62198a999b1cf0198be", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 17411, "license_type": "no_license", "max_line_length": 275, "num_lines": 593, "path": "/ETL/papermill/rvms/1_Obtaining_Current_RVMS_GraphCat_Group_SubGroup_Budgeted_CPUs-Access.py", "repo_name": "amirunpri2018/jupyter_notebooks", "src_encoding": "UTF-8", "text": "# ---\n# jupyter:\n#   jupytext:\n#     formats: ipynb,py:percent\n#     text_representation:\n#       extension: .py\n#       format_name: percent\n#       format_version: '1.2'\n#     jupytext_version: 1.1.6\n#   kernelspec:\n#     display_name: Python 3\n#     language: python\n#     name: python3\n# ---\n\n# %% [markdown]\n# # Process for Obtaining Current RVMS GraphCat Group-SubGroup Budgeted CPUs\n\n# %%\nimport great_expectations as ge\nimport time\nfrom pathlib import Path\nfrom win10toast import ToastNotifier\nimport os\nimport pyodbc  # used for connecting to ODBC data sources\nimport pandas as pd  # data analysis library\npd.options.display.max_rows=1000\npd.options.display.max_columns=100\n\n# %% [markdown]\n# ### RVMS Database Credentials\n\n# %%\nuserid_rvms = os.environ['windowsuser']\npw_rvms = os.environ['windowspwd']\ndsn_rvms = 'HDC-SQLP-RVMS'\n\n# %% [markdown]\n# ### Enter CLA claim month (\"YYYYMM\"):\n\n# %%\n# If using papermill, we have to comment this out; papermill doesn't support getting input from the user\n# CLA_MONTH = input(\"Enter CLA Claim Month ('YYYYMM'): \")\n\n# %% {\"tags\": [\"parameters\"]}\nCLA_MONTH = '201903'\n\n# %% [markdown]\n# ### Define where the current budgeted CPUs will be saved:\n\n# %%\nbase_dir = \"//207.130.185.67/aqgbudget2/Cost/Reserve Adjustments/Reports/Normal Reserve Balance Verification/RVMS_Current_Budgeted_CPUs\"\np = Path(base_dir)\nsave_dir = p / CLA_MONTH\nif not os.path.exists(save_dir):\n    os.makedirs(save_dir)\n\n# %% [markdown]\n# ### Run modified \"Larry's Query\"\n\n# %% [markdown]\n# This query obtains the current budgeted CPUs and \"Planned\" Sales that are in RVMS at the group-subgroup level.\n\n# %% [markdown]\n# **VERY IMPORTANT:** The CPU at the GraphCat level is calculated from summing the CPUs at the subgroup level using all decimal places. 
The CPU at the GraphCat level is then rounded to 2 decimal places.\n\n# %%\n# %%time\n\ncnxn_string = 'DSN=' + dsn_rvms + ';UID=' + userid_rvms + ';PWD=' + pw_rvms\n\ncnxn = pyodbc.connect(cnxn_string)\ncursor = cnxn.cursor()\n\n# Copy/Paste your SQL text here\nsql = \"\"\"\n/** Query to obtain current budgeted CPUs at the SubGroup level and also at the GraphCat level\n NOTE: The CPU at the GraphCat level was derived from summing the CPUs at the SubGroup level using all decimal places.\n Then the CPU at the GraphCat level was rounded to 2 decimal places\n**/\nWITH CPU_SUBGRP_LEVEL AS (\n\nSELECT\n A.GraphCatID,\n RTRIM(D.GraphCatDesc) AS GraphCatDesc,\n B.GRP_NM,\n C.SUBGRP_NM,\n SUM(COALESCE(E.Budgeted_CPU,0.000)) AS Budgeted_CPU_SubGroup_Level\n\nFROM dbo.tbl_GC_GRPS AS A\n INNER JOIN dbo.tbl_MQ_GRPKEYS AS F ON A.GRPKEY_ID = F.GRPKEY_ID\n INNER JOIN dbo.tbl_MQ_GRPS AS B ON F.GRP_ID = B.GRP_ID\n INNER JOIN tbl_MQ_SUBGRPS AS C ON F.SUBGRP_ID = C.SUBGRP_ID\n INNER JOIN tbl_GraphCatMaster AS D ON A.GraphCatID = D.GraphCatID\n LEFT JOIN tbl_GraphCat_MIS AS E ON A.GC_GRP_ID = E.GC_GRP_ID\n\nWHERE\n D.GraphCatType = 'R'\n and D.GraphCatDesc like 'R %'\n\nGROUP BY\n A.GraphCatID,\n D.GraphCatDesc,\n B.GRP_NM,\n C.SUBGRP_NM\n),\n\nGC_SALES as (\n\nSELECT\n GraphCatID,\n sum(PlannedSales) as Planned_Sales_RVMS\n\nFROM\n tbl_GraphCatMonthlySales\n\nGROUP BY\n GraphCatID\n),\n\nCPU_GC_LEVEL as (\nSELECT\n GraphCatID,\n SUM(Budgeted_CPU_SubGroup_Level) AS Budgeted_CPU_GC_Level,\n SUM(Orig_Saturation_CPU) as Orig_Saturation_CPU_GC_Level\n\nFROM (\n\nSELECT\n A.GraphCatID,\n RTRIM(D.GraphCatDesc) AS GraphCatDesc,\n B.GRP_NM,\n C.SUBGRP_NM,\n SUM(COALESCE(E.Budgeted_CPU,0.000)) AS Budgeted_CPU_SubGroup_Level,\n SUM(COALESCE(E.OriginalReserve_CPU,0.000)) as Orig_Saturation_CPU\n\nFROM dbo.tbl_GC_GRPS AS A\n INNER JOIN dbo.tbl_MQ_GRPKEYS AS F ON A.GRPKEY_ID = F.GRPKEY_ID\n INNER JOIN dbo.tbl_MQ_GRPS AS B ON F.GRP_ID = B.GRP_ID\n INNER JOIN tbl_MQ_SUBGRPS AS C ON F.SUBGRP_ID = C.SUBGRP_ID\n INNER JOIN tbl_GraphCatMaster AS D ON A.GraphCatID = D.GraphCatID\n LEFT JOIN tbl_GraphCat_MIS AS E ON A.GC_GRP_ID = E.GC_GRP_ID\n\nWHERE\n D.GraphCatType = 'R'\n and D.GraphCatDesc like 'R %'\n\nGROUP BY\n A.GraphCatID,\n D.GraphCatDesc,\n B.GRP_NM,\n C.SUBGRP_NM\n\n) AS CPU_GC_LEVEL\n\nGROUP BY\n GraphCatID\n\n),\n\n\nGC_BudgetMatrix AS (\n\n SELECT\n GC_Budget.SaleMonth,\n GC_Master.GraphCatID,\n GC_Master.GraphCatDesc as GraphCat,\n CummActual_CPU as CumActual_CPU,\n CummBudgeted_Cpu as CumBudgeted_CPU\n\n FROM dbo.tbl_GraphCat_BudgetedMatrix as GC_Budget\n\n LEFT JOIN dbo.tbl_GC_GRPS AS GC_GRPS ON\n GC_Budget.GC_GRP_ID = GC_GRPS.GC_GRP_ID\n\n LEFT JOIN dbo.tbl_MQ_GRPKEYS AS MQ_GRPKEYS ON\n GC_GRPS.GRPKEY_ID = MQ_GRPKEYS.GRPKEY_ID\n\n LEFT JOIN dbo.tbl_MQ_GRPS AS MQ_GRPS ON\n MQ_GRPKEYS.GRP_ID = MQ_GRPS.GRP_ID\n\n LEFT JOIN dbo.tbl_MQ_SUBGRPS AS MQ_SUBGRPS ON\n MQ_GRPKEYS.SUBGRP_ID = MQ_SUBGRPS.SUBGRP_ID\n\n LEFT JOIN dbo.tbl_GraphCatMaster as GC_Master ON\n GC_GRPS.GraphCatID = GC_Master.GraphCatID\n\n\n WHERE\n GC_Master.GraphCatType = 'R'\n and GC_Master.GraphCatDesc like 'R %'\n\n),\n\n\nActual_CPU_GC_Level as (\n\nSELECT\n GraphCatID,\n GraphCat,\n max(CumActual_CPU) as CumActual_CPU\n\nFROM (\n\n\nSELECT\n SaleMonth,\n GraphCatID,\n GraphCat,\n sum(CumActual_CPU) as CumActual_CPU\n\nFROM (\n\n SELECT\n SaleMonth,\n GraphCatID,\n GraphCat,\n CASE\n WHEN CumActual_CPU = 0 THEN NULL\n ELSE\n CumActual_CPU\n END AS CumActual_CPU,\n CumBudgeted_CPU\n\n FROM\n GC_BudgetMatrix\n\n) AS TEMP1\n\nGROUP BY\n SaleMonth,\n 
GraphCatID,\n    GraphCat\n\n) AS TEMP2\n\nGROUP BY\n    GraphCatID,\n    GraphCat\n)\n\nSELECT\n    CPU_SUBGRP_LEVEL.*,\n    GC_SALES.Planned_Sales_RVMS,\n    ROUND(CPU_GC_LEVEL.Budgeted_CPU_GC_Level, 2) AS Budgeted_CPU_GC_Level,\n    ROUND(CPU_GC_LEVEL.Orig_Saturation_CPU_GC_Level, 2) AS Orig_Saturation_CPU_GC_Level,\n    ROUND(Actual_CPU_GC_Level.CumActual_CPU, 2) as Cum_Actual_CPU_GC_Level\n\nFROM\n    CPU_SUBGRP_LEVEL AS CPU_SUBGRP_LEVEL\n\n    LEFT JOIN GC_SALES as GC_SALES ON\n        CPU_SUBGRP_LEVEL.GraphCatID = GC_SALES.GraphCatID\n\n    LEFT JOIN CPU_GC_LEVEL as CPU_GC_LEVEL ON\n        CPU_SUBGRP_LEVEL.GraphCatID = CPU_GC_LEVEL.GraphCatID\n\n    LEFT JOIN Actual_CPU_GC_Level as Actual_CPU_GC_Level ON\n        CPU_SUBGRP_LEVEL.GraphCatID = Actual_CPU_GC_Level.GraphCatID\n\nORDER BY\n    GraphCatID\n    \"\"\"\n\nRVMS_Current_Budgeted_CPU = pd.read_sql(sql, cnxn, index_col=None)\n\n# For large data (data > RAM), use chunking:\n\"\"\"\nfor c in pd.read_sql(sql, cnxn, chunksize=10000):\n    c.to_csv(r'D:\\temp\\resultset.csv', index=False, mode='a')\"\"\"\n\n# Close connections\ncursor.close()\ncnxn.close()\n\n# %%\nRVMS_Current_Budgeted_CPU.shape\n\n# %%\nRVMS_Current_Budgeted_CPU.head()\n\n# %% [markdown]\n# ### Create ```RVMS_Claim_Month``` column to contain the CLA claim month:\n\n# %%\nRVMS_Current_Budgeted_CPU['RVMS_Claim_Month'] = CLA_MONTH\n\n# %%\nRVMS_Current_Budgeted_CPU.head()\n\n# %% [markdown]\n# ### Create data set of CPUs at GraphCat level:\n\n# %%\ncpu_at_gc_level = RVMS_Current_Budgeted_CPU[['GraphCatID', 'Budgeted_CPU_GC_Level']].drop_duplicates()\n\n# %%\ncpu_at_gc_level.head()\n\n# %% [markdown]\n# ### Create data set of original saturation CPUs at GraphCat level:\n\n# %%\norig_sat_cpu_at_gc_level = RVMS_Current_Budgeted_CPU[['GraphCatID', 'Orig_Saturation_CPU_GC_Level']].drop_duplicates()\n\n# %%\norig_sat_cpu_at_gc_level.head()\n\n# %% [markdown]\n# ### Create data set of cumulative actual CPUs at GraphCat level:\n\n# %%\nactual_cpu_at_gc_level = RVMS_Current_Budgeted_CPU[['GraphCatID', 'Cum_Actual_CPU_GC_Level']].drop_duplicates()\n\n# %%\nactual_cpu_at_gc_level.head()\n\n# %% [markdown]\n# ### Ensure that the sum of the CPUs at the subgroup level differs from the sum of the CPUs at the GraphCat level by less than 1 currency unit\n\n# %%\nassert abs(RVMS_Current_Budgeted_CPU['Budgeted_CPU_SubGroup_Level'].sum() - cpu_at_gc_level['Budgeted_CPU_GC_Level'].sum()) < 1.0\n\n# %%\nRVMS_Current_Budgeted_CPU['Budgeted_CPU_SubGroup_Level'].sum()\n\n# %%\ncpu_at_gc_level['Budgeted_CPU_GC_Level'].sum()\n\n\n# %% [markdown]\n# ### Create helper functions to Add Model Year, Factory, Model Name, and custom destination code to the RVMS Original Budgeted CPU data set:\n\n# %%\ndef getModelYear(row) -> str:\n    word_token = row['GraphCatDesc'].strip().split()\n    \n    model_year = word_token[3]\n    \n    if model_year.isdigit():\n        return model_year\n    else:\n        return word_token[4]\n    \n\ndef getFactoryCode(row) -> str:\n    word_token = row['GraphCatDesc'].strip().split()\n    factory_code = word_token[1]\n    \n    return factory_code.upper()\n\ndef getModelName(row) -> str:\n    word_token = row['GraphCatDesc'].strip().split()\n    model_name = word_token[2]\n    \n    return model_name.upper()\n    \ndef getDestCode(row) -> str:\n    word_token = row['GraphCatDesc'].strip().split()\n    destination_code = word_token[-1]\n    \n    return destination_code.upper()\n\n\n# %%\nRVMS_Current_Budgeted_CPU['ModelYear'] = RVMS_Current_Budgeted_CPU.apply(getModelYear, axis='columns')\nRVMS_Current_Budgeted_CPU['Factory'] = RVMS_Current_Budgeted_CPU.apply(getFactoryCode, 
axis='columns')\nRVMS_Current_Budgeted_CPU['ModelName'] = RVMS_Current_Budgeted_CPU.apply(getModelName, axis='columns')\nRVMS_Current_Budgeted_CPU['DestCode'] = RVMS_Current_Budgeted_CPU.apply(getDestCode, axis='columns')\n\n# %%\nRVMS_Current_Budgeted_CPU.head()\n\n# %% [markdown]\n# ### Perform data validation checks using Great Expectations library\n\n# %% [markdown]\n# #### Create Great Expectations dataframe from pandas dataframe:\n\n# %%\nge_df = ge.from_pandas(RVMS_Current_Budgeted_CPU)\n\n# %% [markdown]\n# #### Check Model Years are between 1994 and 2099\n\n# %%\nif ge_df.expect_column_values_to_be_between(column=\"ModelYear\", min_value='1994', max_value='2099')['success']:\n    print('Passed Model Year Check')\nelse:\n    print('FAILED Model Year Check')\n    toaster = ToastNotifier()\n    toaster.show_toast(\"### Check Status ###\",\n                   \"FAILED Model Year Check\",\n                   icon_path=\"images/honda_logo.ico\",\n                   duration=10)\n    raise Exception(\"ERROR: Failed Model Year Check\")\n\n# %% [markdown]\n# #### Check Factory values are limited to one of:\n\n# %% [markdown]\n# 'ELP','HCL','HCM','HDM','HMA','HMI','MAP','PMC'\n\n# %%\nif ge_df.expect_column_values_to_be_in_set(column=\"Factory\", value_set=['ELP','HCL','HCM','HDM','HMA','HMI','MAP','PMC'])['success']:\n    print('Passed Factory Check')\nelse:\n    print('FAILED Factory Check')\n    toaster = ToastNotifier()\n    toaster.show_toast(\"### Check Status ###\",\n                   \"FAILED Factory Check\",\n                   icon_path=\"images/honda_logo.ico\",\n                   duration=10)\n    raise Exception(\"ERROR: Failed Factory Check\")\n\n# %% [markdown]\n# #### Check Model Names are limited to one of:\n\n# %% [markdown]\n# 'ACCORD','CIVIC','CROSSTOUR','CRV','CSX','EL','ELEMENT','FIT','HRV','ILX','INSIGHT','MDX','NSX','ODYSSEY','PASSPORT','PILOT','RDX','RIDGELINE','TL','TLX','ZDX'\n\n# %%\nif ge_df.expect_column_values_to_be_in_set(column=\"ModelName\", value_set=['ACCORD','CIVIC','CROSSTOUR','CRV','CSX','EL',\n                                                                          'ELEMENT','FIT','HRV','ILX','INSIGHT','MDX','NSX',\n                                                                          'ODYSSEY','PASSPORT','PILOT','RDX','RIDGELINE','TL','TLX','ZDX'\n                                                                         ])['success']:\n    print('Passed Model Name Check')\nelse:\n    print('FAILED Model Name Check')\n    toaster = ToastNotifier()\n    toaster.show_toast(\"### Check Status ###\",\n                   \"FAILED Model Name Check\",\n                   icon_path=\"images/honda_logo.ico\",\n                   duration=10)\n    raise Exception(\"ERROR: Failed Model Name Check\")\n\n# %% [markdown]\n# I think the Great_Expectations library has a [bug](https://github.com/great-expectations/great_expectations/issues/412). If my column contains missing model names or None objects, the above test still passes! 
So I have to test for Null or missing values with this test below:\n\n# %%\nif ge_df.expect_column_values_to_not_be_null(column=\"ModelName\")['success']:\n    print('No model names are null')\nelse:\n    print('Null model names found')\n    toaster = ToastNotifier()\n    toaster.show_toast(\"### Check Status ###\",\n                   \"FAILED Model Name Null Check\",\n                   icon_path=\"images/honda_logo.ico\",\n                   duration=10)\n    raise Exception(\"ERROR: Failed Model Name Null Check\")\n\n# %%\nRVMS_Current_Budgeted_CPU.to_excel(save_dir / 'All_Plants_Budgeted_CPU_By_Group_SubGroup.xlsx', index=False)\n\n# %% [markdown]\n# ### Send notification that MS Access process will begin\n\n# %%\ntoaster = ToastNotifier()\ntoaster.show_toast(\"### MS Access Process ###\",\n                   \"Storing current budgeted CPUs - Please wait...\",\n                   icon_path=\"images/honda_logo.ico\",\n                   duration=5)\n\n# %% [markdown]\n# ### Also save the current budgeted CPUs into an MS Access database, but first we must empty the table containing data from a previous run:\n\n# %%\nconn_str = (\n    r'DRIVER={Microsoft Access Driver (*.mdb, *.accdb)};'\n    r'DBQ=\\\\207.130.185.67\\aqgbudget2\\Cost\\Reserve Adjustments\\Reports\\databases\\RVMS.accdb;'\n    )\n\ncnxn = pyodbc.connect(conn_str)\ncursor = cnxn.cursor()\n\nsql = \"\"\"\nDELETE\n\nFROM tbl_Current_Budgeted_CPU\n\"\"\"\n\ntry:\n    cursor.execute(sql)\n    cnxn.commit()\n    \n    # Close connections\n    cursor.close()\n    cnxn.close()\nexcept Exception as e:\n    print(\"Error connecting to the database: \", str(e))\n    cursor.close()\n    cnxn.close()\n\n# %% [markdown]\n# ### Now insert the current CPUs into the MS Access database\n\n# %%\n# %%time\nconn_str = (\n    r'DRIVER={Microsoft Access Driver (*.mdb, *.accdb)};'\n    r'DBQ=\\\\207.130.185.67\\aqgbudget2\\Cost\\Reserve Adjustments\\Reports\\databases\\RVMS.accdb;'\n    )\ncnxn = pyodbc.connect(conn_str, autocommit=True)\n\ntry:\n    for index, row in RVMS_Current_Budgeted_CPU.iterrows():\n        with cnxn.cursor() as cursor:\n            #cursor.setinputsizes([(pyodbc.SQL_INTEGER,)])\n            cursor.execute(\"INSERT INTO tbl_Current_Budgeted_CPU(GraphCatID, \\\n                                                 GraphCatDesc, \\\n                                                 GRP_NM, \\\n                                                 SUBGRP_NM, \\\n                                                 Budgeted_CPU_SubGroup_Level, \\\n                                                 Planned_Sales_RVMS, \\\n                                                 Budgeted_CPU_GC_Level, \\\n                                                 Orig_Saturation_CPU_GC_Level, \\\n                                                 Cum_Actual_CPU_GC_Level, \\\n                                                 RVMS_Claim_Month, \\\n                                                 ModelYear, \\\n                                                 Factory, \\\n                                                 ModelName, \\\n                                                 DestCode \\\n                                                 ) VALUES(?,?,?,?,?,?,?,?,?,?,?,?,?,?)\", \n                           row[0],\n                           row[1],\n                           row[2],\n                           row[3],\n                           row[4],\n                           row[5],\n                           row[6],\n                           row[7],\n                           row[8],\n                           row[9],\n                           row[10],\n                           row[11],\n                           row[12],\n                           row[13]\n                          )\n            cursor.commit()\n    \n    cnxn.close()\n\nexcept Exception as e:\n    print(\"Error connecting to the database: \", str(e))\n    cnxn.close()\n\n# %% [markdown]\n# ### Confirm that the number of rows inserted into ```tbl_Current_Budgeted_CPU``` matches the ```RVMS_Current_Budgeted_CPU``` dataframe. There could, in theory, be a network drop or latency issue where not all the rows were inserted. 
This has happened before!\n\n# %%\nconn_str = (\n    r'DRIVER={Microsoft Access Driver (*.mdb, *.accdb)};'\n    r'DBQ=\\\\207.130.185.67\\aqgbudget2\\Cost\\Reserve Adjustments\\Reports\\databases\\RVMS.accdb;'\n    )\ncnxn = pyodbc.connect(conn_str)\ncursor = cnxn.cursor()\n\nsql = \"\"\"\nSELECT\n    count(*) as Qty\n    \nFROM tbl_Current_Budgeted_CPU\n\"\"\"\n\ntry:\n    cpu_current = pd.read_sql(sql, cnxn)\n    #cpu_before = pd.read_sql(sql, cnxn)\n    \n    # Close connections\n    cursor.close()\n    cnxn.close()\nexcept Exception as e:\n    print(\"Error connecting to the database: \", str(e))\n    cursor.close()\n    cnxn.close()\n\n# %%\ncpu_current.values[0][0]\n\n# %%\nassert cpu_current.values[0][0] == RVMS_Current_Budgeted_CPU.shape[0]\n\n# %% [markdown]\n# #### If the script made it this far, then it must have completed without errors. Send out a Windows toast notification that the script has successfully completed:\n\n# %%\ntoaster = ToastNotifier()\ntoaster.show_toast(\"### Export COMPLETED ###\",\n                   \"Successfully Obtained and Validated Current RVMS Group-SubGroup Budgeted CPUs\",\n                   icon_path=\"images/honda_logo.ico\",\n                   duration=5)\n" } ]
3
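A note on the ranking idiom in the notebooks of this record: the top-3/bottom-3 logic rests on pivot_table followed by groupby(level=...).nlargest() / .nsmallest() and a concat. Below is a minimal, self-contained sketch of that same pattern on toy data; the frame and column names ('cat', 'sub', 'cpu') are illustrative stand-ins, not names from the repo.

import pandas as pd

toy = pd.DataFrame({
    'cat': ['A', 'A', 'A', 'A', 'B', 'B', 'B', 'B'],
    'sub': ['p', 'q', 'r', 's', 'p', 'q', 'r', 's'],
    'cpu': [5.0, -2.0, 3.5, 0.1, -4.0, 7.2, 1.1, -0.5],
})

# Sum the adjustments per (category, subgroup); the result has a MultiIndex
pivot = toy.pivot_table(values='cpu', index=['cat', 'sub'], aggfunc='sum')

# Rank within each category. reset_index(level=0, drop=True) drops the
# duplicated group level that groupby prepends to the result's index.
top2 = pivot.groupby(level='cat')['cpu'].nlargest(2).reset_index(level=0, drop=True).reset_index()
bottom2 = pivot.groupby(level='cat')['cpu'].nsmallest(2).reset_index(level=0, drop=True).reset_index()

# Stack the two slices; drop_duplicates() guards against a subgroup landing
# in both slices when a category has fewer distinct subgroups than 2 * N.
result = pd.concat([top2, bottom2], axis='rows').drop_duplicates()
print(result.sort_values(['cat', 'cpu'], ascending=[True, False]))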
liskin/HaskellLove21
https://github.com/liskin/HaskellLove21
eb2129acd884ce0e5de86314c602d2a3bcca1fe6
b747c4dbcdfd352ba3d85afa7823e2d84bc75dbc
5e276a2f68abeb4ba5b70dc5d32bee2515a2fc23
refs/heads/main
2023-07-27T21:16:09.814615
2021-09-10T14:44:11
2021-09-10T14:44:11
405,113,526
0
0
null
2021-09-10T14:44:13
2021-09-10T14:37:11
2021-09-10T14:43:40
null
[ { "alpha_fraction": 0.675000011920929, "alphanum_fraction": 0.7035714387893677, "avg_line_length": 27, "blob_id": "c2f063eee7cd14196406351777096336a7b23dce", "content_id": "ba6d0539d924829c78d43827ccaea9522f411ed6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 280, "license_type": "no_license", "max_line_length": 63, "num_lines": 10, "path": "/talk.md", "repo_name": "liskin/HaskellLove21", "src_encoding": "UTF-8", "text": "Competitive Programming in Haskell\n==================================\n\nBrent Yorgey\nHaskell Love, September 10, 2021\n\nhttps://open.kattis.com/\nhttps://github.com/byorgey/HaskellLove21\nhttps://github.com/byorgey/comprog-hs\nhttps://byorgey.wordpress.com/category/competitive-programming/\n" }, { "alpha_fraction": 0.7039215564727783, "alphanum_fraction": 0.7137255072593689, "avg_line_length": 41.5, "blob_id": "6babea5e5e2d67fa3b8cbc99c19ac4449eb8277c", "content_id": "d063dbb1c00fa60421f87e894e2e95855eafedcb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 510, "license_type": "no_license", "max_line_length": 70, "num_lines": 12, "path": "/README.md", "repo_name": "liskin/HaskellLove21", "src_encoding": "UTF-8", "text": "Competitive Programming in Haskell\n==================================\n\nThis is the repository for my talk at Haskell Love 2021. Feel free to\nclone it and play around.\n\nTo test a solution (on the provided test cases only), just `cd` into\nthe directory containing the Haskell code and run `python3\n../test.py`. Note that this does *not* test your solution against the\nsecret test data on the Kattis servers. For that, you will have to\nmake a (free) account on https://open.kattis.com/ and submit it\nyourself.\n" }, { "alpha_fraction": 0.5610837340354919, "alphanum_fraction": 0.5773398876190186, "avg_line_length": 26.445945739746094, "blob_id": "f57814ee94513a89b581a03efb1651d36f996ed9", "content_id": "d84694fba78123837ba16914fd9fb46f7046b2fb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2030, "license_type": "no_license", "max_line_length": 201, "num_lines": 74, "path": "/test.py", "repo_name": "liskin/HaskellLove21", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n\nimport time\nimport glob\nfrom subprocess import call, run\nimport subprocess\nimport sys\n\nclass bcolors:\n GREEN = '\\033[92m'\n RED = '\\033[91m'\n BOLD = '\\033[1m'\n OK = GREEN + BOLD\n FAIL = RED + BOLD\n ENDC = '\\033[0m'\n\nsolution_file = ''\nif len(sys.argv) > 1:\n solution_file = sys.argv[1]\nelse:\n for code_file in glob.glob('*.hs'):\n solution_file = code_file\n\ncabal_file = ''\nfor f in glob.glob('*.cabal'):\n cabal_file = f\n\nsol_file_components = solution_file.split('.')\nsol_name = sol_file_components[0]\n\nprint(f'Using {solution_file}')\n\nprint(f'Compiling {solution_file}...')\nif cabal_file != '':\n result = run(['cabal', 'build'])\nelse:\n result = run(['ghc', '--make', '-O2', '-package', 'mtl', '-package', 'split', '-package', 'parsec', '-package', 'vector', '-package', 'unordered-containers', '-package', 'hashable', solution_file])\n\nif result.returncode != 0:\n sys.exit(1);\n\nok = True\n\nfor test_input in sorted(glob.glob('*.in')):\n print(test_input + \": \", end='')\n test_name = '.'.join(test_input.split('.')[0:-1])\n test_output = test_name + \".out\"\n test_answer = test_name + \".ans\"\n print(\"running... 
\", end='')\n start = time.time()\n if cabal_file != '':\n run(f\"cabal -v0 run {sol_name} < {test_input} > {test_output}\", shell=True)\n else:\n run(f\"./{sol_name} < {test_input} > {test_output}\", shell=True)\n end = time.time()\n print(\"checking output... \", end='')\n result = run([\"diff\", '-b', test_answer, test_output], stdout=subprocess.PIPE)\n if result.returncode == 0:\n print(bcolors.OK + \"OK\" + bcolors.ENDC, end=' ')\n print(f'({(end-start):.2}s)')\n else:\n print(bcolors.FAIL + \"Fail\" + bcolors.ENDC, end=' ')\n print(f'({(end-start):.2}s)')\n print(result.stdout.decode('utf-8'))\n ok = False\n\nprint(\"Cleaning up...\")\nif cabal_file == '':\n run(f\"rm {sol_name} *.o *.hi\", shell=True)\nelse:\n run(f\"rm -rf dist-newstyle/\", shell=True)\n\nif not ok:\n sys.exit(1)" } ]
3
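The test.py in this record is a golden-file harness: it compiles the solution, pipes each *.in file to it, and compares the output against the matching *.ans file via diff -b. For reference, the same check can be done without shelling out to diff; the sketch below is an approximation (rstrip() is slightly stricter than diff -b, which ignores any change in the amount of whitespace), not a drop-in replacement for the script.

import subprocess
from pathlib import Path

def passes(cmd, in_file, ans_file, time_limit=10.0):
    # Run the solution with the test input on stdin and capture its stdout
    proc = subprocess.run(cmd, input=Path(in_file).read_text(),
                          capture_output=True, text=True, timeout=time_limit)
    # Compare line by line, ignoring trailing whitespace on each line
    got = [line.rstrip() for line in proc.stdout.splitlines()]
    want = [line.rstrip() for line in Path(ans_file).read_text().splitlines()]
    return proc.returncode == 0 and got == want

# e.g. passes(['./sol'], 'sample.in', 'sample.ans')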
Suykum/100DaysOfCode_Python
https://github.com/Suykum/100DaysOfCode_Python
66f20c7338a988424e7944f5ebfdf03a00cbe4cb
36bb703c725cc2e8439c72054289dd5832e4af97
4ee5d5d9babb1f46da9213f046312ad62e82c3e4
refs/heads/main
2023-02-08T22:41:05.556535
2021-01-05T08:23:20
2021-01-05T08:23:20
321,937,603
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6911242604255676, "alphanum_fraction": 0.6958580017089844, "avg_line_length": 34.25, "blob_id": "b056efa771987ae7e772e1a82cef3e66a69913ce", "content_id": "e580d9009ddf0e37f4a56201b8157a76a64795f3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 845, "license_type": "no_license", "max_line_length": 88, "num_lines": 24, "path": "/selenium_examples/main.py", "repo_name": "Suykum/100DaysOfCode_Python", "src_encoding": "UTF-8", "text": "from selenium import webdriver\nimport time\nchrome_driver_path = \"C:/development/chromedriver.exe\"\n\ndriver = webdriver.Chrome(executable_path=chrome_driver_path)\ndriver.get(\"https://www.python.org\")\n# search_bar = driver.find_element_by_name(\"q\")\n# print(search_bar.get_attribute(\"placeholder\"))\n# logo = driver.find_element_by_class_name(\"python-logo\")\n# print(logo.size)\n# bug_search = driver.find_element_by_xpath('//*[@id=\"site-map\"]/div[2]/div/ul/li[3]/a')\n# print(bug_search.text)\n# # driver.close()\n\nevent_dates = driver.find_elements_by_css_selector(\".event-widget time\")\nevent_names = driver.find_elements_by_css_selector(\".event-widget li a\")\nevents_data = {}\nfor n in range(0, len(event_names) - 1):\n events_data[n] = {\n \"time\": event_dates[n].text,\n \"names\": event_names[n].text\n }\nprint(events_data)\ndriver.quit()" }, { "alpha_fraction": 0.6697154641151428, "alphanum_fraction": 0.6853658556938171, "avg_line_length": 32.46938705444336, "blob_id": "47441335ddc3708e66901c288ea315ca0af27073", "content_id": "7b5280d2b53e88005b7fc37c8e9d49ed6554a093", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4920, "license_type": "no_license", "max_line_length": 232, "num_lines": 147, "path": "/movie_project/main.py", "repo_name": "Suykum/100DaysOfCode_Python", "src_encoding": "UTF-8", "text": "import sqlalchemy\nfrom flask import Flask, render_template, redirect, url_for, request\nfrom flask_bootstrap import Bootstrap\nfrom flask_sqlalchemy import SQLAlchemy\nfrom flask_wtf import FlaskForm\nfrom wtforms import StringField, SubmitField, FloatField, IntegerField\nfrom wtforms.validators import DataRequired\nimport requests\n\napp = Flask(__name__)\napp.config['SECRET_KEY'] = '8BYkEfBA6O6donzWlSihBXox7C0sKR6b'\nBootstrap(app)\n\n\nclass MovieEditForm(FlaskForm):\n ranking = IntegerField(\"Enter ranking data\")\n rating = FloatField(\"Your rating out of 10 e.g. 
7.5\", validators=[DataRequired()])\n review = StringField(\"Your review\", validators=[DataRequired()])\n done = SubmitField(\"Done\")\n\n\nclass AddMovie(FlaskForm):\n title = StringField(\"Movie title\", validators=[DataRequired()])\n add_movie = SubmitField(\"Add Movie\")\n\n\n# CREATE DATABASE\napp.config['SQLALCHEMY_DATABASE_URI'] = \"sqlite:///movies-collection.db\"\n# Optional: But it will silence the deprecation warning in the console.\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\ndb = SQLAlchemy(app)\n\n\n# CREATE TABLE\nclass Movie(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n title = db.Column(db.String(250), unique=True, nullable=False)\n year = db.Column(db.Integer, nullable=False)\n description = db.Column(db.String(500), nullable=True)\n rating = db.Column(db.Float, nullable=True)\n ranking = db.Column(db.Integer, nullable=True)\n review = db.Column(db.String(250), nullable=True)\n img_url = db.Column(db.String(250), nullable=False)\n\n # Optional: this will allow each book object to be identified by its title when printed.\n def __repr__(self):\n return f'<Movie {self.title}>'\n\n\ndb.create_all()\n\nnew_movie = Movie(\n title=\"Phone Booth\",\n year=2011,\n description=\"A mysterious Hollywood stuntman and mechanic moonlights as a getaway driver and finds himself in trouble when he helps out his neighbor in this action drama.\",\n rating=7.5,\n ranking=1,\n review=\"Loved it!\",\n img_url=\"https://image.tmdb.org/t/p/w500/tjrX2oWRCM3Tvarz38zlZM7Uc10.jpg\"\n\n)\nnew_movie2 = Movie(\n title=\"Drive\",\n year=2002,\n description=\"Publicist Stuart Shepard finds himself trapped in a phone booth, pinned down by an extortionist's sniper rifle. Unable to leave or receive outside help, Stuart's negotiation with the caller leads to a jaw-dropping climax.\",\n rating=7.3,\n ranking=10,\n review=\"My favourite character was the caller.\",\n img_url=\"https://www.shortlist.com/media/images/2019/05/the-30-coolest-alternative-movie-posters-ever-2-1556670563-K61a-column-width-inline.jpg\"\n\n)\n\n\n# db.session.add(new_movie)\n# db.session.add(new_movie2)\n# db.session.commit()\n\n\n@app.route(\"/\")\ndef home():\n all_movies = Movie.query.order_by(Movie.ranking).all()\n all_movies.reverse()\n return render_template(\"index.html\", movies=all_movies)\n\n\n@app.route(\"/edit\", methods=[\"GET\", \"POST\"])\ndef edit():\n form = MovieEditForm()\n movie_id = request.args.get('id')\n selected_movie = Movie.query.get(movie_id)\n if request.method == \"POST\":\n selected_movie.rating = form.rating.data\n selected_movie.review = form.review.data\n selected_movie.ranking = form.ranking.data\n db.session.commit()\n return redirect(url_for('home'))\n return render_template(\"edit.html\", form=form, movie=selected_movie)\n\n\n@app.route(\"/delete\")\ndef delete():\n movie_id = request.args.get('id')\n selected_movie = Movie.query.get(movie_id)\n db.session.delete(selected_movie)\n db.session.commit()\n return redirect(url_for('home'))\n\n\n@app.route(\"/add\", methods=[\"GET\", \"POST\"])\ndef add():\n add_form = AddMovie()\n if request.method == \"POST\":\n new_movie_title = add_form.title.data\n data = select_movie(new_movie_title)\n return render_template(\"select.html\", new_movies=data)\n return render_template(\"add.html\", form=add_form)\n\n\ndef select_movie(name):\n parameters = {\"apikey\": \"4cd1c5fb\", \"s\": name}\n movies_endpoint = \"http://www.omdbapi.com/\"\n result = requests.get(movies_endpoint, params=parameters).json()\n try:\n data = 
result[\"Search\"]\n except KeyError:\n data = []\n return data\n\n\n@app.route(\"/find\")\ndef find_movie():\n movie_id = request.args.get('imdb_id')\n parameters = {\"apikey\": \"4cd1c5fb\", \"i\": movie_id}\n movies_endpoint = \"http://www.omdbapi.com/\"\n r_movie = requests.get(movies_endpoint, params=parameters).json()\n new_film = Movie(title=r_movie[\"Title\"], year=r_movie[\"Year\"], description=r_movie[\"Plot\"], rating=r_movie[\"imdbRating\"],\n ranking=5, review=\"No review yet\", img_url=r_movie[\"Poster\"])\n try:\n db.session.add(new_film)\n db.session.commit()\n except sqlalchemy.exc.IntegrityError:\n print(\"Already exist\")\n return redirect(url_for('edit', id=new_film.id))\n\n\nif __name__ == '__main__':\n app.run(debug=True)\n" }, { "alpha_fraction": 0.6382978558540344, "alphanum_fraction": 0.6406619548797607, "avg_line_length": 34, "blob_id": "7729b529783f5a88453d4c3135aadb51ed4ff9e5", "content_id": "bf5f11553bd32f34d92cc1ac06869e2cda37c494", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 423, "license_type": "no_license", "max_line_length": 69, "num_lines": 12, "path": "/mail_merge/main.py", "repo_name": "Suykum/100DaysOfCode_Python", "src_encoding": "UTF-8", "text": "\nwith open(\"./Input/Names/invited_names.txt\") as file:\n names = file.readlines()\n\n\nwith open(\"./Input/Letters/starting_letter.txt\") as start_letter:\n s_letter = start_letter.read()\n\nfor n in range(0, len(names)):\n name = names[n].strip(\".\\n\")\n with open(f\"./Output/ReadyToSend/For_{name}.txt\", \"w\") as letter:\n letter_with_name = s_letter.replace(\"[name]\", name)\n letter.write(letter_with_name)\n\n\n" }, { "alpha_fraction": 0.5891891717910767, "alphanum_fraction": 0.5981981754302979, "avg_line_length": 36, "blob_id": "b17ffe68f0ca9b802252f8a091d383520c1e63ce", "content_id": "dc2a6aaafac92d3ddbb089b633894b07fe9d82a1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 555, "license_type": "no_license", "max_line_length": 82, "num_lines": 15, "path": "/email_sending/main.py", "repo_name": "Suykum/100DaysOfCode_Python", "src_encoding": "UTF-8", "text": "import smtplib\nimport random\nimport datetime as dt\n\n\nif dt.datetime.now().weekday() == 2:\n with open(\"quotes.txt\", encoding='utf-8') as file:\n quotes = file.readlines()\n my_email = \"*********@gmail.com\"\n password = \"***********\"\n with smtplib.SMTP(\"smtp.gmail.com\", port=587) as connection:\n connection.starttls()\n connection.login(user=my_email, password=password)\n connection.sendmail(from_addr=my_email, to_addrs=\"********@yahoo.com\",\n msg=f\"Subject: Motivation\\n\\n{random.choice(quotes)}\")\n" }, { "alpha_fraction": 0.6540084481239319, "alphanum_fraction": 0.6698312163352966, "avg_line_length": 26.08571434020996, "blob_id": "feef1e591ec03bcd3d3edf2315d1be74dad431de", "content_id": "cba618efb0111cbfddbc226544d4b45a75ab6468", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 948, "license_type": "no_license", "max_line_length": 84, "num_lines": 35, "path": "/jinja_expl/server.py", "repo_name": "Suykum/100DaysOfCode_Python", "src_encoding": "UTF-8", "text": "from flask import Flask, render_template\nimport random\nimport datetime\nimport requests\n\napp = Flask(__name__)\n\n\n@app.route('/')\ndef hello_world():\n random_number = random.randint(0, 10)\n year = datetime.datetime.today().year\n return render_template(\"index.html\", num=random_number, 
year=year)\n\n\n@app.route(\"/guess/<name>\")\ndef guess(name):\n gender_result = requests.get(url=f\"https://api.genderize.io?name={name}\").json()\n gender = gender_result[\"gender\"]\n age_result = requests.get(f\"https://api.agify.io?name={name}\").json()\n age = age_result[\"age\"]\n return render_template(\"guess.html\", name=name, gender=gender, age=age)\n\n\n@app.route(\"/blog\")\ndef get_blog():\n blog_url = \"https://api.npoint.io/5abcca6f4e39b4955965\"\n response = requests.get(blog_url)\n all_posts = response.json()\n print(all_posts)\n return render_template(\"blog.html\", posts=all_posts)\n\n\nif __name__ == \"__main__\":\n app.run(debug=True)\n" }, { "alpha_fraction": 0.40809082984924316, "alphanum_fraction": 0.5997161269187927, "avg_line_length": 35.153846740722656, "blob_id": "6a890c189c999beb30e6ee7c8376048dceb3fd04", "content_id": "de946e262c7c1e167bc334dc92269f367a95fbd5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1409, "license_type": "no_license", "max_line_length": 489, "num_lines": 39, "path": "/turtle/turtle_hirst_painting.py", "repo_name": "Suykum/100DaysOfCode_Python", "src_encoding": "UTF-8", "text": "import colorgram\nimport turtle as turtle_mode\nimport random\n\ndef extract_colors_from_image(image_name, number_of_colors):\n colors = colorgram.extract(image_name, number_of_colors)\n rgb_colors = []\n for color in colors:\n r = color.rgb.r\n g = color.rgb.g\n b = color.rgb.b\n new_color = (r, g, b)\n rgb_colors.append(new_color)\n return rgb_colors\n\n#colorgram.extract('hirst.jpg', 30)\ncolor_list = [(244, 231, 217), (208, 151, 103), (245, 226, 234), (218, 230, 239), (226, 241, 234), (58, 105, 133), (148, 87, 58), (128, 163, 185), (196, 137, 157), (138, 71, 95), (210, 91, 67), (130, 177, 155), (60, 120, 89), (162, 149, 54), (191, 91, 118), (224, 201, 126), (25, 48, 75), (78, 157, 122), (55, 41, 27), (232, 166, 185), (40, 56, 105), (238, 170, 159), (56, 33, 47), (58, 155, 172), (115, 37, 58), (105, 121, 164), (27, 51, 39), (160, 210, 190), (17, 95, 71), (117, 42, 33)]\nturtle_mode.colormode(255)\ntim = turtle_mode.Turtle()\ntim.speed(\"fast\")\ntim.penup()\ntim.hideturtle()\ntim.setheading(225)\ntim.forward(300)\ntim.setheading(0)\nnumber_of_dots = 101\nfor dot_count in range(1, number_of_dots):\n tim.dot(20, random.choice(color_list))\n tim.forward(50)\n if dot_count % 10 == 0:\n tim.setheading(90)\n tim.forward(50)\n tim.setheading(180)\n tim.forward(500)\n tim.setheading(0)\n\n\nscreen = turtle_mode.Screen()\nscreen.exitonclick()" }, { "alpha_fraction": 0.6403161883354187, "alphanum_fraction": 0.6561264991760254, "avg_line_length": 14.363636016845703, "blob_id": "59c037dbf31426b709740ffd476aaec976a56555", "content_id": "be62f555c3d4aff615c396841ba0bb5fde08a935", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 506, "license_type": "no_license", "max_line_length": 33, "num_lines": 33, "path": "/turtle/key_listener.py", "repo_name": "Suykum/100DaysOfCode_Python", "src_encoding": "UTF-8", "text": "from turtle import Turtle, Screen\n\ntim = Turtle()\nscreen = Screen()\n\ndef move_forward():\n tim.forward(10)\n\ndef move_backward():\n tim.backward(10)\n\ndef turn_left():\n tim.left(10)\n\ndef turn_right():\n tim.right(10)\n\ndef clear_screen():\n tim.clear()\n tim.penup()\n tim.home()\n tim.pendown()\n\nscreen.listen()\nscreen.onkey(move_forward, \"f\")\nscreen.onkey(move_backward, \"b\")\nscreen.onkey(turn_left, 
\"l\")\nscreen.onkey(turn_right, \"r\")\nscreen.onkey(clear_screen, \"c\")\n\n\n\nscreen.exitonclick()" }, { "alpha_fraction": 0.7727272510528564, "alphanum_fraction": 0.7727272510528564, "avg_line_length": 36.30769348144531, "blob_id": "1b2c106a41f120274901a12607e74a1df89b8d44", "content_id": "77766493b8067a55a4c84d5957a367f7d52a5176", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 484, "license_type": "no_license", "max_line_length": 106, "num_lines": 13, "path": "/quiz/main.py", "repo_name": "Suykum/100DaysOfCode_Python", "src_encoding": "UTF-8", "text": "from quiz.question_model import Question\nfrom quiz.data import question_data\nfrom quiz.quiz_brain import QuizBrain\nfrom quiz.ui import QuizInterface\n\nquestion_bank = [Question(question[\"question\"], question[\"correct_answer\"]) for question in question_data]\n\nquiz = QuizBrain(question_bank)\nquiz_ui = QuizInterface(quiz)\n# while quiz.still_has_question():\n# quiz.next_question()\nprint(\"You have completed the quiz\")\nprint(f\"Your final score is {quiz.score}/{quiz.question_number}\")" }, { "alpha_fraction": 0.6036217212677002, "alphanum_fraction": 0.6036217212677002, "avg_line_length": 28.176469802856445, "blob_id": "9c76790a9d59d4c2a0f793403fee9b59ed77f74b", "content_id": "16a48e0cd257ead6e9b73d16544ae4e8e7badeb8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 497, "license_type": "no_license", "max_line_length": 69, "num_lines": 17, "path": "/NATO_alphabet/main.py", "repo_name": "Suykum/100DaysOfCode_Python", "src_encoding": "UTF-8", "text": "import pandas\n\ndata = pandas.read_csv(\"nato_phonetic_alphabet.csv\")\ndata_dict = {row.letter: row.code for(index, row) in data.iterrows()}\n\nis_continue = True\nwhile is_continue:\n user_input = input(\"Enter a word: \").upper()\n if user_input == \"Q\":\n is_continue = False\n else:\n try:\n words = [data_dict[letter] for letter in user_input]\n except KeyError:\n print(\"Sorry, only letters in the alphabet please\")\n else:\n print(words)\n\n" }, { "alpha_fraction": 0.6402753591537476, "alphanum_fraction": 0.6660929322242737, "avg_line_length": 19.068965911865234, "blob_id": "878cc13983f1268899def7067a356626577e0bcb", "content_id": "4df575947d93ceaad9360271f0329db3bc3380bb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 581, "license_type": "no_license", "max_line_length": 60, "num_lines": 29, "path": "/bootstrap_clean_blog/main.py", "repo_name": "Suykum/100DaysOfCode_Python", "src_encoding": "UTF-8", "text": "from flask import Flask, render_template\nimport requests\n\napp = Flask(__name__)\nblog_url = \"https://api.npoint.io/43644ec4f0013682fc0d\"\nall_post = requests.get(blog_url).json()\n\n@app.route('/')\ndef home():\n return render_template(\"index.html\", posts=all_post)\n\n\n@app.route('/about')\ndef about():\n return render_template(\"about.html\")\n\n\n@app.route('/contact')\ndef contact():\n return render_template(\"contact.html\")\n\n\n@app.route('/post/<int:id>')\ndef post(id):\n return render_template(\"post.html\", post=all_post[id-1])\n\n\nif __name__ == \"__main__\":\n app.run(debug=True)" }, { "alpha_fraction": 0.6609311699867249, "alphanum_fraction": 0.6649797558784485, "avg_line_length": 24.30769157409668, "blob_id": "491539a8b65250360ac75e07460b407b819ce9eb", "content_id": "4adeb3c18e5e29760f99e1ebb22298f3cfbf9d4b", "detected_licenses": [], "is_generated": false, "is_vendor": 
false, "language": "Python", "length_bytes": 988, "license_type": "no_license", "max_line_length": 75, "num_lines": 39, "path": "/hangman_game/main.py", "repo_name": "Suykum/100DaysOfCode_Python", "src_encoding": "UTF-8", "text": "import random\nimport hangman_game.hangman_art as hangman_art\nimport hangman_game.hangman_words as hangman_words\n#from replit import clear\n\nprint(hangman_art.logo)\nword_list = hangman_words.word_list\nchosen_word = random.choice(word_list)\nprint(chosen_word)\ndisplay = ['_' for n in chosen_word]\nlives = 6\nend_of_game = False\nentered_letters = []\n\nwhile(not end_of_game):\n guess = (input(\"Guess a letter: \")).lower()\n #clear()\n if guess in display:\n print(f\"You've already guessed {guess}\")\n else:\n entered_letters.append(guess)\n\n for position in range(0, len(chosen_word)):\n if chosen_word[position] == guess:\n display[position] = guess\n\n if guess not in chosen_word:\n print(f\"You guessed {guess}, that's not in the word. You lose a life.\")\n lives -= 1\n if lives == 0:\n print (\"You lose!\")\n end_of_game = True\n\n print(f\"{' '.join(display)}\")\n print(hangman_art.stages[lives])\n\n if \"_\" not in display:\n end_of_game = True\n print(\"You win.\")\n\n" }, { "alpha_fraction": 0.6029556393623352, "alphanum_fraction": 0.6078817844390869, "avg_line_length": 38.03845977783203, "blob_id": "6f099857f75f524bf9a2d4ac43f13a6a4c33577d", "content_id": "34f906674e0b41d292c36f6448791d46f8dd3bba", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1015, "license_type": "no_license", "max_line_length": 99, "num_lines": 26, "path": "/birthday_wisher/main.py", "repo_name": "Suykum/100DaysOfCode_Python", "src_encoding": "UTF-8", "text": "import datetime as dt\nimport pandas\nimport random\nimport smtplib\n\ndata = pandas.read_csv(\"birthdays.csv\")\nbirthday_dict = {(data_row.month, data_row.day): data_row for (index, data_row) in data.iterrows()}\n\ntoday_date = dt.datetime.today()\ntoday = (today_date.month, today_date.day)\n\nif today in birthday_dict:\n birthday_person = birthday_dict[today]\n file_path = f\"./letter_templates/letter_{random.randint(1, 3)}.txt\"\n with open(file_path) as letter_file:\n content = letter_file.read()\n content = content.replace(\"[NAME]\", birthday_person[\"name\"])\n # ---------------------sending email ------------------------------------\n\n my_email = \"********5@gmail.com\"\n password = \"*******\"\n with smtplib.SMTP(\"smtp.gmail.com\", port=587) as connection:\n connection.starttls()\n connection.login(user=my_email, password=password)\n connection.sendmail(from_addr=my_email, to_addrs=\"*******@yahoo.com\",\n msg=f\"Subject: Happy Birthday!\\n\\n{content}\")\n" }, { "alpha_fraction": 0.7447865009307861, "alphanum_fraction": 0.7497517466545105, "avg_line_length": 31.516128540039062, "blob_id": "12f637476b2fd872379823febcfc66f6cc8bc337", "content_id": "d19050e9dff3107505ab94fb0c7facf614de35b8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1007, "license_type": "no_license", "max_line_length": 72, "num_lines": 31, "path": "/selenium_examples/interacting.py", "repo_name": "Suykum/100DaysOfCode_Python", "src_encoding": "UTF-8", "text": "from selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\nimport time\nchrome_driver_path = \"D:/udemy/chromedriver.exe\"\n\n# driver = webdriver.Chrome(executable_path=chrome_driver_path)\n# 
driver.get(\"https://en.wikipedia.org/wiki/Main_Page\")\n# article_count = driver.find_element_by_css_selector(\"#articlecount a\")\n#\n# all_portals = driver.find_element_by_link_text(\"All portals\")\n# # all_portals.click()\n#\n# search = driver.find_element_by_name(\"search\")\n# search.send_keys(\"Python\")\n# search.send_keys(Keys.ENTER)\n\n\ndriver = webdriver.Chrome(executable_path=chrome_driver_path)\ndriver.get(\"http://secure-retreat-92358.herokuapp.com/\")\nfname_input = driver.find_element_by_name(\"fName\")\nfname_input.send_keys(\"Anna\")\nlname_input = driver.find_element_by_name(\"lName\")\nlname_input.send_keys(\"Monn\")\nemail_input = driver.find_element_by_name(\"email\")\nemail_input.send_keys(\"annamonn@gmail.com\")\nsubmit_button = driver.find_element_by_css_selector(\"form button\")\nsubmit_button.click()\n\n\n\ndriver.quit()" }, { "alpha_fraction": 0.704684317111969, "alphanum_fraction": 0.7148675918579102, "avg_line_length": 36.769229888916016, "blob_id": "d25aec8d19dd004799fa5c58c506032210d706bb", "content_id": "76c5f8c69e6f1e3999086e524e38c7c0401a1bb2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 491, "license_type": "no_license", "max_line_length": 86, "num_lines": 13, "path": "/bsoup/movie_scraping.py", "repo_name": "Suykum/100DaysOfCode_Python", "src_encoding": "UTF-8", "text": "from bs4 import BeautifulSoup\nimport requests\n\nresponse = requests.get(\"https://www.empireonline.com/movies/features/best-movies-2/\")\nmovies_page = response.text\nsoup = BeautifulSoup(movies_page, \"html.parser\")\ntitle_list = soup.find_all(name=\"h3\", class_=\"title\")\nmovie_list = [movie.getText() for movie in title_list]\nmovie_list_final = movie_list[::-1]\n\nwith open(\"must_watch_movies.txt\", \"a\", encoding=\"utf8\") as file:\n for movie in movie_list_final:\n file.write(f\"{movie}\\n\")\n" }, { "alpha_fraction": 0.6614583134651184, "alphanum_fraction": 0.6840277910232544, "avg_line_length": 29.3157901763916, "blob_id": "6b61d8a67f6fbad2ddee51541acc61317dd48edc", "content_id": "d2f3030c2fa29e56b5cd6c0e24d309a8de614fef", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 576, "license_type": "no_license", "max_line_length": 98, "num_lines": 19, "path": "/blog_templating/main.py", "repo_name": "Suykum/100DaysOfCode_Python", "src_encoding": "UTF-8", "text": "from flask import Flask, render_template\nimport requests\nfrom blog_templating.post import Post\n\napp = Flask(__name__)\nblog_url = \"https://api.npoint.io/5abcca6f4e39b4955965\"\nall_post = requests.get(blog_url).json()\npost_list = [Post(post[\"id\"], post[\"title\"], post[\"subtitle\"], post[\"body\"]) for post in all_post]\n\n@app.route('/')\ndef home():\n return render_template(\"index.html\", posts=post_list)\n\n@app.route('/blog/<int:index>')\ndef show_content(index):\n return render_template(\"post.html\", post=post_list[index-1])\n\nif __name__ == \"__main__\":\n app.run(debug=True)\n" }, { "alpha_fraction": 0.5793358087539673, "alphanum_fraction": 0.5830258131027222, "avg_line_length": 16.673913955688477, "blob_id": "8e4d5b4d68912f07179520465d0c96c4304e597f", "content_id": "c0f5dac00ad307bba94a9c298289141833e8c0f9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 813, "license_type": "no_license", "max_line_length": 78, "num_lines": 46, "path": "/flask_expl/hello.py", "repo_name": "Suykum/100DaysOfCode_Python", "src_encoding": 
"UTF-8", "text": "from flask import Flask\n\napp = Flask(__name__)\n\n\n@app.route('/')\ndef hello_world():\n return '<h1 style=\"text_align: center\">Hellooooo!</h1> ' \\\n '<p>This is a paragraph.<p>' \\\n '<img src=\"https://media.giphy.com/media/6RuhlzSdhIAqk/giphy.gif\">'\n\n\ndef make_bold(function):\n def wrapper():\n result = function()\n return f\"<b>{result}</b>\"\n\n return wrapper\n\n\ndef make_emphasize(function):\n def wrapper():\n result = function()\n return f\"<em>{result}</em>\"\n\n return wrapper\n\n\ndef make_underlined(function):\n def wrapper():\n result = function()\n return f\"<u>{result}</u>\"\n\n return wrapper\n\n\n@app.route('/bye')\n@make_bold\n@make_emphasize\n@make_underlined\ndef hello():\n return 'Bye'\n\n\nif __name__ == \"__main__\":\n app.run(debug=True)\n" }, { "alpha_fraction": 0.5859073400497437, "alphanum_fraction": 0.6177605986595154, "avg_line_length": 23.11627960205078, "blob_id": "61973502dcc8f8cacfb06147d53b6d259af86ddd", "content_id": "8b982926c3a15ad06304f67c3f25d461669ad87e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1036, "license_type": "no_license", "max_line_length": 109, "num_lines": 43, "path": "/turtle/turtle_example.py", "repo_name": "Suykum/100DaysOfCode_Python", "src_encoding": "UTF-8", "text": "from turtle import Turtle, Screen\nimport random\n\ntimmy = Turtle()\n\ncolours = [\"CornflowerBlue\", \"DarkOrchid\", \"IndianRed\", \"DeepSkyBlue\", \"LightSeaGreen\", \"wheat\", \"SlateGray\",\n \"SeaGreen\"]\n\n\ndef draw_hexagon():\n timmy.shape(\"classic\")\n num_sides = 2\n while num_sides <= 8:\n timmy.color(random.choice(colours))\n for _ in range(num_sides):\n angle = 360 / num_sides\n timmy.forward(100)\n timmy.right(angle)\n num_sides += 1\n\n\ndef draw_random_walk():\n directions = [0, 90, 180, 270]\n timmy.pensize(10)\n timmy.speed(\"fastest\")\n for _ in range(200):\n timmy.forward(30)\n timmy.color(random.choice(colours))\n timmy.setheading(random.choice(directions))\n\n\ndef spiriograph(size_of_gap):\n timmy.speed(\"fastest\")\n for n in range(int(360 / size_of_gap)):\n timmy.color(random.choice(colours))\n timmy.circle(100)\n timmy.setheading(timmy.heading() + size_of_gap)\n\n\nspiriograph(20)\n\nscreen = Screen()\nscreen.exitonclick()" }, { "alpha_fraction": 0.5897935628890991, "alphanum_fraction": 0.610829770565033, "avg_line_length": 34.16438293457031, "blob_id": "4da4e7883da6d20c9275412adb75f6342c40ec51", "content_id": "925c4f22b3c223096fb46552dec6068822443b01", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2567, "license_type": "no_license", "max_line_length": 90, "num_lines": 73, "path": "/flash_card/main.py", "repo_name": "Suykum/100DaysOfCode_Python", "src_encoding": "UTF-8", "text": "from tkinter import *\nimport pandas\nimport random\n\nBACKGROUND_COLOR = \"#B1DDC6\"\nFONT_NAME = \"Arial\"\ncurrent_card = {}\nto_learn = []\n\n# ------------------------------ Load Data -------------------------------------------- #\ntry:\n data = pandas.read_csv(\"./data/words_to_learn.csv\")\nexcept FileNotFoundError:\n original_data = pandas.read_csv(\"./data/german_words.csv\")\n to_learn = original_data.to_dict(orient=\"records\")\nelse:\n to_learn = data.to_dict(orient=\"records\")\n\n\n# ------------------------------ Random Word Generate ---------------------------------- #\n\n\ndef next_card():\n global to_learn, current_card, flip_timer\n window.after_cancel(flip_timer)\n current_card = 
random.choice(to_learn)\n canvas.itemconfig(card_title, text=\"German\", fill=\"black\")\n canvas.itemconfig(card_word, text=f\"{current_card['German']}\", fill=\"black\")\n flip_timer = window.after(3000, flip_card)\n\n\n# ------------------------------Flip Card ------------------------------------------ #\ndef flip_card():\n canvas.itemconfig(card_title, text=\"English\", fill=\"white\")\n canvas.itemconfig(card_word, text=f\"{current_card['English']}\", fill=\"white\")\n canvas.itemconfig(card_background, image=card_back_img)\n\n\n# ------------------------------Remove Known Words------------------------------------- #\ndef is_known():\n to_learn.remove(current_card)\n data2 = pandas.DataFrame(to_learn)\n data2.to_csv(\"./data/words_to_learn.csv\", index=False)\n next_card()\n\n\n# ----------------------------- User Interface ----------------------------------------- #\nwindow = Tk()\nwindow.title(\"Flashy\")\nwindow.config(padx=50, pady=50, bg=BACKGROUND_COLOR)\nflip_timer = window.after(3000, flip_card)\n\ncanvas = Canvas(width=800, height=526, bg=BACKGROUND_COLOR, highlightthickness=0)\ncard_front_img = PhotoImage(file=\"./images/card_front.png\")\ncard_back_img = PhotoImage(file=\"./images/card_back.png\")\n\ncard_background = canvas.create_image(400, 263, image=card_front_img)\ncanvas.grid(column=0, row=0, columnspan=2)\n\ncard_title = canvas.create_text(400, 150, text=\"English\", font=(FONT_NAME, 40, \"italic\"))\ncard_word = canvas.create_text(400, 263, text=\"Word\", font=(FONT_NAME, 60, \"bold\"))\n\nright_image = PhotoImage(file=\"./images/right.png\")\nknown_button = Button(image=right_image, highlightthickness=0, command=is_known)\nknown_button.grid(column=1, row=1)\n\nwrong_image = PhotoImage(file=\"./images/wrong.png\")\nunknown_button = Button(image=wrong_image, highlightthickness=0, command=next_card)\nunknown_button.grid(column=0, row=1)\n\nnext_card()\n\nwindow.mainloop()\n" }, { "alpha_fraction": 0.6612465977668762, "alphanum_fraction": 0.705962061882019, "avg_line_length": 23.53333282470703, "blob_id": "5b4ded8dc14a3204c30e45dad640a6b1e38b504f", "content_id": "c128acbf3f304cfcea2b0a31fa4bb306e2d9d927", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 738, "license_type": "no_license", "max_line_length": 62, "num_lines": 30, "path": "/tkinter_basics/mile_to_km.py", "repo_name": "Suykum/100DaysOfCode_Python", "src_encoding": "UTF-8", "text": "from tkinter import *\n\nwindow = Tk()\nwindow.title(\"Mile to Km Converter\")\nwindow.minsize(width=200, height=100)\nwindow.config(padx=10, pady=10)\n\nentry = Entry(width=20)\nentry.insert(END, string=\"0\")\nentry.grid(column=1, row=0)\n\nmile_label = Label(text=\"Miles\")\nmile_label.grid(column=2, row=0)\nis_equal_to_lable = Label(text=\"is equal to\")\nis_equal_to_lable.grid(column=0, row=1)\nvalue_lable = Label(text=\"0\")\nvalue_lable.grid(column=1, row=1)\nkm_lable=Label(text=\"Km\")\nkm_lable.grid(column=2, row=1)\n\n\ndef calculate():\n mile = float(entry.get())\n km = round(mile * 1.609344)\n value_lable.config(text=km)\n\n\ncalculate_button = Button(text=\"Calculate\", command=calculate)\ncalculate_button.grid(column=1, row=2)\nwindow.mainloop()\n\n\n" }, { "alpha_fraction": 0.5660315155982971, "alphanum_fraction": 0.5719448328018188, "avg_line_length": 44.43283462524414, "blob_id": "846a437bb57c61cb14e8e2a145fc4d77b0e643d9", "content_id": "4f3671252b74e1466971f3b19c6c70b62dc46069", "detected_licenses": [], "is_generated": false, "is_vendor": false, 
"language": "Python", "length_bytes": 3044, "license_type": "no_license", "max_line_length": 117, "num_lines": 67, "path": "/quiz/data.py", "repo_name": "Suykum/100DaysOfCode_Python", "src_encoding": "UTF-8", "text": "import requests\n\nparameters = {\n \"amount\": 10,\n \"type\": \"boolean\",\n \"category\": 17,\n \"difficulty\": \"medium\"\n}\nresponse = requests.get(url=\"https://opentdb.com/api.php\", params=parameters)\nresponse.raise_for_status()\ndata = response.json()\nquestion_data = data[\"results\"]\n# question_data = [\n# {\n# \"category\": \"Science: Computers\",\n# \"type\": \"boolean\", \"difficulty\": \"medium\",\n# \"question\": \"The HTML5 standard was published in 2014.\",\n# \"correct_answer\": \"True\",\n# \"incorrect_answers\": [\"False\"]\n# },\n# {\n# \"category\": \"Science: Computers\", \"type\": \"boolean\", \"difficulty\": \"medium\",\n# \"question\": \"The common software-programming acronym &quot;I18N&quot; \"\n# \"comes from the term &quot;Interlocalization&quot;.\",\n# \"correct_answer\": \"False\", \"incorrect_answers\": [\"True\"]\n# },\n# {\n# \"category\": \"Science: Computers\", \"type\": \"boolean\", \"difficulty\": \"medium\",\n# \"question\": \"FLAC stands for &quot;Free Lossless Audio Condenser&quot;&#039;\", \"correct_answer\": \"False\",\n# \"incorrect_answers\": [\"True\"]\n# },\n# {\n# \"category\": \"Science: Computers\", \"type\": \"boolean\", \"difficulty\": \"medium\",\n# \"question\": \"The open source program Redis is a relational database server.\",\n# \"correct_answer\": \"False\", \"incorrect_answers\": [\"True\"]\n# },\n# {\n# \"category\": \"Science: Computers\", \"type\": \"boolean\", \"difficulty\": \"medium\",\n# \"question\": \"Early RAM was directly seated onto the motherboard and could not be easily removed.\",\n# \"correct_answer\": \"True\", \"incorrect_answers\": [\"False\"]\n# },\n# {\n# \"category\": \"Science: Computers\", \"type\": \"boolean\", \"difficulty\": \"medium\",\n# \"question\": \"The first dual-core CPU was the Intel Pentium D.\", \"correct_answer\": \"False\",\n# \"incorrect_answers\": [\"True\"]\n# },\n# {\n# \"category\": \"Science: Computers\", \"type\": \"boolean\", \"difficulty\": \"medium\",\n# \"question\": \"The last Windows operating system to be based on the Windows 9x kernel was Windows 98.\",\n# \"correct_answer\": \"False\", \"incorrect_answers\": [\"True\"]\n# },\n# {\n# \"category\": \"Science: Computers\", \"type\": \"boolean\", \"difficulty\": \"medium\",\n# \"question\": \"Android versions are named in alphabetical order.\", \"correct_answer\": \"True\",\n# \"incorrect_answers\": [\"False\"]\n# },\n# {\n# \"category\": \"Science: Computers\", \"type\": \"boolean\", \"difficulty\": \"medium\",\n# \"question\": \"Linus Sebastian is the creator of the Linux kernel, which went on to be used in Linux,\"\n# \" Android, and Chrome OS.\",\n# \"correct_answer\": \"False\", \"incorrect_answers\": [\"True\"]\n# },\n# {\n# \"category\": \"Science: Computers\", \"type\": \"boolean\", \"difficulty\": \"medium\",\n# \"question\": \"A Boolean value of &quot;0&quot; represents which of these words?\", \"correct_answer\": \"False\",\n# \"incorrect_answers\": [\"True\"]}\n# ]\n" } ]
20
Yevs/week1
https://github.com/Yevs/week1
7b9ece06dbf6c622701596debbe0e94f0ca84225
56a3cb77747ca9d0edce059b4233fc8e2bffb13f
61bdc5e4d7847c94676adaced4a7bba673b013cd
refs/heads/master
2016-05-13T18:50:31.978953
2015-10-18T13:33:31
2015-10-18T13:33:31
44,206,445
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6363636255264282, "alphanum_fraction": 0.6363636255264282, "avg_line_length": 14.333333015441895, "blob_id": "f90e918ea5634d0eeb044d6a13203e99433e7891", "content_id": "a14360659f1f0d98d405f7333c775a9b11c89f7e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 275, "license_type": "no_license", "max_line_length": 35, "num_lines": 18, "path": "/raddit/threads/forms.py", "repo_name": "Yevs/week1", "src_encoding": "UTF-8", "text": "from django.forms import ModelForm\nfrom .models import Thread, Comment\n\n\nclass ThreadForm(ModelForm):\n\n class Meta:\n\n model = Thread\n exclude = ['author'] \n\n\nclass CommentForm(ModelForm):\n\n class Meta:\n\n model = Comment\n fields = ['content']" }, { "alpha_fraction": 0.8012422323226929, "alphanum_fraction": 0.8012422323226929, "avg_line_length": 19.125, "blob_id": "deecfa7caed9a5800d9c4145b784a552009ebc48", "content_id": "e08f4ab9f7c9627ec8e354fd4165737823615302", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 161, "license_type": "no_license", "max_line_length": 41, "num_lines": 8, "path": "/raddit/threads/admin.py", "repo_name": "Yevs/week1", "src_encoding": "UTF-8", "text": "from django.contrib import admin\n\nfrom .models import Thread, Comment, Like\n\n\nadmin.site.register(Thread)\nadmin.site.register(Comment)\nadmin.site.register(Like)\n" }, { "alpha_fraction": 0.4524793326854706, "alphanum_fraction": 0.4545454680919647, "avg_line_length": 31.288888931274414, "blob_id": "6f589bd5081d6d743c0dd3753681ea9f1380bffd", "content_id": "7d9e3ded04696b12a2918381c725dab23c22dfe2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 1452, "license_type": "no_license", "max_line_length": 83, "num_lines": 45, "path": "/raddit/threads/static/threads/js/thread.js", "repo_name": "Yevs/week1", "src_encoding": "UTF-8", "text": "function initLikes () {\n $('.on-fire a').click(function () {\n var change = 0;\n var thread_div = $(this).parent().parent();\n var thread_id = $('input', thread_div).val();\n var like = true;\n if ($(this).hasClass('clicked')) {\n $(this).removeClass('clicked');\n like = false;\n change = -1;\n } else {\n $(this).addClass('clicked');\n change = 1;\n }\n $.post((like ? 
'/like/' : '/dislike/') + thread_id + '/', function (data) {\n $('.thread p.like-amount', thread_div).text(data.like_amount);\n }).fail(function () {\n console.log('some weird mistake');\n });\n });\n}\n\nfunction initCommentWriting () {\n $('#create-comment').click(function () {\n var parent = $(this).parent();\n $(this).addClass('hidden');\n $('#comment-form').removeClass('hidden');\n $('#comment-cancel').click(function () {\n $(this).parent().addClass('hidden');\n $('#create-comment').removeClass('hidden');\n });\n // $('#comment-submit').click(function () {\n // var post_data = {\n // 'content': $('#comment-content').val(),\n // };\n // $.post('/comment/' + $('#thread-id').val() + '/',\n // post_data);\n // });\n });\n}\n\n$(document).ready(function () {\n initLikes();\n initCommentWriting();\n});" }, { "alpha_fraction": 0.6388620138168335, "alphanum_fraction": 0.6452487111091614, "avg_line_length": 32.34193420410156, "blob_id": "edb87e9e41bfed7c30f85d08dba9ed5932caafb2", "content_id": "3e57aec38a4bdc8baad05b348ac0d64fbb6bc28d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5167, "license_type": "no_license", "max_line_length": 85, "num_lines": 155, "path": "/raddit/threads/views.py", "repo_name": "Yevs/week1", "src_encoding": "UTF-8", "text": "from django.shortcuts import render, get_object_or_404, redirect\nfrom django.http import Http404, JsonResponse, HttpResponse\nfrom django.core.exceptions import PermissionDenied\nfrom django.views.generic import ListView, View\nfrom django.core.urlresolvers import reverse\n\nfrom django.contrib.auth import login as auth_login, logout as auth_logout\nfrom django.contrib.auth.models import User\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.auth.forms import AuthenticationForm, UserCreationForm\n\nfrom django.db.models import Count\n\nfrom .models import Thread, Like, Comment\nfrom .forms import ThreadForm, CommentForm\n\n\nclass Index(ListView):\n\n paginate_by = 10\n template_name = 'threads/index.html'\n queryset = Thread.objects\\\n .annotate(like_amount=Count('like'))\\\n .order_by('title')\n context_object_name = 'threads'\n model = Thread\n\n def get_context_data(self, *args, **kwargs):\n ctx = super(Index, self).get_context_data(*args, **kwargs)\n if self.request.user.is_authenticated():\n for thread in ctx['threads']:\n thread.liked = _is_liked(thread, self.request.user)\n return ctx\n\n\n@login_required(login_url='/login/')\ndef like(req, thread_id):\n try:\n thread = _get_thread_and_likes(thread_id, req.user)\n Like.objects.create(author=req.user, thread=thread)\n return JsonResponse({'like_amount': thread.like_set.count() + 1})\n except PermissionDenied:\n response = JsonResponse({'errors': ['The thread is already liked']})\n return _set_status_code(response, 403)\n\n\n@login_required(login_url='/login/')\ndef dislike(req, thread_id):\n try:\n thread = _get_thread_and_likes(thread_id, req.user, like=False)\n Like.objects.get(author=req.user, thread=thread).delete()\n return JsonResponse({'like_amount': thread.like_amount - 1})\n except PermissionDenied:\n response = JsonResponse({'errors': ['The thread is not liked yet.']}) \n return _set_status_code(response, 403)\n\n\ndef thread_detail(req, id):\n try:\n thread = Thread.objects\\\n .filter(pk=id)\\\n .annotate(like_amount=Count('like'))\\\n .prefetch_related('comment_set', 'like_set')[0]\n liked = False\n if req.user.is_authenticated():\n liked = 
thread.like_set.filter(author=req.user).exists()\n return render(req, 'threads/thread.html', {'thread': thread, 'liked': liked})\n except IndexError:\n raise Http404('Thread does not exist')\n\n\n@login_required(login_url='/login/')\ndef create_thread(req):\n thread_form = ThreadForm(req.POST)\n if thread_form.is_valid():\n thread = thread_form.save(commit=False)\n thread.author = req.user\n thread.save()\n return JsonResponse({'id': thread.id})\n else:\n return JsonResponse({'errors': ['Sent data is invalid']})\n\n\n@login_required(login_url='/login/')\ndef comment(req, thread_id):\n thread = get_object_or_404(Thread, pk=thread_id)\n comment_form = CommentForm(req.POST)\n print(req.POST)\n if comment_form.is_valid():\n comment = comment_form.save(commit=False)\n comment.thread = thread\n comment.author = req.user\n comment.save()\n return redirect(reverse('threads:thread', args=[thread.id]))\n # return JsonResponse({'id': comment.id})\n else:\n return JsonResponse({'errors': ['Sent data is invalid']})\n\n\n@login_required(login_url='/logout/')\ndef logout(req):\n auth_logout(req)\n return redirect(reverse('threads:index'))\n\n\nclass Login(View):\n\n def get(self, req, *args, **kwargs):\n return render(req, 'threads/login.html', {})\n\n def post(self, req, *args, **kwargs):\n user_form = AuthenticationForm(req, req.POST)\n if user_form.is_valid():\n user = user_form.get_user()\n auth_login(req, user)\n return redirect(reverse('threads:index'))\n else:\n return _set_status_code(JsonResponse({'errors': ['Login failed.']}), 401)\n\n\nclass Register(View):\n\n def get(self, req, *args, **kwargs):\n return render(req, 'threads/registration.html', {})\n\n def post(self, req, *args, **kwargs):\n registration_form = UserCreationForm(req.POST)\n if registration_form.is_valid():\n user = registration_form.save(commit=True)\n return redirect(reverse('threads:index'))\n else:\n return _set_status_code(JsonResponse({'errors': ['Invalid data']}), 401)\n\n\ndef _is_liked(thread, user):\n return Like.objects.filter(thread=thread, author=user).exists()\n\n\ndef _get_thread_and_likes(thread_id, author, like=True):\n try:\n thread = Thread.objects\\\n .filter(pk=thread_id)\\\n .annotate(like_amount=Count('like'))\\\n .prefetch_related('like_set')[0]\n except IndexError:\n raise Http404('Thread does not exist.')\n is_liked = _is_liked(thread, author)\n if (like and is_liked) or (not like and not is_liked):\n raise PermissionDenied()\n return thread\n\n\ndef _set_status_code(res, status_code):\n res.status_code = status_code\n return res" }, { "alpha_fraction": 0.604519784450531, "alphanum_fraction": 0.6158192157745361, "avg_line_length": 43.3125, "blob_id": "c427813fd5afde562d0a89007bb78f234f94b337", "content_id": "7d8339c3f14dc8287b0d8b7b8103dde6383d8174", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 708, "license_type": "no_license", "max_line_length": 76, "num_lines": 16, "path": "/raddit/threads/urls.py", "repo_name": "Yevs/week1", "src_encoding": "UTF-8", "text": "from django.conf.urls import include, url\nfrom django.contrib import admin\n\nfrom . 
import views\n\nurlpatterns = [\n url(r'^$', views.Index.as_view(), name='index'),\n url(r'^like/(?P<thread_id>[0-9]+)/$', views.like, name='like'),\n url(r'^dislike/(?P<thread_id>[0-9]+)/$', views.dislike, name='dislike'),\n url(r'^thread/$', views.create_thread, name='thread_create'),\n url(r'^thread/(?P<id>[0-9]+)/$', views.thread_detail, name='thread'),\n url(r'^comment/(?P<thread_id>[0-9]+)/$', views.comment, name='comment'),\n url(r'^login/$', views.Login.as_view(), name='login'),\n url(r'^logout/$', views.logout, name='logout'),\n url(r'^register/$', views.Register.as_view(), name='register'),\n]" }, { "alpha_fraction": 0.6701940298080444, "alphanum_fraction": 0.6807760000228882, "avg_line_length": 19.285715103149414, "blob_id": "4955046d478882bc3f492d98ffdd52a8061e5208", "content_id": "7fd592060edbce00cfd38c175b4ae6a173cd8ab2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 567, "license_type": "no_license", "max_line_length": 44, "num_lines": 28, "path": "/raddit/threads/models.py", "repo_name": "Yevs/week1", "src_encoding": "UTF-8", "text": "from django.db import models\nfrom django.contrib.auth.models import User\n\n\nclass Thread(models.Model):\n\n author = models.ForeignKey(User)\n title = models.CharField(max_length=140)\n url = models.URLField()\n\n def __str__(self):\n return self.title\n\n\nclass Comment(models.Model):\n\n author = models.ForeignKey(User)\n content = models.TextField()\n thread = models.ForeignKey(Thread)\n\n def __str__(self):\n return self.content[:140]\n\n\nclass Like(models.Model):\n\n author = models.ForeignKey(User)\n thread = models.ForeignKey(Thread)" }, { "alpha_fraction": 0.6310800909996033, "alphanum_fraction": 0.6501405835151672, "avg_line_length": 43.65581512451172, "blob_id": "30858f5de23bfe985e368bcbdd6bd1bc5e8e74d1", "content_id": "482bbf5264dc81bfde19489507ad7a2d80f8dc15", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9601, "license_type": "no_license", "max_line_length": 124, "num_lines": 215, "path": "/raddit/threads/tests.py", "repo_name": "Yevs/week1", "src_encoding": "UTF-8", "text": "from django.test import TestCase, Client, RequestFactory\nfrom django.contrib.auth.models import User\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.core.exceptions import PermissionDenied\nfrom django.http import Http404, HttpResponse\n\nfrom .models import Thread, Like, Comment\nfrom . 
import views\n\n\nclass BaseTestCase(TestCase):\n\n def setUp(self, create_threads=False, create_likes=False, create_clients=True, create_comments=False):\n self._user1 = User.objects.create_user(username='username1', password='querty')\n self._user2 = User.objects.create_user(username='username2', password='querty')\n self._user3 = User.objects.create_user(username='username3', password='querty')\n self._user4 = User.objects.create_user(username='username4', password='querty')\n self._user4.is_active = False\n self._user4.save()\n if create_threads:\n self.create_threads(create_likes, create_comments)\n if create_clients:\n self.logged_client = Client()\n self.logged_client.login(username='username1', password='querty')\n self.client = Client()\n\n def create_threads(self, create_likes=False, create_comments=False):\n self._thread1 = Thread.objects.create(title='title1', url='google.com', author=self._user1)\n self._thread2 = Thread.objects.create(title='title2', url='vk.com', author=self._user2)\n if create_likes:\n self.create_likes()\n if create_comments:\n self.create_comments()\n\n def create_likes(self):\n self._like1 = Like.objects.create(thread=self._thread1, author=self._user1)\n self._like2 = Like.objects.create(thread=self._thread1, author=self._user2)\n self._like3 = Like.objects.create(thread=self._thread2, author=self._user3)\n self._like4 = Like.objects.create(thread=self._thread2, author=self._user4)\n\n def create_comments(self):\n self._comment1 = Comment.objects.create(author=self._user1, thread=self._thread1, content='some content')\n self._comment2 = Comment.objects.create(author=self._user2, thread=self._thread1, content='some content')\n self._comment3 = Comment.objects.create(author=self._user3, thread=self._thread2, content='some content')\n self._comment4 = Comment.objects.create(author=self._user4, thread=self._thread2, content='some content')\n\n\nclass HelperFunctionsTest(BaseTestCase):\n\n def setUp(self):\n super(HelperFunctionsTest, self).setUp(create_threads=True, create_likes=True, \\\n create_clients=True, create_comments=True)\n\n def test_is_liked(self):\n self.assertTrue(views._is_liked(self._thread1, self._user1))\n self.assertFalse(views._is_liked(self._thread2, self._user1))\n\n def test_get_thread_and_likes(self):\n self.assertEqual(views._get_thread_and_likes(self._thread1.id, self._user3, like=True).like_amount,\n 2)\n self.assertEqual(views._get_thread_and_likes(self._thread1.id, self._user2, like=False).like_amount,\n 2)\n with self.assertRaises(PermissionDenied):\n views._get_thread_and_likes(self._thread1.id, self._user1, like=True)\n with self.assertRaises(PermissionDenied):\n views._get_thread_and_likes(self._thread1.id, self._user3, like=False)\n with self.assertRaises(Http404):\n views._get_thread_and_likes(666, self._user3, like=False)\n\n def test_set_status_code(self):\n res = HttpResponse()\n views._set_status_code(res, 200)\n self.assertEqual(res.status_code, 200)\n views._set_status_code(res, 100)\n self.assertEqual(res.status_code, 100)\n views._set_status_code(res, 47)\n self.assertEqual(res.status_code, 47)\n views._set_status_code(res, 404)\n self.assertEqual(res.status_code, 404)\n\n\nclass ThreadModelTest(BaseTestCase):\n\n def setUp(self):\n super(ThreadModelTest, self).setUp(create_threads=True)\n\n def test_str(self):\n self.assertEqual(str(self._thread1), self._thread1.title)\n\n\nclass CommentModelTest(BaseTestCase):\n\n def setUp(self):\n super(CommentModelTest, self).setUp(create_threads=True, create_comments=True)\n\n 
def test_str(self):\n self.assertEqual(str(self._comment1), self._comment1.content)\n\nclass ThreadTest(BaseTestCase):\n\n def setUp(self):\n super(ThreadTest, self).setUp(create_threads=True, create_clients=True)\n\n def tearDown(self):\n Thread.objects.filter(title='title3').delete()\n\n def test_index(self):\n res = self.logged_client.get('/')\n self.assertEqual(res.status_code, 200)\n self.assertIn('threads', res.context)\n threads = res.context['threads']\n self.assertEqual(threads[0].title, 'title1')\n self.assertEqual(threads[1].title, 'title2')\n\n def test_detail_get(self):\n res = self.logged_client.get('/thread/{0}/'.format(self._thread1.id))\n self.assertEqual(res.status_code, 200)\n self.assertEqual(res.context['thread'].like_amount, 0)\n res = self.logged_client.get('/thread/47/')\n self.assertEqual(res.status_code, 404) #not found\n\n def test_create(self):\n res = self.client.post('/thread/')\n self.assertEqual(res.status_code, 302) #redirect\n post_data = {'title': 'title3', 'url': 'twitter.com'}\n res = self.logged_client.post('/thread/', post_data)\n self.assertEqual(res.status_code, 200) #redirect\n self.assertEqual(res.content, \n ('{\"id\": ' + str(format(Thread.objects.get(title='title3').id)) + '}').encode()) # hard time working with bytes\n self.assertEqual(res.status_code, 200)\n res = self.logged_client.post('/thread/')\n self.assertEqual(res.content, b'{\"errors\": [\"Sent data is invalid\"]}')\n with self.assertRaises(ObjectDoesNotExist):\n Thread.objects.get(title='blabla')\n\n\nclass LikeTest(BaseTestCase):\n\n def setUp(self):\n super(LikeTest, self).setUp(create_threads=True, create_likes=True, create_clients=True)\n\n def test_like_and_dislike(self):\n res = self.logged_client.get('/like/{}/'.format(self._thread2.id))\n self.assertEqual(res.status_code, 200)\n self.assertEqual(res.content, b'{\"like_amount\": 3}')\n res = self.logged_client.get('/like/47/')\n self.assertEqual(res.status_code, 404)\n res = self.logged_client.get('/like/{}/'.format(self._thread2.id))\n self.assertEqual(res.content, b'{\"errors\": [\"The thread is already liked\"]}')\n res = self.logged_client.get('/dislike/{}/'.format(self._thread2.id))\n self.assertEqual(res.status_code, 200)\n self.assertEqual(res.content, b'{\"like_amount\": 2}')\n res = self.logged_client.get('/dislike/{}/'.format(self._thread2.id))\n self.assertEqual(res.content, b'{\"errors\": [\"The thread is not liked yet.\"]}')\n\n\nclass CommentTest(BaseTestCase):\n\n def setUp(self):\n super(CommentTest, self).setUp(create_threads=True, create_likes=True, \\\n create_clients=True, create_comments=True)\n\n def test_commenting_thread(self):\n res = self.logged_client.post('/comment/{}/'.format(self._thread1.id))\n self.assertEqual(res.status_code, 200)\n self.assertEqual(res.content, b'{\"errors\": [\"Sent data is invalid\"]}')\n res = self.logged_client.post('/comment/47/')\n self.assertEqual(res.status_code, 404) #not found\n res = self.logged_client.post('/comment/{}/'.format(self._thread2.id), {'content': 'some other content'})\n self.assertEqual(res.status_code, 200) #redirect\n self.assertTrue(Comment.objects.filter(thread=self._thread2, author=self._user1).exists())\n\nclass TestLogin(BaseTestCase):\n\n def setUp(self):\n super(TestLogin, self).setUp()\n\n def test_login_page(self):\n res = self.logged_client.get('/login/')\n self.assertEqual(res.status_code, 200)\n\n def test_logging_in(self):\n res = self.client.post('/login/', {'username': 'username1', 'password': 'querty'})\n 
self.assertEqual(res.status_code, 302)\n self.assertEqual(int(self.client.session['_auth_user_id']), self._user1.pk) # checks that the user actually logged in\n res = self.client.post('/login/')\n self.assertEqual(res.status_code, 401)\n self.assertEqual(res.content, b'{\"errors\": [\"Login failed.\"]}')\n res = self.client.post('/login/', {'username': 'username4', 'password': 'querty'})\n self.assertEqual(res.status_code, 401)\n self.assertEqual(res.content, b'{\"errors\": [\"Login failed.\"]}')\n\n def test_logout(self):\n self.client.post('/login/', {'username': 'username1', 'password': 'querty'})\n res = self.client.get('/logout/')\n self.assertEqual(res.status_code, 302)\n self.assertNotIn('_auth_user_id', self.client.session)\n\n\nclass TestRegister(BaseTestCase):\n\n def setUp(self):\n super(TestRegister, self).setUp()\n\n def test_register_page(self):\n res = self.client.get('/register/')\n self.assertEqual(res.status_code, 200)\n\n def test_registration(self):\n res = self.client.post('/register/', {'username': 'new_user', 'password1': 'abc', 'password2': 'abc'})\n self.assertEqual(res.status_code, 302)\n self.assertTrue(User.objects.filter(username='new_user').exists())\n res = self.client.post('/register/')\n self.assertEqual(res.status_code, 401)\n self.assertEqual(res.content, b'{\"errors\": [\"Invalid data\"]}')\n" } ]
7
DeepakSingh9/talo-new
https://github.com/DeepakSingh9/talo-new
a6d2dcda3a3a3bc8743fe6706c807f779081f8a4
71206861d958819062cf6f2e20ad360a8757a263
176c48a3654ddb43837067256d70ef25cf5ef5c1
refs/heads/master
2020-04-28T00:44:04.360678
2019-07-01T05:28:51
2019-07-01T05:28:51
174,826,909
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.725494384765625, "alphanum_fraction": 0.7401658296585083, "avg_line_length": 39.20512771606445, "blob_id": "5fdabdce16cdd90c38b77fe83c8a24d2e0bda1db", "content_id": "3cb36c97d3a47595fec1e8221e69c0e0e1a2ca3d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4703, "license_type": "no_license", "max_line_length": 100, "num_lines": 117, "path": "/resume/models.py", "repo_name": "DeepakSingh9/talo-new", "src_encoding": "UTF-8", "text": "from django.db import models\nfrom django.contrib.auth.models import User\nimport datetime\nfrom dashboard.models import Profile\n\n# Create your models here.\n'''\nclass Bio(models.Model):\n Degree_choice=(('High School','HIGH SCHOOL'),('undergrad','UNDERGRAD'),('postgrad','POSTGRAD'),)\n user=models.OneToOneField(User,on_delete=models.CASCADE)\n masters_degree_name=models.CharField(max_length=150,blank=True,default='')\n masters_college_name=models.CharField(max_length=150,blank=True)\n masters_education_from = models.DateField(default=datetime.date.today)\n masters_education_till=models.DateField(default=datetime.date.today)\n bachelors_degree = models.CharField(max_length=150,blank=True)\n bachelors_college_name = models.CharField(max_length=150, blank=True)\n bachelors_education_from = models.DateField(default=datetime.date.today)\n bachelors_education_till = models.DateField(default=datetime.date.today)\n High_School_degree = models.CharField(max_length=150,blank=True)\n High_School_name = models.CharField(max_length=150, blank=True)\n High_School_from = models.DateField(default=datetime.date.today)\n High_School_till = models.DateField(default=datetime.date.today)\n Junior_degree = models.CharField(max_length=150,blank=True)\n Junior_School_name = models.CharField(max_length=150, blank=True)\n Junior_School_from = models.DateField(default=datetime.date.today)\n Junior_School_till = models.DateField(default=datetime.date.today)\n\n def __str__(self):\n return self.user.username'''\n\n\nclass Educations(models.Model):\n Degree_choice=(('High School','HIGH SCHOOL'),('undergrad','UNDERGRAD'),('postgrad','POSTGRAD'),)\n user=models.OneToOneField(Profile,on_delete=models.CASCADE)\n masters_degree_name=models.CharField(max_length=150,blank=True,default='')\n masters_college_name=models.CharField(max_length=150,blank=True)\n masters_education_from = models.DateField(default=datetime.date.today)\n masters_education_till=models.DateField(default=datetime.date.today)\n bachelors_degree = models.CharField(max_length=150,blank=True)\n bachelors_college_name = models.CharField(max_length=150, blank=True)\n bachelors_education_from = models.DateField(default=datetime.date.today)\n bachelors_education_till = models.DateField(default=datetime.date.today)\n High_School_degree = models.CharField(max_length=150,blank=True)\n High_School_name = models.CharField(max_length=150, blank=True)\n High_School_from = models.DateField(default=datetime.date.today)\n High_School_till = models.DateField(default=datetime.date.today)\n Junior_degree = models.CharField(max_length=150,blank=True)\n Junior_School_name = models.CharField(max_length=150, blank=True)\n Junior_School_from = models.DateField(default=datetime.date.today)\n Junior_School_till = models.DateField(default=datetime.date.today)\n\n def __str__(self):\n return self.user.username\n\nclass Project(models.Model):\n profile=models.ForeignKey(Profile,on_delete=models.CASCADE,)\n title= models.CharField(max_length=128)\n year = 
models.DateField()\n description = models.TextField()\n link = models.URLField(blank=True)\n position=models.CharField(max_length=128,blank=True)\n\n def __str__(self):\n return self.title\n\n\nclass WorkExperience(models.Model):\n profile=models.ForeignKey(Profile,on_delete=models.CASCADE,)\n organisation=models.CharField(max_length=128,blank=True,)\n designation=models.CharField(max_length=128,blank=True,)\n worked_from=models.DateField()\n worked_till=models.DateField(blank=True,null=True,default=datetime.date.today)\n current=models.BooleanField(default=False)\n describe=models.TextField(blank=True)\n\n def __str__(self):\n return self.organisation\n\nclass Certification(models.Model):\n profile=models.ForeignKey(Profile,on_delete=models.CASCADE,)\n title=models.CharField(max_length=128)\n year=models.DateField()\n link=models.URLField(blank=True)\n cert_image=models.FileField(upload_to='cert_images/',blank=True)\n\n def __str__(self):\n return self.title\n\nclass Interest(models.Model):\n profile=models.ForeignKey(Profile,on_delete=models.CASCADE,)\n interest=models.CharField(max_length=128,blank=True)\n\n\n def __str__(self):\n return self.interest\n\n\n'''\n\nclass Certification(models.Model):\n education=models.ForeignKey(Education,on_delete=models.CASCADE,)\n title=models.CharField(max_length=128)\n year=models.DateField()\n description=models.TextField()\n link=models.URLField(blank=True)\n cert_image=models.FileField(upload_to='cert_images/',blank=True)\n\n def __str__(self):\n return self.title\n\n\n\n\n\n\n\n'''" }, { "alpha_fraction": 0.6417322754859924, "alphanum_fraction": 0.6417322754859924, "avg_line_length": 30.75, "blob_id": "d656377df4c3228683751ec0546cbb5fb12d4188", "content_id": "826eeda84b80a29abc9ca0fef7f18024b210db5b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 254, "license_type": "no_license", "max_line_length": 69, "num_lines": 8, "path": "/signup/urls.py", "repo_name": "DeepakSingh9/talo-new", "src_encoding": "UTF-8", "text": "from django.conf.urls import url\nfrom . 
import views\n\nurlpatterns=[url(r'^signin/$',views.user_login,name='signin'),\n url(r'^signup/$',views.user_registration,name='signup'),\n url(r'^logout/$',views.user_logout,name='logout'),\n\n]\n" }, { "alpha_fraction": 0.7621776461601257, "alphanum_fraction": 0.7631327509880066, "avg_line_length": 33.83333206176758, "blob_id": "617388b61a29c30f7645a381d536ceceafb009fa", "content_id": "f44ca898df82c8994293b4eaac6fc2fd7757be5e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1047, "license_type": "no_license", "max_line_length": 108, "num_lines": 30, "path": "/resume/admin.py", "repo_name": "DeepakSingh9/talo-new", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.contrib import admin\nfrom .models import Educations,Project,WorkExperience,Certification,Interest\n\n# Register your models here.\n\nclass EducationAdmin(admin.ModelAdmin):\n list_display = ['user','masters_college_name','masters_education_from','masters_education_till']\n\n\nclass ProjectAdmin(admin.ModelAdmin):\n list_display = ['profile','title','year','description','link','position']\n\nclass WorkexpAdmin(admin.ModelAdmin):\n list_display = ['profile','organisation','designation','worked_from','worked_till','current','describe']\n\n\nclass CertificationAdmin(admin.ModelAdmin):\n list_display = ['profile','title','link','cert_image','year']\n\nclass InterestAdmin(admin.ModelAdmin):\n list_display = ['profile','interest']\n\nadmin.site.register(Project,ProjectAdmin)\nadmin.site.register(Educations, EducationAdmin)\nadmin.site.register(WorkExperience,WorkexpAdmin)\nadmin.site.register(Certification,CertificationAdmin)\nadmin.site.register(Interest,InterestAdmin)\n\n\n" }, { "alpha_fraction": 0.6393442749977112, "alphanum_fraction": 0.6398264169692993, "avg_line_length": 31.88888931274414, "blob_id": "8eb9f375b20f85f828601d6e982a9987b9c2fd2f", "content_id": "6bcaa68807e83b1e87858bc811be6c55bdf92de8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2074, "license_type": "no_license", "max_line_length": 96, "num_lines": 63, "path": "/signup/views.py", "repo_name": "DeepakSingh9/talo-new", "src_encoding": "UTF-8", "text": " # -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.shortcuts import render, redirect\nfrom django.contrib.auth.models import User\nfrom django.http import HttpResponse, HttpResponseRedirect\nfrom django.contrib.auth import login, logout, authenticate\nfrom dashboard.models import Profile\nfrom .forms import LoginForm, RegisterationForm\n\n\n# Create your views here.\n\n\ndef user_login(request):\n if request.method == 'POST':\n username = request.POST['username']\n password = request.POST['password']\n user = authenticate(username=username, password=password)\n if user is not None:\n if user.is_active:\n login(request, user)\n return redirect('home',username=user.username)\n else:\n return HttpResponse('Your account is disabled')\n else:\n return HttpResponse('Invalid login details')\n return render(request,'signin.html', {})\n\n\n\n\ndef user_registration(request):\n if request.method == 'POST':\n userlogin = LoginForm(request.POST)\n userregister = RegisterationForm(request.POST)\n username = request.POST['username']\n password = request.POST['password']\n\n if userlogin.is_valid() and userregister.is_valid():\n user = userlogin.save(commit=False)\n user.set_password(user.password)\n user.save()\n\n 
profile = userregister.save(commit=False)\n profile.user = user\n profile.save()\n\n login(request, authenticate(username=userlogin.cleaned_data['username'],\n password=userlogin.cleaned_data['password']))\n return redirect('home',username=username)\n else:\n print (userlogin.errors, userregister.errors)\n else:\n userlogin = LoginForm()\n userregister = RegisterationForm()\n return render(request,'signup.html', {'userlogin': userlogin, 'userregister': userregister})\n\n\n\ndef user_logout(request):\n logout(request)\n return redirect('registration')\n\n" } ]
4
buzonek/Magisterka
https://github.com/buzonek/Magisterka
8bcfc52f20c491f7c05e3394929be2e299020753
36d595ccba22d645dd8be53cf846995af8b1c59d
c8c50c220b5549211250f4e653ca19d4107c087a
refs/heads/master
2021-05-01T06:29:51.545341
2018-04-21T14:26:39
2018-04-21T14:26:39
121,145,232
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5865632891654968, "alphanum_fraction": 0.5968992114067078, "avg_line_length": 28.69230842590332, "blob_id": "bb5c8e0ede164c4b5953832f4da7fecf7ba61cf9", "content_id": "02e678ee81aa280e4c56b161912fb1852a3695d1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 387, "license_type": "no_license", "max_line_length": 118, "num_lines": 13, "path": "/Sensor.py", "repo_name": "buzonek/Magisterka", "src_encoding": "UTF-8", "text": "from ValueNeuron import NeuralBaseClass\n\n\nclass Sensor(NeuralBaseClass):\n def __init__(self, value):\n super().__init__(value)\n self.count = 1\n self.value_neuron = None\n self.prev = None\n self.next = None\n\n def __repr__(self):\n return \"Sensor({0} -> {1} -> {2})\".format(self.value, self.value_neuron, self.value_neuron.connections.keys())\n\n" }, { "alpha_fraction": 0.6117788553237915, "alphanum_fraction": 0.614182710647583, "avg_line_length": 23.47058868408203, "blob_id": "d1445805a6063b39ed6f597236e8569f6a3a753d", "content_id": "e10d47794ffc0f8be7091b70b42504cd7064516d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 832, "license_type": "no_license", "max_line_length": 70, "num_lines": 34, "path": "/ValueNeuron.py", "repo_name": "buzonek/Magisterka", "src_encoding": "UTF-8", "text": "from functools import total_ordering\n\n\n@total_ordering\nclass NeuralBaseClass(object):\n def __init__(self, value):\n self.value = value\n self.connections = {}\n\n def __lt__(self, other):\n return self.value < other.value\n\n def __eq__(self, other):\n return self.value == other.value\n\n def connect(self, neuron):\n self.connections[neuron.value] = neuron\n neuron.connections[self.value] = self\n\n\nclass ValueNeuron(NeuralBaseClass):\n def __init__(self, value):\n super().__init__(value)\n self.sensor = None\n self.prev = None\n self.next = None\n\n def __repr__(self):\n return \"ValueNeuron({0})\".format(self.value, self.connections)\n\n\nclass ObjectNeuron(NeuralBaseClass):\n def __repr__(self):\n return \"ObjectNeuron({0})\".format(self.value)\n" }, { "alpha_fraction": 0.5073710083961487, "alphanum_fraction": 0.5156633853912354, "avg_line_length": 45.514286041259766, "blob_id": "457f32c9e97143f4f8df4808d52d2c182afce28c", "content_id": "054c90e8b3a1601550b2efc97d0badea961d2b30", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3257, "license_type": "no_license", "max_line_length": 198, "num_lines": 70, "path": "/main.py", "repo_name": "buzonek/Magisterka", "src_encoding": "UTF-8", "text": "from AVBTree import AVBTree\nfrom Database_connection import DatabaseConnector\nfrom Sensor import Sensor\nfrom ValueNeuron import ValueNeuron, ObjectNeuron\n\n\nclass DASNG(object):\n def __init__(self, server, database):\n self.sensory_input_fields = {}\n self.db = DatabaseConnector(server, database)\n self.convert_db()\n\n def print(self):\n for field_name, field_avb_tree in self.sensory_input_fields.items():\n print(field_name)\n field_avb_tree.print()\n print(\"*\"*50)\n\n def convert_db(self, ):\n tables = self.db.get_tables_without_fk()\n tables.extend(self.db.get_tables_with_one_fk())\n tables.extend(self.db.get_tables_with_many_fk())\n for table in tables:\n self.convert_table(table)\n\n def convert_table(self, table):\n for column in table.non_key_columns:\n if column not in self.sensory_input_fields:\n self.sensory_input_fields[column] = AVBTree(column)\n\n rows = 
self.db.fetch_data_from_table(table.name)\n for row in rows:\n id = row.__getattribute__(table.pk)\n ID = ObjectNeuron(id)\n for column in table.non_key_columns:\n value = row.__getattribute__(column)\n if value is None:\n continue\n sensor = self.sensory_input_fields[column].insert(Sensor(value))\n if sensor.count == 1:\n sensor.value_neuron = ValueNeuron(value)\n sensor.value_neuron.sensor = sensor\n sensor.value_neuron.connect(ID)\n for column in table.fk:\n fk_value = row.__getattribute__(column)\n if fk_value:\n fk_detail = self.db.get_fk_detail(column)\n tb_detail = self.db.get_tb_detail(fk_detail.referenced_table)\n if fk_detail.referenced_table != fk_detail.table:\n sql = '''Select [{5}] from [{0}]\n LEFT JOIN [{1}]\n ON [{0}].[{2}] = [{1}].[{3}]\n WHERE [{0}].[{2}] = '{4}';'''.format(fk_detail.referenced_table, fk_detail.table, fk_detail.referenced_column, fk_detail.column, fk_value, tb_detail.non_key_columns[0])\n else:\n sql = '''Select [{3}] from {0}\n WHERE [{1}] = '{2}';'''.format(fk_detail.referenced_table,\n fk_detail.referenced_column, fk_value, tb_detail.non_key_columns[0])\n res = self.db.cursor.execute(sql).fetchall()[0][0]\n try:\n referenced_IDNeuron = self.sensory_input_fields[tb_detail.non_key_columns[0]].search(res).value_neuron.connections[fk_value]\n except Exception as e:\n # TODO: on Northwind this crashes from time to time (?)\n print(e)\n rows.append(row)\n else:\n ID.connect(referenced_IDNeuron)\n\nif __name__ == \"__main__\":\n dasng = DASNG('5CG6383BLC\\MATEUSZ', 'Studenci')\n dasng.print()\n" }, { "alpha_fraction": 0.483214795589447, "alphanum_fraction": 0.49046942591667175, "avg_line_length": 30.809955596923828, "blob_id": "7b2f79b8606ee90b24be93d4e05108aabde7db3d", "content_id": "ca0a2f70c4c604b7d83e9cc5289292a68ea77395", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7030, "license_type": "no_license", "max_line_length": 76, "num_lines": 221, "path": "/AVBTree.py", "repo_name": "buzonek/Magisterka", "src_encoding": "UTF-8", "text": "from recordclass import recordclass\nfrom Sensor import Sensor\nfrom ValueNeuron import ValueNeuron\nimport ctypes\n# Key = recordclass('Key', 'value count')\n\n\nclass AVBTreeNode(object):\n def __init__(self, leaf=True):\n self.keys = []\n self.childs = []\n self.parent = None\n self.leaf = leaf\n\n def __repr__(self):\n return \"{0}\".format(self.keys)\n\n def __len__(self):\n return len(self.keys)\n\n def get_key(self, value):\n for key in self.keys:\n if key.value == value:\n return key\n\n def add_key(self, AVBTree, obj, split=False):\n i = 0\n length = len(self.keys)\n while i < length and self.keys[i].value < obj.value:\n i += 1\n if not split:\n if type(obj.value) in [int, float]:\n if length == 2:\n if i == 0:\n obj.next = self.keys[0]\n obj.prev = self.keys[0].prev\n if self.keys[0].prev:\n self.keys[0].prev.next = obj\n self.keys[0].prev = obj\n elif i == 1:\n obj.next = self.keys[1]\n obj.prev = self.keys[0]\n self.keys[1].prev = obj\n self.keys[0].next = obj\n else:\n obj.next = self.keys[1].next\n obj.prev = self.keys[1]\n if self.keys[1].next:\n self.keys[1].next.prev = obj\n self.keys[1].next = obj\n elif length == 1:\n if i == 0:\n obj.next = self.keys[0]\n obj.prev = self.keys[0].prev\n if self.keys[0].prev:\n self.keys[0].prev.next = obj\n self.keys[0].prev = obj\n\n elif i == 1:\n obj.next = self.keys[0].next\n obj.prev = self.keys[0]\n if self.keys[0].next:\n self.keys[0].next.prev = obj\n self.keys[0].next = obj\n else:\n obj.prev = AVBTree.min\n 
AVBTree.min.next = obj\n obj.next = AVBTree.max\n AVBTree.max.prev = obj\n\n self.keys.insert(i, obj)\n\n # if node is full split it\n if self.is_full:\n self.split(AVBTree)\n\n def add_child(self, child):\n i = 0\n length = len(self.childs)\n while i < length and self.childs[i].keys[0] < child.keys[0]:\n i += 1\n self.childs.insert(i, child)\n\n def split(self, AVBTree):\n rightmost_key = self.keys.pop()\n new_leaf = AVBTreeNode()\n new_leaf.add_key(AVBTree, rightmost_key, split=True)\n\n if self.childs:\n new_leaf.childs = self.childs[2:]\n for child in new_leaf.childs:\n child.parent = new_leaf\n self.childs = self.childs[:2]\n for child in self.childs:\n child.parent = self\n new_leaf.leaf = False\n\n middle_key = self.keys.pop()\n\n if self.parent:\n self.parent.add_child(new_leaf)\n new_leaf.parent = self.parent\n self.parent.add_key(AVBTree, middle_key, split=True)\n else:\n new_root = AVBTreeNode(leaf=False)\n new_root.add_key(AVBTree, middle_key, split=True)\n self.parent = new_root\n new_leaf.parent = new_root\n self.parent.add_child(self)\n self.parent.add_child(new_leaf)\n AVBTree.root = new_root\n\n @property\n def is_full(self):\n return len(self.keys) > 2\n\n @property\n def leftmost(self):\n return min(self.keys)\n\n @property\n def rightmost(self):\n return max(self.keys)\n\n def get_left_child(self):\n return self.childs[0]\n\n def get_middle_child(self):\n return self.childs[1]\n\n def get_right_child(self):\n right_child = self.childs[-1]\n return right_child\n\n\nclass AVBTree(object):\n def __init__(self, param=\"\"):\n self.root = AVBTreeNode()\n self.param = param\n self.min = Sensor(\"Min\")\n self.max = Sensor(\"Max\")\n self.min.value_neuron = ValueNeuron(\"Min\")\n self.max.value_neuron = ValueNeuron(\"Max\")\n\n @property\n def is_numerical(self):\n return type(self.root.keys[0].value) in [int, float]\n\n def insert(self, obj):\n if obj.value is None:\n return None\n\n current_node = self.root\n\n # if key is present in that leaf increment counter\n while not current_node.leaf:\n key = current_node.get_key(obj.value)\n if key:\n key.count += 1\n return key\n if obj.value < current_node.leftmost.value:\n current_node = current_node.get_left_child()\n elif obj.value > current_node.rightmost.value:\n current_node = current_node.get_right_child()\n else:\n current_node = current_node.childs[1]\n\n # if key is present in that leaf increment counter\n key = current_node.get_key(obj.value)\n if key:\n key.count += 1\n return key\n\n # if no, add key to that leaf\n current_node.add_key(self, obj)\n return obj\n\n def search(self, value):\n current_node = self.root\n while not current_node.leaf:\n key = current_node.get_key(value)\n if key:\n return key\n if value < current_node.leftmost.value:\n current_node = current_node.get_left_child()\n elif value > current_node.rightmost.value:\n current_node = current_node.get_right_child()\n else:\n current_node = current_node.childs[1]\n\n key = current_node.get_key(value)\n if not key:\n print(\"Value {0} not found!\".format(value))\n return key\n\n def print_in_order(self):\n current_node = self.root\n while not current_node.leaf:\n current_node = current_node.get_left_child()\n current = self.min\n output = \"In order: \"\n while current:\n output += str(current.value) + '\\t'\n current = current.next\n print(output)\n\n def print(self):\n \"\"\"Print an level-order representation.\"\"\"\n this_level = [self.root]\n while this_level:\n next_level = []\n output = \"\"\n for node in this_level:\n if node.childs:\n 
next_level.extend(node.childs)\n output += str(node) + \"\\t\\t\\t\"\n print(output)\n this_level = next_level\n if hasattr(self, 'min'):\n self.print_in_order()\n print(\"Min: {0}, Max: {1}\".format(self.min.next, self.max.prev))\n" }, { "alpha_fraction": 0.5810166597366333, "alphanum_fraction": 0.5961080193519592, "avg_line_length": 39.58064651489258, "blob_id": "713bd4353a96361f43a8fbc6b2c1562d070b5fbc", "content_id": "5f35d4976bba59b3f2ddcbc563f3eee18db2ce6b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2518, "license_type": "no_license", "max_line_length": 117, "num_lines": 62, "path": "/tests.py", "repo_name": "buzonek/Magisterka", "src_encoding": "UTF-8", "text": "from unittest import TestCase, main\nfrom Database_connection import DatabaseConnector\nfrom AVBTree import AVBTree\nfrom recordclass import recordclass\n\nKey = recordclass('Key', 'value count')\n\nclass DatabaseConnection(TestCase):\n def test_getting_tables_northwind(self):\n connection = DatabaseConnector('5CG6383BLC\\MATEUSZ', 'Northwind')\n tb_no_fk = connection.get_tables_without_fk()\n tb_one_fk = connection.get_tables_with_one_fk()\n tb_many_fk = connection.get_tables_with_many_fk()\n self.assertEqual(connection.get_number_of_all_tables(), len(tb_no_fk)+len(tb_many_fk)+len(tb_one_fk))\n\n def test_getting_tables_adventure(self):\n connection = DatabaseConnector('5CG6383BLC\\MATEUSZ', 'AdventureWorks2012')\n tb_no_fk = connection.get_tables_without_fk()\n tb_one_fk = connection.get_tables_with_one_fk()\n tb_many_fk = connection.get_tables_with_many_fk()\n self.assertEqual(connection.get_number_of_all_tables(), len(tb_no_fk)+len(tb_many_fk)+len(tb_one_fk))\n\n def test_strings_ok(self):\n values = ['Amy', 'Rose', 'Kate', 'Lisa', 'Sara', 'Kate', 'Tom', 'Jack', 'Lisa', 'Tom', 'Kate', 'Amy', 'Jack',\n 'Nina', 'Tom', 'Tom', 'Emy', 'Lisa', 'Paula']\n tree = AVBTree()\n for value in values[:]:\n tree.insert(Key(value=value, count=1))\n for value in values[:]:\n self.assertEqual(tree.search(value).value, value)\n\n def test_strings_nok(self):\n values = ['Amy', 'Rose', 'Kate', 'Lisa', 'Sara', 'Kate', 'Tom', 'Jack', 'Lisa', 'Tom', 'Kate', 'Amy', 'Jack',\n 'Nina', 'Tom', 'Tom', 'Emy', 'Lisa', 'Paula']\n tree = AVBTree()\n for value in values[:]:\n tree.insert(Key(value=value, count=1))\n for value in values[:]:\n self.assertNotEqual(tree.search('Mateusz'), value)\n\n def test_numbers_ok(self):\n import random\n values = []\n tree = AVBTree()\n for i in range(100):\n values.append(random.randint(0, 100000))\n tree.insert(Key(value=values[i], count=1))\n for value in values:\n self.assertEqual(tree.search(value).value, value)\n\n def test_numbers_nok(self):\n import random\n values = []\n tree = AVBTree()\n for i in range(200):\n values.append(random.randint(0, 100))\n tree.insert(Key(value=values[i], count=1))\n self.assertEqual(tree.search(101), None)\n\n\nif __name__ == '__main__':\n main()\n\n\n" }, { "alpha_fraction": 0.5443628430366516, "alphanum_fraction": 0.5497315526008606, "avg_line_length": 45.880794525146484, "blob_id": "985aa9f3733c602c02b1591222bb12f74daaf104", "content_id": "2655fa4213a97aa1c3743b2e83fce7d7e9c7dca4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7078, "license_type": "no_license", "max_line_length": 151, "num_lines": 151, "path": "/Database_connection.py", "repo_name": "buzonek/Magisterka", "src_encoding": "UTF-8", "text": "import pyodbc\n\n\nclass Table(object):\n def 
__init__(self, name):\n self.name = name\n self.pk = None\n self.fk = []\n self.non_key_columns = []\n\n\nclass DatabaseConnector:\n def __init__(self, server, database):\n self.database = database\n self.server = server\n self.connection = pyodbc.connect(r\"DRIVER={SQL Server Native Client 11.0};\"\n r'SERVER=' + server + r';'\n r';DATABASE=' + database + r';'\n r'Trusted_Connection=yes')\n self.cursor = self.connection.cursor()\n\n def get_tb_detail(self, name):\n tb = Table(name)\n tb.pk = self.select_pk(tb.name)\n tb.fk = self.select_fk(tb.name)\n tb.non_key_columns = [x for x in self.get_table_columns(tb.name) if x not in tb.fk and x not in tb.pk]\n return tb\n\n def select_fk(self, table):\n sql_statement = '''SELECT\n COL_NAME(fc.parent_object_id,\n fc.parent_column_id) AS ColumnName\n FROM sys.foreign_keys AS f\n LEFT JOIN sys.foreign_key_columns AS fc\n ON f.OBJECT_ID = fc.constraint_object_id\n WHERE OBJECT_NAME(fc.parent_object_id) = '{0}';'''.format(table)\n self.cursor.execute(sql_statement)\n fk = [x.ColumnName for x in self.cursor.fetchall()]\n return fk\n\n def select_pk(self, table):\n sql_statement = '''SELECT COLUMN_NAME\n FROM INFORMATION_SCHEMA.KEY_COLUMN_USAGE\n WHERE OBJECTPROPERTY(OBJECT_ID(CONSTRAINT_SCHEMA + '.' + QUOTENAME(CONSTRAINT_NAME)), 'IsPrimaryKey') = 1\n AND TABLE_NAME = '{0}' '''.format(table)\n\n self.cursor.execute(sql_statement)\n pk = [x.COLUMN_NAME for x in self.cursor.fetchall()]\n # if len(pk) > 1:\n # raise ValueError(\"Table {0} has more than one PK: {1}.\".format(table, pk))\n return pk.pop()\n\n def get_tables_without_fk(self):\n \"\"\"This function returns list of all tables names which don't have fk.\"\"\"\n sql_statement = '''SELECT tbl.name\n FROM sys.tables AS tbl\n LEFT JOIN sys.foreign_key_columns AS fKey\n ON tbl.object_id = fKey.parent_object_id\n WHERE fKey.parent_object_id IS NULL'''\n self.cursor.execute(sql_statement)\n tables = [self.get_tb_detail(record.name) for record in self.cursor.fetchall()]\n return tables\n\n def get_tables_with_one_fk(self):\n \"\"\"This function returns list of all tables names which have one fk.\"\"\"\n sql_statement = \"SELECT TABLE_NAME from INFORMATION_SCHEMA.TABLE_CONSTRAINTS\" \\\n \" WHERE CONSTRAINT_TYPE = 'FOREIGN KEY'\"\n self.cursor.execute(sql_statement)\n all_tables = [x.TABLE_NAME for x in self.cursor.fetchall()]\n tables_names = [x for x in all_tables if all_tables.count(x) == 1]\n tables = [self.get_tb_detail(name) for name in tables_names]\n return tables\n\n def get_tables_with_many_fk(self):\n \"\"\"This function returns list of all tables names which have many fk.\"\"\"\n sql_statement = \"SELECT TABLE_NAME from INFORMATION_SCHEMA.TABLE_CONSTRAINTS\" \\\n \" WHERE CONSTRAINT_TYPE = 'FOREIGN KEY'\"\n self.cursor.execute(sql_statement)\n all_tables = [x.TABLE_NAME for x in self.cursor.fetchall()]\n tables_names = set([x for x in all_tables if all_tables.count(x) > 1])\n tables = [self.get_tb_detail(name) for name in tables_names]\n return tables\n\n def fetch_data_from_table(self, table_name):\n \"\"\"This function gets table name as an argument and return iterator to all table rows.\"\"\"\n sql_statement = 'Select * from [{0}]'.format(table_name)\n self.cursor.execute(sql_statement)\n row = self.cursor.fetchall()\n return row\n # while row:\n # yield row\n # row = self.cursor.fetchone()\n\n def get_table_columns(self, table):\n \"\"\"This functions gets table name as an argument and returns list of all columns of this table.\"\"\"\n sql_statement = \"SELECT COLUMN_NAME FROM 
{0}.INFORMATION_SCHEMA.COLUMNS\" \\\n \" WHERE TABLE_NAME = '{1}'\".format(self.database, table)\n self.cursor.execute(sql_statement)\n columns = [x.COLUMN_NAME for x in self.cursor.fetchall()]\n return columns\n\n def get_number_of_all_tables(self):\n \"\"\"This functions returns number of all tables in the database.\"\"\"\n sql_statement = \"SELECT COUNT(tbl.name) FROM sys.tables AS tbl\"\n self.cursor.execute(sql_statement)\n return self.cursor.fetchone()[0]\n\n def get_fk_detail(self, column):\n sql = '''SELECT\n tab1.name AS [table],\n col1.name AS [column],\n tab2.name AS [referenced_table],\n col2.name AS [referenced_column]\n FROM sys.foreign_key_columns fkc\n INNER JOIN sys.objects obj\n ON obj.object_id = fkc.constraint_object_id\n INNER JOIN sys.tables tab1\n ON tab1.object_id = fkc.parent_object_id\n INNER JOIN sys.schemas sch\n ON tab1.schema_id = sch.schema_id\n INNER JOIN sys.columns col1\n ON col1.column_id = parent_column_id AND col1.object_id = tab1.object_id\n INNER JOIN sys.tables tab2\n ON tab2.object_id = fkc.referenced_object_id\n INNER JOIN sys.columns col2\n ON col2.column_id = referenced_column_id AND col2.object_id = tab2.object_id\n WHERE col1.name = '{0}';'''.format(column)\n return self.cursor.execute(sql).fetchone()\n\n\n\n\n\n\n# def get_columns(self, table, type):\n # if type == 'pk':\n # sql_statement = '''SELECT COLUMN_NAME\n # FROM INFORMATION_SCHEMA.KEY_COLUMN_USAGE\n # WHERE OBJECTPROPERTY(OBJECT_ID(CONSTRAINT_SCHEMA + '.' + QUOTENAME(CONSTRAINT_NAME)), 'IsPrimaryKey') = 1\n # AND TABLE_NAME = '{0}' '''.format(table)\n # elif type == 'fk':\n # sql_statement = '''SELECT\n # COL_NAME(fc.parent_object_id,\n # fc.parent_column_id) AS ColumnName\n # FROM sys.foreign_keys AS f\n # LEFT JOIN sys.foreign_key_columns AS fc\n # ON f.OBJECT_ID = fc.constraint_object_id\n # WHERE OBJECT_NAME(fc.parent_object_id) = '{0}';'''.format(table)\n # elif type == 'normal':\n # sql_statement = '''\n # SELECT CONSTRAINT_COLUMN_USAGE from INFORMATION_SCHEMA.CONSTRAINT_COLUMN_USAGE WHERE TABLE_NAME = '{0}';'''.format(table)" } ]
6
huangluyao/squid_segmentation
https://github.com/huangluyao/squid_segmentation
30f18cbd4488e4b9b632a9224809862c29f02276
6b64d84cb4b7649fe01a314fdc87492d866aae8d
a11effd45c6e71d8bf9d968f3c3c9fa27cedc69f
refs/heads/master
2023-02-12T22:03:34.527386
2021-01-18T13:27:14
2021-01-18T13:27:14
320,613,112
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6062992215156555, "alphanum_fraction": 0.6062992215156555, "avg_line_length": 27.33333396911621, "blob_id": "940aa154cbe1162504f383da151879549a8c6c6c", "content_id": "0e860a926dfd2ef7f4cc036afa4f3c9c073d2c5d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 254, "license_type": "no_license", "max_line_length": 62, "num_lines": 9, "path": "/uilts/models/__init__.py", "repo_name": "huangluyao/squid_segmentation", "src_encoding": "UTF-8", "text": "from .BiSeNet import BiSeNet\nfrom .DFANet import dfaNet\nfrom .UNet_Res import UNet\n\ndef get_model(cfg):\n return {'UNet': UNet,\n 'BiseNet': BiSeNet,\n 'dfaNet': dfaNet,\n }[cfg[\"model_name\"]](num_classes=cfg[\"n_classes\"])" }, { "alpha_fraction": 0.6017612814903259, "alphanum_fraction": 0.6216568946838379, "avg_line_length": 29.356435775756836, "blob_id": "3fb4f4440f09bc8117575d39667227860d3714f6", "content_id": "d48c5944631283fba685b1abd2f423629033fe68", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3082, "license_type": "no_license", "max_line_length": 98, "num_lines": 101, "path": "/predict.py", "repo_name": "huangluyao/squid_segmentation", "src_encoding": "UTF-8", "text": "import os\nimport json\nimport torch\nimport pandas as pd\nimport numpy as np\nimport torch.nn.functional as F\nfrom torch.utils.data import DataLoader\nfrom PIL import Image\nfrom uilts.datasets import get_dataset, denormalize\nfrom uilts.models import get_model\nfrom uilts.log import get_logger\nfrom uilts.evalution import eval_semantic_segmentation\nimport cv2\n\n\ndef predict(cfg, runid, use_pth='best_train_miou.pth'):\n\n dataset = cfg['dataset']\n train_logdir = f'run/{dataset}/{runid}'\n\n test_logdir = os.path.join('./results', dataset, runid)\n logger = get_logger(test_logdir)\n\n logger.info(f'Conf | use logdir {train_logdir}')\n logger.info(f'Conf | use dataset {cfg[\"dataset\"]}')\n\n device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')\n\n # ๆต‹่ฏ•้›†\n trainset, testset = get_dataset(cfg)\n\n test_loader = DataLoader(testset, batch_size=1, shuffle=False, num_workers=cfg['num_workers'])\n\n # model\n model = get_model(cfg).to(device)\n model.load_state_dict(torch.load(os.path.join(train_logdir, use_pth)))\n\n # ๆ ‡็ญพ้ข„ๅค„็†\n pd_label_color = pd.read_csv(trainset.class_dict_path, sep=',')\n name_value = pd_label_color['name'].values\n num_class = len(name_value)\n colormap = []\n\n for i in range(num_class):\n tmp = pd_label_color.iloc[i]\n color = [tmp['r'], tmp['g'], tmp['b']]\n colormap.append(color)\n\n cm = np.array(colormap).astype('uint8')\n\n test_miou = 0\n\n for i, (valImg, valLabel) in enumerate(test_loader):\n valImg = valImg.to(device)\n valLabel = valLabel.long().to(device)\n out = model(valImg)\n out = F.log_softmax(out, dim=1)\n\n pre_label = out.max(1)[1].squeeze().cpu().data.numpy()\n pre = cm[pre_label]\n\n src = denormalize(valImg.cpu(), mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n src = np.transpose(src.squeeze().data.numpy(), [1, 2, 0])\n\n pre_label = np.expand_dims(pre_label,axis=-1)\n result =pre_label*src + (1-pre_label)*(src * 0.3 +pre * 0.7)\n result = result.astype(np.uint8)\n cv2.imwrite(test_logdir + '/' + str(i) + '.png', result)\n pre_label = out.max(dim=1)[1].data.cpu().numpy()\n pre_label = [i for i in pre_label]\n\n true_label = valLabel.data.cpu().numpy()\n true_label = [i for i in true_label]\n\n eval_metrix = 
eval_semantic_segmentation(pre_label, true_label, cfg[\"n_classes\"])\n test_miou = eval_metrix['miou'] + test_miou\n\n logger.info(f'Test | Test Mean IU={test_miou / (len(test_loader)):.5f}')\n\n\nif __name__ == '__main__':\n import argparse\n\n parser = argparse.ArgumentParser(description=\"predict\")\n parser.add_argument(\"-id\", type=str, help=\"predict id\")\n parser.add_argument(\n \"--config\",\n nargs=\"?\",\n type=str,\n default=\"configs/Squid_UNet.json\",\n help=\"Configuration file to use\",\n )\n\n args = parser.parse_args()\n\n with open(args.config, 'r') as fp:\n cfg = json.load(fp)\n\n # fall back to the bundled run id only when no -id is passed on the command line\n if args.id is None:\n args.id = '2020-12-11-19-05-3988'\n\n predict(cfg, args.id)\n" }, { "alpha_fraction": 0.5700034499168396, "alphanum_fraction": 0.6203990578651428, "avg_line_length": 35.1055908203125, "blob_id": "0125068dc3b4aef47dfdb2407b8bc2ee07dd76f1", "content_id": "8b29eaa22bb8f125b66e6de4c3888a7cae58ea5f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5893, "license_type": "no_license", "max_line_length": 131, "num_lines": 161, "path": "/uilts/models/UNet_Res.py", "repo_name": "huangluyao/squid_segmentation", "src_encoding": "UTF-8", "text": "\"\"\" input size 224×224\nModel UNet : params: 60369624\nModel UNet : size: 230.291840M\n\"\"\"\n\nimport torch\nimport torch.nn as nn\n\n\ndef conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):\n \"\"\"3x3 convolution with padding\"\"\"\n return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,\n padding=dilation, groups=groups, bias=False, dilation=dilation)\n\n\ndef conv1x1(in_planes, out_planes, stride=1):\n \"\"\"1x1 convolution\"\"\"\n return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)\n\n\n# residual block used as the encoder unit\nclass BasicBlock(nn.Module):\n\n def __init__(self, in_channels, out_channels, stride=1, downsample=None, groups=1,\n base_width=64, dilation=1, norm_layer=None):\n super(BasicBlock, self).__init__()\n if norm_layer is None:\n norm_layer = nn.BatchNorm2d\n if groups != 1 or base_width != 64:\n raise ValueError('BasicBlock only supports groups=1 and base_width=64')\n if dilation > 1:\n raise NotImplementedError(\"Dilation > 1 not supported in BasicBlock\")\n # Both self.conv1 and self.downsample layers downsample the input when stride != 1\n self.conv1 = conv3x3(in_channels, out_channels, stride)\n self.bn1 = norm_layer(out_channels)\n self.relu = nn.ReLU(inplace=True)\n self.conv2 = conv3x3(out_channels, out_channels)\n self.bn2 = norm_layer(out_channels)\n self.downsample = downsample\n self.stride = stride\n\n def forward(self, x):\n identity = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n\n if self.downsample is not None:\n identity = self.downsample(x)\n\n out += identity\n out = self.relu(out)\n\n return out\n\n\n# stack residual blocks into one encoder stage\ndef make_layers(block, in_channels, out_channels, blocks, stride=1):\n\n downsample = None\n if stride != 1 or in_channels != out_channels:\n downsample = nn.Sequential(\n conv1x1(in_channels, out_channels, stride),\n nn.BatchNorm2d(out_channels))\n\n layers = []\n layers.append(block(in_channels, out_channels, stride, downsample))\n\n for _ in range(1, blocks):\n layers.append(block(out_channels, out_channels))\n\n return nn.Sequential(*layers)\n\n\n# decoder as described in the UNet paper\nclass Decoder(nn.Module):\n def __init__(self, in_channels, mid_channels, out_channels):\n super(Decoder, self).__init__()\n\n self.up = 
nn.ConvTranspose2d(in_channels, in_channels//2, kernel_size=3, stride=2, padding=1, output_padding=1, dilation=1)\n\n self.block = nn.Sequential(\n nn.Conv2d(in_channels, mid_channels, kernel_size=3, padding=1, bias=False),\n nn.ReLU(inplace=True),\n nn.BatchNorm2d(mid_channels),\n nn.Conv2d(mid_channels, out_channels, kernel_size=3, padding=1, bias=False),\n nn.ReLU(inplace=True),\n nn.BatchNorm2d(out_channels))\n\n def forward(self, e, d):\n d = self.up(d)\n cat = torch.cat([e, d], dim=1)\n out = self.block(cat)\n return out\n\n\ndef final_block(in_channels, out_channels):\n block = nn.Sequential(nn.Conv2d(in_channels, out_channels, kernel_size=1, bias=False),\n nn.ReLU(inplace=True),\n nn.BatchNorm2d(out_channels))\n return block\n\n\nclass UNet(nn.Module):\n def __init__(self, num_classes):\n super(UNet, self).__init__()\n\n self.pool = nn.MaxPool2d(kernel_size=2, stride=2)\n\n # Encode\n self.encode1 = make_layers(BasicBlock, 3, 64, 2, stride=1)\n self.encode2 = make_layers(BasicBlock, 64, 128, 2, stride=2)\n self.encode3 = make_layers(BasicBlock, 128, 256, 2, stride=2)\n self.encode4 = make_layers(BasicBlock, 256, 512, 2, stride=2)\n\n # bottom of the encoder\n self.bottleneck = make_layers(BasicBlock, 512, 1024, 2, stride=2)\n\n # decoder\n self.decode4 = Decoder(1024, 512, 512)\n self.decode3 = Decoder(512, 256, 256)\n self.decode2 = Decoder(256, 128, 128)\n self.decode1 = Decoder(128, 64, 64)\n\n self.final = final_block(64, num_classes)\n\n def forward(self, x):\n encode_block1 = self.encode1(x) # print('encode_block1', encode_block1.size()) torch.Size([2, 128, 416, 416])\n encode_block2 = self.encode2(encode_block1) # print('encode_block2', encode_block2.size()) torch.Size([2, 256, 208, 208])\n encode_block3 = self.encode3(encode_block2) # print('encode_block3', encode_block3.size()) torch.Size([2, 512, 104, 104])\n encode_block4 = self.encode4(encode_block3) # print('encode_block4', encode_block4.size()) torch.Size([2, 1024, 52, 52])\n\n bottleneck = self.bottleneck(encode_block4) # print('bottleneck', bottleneck.size()) torch.Size([2, 1024, 26, 26])\n\n decode_block4 = self.decode4(encode_block4, bottleneck) # print('decode_block4', decode_block4.size())\n decode_block3 = self.decode3(encode_block3, decode_block4) # print('decode_block3', decode_block3.size())\n decode_block2 = self.decode2(encode_block2, decode_block3) # print('decode_block2', decode_block2.size())\n decode_block1 = self.decode1(encode_block1, decode_block2) # print('decode_block1', decode_block1.size())\n\n out = self.final(decode_block1)\n return out\n\n\nif __name__ == \"__main__\":\n rgb = torch.randn(1, 3, 224, 224)\n\n model = UNet(num_classes=12) # __init__ only takes num_classes; the input is fixed to 3 channels\n\n out = model(rgb)\n print(out.shape)\n\n # estimate the parameter count and model size\n import numpy as np\n para = sum([np.prod(list(p.size())) for p in model.parameters()])\n type_size = 4 # a float32 takes 4 bytes\n print('Model {} : params: {}'.format(model._get_name(), para))\n print('Model {} : size: {:4f}M'.format(model._get_name(), para*type_size/1024/1024))\n\n" }, { "alpha_fraction": 0.5666738152503967, "alphanum_fraction": 0.5852479934692383, "avg_line_length": 29.03870964050293, "blob_id": "200bc55e12587bb4b8da1057ea5d5b8cbe3fa883", "content_id": "f5402b68bfd0e6fb08a8040444b60612dfd5db55", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9778, "license_type": "no_license", "max_line_length": 105, "num_lines": 310, "path": "/uilts/datasets/CustomDataset.py", "repo_name": "huangluyao/squid_segmentation", 
"src_encoding": "UTF-8", "text": "import torch\nfrom torch.utils.data import Dataset, DataLoader\nfrom torchvision import transforms\nfrom PIL import Image\nimport numpy as np\nimport os\nimport cv2 as cv\nfrom uilts.parse_cfg import parse_json\nimport configs as cfg\nfrom augmentation.pipelines.compose import Compose\nimport pandas as pd\n\n\nclass LabelProcessor:\n '''ๆ ‡็ญพ้ข„ๅค„็†'''\n def __init__(self, file_path):\n colormap = self.read_color_map(file_path)\n # ๅฏนๆ ‡็ญพๅš็ผ–็ ๏ผŒ่ฟ”ๅ›žๅ“ˆๅธŒ่กจ\n self.cm2lbl = self.encode_label_pix(colormap)\n\n # ๅฐ†maskไธญ็š„RGB่ฝฌๆˆ็ผ–็ ็š„label\n def encode_label_img(self, img):\n data = np.array(img, np.int32)\n idx = (data[:, :, 0] * 256+data[:, :, 1])*256 + data[:, :, 2]\n # ่ฟ”ๅ›ž็ผ–็ ๅŽ็š„label\n return np.array(self.cm2lbl[idx], np.int64)\n\n # ่ฟ”ๅ›žไธ€ไธชๅ“ˆๅธŒๆ˜ ๅฐ„ ๅ† 3็ปด256 ็ฉบ้—ดไธญ\n @staticmethod\n def encode_label_pix(colormap):\n cm2lbl = np.zeros(256**3) # 3็ปด็š„256็š„็ฉบ้—ด ๆ‰“ๆˆไธ€็ปดๅบฆ\n for i, cm in enumerate(colormap):\n cm2lbl[(cm[0]*256+cm[1])*256+cm[2]] = i\n return cm2lbl\n\n # ่ฏปๅ–csvๆ–‡ไปถ\n @staticmethod\n def read_color_map(file_path):\n pd_label_color = pd.read_csv(file_path, sep=',')\n colormap = []\n\n for i in range(len(pd_label_color.index)):\n tmp = pd_label_color.iloc[i]\n color = [tmp['r'], tmp['g'], tmp['b']]\n colormap.append(color)\n\n return colormap\n\n\nclass CamvidDataset(Dataset):\n\n def __init__(self, img_path, label_path, json_path, class_dict_path, mode=\"train\"):\n self.imgs = self.read_file(img_path)\n self.labels = self.read_file(label_path)\n\n assert len(self.imgs) == len(self.labels), \"label ๅ’Œ image ๆ•ฐๆฎ้•ฟๅบฆไธๅŒ\"\n\n config = parse_json(json_path)\n if mode == 'train':\n self.train_pipeline = Compose(config['train'])\n else:\n self.train_pipeline = Compose(config['test'])\n\n self.tf = transforms.Compose([\n lambda x:torch.tensor(x, dtype=torch.float32)])\n\n self.class_dict_path = class_dict_path\n self.label_processor = LabelProcessor(class_dict_path)\n\n def __len__(self):\n return len(self.imgs)\n\n def __getitem__(self, item):\n img = self.imgs[item]\n label = self.labels[item]\n\n image = cv.imread(img)\n label = cv.imread(label)[..., ::-1] # BGR 2 RGB\n\n img, label = self.img_transform(image, label)\n\n return img, label\n\n def read_file(self, path):\n '''ไปŽๆ–‡ไปถๅคนไธญ่ฏปๅ–ๆ•ฐๆฎ'''\n files_list = os.listdir(path)\n file_path_list = [os.path.join(path, file) for file in files_list]\n file_path_list.sort()\n return file_path_list\n\n def img_transform(self, image, mask):\n '''ๅ›พๅƒๆ•ฐๆฎ้ข„ๅค„็†ๅนถ่ฝฌๆˆtensorๆ ผๅผ'''\n # ่Žทๅ–ๅ›พๅƒไฟกๆฏ\n data = {\"type\": \"segmentation\"}\n data[\"image\"] = image\n data[\"mask\"] = mask\n\n # ๆ•ฐๆฎๅขžๅผบ\n augment_result = self.train_pipeline(data)\n\n image = augment_result[\"image\"]\n mask = augment_result[\"mask\"]\n\n # ่ฝฌๆˆtensorๆ ผๅผ\n image = self.tf(np.transpose(image, (2, 0, 1)))\n\n # ๅฏนๆ ‡็ญพ่ฟ›่กŒ็ผ–็ ๏ผŒ่ฝฌๆˆtensor\n mask = self.label_processor.encode_label_img(mask)\n mask = torch.from_numpy(mask)\n\n return image, mask\n\n\nclass VOCDataset(Dataset):\n def __init__(self, voc_path, json_path, mode=\"train\"):\n self.voc_path = voc_path\n file_path = os.path.join(voc_path, 'ImageSets/Segmentation')\n\n self.imgs, self.labels = self.read_file(file_path, mode)\n assert len(self.imgs) == len(self.labels), \"label ๅ’Œ image ๆ•ฐๆฎ้•ฟๅบฆไธๅŒ\"\n\n config = parse_json(json_path)\n if mode == 'train':\n self.train_pipeline = Compose(config['train'])\n else:\n self.train_pipeline = 
Compose(config['test'])\n\n self.tf = transforms.Compose([\n lambda x:torch.tensor(x, dtype=torch.float32)])\n\n def __len__(self):\n return len(self.imgs)\n\n def __getitem__(self, item):\n image = self.imgs[item]\n label = self.labels[item]\n\n image = cv.imread(image)\n label = cv.imread(label)[..., ::-1]\n\n img, label = self.img_transform(image, label)\n\n return img, label\n\n def img_transform(self, image, mask):\n '''ๅ›พๅƒๆ•ฐๆฎ้ข„ๅค„็†ๅนถ่ฝฌๆˆtensorๆ ผๅผ'''\n # ่Žทๅ–ๅ›พๅƒไฟกๆฏ\n data = {\"type\": \"segmentation\"}\n data[\"image\"] = image\n data[\"mask\"] = mask\n\n # ๆ•ฐๆฎๅขžๅผบ\n augment_result = self.train_pipeline(data)\n\n image = augment_result[\"image\"]\n mask = augment_result[\"mask\"]\n\n # ่ฝฌๆˆtensorๆ ผๅผ\n image = self.tf(np.transpose(image, (2, 0, 1)))\n\n # ๅฏนๆ ‡็ญพ่ฟ›่กŒ็ผ–็ ๏ผŒ่ฝฌๆˆtensor\n mask = label_processor.encode_label_img(mask)\n mask = torch.from_numpy(mask)\n\n return image, mask\n\n def read_file(self, file_path, mode):\n if mode == \"train\":\n imgs_path = os.path.join(file_path, 'train.txt')\n else:\n imgs_path =os.path.join(file_path, 'val.txt')\n\n f_imgs = open(imgs_path, 'r')\n\n img_names = [img[:-1] for img in f_imgs.readlines()]\n f_imgs.close()\n\n imgs = [os.path.join(self.voc_path, 'JPEGImages/%s.jpg' % (img)) for img in img_names]\n labels = [os.path.join(self.voc_path, 'SegmentationClass/%s.png' % (img)) for img in img_names]\n return imgs, labels\n\n\nclass SquidDataset(Dataset):\n def __init__(self, img_path, label_path, json_path, class_dict_path, mode=\"train\"):\n self.imgs = self.read_file(img_path)\n self.labels = self.read_file(label_path)\n\n assert len(self.imgs) == len(self.labels), \"label ๅ’Œ image ๆ•ฐๆฎ้•ฟๅบฆไธๅŒ\"\n\n config = parse_json(json_path)\n if mode == 'train':\n self.train_pipeline = Compose(config['train'])\n else:\n self.train_pipeline = Compose(config['test'])\n\n self.tf = transforms.Compose([\n lambda x: torch.tensor(x, dtype=torch.float32)])\n\n self.class_dict_path = class_dict_path\n self.label_processor = LabelProcessor(class_dict_path)\n\n def __len__(self):\n return len(self.imgs)\n\n def __getitem__(self, item):\n img = self.imgs[item]\n label = self.labels[item]\n\n image = cv.imread(img)\n label = cv.imread(label)[..., ::-1] # BGR 2 RGB\n\n img, label = self.img_transform(image, label)\n\n return img, label\n\n def read_file(self, path):\n '''ไปŽๆ–‡ไปถๅคนไธญ่ฏปๅ–ๆ•ฐๆฎ'''\n files_list = os.listdir(path)\n file_path_list = [os.path.join(path, file) for file in files_list]\n file_path_list.sort()\n return file_path_list\n\n def img_transform(self, image, mask):\n '''ๅ›พๅƒๆ•ฐๆฎ้ข„ๅค„็†ๅนถ่ฝฌๆˆtensorๆ ผๅผ'''\n # ่Žทๅ–ๅ›พๅƒไฟกๆฏ\n data = {\"type\": \"segmentation\"}\n data[\"image\"] = image\n data[\"mask\"] = mask\n\n # ๆ•ฐๆฎๅขžๅผบ\n augment_result = self.train_pipeline(data)\n\n image = augment_result[\"image\"]\n mask = augment_result[\"mask\"]\n\n # ่ฝฌๆˆtensorๆ ผๅผ\n image = self.tf(np.transpose(image, (2, 0, 1)))\n\n # ๅฏนๆ ‡็ญพ่ฟ›่กŒ็ผ–็ ๏ผŒ่ฝฌๆˆtensor\n mask = self.label_processor.encode_label_img(mask)\n mask = torch.from_numpy(mask)\n\n return image, mask\n\n\ndef denormalize(x_hat, mean=[0.2826372, 0.2826372, 0.2826372], std=[0.30690703, 0.30690703, 0.30690703]):\n\n mean = torch.tensor(mean).unsqueeze(-1).unsqueeze(-1).unsqueeze(0)\n std = torch.tensor(std).unsqueeze(-1).unsqueeze(-1).unsqueeze(0)\n x = x_hat * std + mean\n return x*255\n\n\ndef linknet_class_weight(num_classes):\n p_class = num_classes / num_classes.sum()\n return 1 / (np.log(1.02 + 
p_class))\n\n\ndef compute_weight(root, n_classes):\n num_classes = np.zeros(n_classes)\n for image in os.listdir(root):\n image = Image.open(os.path.join(root, image))\n image = np.asarray(image) # 360, 480\n image = np.asarray(image).reshape(-1) # 360 * 480\n num = np.bincount(image) # len = 12\n num_classes += num # ๆฏไธช็ฑปๅˆซๅ‡บ็Žฐ็š„ๆ€ปๆฌกๆ•ฐ\n\n weight = linknet_class_weight(num_classes)\n\n return torch.Tensor(weight.tolist())\n\n\nif __name__ == \"__main__\":\n \"\"\"้ชŒ่ฏCamvidๆ•ฐๆฎ้›†\"\"\"\n # test = CamvidDataset(cfg.train_path, cfg.train_label_path, cfg.json_path, mode=\"train\")\n\n\n \"\"\"้ชŒ่ฏVOCๆ•ฐๆฎ้›†\"\"\"\n # test = VOCDataset(cfg.voc_path, cfg.json_path)\n # from torch.utils.data import DataLoader\n\n \"\"\"้ชŒ่ฏ้ฑฟ้ฑผๆ•ฐๆฎ้›†\"\"\"\n class_dict_path = '../../database/Squid/class_dict.csv'\n\n train_path = \"../../database/Squid/train\"\n train_label_path = \"../../database/Squid/train_labels\"\n test_path = \"../../database/Squid/test\"\n test_label_path = \"../../database/Squid/test_labels\"\n\n augmentation_path = \"../../configs/imagenet.json\"\n test = SquidDataset(train_path, train_label_path, augmentation_path, class_dict_path, mode=\"train\")\n\n test_db = DataLoader(test, batch_size=1)\n\n label_processor = LabelProcessor(class_dict_path)\n\n cm = np.array(label_processor.read_color_map(class_dict_path))\n\n for img, label in test_db:\n images = denormalize(img, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n images = images.numpy()\n labels = label.numpy()\n for image, label in zip(images, labels):\n image = np.transpose(image, (1, 2, 0))\n label = label.astype(np.int32)\n label = cm[label][..., ::-1]\n\n cv.imshow(\"img\", image.astype(np.uint8))\n cv.imshow('lable', label.astype(np.uint8))\n cv.waitKey()\n\n\n" }, { "alpha_fraction": 0.5926993489265442, "alphanum_fraction": 0.59990394115448, "avg_line_length": 31.53125, "blob_id": "3ace76053e47c6667cff0900b29878803bc0e4bc", "content_id": "5726eb474794e9ff4de84e4546a150307263b677", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2495, "license_type": "no_license", "max_line_length": 93, "num_lines": 64, "path": "/uilts/evalution.py", "repo_name": "huangluyao/squid_segmentation", "src_encoding": "UTF-8", "text": "import numpy as np\nnp.seterr(divide='ignore', invalid='ignore')\n\n\ndef calc_semantic_segmentation_confusion(pred_labels, gt_labels, num_class):\n \"\"\"ๅปบ็ซ‹ๆททๆท†็Ÿฉ้˜ต,ไปฅๆ–นไพฟ่ฎก็ฎ—PA,MPA,Iou็ญ‰ๅ„ไธชๆŒ‡ๆ ‡\"\"\"\n confusion = np.zeros((num_class, num_class))\n for pred_label, gt_label in zip(pred_labels, gt_labels):\n # ๅˆคๆ–ญๆททๆท†็Ÿฉ้˜ตๆ˜ฏๅฆๆ˜ฏ2็ปด\n if pred_label.ndim != 2 or gt_label.ndim != 2:\n raise ValueError('ndim of labels should be two.')\n\n # ๅˆคๆ–ญ ่พ“ๅ…ฅ่พ“ๅ‡บๅฐบๅฏธไธ€ๆ ท\n if pred_label.shape != gt_label.shape:\n raise ValueError('Shape of ground truth and prediction should'\n ' be same.')\n\n # ๆ‰“ๆˆไธ€็ปดๅ‘้‡\n pred_label = pred_label.flatten()\n gt_label = gt_label.flatten()\n\n # ๆ ก้ชŒไธ€ไธ‹ ไฟ่ฏ gt_label้ƒฝๆ˜ฏๅคงไบŽ็ญ‰ไบŽ0็š„๏ผŒไปฅไฟ่ฏ่ฎก็ฎ—ๆฒกๆœ‰้”™่ฏฏ\n mask = gt_label >= 0\n\n # ็ปŸ่ฎกๆญฃ็กฎๅ’Œ้”™่ฏฏๅˆ†็ฑปไธชๆ•ฐ ๆ”พๅœจๅฏนๅบ”ไฝ็ฝฎไธŠ๏ผˆ nร—label + pred๏ผ‰\n confusion += np.bincount(num_class * gt_label[mask].astype(int) + pred_label[mask],\n minlength=num_class ** 2).reshape((num_class, num_class))\n\n return confusion\n\n\n# ่ฎก็ฎ—ไบคๅนถๆฏ”\ndef calc_semantic_segmentation_iou(confusion):\n \"\"\"้€š่ฟ‡ๆททๆท†็Ÿฉ้˜ต่ฎก็ฎ—ไบคๅนถๆฏ”ใ€‚\n ๆททๆท†็Ÿฉ้˜ตไธญ 
ๅฏน่ง’็บฟไธบๅˆ†็ฑปๆญฃ็กฎ็š„ไธชๆ•ฐ๏ผŒๅ…ถไฝ™็š„ๅ‡ไธบๅˆ†็ฑป้”™่ฏฏ\n ๆ‰€ๆœ‰่กŒ๏ผŒ่กจ็คบๆ ‡็ญพไธญ็š„็ฑปๅˆซไธชๆ•ฐ\n ๆ‰€ๆœ‰ๅˆ—๏ผŒ่กจ็คบ้ข„ๆต‹ๅ€ผ็š„็ฑปๅˆซไธชๆ•ฐ\n ไบคๅนถๆฏ” = ๆญฃ็กฎ็š„ไธชๆ•ฐ/(ๅฏน่ง’็บฟไธŠ่กŒ็š„ๅ’Œ+ๅˆ—็š„ๅ’Œ - ๅฏน่ง’็บฟไธŠ็š„ๅ€ผ)\n \"\"\"\n\n union_area = confusion.sum(axis=1) + confusion.sum(axis=0) - np.diag(confusion)\n iou = np.diag(confusion) / union_area\n # ๅฐ†่ƒŒๆ™ฏ็š„ไบคๅนถๆฏ”็œๅŽป\n # return iou[:-1]\n return iou[1:]\n\ndef eval_semantic_segmentation(pred_labels, gt_labels, num_class):\n \"\"\"้ชŒ่ฏ่ฏญไน‰ๅˆ†ๅ‰ฒ\"\"\"\n confusion = calc_semantic_segmentation_confusion(pred_labels, gt_labels, num_class)\n\n iou = calc_semantic_segmentation_iou(confusion)\n\n # ่ฎก็ฎ— PAๅ€ผ\n pixel_accuracy = np.diag(confusion).sum() / confusion.sum()\n\n # ่ฎก็ฎ—ๆฏไธชๅˆ†็ฑป็š„PAๅ€ผ\n # class_accuracy = np.diag(confusion)/ (np.sum(confusion, axis=1) + 1e-10)\n\n return {'iou': iou,\n 'miou': np.nanmean(iou),\n 'PA': pixel_accuracy,\n #\"class_accuracy\": class_accuracy,\n #\"mean_class_accuracy\": np.nanmean(class_accuracy[:-1])\n }\n" }, { "alpha_fraction": 0.5724770426750183, "alphanum_fraction": 0.5798165202140808, "avg_line_length": 23.772727966308594, "blob_id": "3da603352e4f5c1db306467c4917742a65e3cc5a", "content_id": "7e7e3d42c0fc76acd484e384200a5594e02d7abd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 555, "license_type": "no_license", "max_line_length": 83, "num_lines": 22, "path": "/uilts/loss/__init__.py", "repo_name": "huangluyao/squid_segmentation", "src_encoding": "UTF-8", "text": "import torch.nn as nn\n\n\ndef get_loss(cfg, weight=None):\n\n '''\n\n :param cfg:\n :param weight: class weighting\n :param ignore_index: class to ignore, usually the background id\n :return:\n '''\n\n # only losses implemented in the mapping below may be selected\n assert cfg['loss'] in ['crossentropyloss2D']\n if weight is not None:\n assert len(weight) == cfg['n_classes']\n\n return {\n 'crossentropyloss2D': nn.CrossEntropyLoss(weight=weight),}[cfg['loss']]\n else:\n return {\n 'crossentropyloss2D': nn.CrossEntropyLoss(), }[cfg['loss']]\n" }, { "alpha_fraction": 0.6368852257728577, "alphanum_fraction": 0.6368852257728577, "avg_line_length": 40.931034088134766, "blob_id": "a207c4b57e96a6f0fd4ed0e53c2ebebf348e510a", "content_id": "14a60bccb5d0bd9c7c36ddf4f81f2f004e050fe1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1220, "license_type": "no_license", "max_line_length": 116, "num_lines": 29, "path": "/uilts/datasets/__init__.py", "repo_name": "huangluyao/squid_segmentation", "src_encoding": "UTF-8", "text": "from uilts.datasets.CustomDataset import *\n\n\ndef get_dataset(cfg):\n crop_size = (cfg['image_h'], cfg['image_w'])\n num_class = cfg['n_classes']\n\n if cfg['dataset']==\"Camvid\":\n class_dict_path = './database/CamVid/class_dict.csv'\n\n train_path = \"./database/CamVid/train\"\n train_label_path = \"./database/CamVid/train_labels\"\n test_path = \"./database/CamVid/test\"\n test_label_path = \"./database/CamVid/test_labels\"\n\n return CamvidDataset(train_path,train_label_path,cfg[\"augmentation_path\"], class_dict_path, mode=\"train\"), \\\n CamvidDataset(test_path, test_label_path, cfg[\"augmentation_path\"], class_dict_path, mode=\"test\")\n\n if cfg['dataset']==\"Squid\":\n\n class_dict_path = './database/Squid/class_dict.csv'\n\n train_path = \"./database/Squid/train\"\n train_label_path = \"./database/Squid/train_labels\"\n test_path = \"./database/Squid/test\"\n test_label_path = 
\"./database/Squid/test_labels\"\n\n return SquidDataset(train_path,train_label_path,cfg[\"augmentation_path\"], class_dict_path, mode=\"train\"), \\\n SquidDataset(test_path, test_label_path, cfg[\"augmentation_path\"], class_dict_path, mode=\"test\")\n\n\n\n\n" }, { "alpha_fraction": 0.5013461709022522, "alphanum_fraction": 0.5416346192359924, "avg_line_length": 37.23897171020508, "blob_id": "5b4f33d80e4c721c4f742c5f18b777e50670c89f", "content_id": "c654f8517924a65af198a2d66368c796448e24ca", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 10676, "license_type": "no_license", "max_line_length": 118, "num_lines": 272, "path": "/uilts/models/Deeplab_v3plus.py", "repo_name": "huangluyao/squid_segmentation", "src_encoding": "UTF-8", "text": "\"\"\" input size 224ร—224\nModel DeepLabv3_plus : params: 54649004\nModel DeepLabv3_plus : size: 208.469406M\n\"\"\"\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport math\n\ndef conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):\n \"\"\"3x3 convolution with padding\"\"\"\n return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,\n padding=dilation, groups=groups, bias=False, dilation=dilation)\n\n\ndef conv3x3_bn_relu(in_planes, out_planes, stride=1, groups=1, dilation=1):\n return nn.Sequential(conv3x3(in_planes, out_planes, stride, groups, dilation),\n nn.BatchNorm2d(out_planes),\n nn.ReLU(inplace=True))\n\n\ndef fixed_padding(inputs, kernel_size, dilation):\n '''ๆ นๆฎๅท็งฏๆ ธๅ’Œ้‡‡ๆ ท็Ž‡่‡ชๅŠจ่ฎก็ฎ—paddingๅฐบๅฏธ'''\n kernel_size_effective = kernel_size + (kernel_size - 1) * (dilation - 1) # Knew = Kori + (Kori-1)(rate-1)\n pad_total = kernel_size_effective - 1\n pad_beg = pad_total // 2\n pad_end = pad_total - pad_beg\n padded_inputs = F.pad(inputs, (pad_beg, pad_end, pad_beg, pad_end))\n return padded_inputs\n\n\nclass SeparabelConv2d(nn.Module):\n '''ๅธฆ็ฉบๆดž็š„ๆทฑๅบฆๅฏๅˆ†็ฆปๅท็งฏ'''\n def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, dilation=1, bias=False):\n super(SeparabelConv2d, self).__init__()\n\n \"\"\"ๅ…ˆ่ฟ›่กŒๅˆ†็ป„ๅท็งฏ\"\"\"\n self.conv1 = nn.Conv2d(in_channels, in_channels, kernel_size,\n stride, 0, dilation, groups=in_channels, bias=bias)\n \"\"\"ๅ†็”จ1ร—1็š„ๅท็งฏ่ฟ›่กŒๅค„็†\"\"\"\n self.pointwise = nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=1,\n padding=0, dilation=1, groups=1, bias=bias)\n\n def forward(self, x):\n x = fixed_padding(x, self.conv1.kernel_size[0], dilation=self.conv1.dilation[0])\n x = self.conv1(x)\n x = self.pointwise(x)\n return x\n\n\nclass ASPP(nn.Module):\n \"\"\"็ฉบๆดžSPPNet\"\"\"\n def __init__(self, in_channels, out_channels, os):\n super(ASPP, self).__init__()\n\n if os == 16:\n dilations = [1, 6, 12, 18]\n elif os == 8:\n dilations = [1, 12, 24, 36]\n\n self.aspp1 = nn.Sequential(nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=1, padding=0,\n dilation=dilations[0], bias=False),\n nn.BatchNorm2d(out_channels),\n nn.ReLU())\n\n self.aspp2 = nn.Sequential(nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=1, padding=dilations[1],\n dilation=dilations[1], bias=False),\n nn.BatchNorm2d(out_channels),\n nn.ReLU(inplace=True))\n\n self.aspp3 = nn.Sequential(nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=1, padding=dilations[2],\n dilation=dilations[2], bias=False),\n nn.BatchNorm2d(out_channels),\n nn.ReLU(inplace=True))\n\n self.aspp4 = nn.Sequential(nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=1, 
padding=dilations[3],\n dilation=dilations[3], bias=False),\n nn.BatchNorm2d(out_channels),\n nn.ReLU(inplace=True))\n\n self.gp = nn.Sequential(nn.AdaptiveAvgPool2d((1, 1)),\n nn.Conv2d(2048, 256, 1, stride=1, padding=0, bias=False),\n nn.BatchNorm2d(256),\n nn.ReLU(inplace=True))\n\n self.conv1 = nn.Conv2d(256*5, 256, 1, bias=False)\n self.bn1 = nn.BatchNorm2d(256)\n\n def forward(self, x):\n x1 = self.aspp1(x)\n x2 = self.aspp2(x)\n x3 = self.aspp3(x)\n x4 = self.aspp4(x)\n x5 = self.gp(x) # [n, c, 1, 1]\n # ็บฟๆ€งๆ’ๅ€ผ\n x5 = F.interpolate(x5, size=x4.size()[2:], mode='bilinear', align_corners=True)\n\n # ่ฟ›่กŒๆ‹ผๆŽฅ\n x = torch.cat([x1, x2, x3, x4, x5], dim=1)\n\n x = self.conv1(x)\n x = self.bn1(x)\n\n return x\n\n\nclass Block(nn.Module):\n def __init__(self, in_channels, out_channels, reps, stride=1, dilation=1, grow_first=True):\n super(Block, self).__init__()\n\n # ๅฎšไน‰่ทณ่ทƒ่ฟžๆŽฅ้ƒจๅˆ†\n if in_channels != out_channels or stride != 1:\n self.skip = nn.Sequential(nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride, bias=False),\n nn.BatchNorm2d(out_channels))\n else:\n self.skip = None\n\n self.relu = nn.ReLU(inplace=True)\n rep = []\n\n # ๆฏไธ€ๅ—็š„็ฌฌไธ€ไธชๅท็งฏๅ—\n if grow_first:\n rep.append(SeparabelConv2d(in_channels, out_channels, 3, stride=1, dilation=dilation))\n rep.append(nn.BatchNorm2d(out_channels))\n rep.append(nn.ReLU(inplace=True))\n # ๅพช็Žฏๅท็งฏๆฌกๆ•ฐ\n for i in range(reps - 1):\n rep.append(SeparabelConv2d(out_channels, out_channels, 3, stride=1, dilation=dilation))\n rep.append(nn.BatchNorm2d(out_channels))\n rep.append(nn.ReLU(inplace=True))\n\n else:\n rep.append(SeparabelConv2d(in_channels, in_channels, 3, stride=1, dilation=dilation))\n rep.append(nn.BatchNorm2d(in_channels))\n rep.append(nn.ReLU(inplace=True))\n # ๅพช็Žฏๅท็งฏๆฌกๆ•ฐ\n for i in range(reps - 1):\n rep.append(SeparabelConv2d(in_channels, out_channels, 3, stride=1, dilation=dilation))\n rep.append(nn.BatchNorm2d(out_channels))\n rep.append(nn.ReLU(inplace=True))\n\n # ๆœ€ๅŽไธ€ไธชๅท็งฏ๏ผŒๅ†ณๅฎšๆ˜ฏๅฆไธ‹้‡‡ๆ ท\n rep.append(SeparabelConv2d(out_channels, out_channels, 3, stride=stride))\n\n self.block = nn.Sequential(*rep)\n\n def forward(self, x):\n x1 = self.block(x)\n if self.skip is not None:\n x = self.skip(x)\n x = x + x1\n x = self.relu(x)\n return x\n\n\nclass Xception(nn.Module):\n \"\"\"ๅฎšไน‰Xception็ฝ‘็ปœ\"\"\"\n def __init__(self, in_channels, os=16):\n super(Xception,self).__init__()\n\n if os==16:\n entry_block3_stride = 2\n middle_block_dilation = 1\n exit_block_dilations = (1, 2)\n elif os == 8:\n entry_block3_stride = 1\n middle_block_dilation = 2\n exit_block_dilations = (2, 4)\n else:\n raise NotImplementedError\n\n self.relu = nn.ReLU(inplace=True)\n # Entry flow\n self.conv1_bn_relu = conv3x3_bn_relu(in_channels, 32)\n self.conv2_bn_relu = conv3x3_bn_relu(32, 64)\n\n self.block1 = Block(64, 128, reps=2, stride=2)\n self.block2 = Block(128, 256, reps=2, stride=2)\n self.block3 = Block(256, 728,reps=2, stride=entry_block3_stride)\n\n # Middle flow\n mid_block = []\n for i in range(16):\n mid_block.append(Block(728, 728, reps=2, stride=1, dilation=middle_block_dilation,grow_first=False))\n self.mid_flow = nn.Sequential(*mid_block)\n\n # Exit flow\n self.exitflow1 = Block(728, 1024, reps=2, stride=2, dilation=exit_block_dilations[0], grow_first=False)\n self.exitflow2 = SeparabelConv2d(1024, 1536, 3, dilation=exit_block_dilations[1])\n self.exitflow3 = SeparabelConv2d(1536, 1536, 3, dilation=exit_block_dilations[1])\n self.exitflow4 = 
SeparabelConv2d(1536, 2048, 3, dilation=exit_block_dilations[1])\n\n # ๅˆๅง‹ๅŒ–็ฝ‘็ปœๆƒ้‡\n self._init_weight()\n\n def forward(self, x):\n x = self.conv1_bn_relu(x)\n x = self.conv2_bn_relu(x)\n x = self.block1(x)\n x = self.block2(x)\n low_level_feat = x\n x = self.block3(x)\n\n x = self.mid_flow(x)\n x = self.exitflow1(x)\n x = self.exitflow2(x)\n x = self.exitflow3(x)\n x = self.exitflow4(x)\n return x, low_level_feat\n\n def _init_weight(self):\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels\n m.weight.data.normal_(0, math.sqrt(2. / n))\n elif isinstance(m, nn.BatchNorm2d):\n m.weight.data.fill_(1)\n m.bias.data.zero_()\n\n\nclass DeepLabv3_plus(nn.Module):\n def __init__(self, input_channels, num_calsses, os=16):\n super(DeepLabv3_plus, self).__init__()\n\n \"\"\"็ฉบๆดžๅท็งฏ็ผ–็ ๅ™จ\"\"\"\n self.xception_features = Xception(input_channels, os)\n \"\"\"็ฉบๆดž็ฉบ้—ด้‡‘ๅญ—ๅก”ๆฑ ๅŒ–\"\"\"\n self.ASPP = ASPP(2048, 256, os=os)\n\n self.conv1_bn_relu = nn.Sequential(nn.Conv2d(256, 256, 1, bias=False),\n nn.BatchNorm2d(256),\n nn.ReLU())\n self.conv2_bn_relu = nn.Sequential(nn.Conv2d(256, 48, 1, bias=False),\n nn.BatchNorm2d(48),\n nn.ReLU())\n\n self.last_conv = nn.Sequential(conv3x3_bn_relu(304, 256),\n conv3x3_bn_relu(256, 256),\n nn.Conv2d(256, num_calsses, kernel_size=1, stride=1))\n\n def forward(self, input):\n x, low_leval_feat = self.xception_features(input)\n x = self.ASPP(x)\n # print('ASPP out_size', x.size())\n x = self.conv1_bn_relu(x)\n # print('size', (int(math.ceil(input.size()[-2]/4)), int(math.ceil(input.size()[-1]/4))))\n x = F.interpolate(x, size=(int(math.ceil(input.size()[-2]/4)), int(math.ceil(input.size()[-1]/4))),\n mode='bilinear',align_corners=True)\n\n low_leval_feat = self.conv2_bn_relu(low_leval_feat)\n\n x = torch.cat([low_leval_feat, x], dim=1)\n x = self.last_conv(x)\n x = F.interpolate(x, size=input.size()[-2:], mode='bilinear', align_corners=True)\n return x\n\n\nif __name__==\"__main__\":\n\n a = torch.rand((1, 3, 224, 224))\n model = DeepLabv3_plus(3, num_calsses=12, os=16)\n model.eval()\n x = model(a)\n print('x.size', x.size())\n\n # ่ฎก็ฎ—็ฝ‘็ปœๆจกๅž‹ๅฐบๅฏธๅคงๅฐ\n import numpy as np\n para = sum([np.prod(list(p.size())) for p in model.parameters()])\n type_size = 4 # float32 ๅ 4ไธชๅญ—่Š‚\n print('Model {} : params: {}'.format(model._get_name(), para))\n print('Model {} : size: {:4f}M'.format(model._get_name(), para*type_size/1024/1024))" }, { "alpha_fraction": 0.5028478503227234, "alphanum_fraction": 0.5516680479049683, "avg_line_length": 33.467288970947266, "blob_id": "4c8e7ddad55110c5a702e4a9b7997c7ab04c81f2", "content_id": "80c535130bb9d4d363740280b787cbd8edcfa230", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3757, "license_type": "no_license", "max_line_length": 131, "num_lines": 107, "path": "/uilts/models/UNet.py", "repo_name": "huangluyao/squid_segmentation", "src_encoding": "UTF-8", "text": "import torch\nimport torch.nn as nn\n\n\n# UNet ่ฎบๆ–‡ไธญ็š„็ผ–็ ๅ™จ\ndef Encoder(in_channels, out_channels):\n block = nn.Sequential(nn.Conv2d(in_channels, out_channels, kernel_size=3, bias=False),\n nn.ReLU(),\n nn.BatchNorm2d(out_channels),\n nn.Conv2d(out_channels, out_channels, kernel_size=3, bias=False),\n nn.ReLU(),\n nn.BatchNorm2d(out_channels)\n )\n return block\n\n\n# UNet่ฎบๆ–‡ไธญ็š„่งฃ็ ๅ™จ\nclass Decoder(nn.Module):\n def __init__(self, in_channels, mid_channels, out_channels):\n super(Decoder, 
self).__init__()\n\n self.up = nn.ConvTranspose2d(in_channels, in_channels//2, kernel_size=3, stride=2, padding=1, output_padding=1, dilation=1)\n\n self.block = nn.Sequential(\n nn.Conv2d(in_channels, mid_channels, kernel_size=3, bias=False),\n nn.ReLU(),\n nn.BatchNorm2d(mid_channels),\n nn.Conv2d(mid_channels, out_channels, kernel_size=3, bias=False),\n nn.ReLU(),\n nn.BatchNorm2d(out_channels))\n\n def forward(self, e, d):\n d = self.up(d)\n # ๅฐ†encoderๅพ—ๅˆฐ็š„ๅฐบๅฏธ่ฟ›่กŒ่ฃๅ‰ชๅ’Œdecoder่ฟ›่กŒๆ‹ผๆŽฅ\n diffY = e.size()[2] - d.size()[2]\n diffX = e.size()[3] - d.size()[3]\n e = e[:, :, diffY//2:e.size()[2]-diffY//2, diffX//2: e.size()[3]-diffX//2]\n cat = torch.cat([e, d], dim=1)\n out = self.block(cat)\n return out\n\n\ndef final_block(in_channels, out_channels):\n block = nn.Sequential(nn.Conv2d(in_channels, out_channels, kernel_size=1, bias=False),\n nn.ReLU(),\n nn.BatchNorm2d(out_channels))\n return block\n\n\nclass UNet(nn.Module):\n def __init__(self, in_channels, num_classes):\n super(UNet, self).__init__()\n\n self.pool = nn.MaxPool2d(kernel_size=2, stride=2)\n\n # Encode\n self.encode1 = Encoder(in_channels, 64)\n self.encode2 = Encoder(64, 128)\n self.encode3 = Encoder(128, 256)\n self.encode4 = Encoder(256, 512)\n\n # ็ผ–็ ๅ™จๆœ€ๅบ•้ƒจ\n self.bottleneck = nn.Sequential(nn.Conv2d(512, 1024, kernel_size=3, bias=False),\n nn.ReLU(),\n nn.BatchNorm2d(1024),\n nn.Conv2d(1024, 1024, kernel_size=3, bias=False),\n nn.ReLU(),\n nn.BatchNorm2d(1024)\n )\n\n # decoder\n self.decode4 = Decoder(1024, 512, 512)\n self.decode3 = Decoder(512, 256, 256)\n self.decode2 = Decoder(256, 128, 128)\n self.decode1 = Decoder(128, 64, 64)\n\n self.final = final_block(64, num_classes)\n\n def forward(self, x):\n encode_block1 = self.encode1(x)\n pool1 = self.pool(encode_block1)\n encode_block2 = self.encode2(pool1) # print('encode_block2', encode_block2.size())\n pool2 = self.pool(encode_block2)\n encode_block3 = self.encode3(pool2)\n pool3 = self.pool(encode_block3)\n encode_block4 = self.encode4(pool3)\n pool4 = self.pool(encode_block4)\n\n bottleneck = self.bottleneck(pool4)\n\n decode_block4 = self.decode4(encode_block4, bottleneck)\n decode_block3 = self.decode3(encode_block3, decode_block4) # print('decode_block3', decode_block3.size())\n decode_block2 = self.decode2(encode_block2, decode_block3)\n decode_block1 = self.decode1(encode_block1, decode_block2)\n\n out = self.final(decode_block1)\n return out\n\n\nif __name__ == \"__main__\":\n rgb = torch.randn(1, 3, 572, 572)\n\n net = UNet(3, 12)\n\n out = net(rgb)\n\n print(out.shape)" }, { "alpha_fraction": 0.5095270276069641, "alphanum_fraction": 0.5738623738288879, "avg_line_length": 37.128204345703125, "blob_id": "7ff101931d51525ae94d1394ddc4cc3c157ad206", "content_id": "c8fa55cf38ba5a922956ada40fcdeeea57e16415", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4502, "license_type": "no_license", "max_line_length": 133, "num_lines": 117, "path": "/uilts/models/GCN.py", "repo_name": "huangluyao/squid_segmentation", "src_encoding": "UTF-8", "text": "\"\"\" input size 224×224\nModel GCN : params: 58148704\nModel GCN : size: 221.819702M\n\"\"\"\n\nimport torch\nimport torch.nn as nn\nfrom torchvision import models\n\n\nclass Global_Convolutional(nn.Module):\n \"\"\"ๅ…จๅฑ€็ฅž็ป็ฝ‘็ปœ\"\"\"\n def __init__(self, in_channels, num_class, k=15):\n super(Global_Convolutional, self).__init__()\n pad = (k-1)//2\n\n self.conv1 = nn.Sequential(nn.Conv2d(in_channels, num_class, kernel_size=(1, k), 
padding=(0, pad), bias=False),\n nn.Conv2d(num_class, num_class, kernel_size=(k, 1), padding=(pad, 0), bias= False))\n\n self.conv2 = nn.Sequential(nn.Conv2d(in_channels, num_class, kernel_size=(k, 1), padding=(pad, 0), bias=False),\n nn.Conv2d(num_class, num_class, kernel_size=(1, k), padding=(0,pad), bias=False))\n\n def forward(self, x):\n\n x1 = self.conv1(x)\n x2 = self.conv2(x)\n\n assert x1.shape == x2.shape\n return x1 + x2\n\n\nclass BR(nn.Module):\n def __init__(self, num_class):\n super(BR, self).__init__()\n self.shortcut = nn.Sequential(nn.Conv2d(in_channels=num_class, out_channels=num_class, kernel_size=3, padding=1, bias=False),\n nn.ReLU(),\n nn.Conv2d(num_class, num_class, 3, padding=1, bias=False))\n\n def forward(self, x):\n return x + self.shortcut(x)\n\n\nclass GCN_BR_BR_Deconv(nn.Module):\n def __init__(self, in_channels, num_class, k=15):\n super(GCN_BR_BR_Deconv, self).__init__()\n self.gcn = Global_Convolutional(in_channels, num_class, k)\n self.br = BR(num_class)\n self.deconv = nn.ConvTranspose2d(num_class, num_class, 4, 2, 1, bias=False)\n\n def forward(self, x1, x2=None):\n x1 = self.gcn(x1)\n x1 = self.br(x1)\n\n if x2 is None:\n x = self.deconv(x1)\n else:\n x = x1 + x2\n x = self.br(x)\n x = self.deconv(x)\n\n return x\n\n\nclass GCN(nn.Module):\n def __init__(self, num_classes, k=15):\n super(GCN, self).__init__()\n self.num_class = num_classes\n self.k = k\n\n resnet152_pretrained = models.resnet152(pretrained=False)\n self.layer0 = nn.Sequential(resnet152_pretrained.conv1, resnet152_pretrained.bn1, resnet152_pretrained.relu)\n self.layer1 = nn.Sequential(resnet152_pretrained.maxpool, resnet152_pretrained.layer1)\n self.layer2 = resnet152_pretrained.layer2\n self.layer3 = resnet152_pretrained.layer3\n self.layer4 = resnet152_pretrained.layer4\n\n self.branch4 = GCN_BR_BR_Deconv(2048, self.num_class, self.k)\n self.branch3 = GCN_BR_BR_Deconv(1024, self.num_class, self.k)\n self.branch2 = GCN_BR_BR_Deconv(512, self.num_class, self.k)\n self.branch1 = GCN_BR_BR_Deconv(256, self.num_class, self.k)\n\n self.br = BR(self.num_class)\n self.deconv = nn.ConvTranspose2d(self.num_class, self.num_class, 4, 2, 1, bias=False)\n\n def forward(self, input):\n x0 = self.layer0(input);# print('x0:', x0.size()) # x0: torch.Size([1, 64, 176, 240])\n x1 = self.layer1(x0); # print('x1:', x1.size()) # x1: torch.Size([1, 256, 88, 120])\n x2 = self.layer2(x1); # print('x2:', x2.size()) # x2: torch.Size([1, 512, 44, 60])\n x3 = self.layer3(x2); # print('x3:', x3.size()) # x3: torch.Size([1, 1024, 22, 30])\n x4 = self.layer4(x3); # print('x4:', x4.size()) # x4: torch.Size([1, 2048, 11, 15])\n\n branch4 = self.branch4(x4); #print('branch4:', branch4.size()) # torch.Size([1, 12, 22, 30])\n branch3 = self.branch3(x3, branch4); # print('branch3:', branch3.size()) # torch.Size([1, 12, 44, 60])\n branch2 = self.branch2(x2, branch3); # print('branch2:', branch2.size()) # torch.Size([1, 12, 88, 120])\n branch1 = self.branch1(x1, branch2); # print('branch1:', branch1.size()) # torch.Size([1, 12, 176, 240])\n\n x = self.br(branch1)\n x = self.deconv(x)\n x = self.br(x)\n\n return x\n\n\nif __name__ == \"__main__\":\n rgb = torch.randn(1, 3, 224, 224)\n\n model = GCN(12)\n\n out = model(rgb)\n print(out.shape)\n\n # ่ฎก็ฎ—็ฝ‘็ปœๆจกๅž‹ๅฐบๅฏธๅคงๅฐ\n import numpy as np\n para = sum([np.prod(list(p.size())) for p in model.parameters()])\n type_size = 4 # float32 ๅ 4ไธชๅญ—่Š‚\n print('Model {} : params: {}'.format(model._get_name(), para))\n print('Model {} : size: 
{:4f}M'.format(model._get_name(), para*type_size/1024/1024))\n" }, { "alpha_fraction": 0.6126914620399475, "alphanum_fraction": 0.6126914620399475, "avg_line_length": 30.517240524291992, "blob_id": "5637a3ec8f40be6a203e9856265085a6c236db36", "content_id": "4c87e9b6e6ba5b0acb9225da733479f7d4e6601d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 918, "license_type": "no_license", "max_line_length": 150, "num_lines": 29, "path": "/uilts/parse_cfg.py", "repo_name": "huangluyao/squid_segmentation", "src_encoding": "UTF-8", "text": "import json\nimport os\nimport xml.etree.ElementTree as ET\n\n# ่งฃๆžjson\ndef parse_json(config_path):\n if os.path.isfile(config_path) and config_path.endswith('json'):\n data = json.load(open(config_path))\n data = data['data']\n return data\n\n\ndef parse_annotation(xml_path, category_id_and_name):\n in_file = open(xml_path)\n tree = ET.parse(in_file)\n root = tree.getroot()\n boxes = []\n category_ids = []\n for obj in root.iter('object'):\n difficult = obj.find('difficult').text\n cls = obj.find('name').text\n\n if category_id_and_name.get(cls) != None:\n cls_id = category_id_and_name[cls]\n xmlbox = obj.find('bndbox')\n boxes.append([int(xmlbox.find('xmin').text), int(xmlbox.find('ymin').text), int(xmlbox.find('xmax').text), int(xmlbox.find('ymax').text)])\n category_ids.append(cls_id)\n\n return boxes, category_ids\n" }, { "alpha_fraction": 0.6769055724143982, "alphanum_fraction": 0.6916950941085815, "avg_line_length": 15.566038131713867, "blob_id": "b8544fc8b8c880cf2033b11805f4fd124eda07bb", "content_id": "35f10652b2f26a293a3053576f3edcad6846f7c3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 879, "license_type": "no_license", "max_line_length": 98, "num_lines": 53, "path": "/readme.md", "repo_name": "huangluyao/squid_segmentation", "src_encoding": "UTF-8", "text": "# Squid segmentation\n\nResearch on squid segmentation system on fishing boat\n\n## Installation\n\n- Python 3.x. 
\n\n- [PyTorch 1.1.0](https://pytorch.org/get-started/locally/)\n\n ```\n sudo pip install torch==1.1.0 torchvision==0.3.0\n ```\n\n- tqdm \n\n ```\n sudo pip install tqdm\n ```\n\n- OpenCV Python\n\n ```\n sudo apt-get install python-opencv\n ```\n\n- Numpy \n\n ```\n sudo pip install numpy\n ```\n\n## Datasets\n\nThe data format is similar to [Camvid](http://mi.eng.cam.ac.uk/research/projects/VideoRec/CamVid/)\n\n## Third-party resources\n\n- [Albumentations](https://albumentations.ai/) are used for data augmentation\n\n## Training\n\n```python\npython train.py --config configs/Squid_UNet.json\n```\n\n## Result\n\n\n\n![result](https://github.com/huangluyao/squid_segmentation/blob/master/results/1.png)\n\n![result2](https://github.com/huangluyao/squid_segmentation/blob/master/results/2.png)\n\n" }, { "alpha_fraction": 0.5758112072944641, "alphanum_fraction": 0.5836774706840515, "avg_line_length": 36.08759307861328, "blob_id": "164f66f15ef78d6c9a323c9f0b9e2a5ce0d7bd40", "content_id": "eb11af187a5d17e8b6759176c0f178476164a70f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5085, "license_type": "no_license", "max_line_length": 115, "num_lines": 137, "path": "/train.py", "repo_name": "huangluyao/squid_segmentation", "src_encoding": "UTF-8", "text": "import argparse, json, time, random, os\nimport shutil\nfrom uilts.log import get_logger\nfrom uilts.datasets import get_dataset\nfrom uilts.models import get_model\nfrom uilts.loss import get_loss\nimport torch.nn as nn\nimport torch\nfrom torch.utils.data import DataLoader\nfrom tqdm import tqdm\nfrom uilts.evalution import *\n\n\ndef run(cfg, logger):\n # 1. The dataset name used\n logger.info(f'Conf | use dataset {cfg[\"dataset\"]}')\n logger.info(f'Conf | use batch_size {cfg[\"batch_size\"]}')\n logger.info(f'Conf | use model_name {cfg[\"model_name\"]}')\n\n # 2. load dataset\n trainset, valset = get_dataset(cfg)\n train_loader = DataLoader(trainset, batch_size=cfg['batch_size'], shuffle=True, num_workers=cfg['num_workers'])\n val_loader = DataLoader(valset, batch_size=cfg['batch_size'], shuffle=False, num_workers=cfg['num_workers'])\n\n # 3. load_model\n model = get_model(cfg)\n\n # 4. Whether to use multi-gpu training\n gpu_ids = [int(i) for i in list(cfg['gpu_ids'])]\n logger.info(f'Conf | use GPU {gpu_ids}')\n if len(gpu_ids) > 1:\n model = nn.DataParallel(model, device_ids=gpu_ids)\n model = model.to(cfg[\"device\"])\n\n # 5. optimizer and learning rate decay\n logger.info(f'Conf | use optimizer Adam, lr={cfg[\"lr\"]}, weight_decay={cfg[\"weight_decay\"]}')\n logger.info(f'Conf | use step_lr_scheduler every {cfg[\"lr_decay_steps\"]} steps decay {cfg[\"lr_decay_gamma\"]}')\n optimizer = torch.optim.Adam(model.parameters(), lr=cfg['lr'], weight_decay=cfg['weight_decay'])\n\n # 6. loss function and class weight balance\n logger.info(f'Conf | use loss function {cfg[\"loss\"]}')\n criterion = get_loss(cfg).to(cfg['device'])\n\n # 7. 
train and val\n logger.info(f'Conf | use epoch {cfg[\"epoch\"]}')\n best = 0.\n for epoch in range(cfg['epoch']):\n model.train()\n train_loss = 0\n train_miou = 0\n\n nLen = len(train_loader)\n batch_bar = tqdm(enumerate(train_loader), total=nLen)\n for i, (img_data, img_label) in batch_bar:\n # load data to gpu\n img_data = img_data.to(cfg['device'])\n img_label = img_label.to(cfg['device'])\n # forward\n out = model(img_data)\n # calculate loss\n loss = criterion(out, img_label)\n # backward\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n train_loss += loss.item()\n\n # evaluate\n pre_label = out.max(dim=1)[1].data.cpu().numpy()\n pre_label = [i for i in pre_label]\n\n true_label = img_label.data.cpu().numpy()\n true_label = [i for i in true_label]\n\n eval_metrix = eval_semantic_segmentation(pre_label, true_label, cfg[\"n_classes\"])\n train_miou += eval_metrix['miou']\n batch_bar.set_description('|batch[{}/{}]|train_loss {: .8f}|'.format(i + 1, nLen, loss.item()))\n\n logger.info(f'Iter | [{epoch + 1:3d}/{cfg[\"epoch\"]}] train loss={train_loss / len(train_loader):.5f}')\n logger.info(f'Test | [{epoch + 1:3d}/{cfg[\"epoch\"]}] Train Mean IU={train_miou / len(train_loader):.5f}')\n\n miou = train_miou / len(train_loader)\n if best <= miou:\n best = miou\n torch.save(model.state_dict(), os.path.join(cfg['logdir'], 'best_train_miou.pth'))\n\n net = model.eval()\n eval_loss = 0\n eval_miou = 0\n\n for j, (valImg, valLabel) in enumerate(val_loader):\n valImg = valImg.to(cfg['device'])\n valLabel = valLabel.to(cfg['device'])\n\n out = net(valImg)\n loss = criterion(out, valLabel)\n eval_loss = loss.item() + eval_loss\n pre_label = out.max(dim=1)[1].data.cpu().numpy()\n pre_label = [i for i in pre_label]\n\n true_label = valLabel.data.cpu().numpy()\n true_label = [i for i in true_label]\n\n eval_metrics = eval_semantic_segmentation(pre_label, true_label, cfg[\"n_classes\"])\n eval_miou = eval_metrics['miou'] + eval_miou\n\n logger.info(f'Iter | [{epoch + 1:3d}/{cfg[\"epoch\"]}] valid loss={eval_loss / len(val_loader):.5f}')\n logger.info(f'Test | [{epoch + 1:3d}/{cfg[\"epoch\"]}] Valid Mean IU={eval_miou / len(val_loader):.5f}')\n\n\nif __name__==\"__main__\":\n\n parser = argparse.ArgumentParser(description=\"config\")\n parser.add_argument(\"--config\",\n nargs=\"?\",\n type=str,\n default=\"configs/Squid_UNet.json\",\n help=\"Configuration to use\")\n\n args = parser.parse_args()\n\n with open(args.config, 'r') as fp:\n cfg = json.load(fp)\n\n # Training Record\n logdir = f'run/{cfg[\"dataset\"]}/{time.strftime(\"%Y-%m-%d-%H-%M\")}-{random.randint(1000,10000)}'\n os.makedirs(logdir)\n shutil.copy(args.config, logdir)\n\n logger = get_logger(logdir)\n\n logger.info(f'Conf | use logdir {logdir}')\n cfg['device'] = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')\n cfg['logdir'] = logdir\n\n run(cfg, logger)\n\n\n\n\n" }, { "alpha_fraction": 0.5206570625305176, "alphanum_fraction": 0.5611000657081604, "avg_line_length": 39.38190841674805, "blob_id": "8cbe6d8052d7554121975ac432d4786b69d1fc7e", "content_id": "b5fee110e57b7221b547b853f19f67df74ae80f9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8036, "license_type": "no_license", "max_line_length": 119, "num_lines": 199, "path": "/uilts/models/DFANet.py", "repo_name": "huangluyao/squid_segmentation", "src_encoding": "UTF-8", "text": "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\nclass 
SeparableConv2d(nn.Module):\n def __init__(self, inputChannel, outputChannel, kernel_size=3, stride=1, padding=1, dilation=1, bias=True):\n super(SeparableConv2d, self).__init__()\n self.conv1 = nn.Conv2d(inputChannel, inputChannel, kernel_size, stride, padding, dilation,\n groups=inputChannel, bias=bias)\n self.pointwise = nn.Conv2d(inputChannel, outputChannel, 1, 1, 0, 1, 1, bias=bias)\n\n def forward(self, x):\n x = self.conv1(x)\n x = self.pointwise(x)\n return x\n\n\nclass Block(nn.Module):\n def __init__(self, inputChannel, outputChannel, stride=1, BatchNorm=nn.BatchNorm2d):\n super(Block, self).__init__()\n\n self.conv1 = nn.Sequential(SeparableConv2d(inputChannel, outputChannel // 4),\n BatchNorm(outputChannel // 4),\n nn.ReLU())\n self.conv2 = nn.Sequential(SeparableConv2d(outputChannel // 4, outputChannel // 4),\n BatchNorm(outputChannel // 4),\n nn.ReLU())\n self.conv3 = nn.Sequential(SeparableConv2d(outputChannel // 4, outputChannel, stride=stride),\n BatchNorm(outputChannel),\n nn.ReLU())\n self.projection = nn.Conv2d(inputChannel, outputChannel, 1, stride=stride, bias=False)\n\n def forward(self, x):\n out = self.conv1(x)\n out = self.conv2(out)\n out = self.conv3(out)\n identity = self.projection(x)\n return out + identity\n\n\nclass enc(nn.Module):\n def __init__(self, in_channels, out_channels, num_repeat=4):\n super(enc, self).__init__()\n stacks = [Block(in_channels, out_channels, stride=2)]\n for x in range(num_repeat - 1):\n stacks.append(Block(out_channels, out_channels))\n self.build = nn.Sequential(*stacks)\n\n def forward(self, x):\n x = self.build(x)\n return x\n\n\nclass Attention(nn.Module):\n def __init__(self, in_channels):\n super(Attention, self).__init__()\n self.avgpool = nn.AdaptiveAvgPool2d(1)\n self.fc = nn.Linear(in_channels, 1000)\n self.conv = nn.Sequential(\n nn.Conv2d(1000, in_channels, 1, bias=False),\n nn.BatchNorm2d(in_channels),\n nn.ReLU(True))\n\n def forward(self, x):\n n, c, _, _ = x.size()\n att = self.avgpool(x).view(n, c)\n att = self.fc(att).view(n, 1000, 1, 1)\n att = self.conv(att)\n return x * att.expand_as(x)\n\n\nclass SubBranch(nn.Module):\n def __init__(self, channel_cfg, branch_index):\n super(SubBranch, self).__init__()\n self.enc2 = enc(channel_cfg[0], 48, num_repeat=4)\n self.enc3 = enc(channel_cfg[1], 96, num_repeat=6)\n self.enc4 = enc(channel_cfg[2], 192, num_repeat=4)\n self.atten = Attention(192)\n self.branch_index = branch_index\n\n def forward(self, x0, *args):\n out0 = self.enc2(x0)\n if self.branch_index in [1, 2]:\n out1 = self.enc3(torch.cat([out0, args[0]], 1))\n out2 = self.enc4(torch.cat([out1, args[1]], 1))\n else:\n out1 = self.enc3(out0)\n out2 = self.enc4(out1)\n out3 = self.atten(out2)\n return [out0, out1, out2, out3]\n\n\nclass DFA_Encoder(nn.Module):\n def __init__(self, channel_cfg):\n super(DFA_Encoder, self).__init__()\n self.conv1 = nn.Sequential(\n nn.Conv2d(in_channels=3, out_channels=8, kernel_size=3, stride=2, padding=1, bias=False),\n nn.BatchNorm2d(num_features=8),\n nn.ReLU())\n self.branch0 = SubBranch(channel_cfg[0], branch_index=0)\n self.branch1 = SubBranch(channel_cfg[1], branch_index=1)\n self.branch2 = SubBranch(channel_cfg[2], branch_index=2)\n\n def forward(self, x):\n x = self.conv1(x)\n\n x0, x1, x2, x5 = self.branch0(x)\n x3 = F.interpolate(x5, x0.size()[2:], mode='bilinear', align_corners=True)\n x1, x2, x3, x6 = self.branch1(torch.cat([x0, x3], 1), x1, x2)\n x4 = F.interpolate(x6, x1.size()[2:], mode='bilinear', align_corners=True)\n x2, x3, x4, x7 = self.branch2(torch.cat([x1, 
x4], 1), x2, x3)\n\n return [x0, x1, x2, x5, x6, x7]\n\n\nclass DFA_Decoder(nn.Module):\n \"\"\"\n Decoder of DFANet.\n \"\"\"\n\n def __init__(self, decode_channels, num_classes):\n super(DFA_Decoder, self).__init__()\n\n self.conv0 = nn.Sequential(nn.Conv2d(in_channels=48, out_channels=decode_channels, kernel_size=1, bias=False),\n nn.BatchNorm2d(decode_channels),\n nn.ReLU(inplace=True))\n self.conv1 = nn.Sequential(nn.Conv2d(in_channels=48, out_channels=decode_channels, kernel_size=1, bias=False),\n nn.BatchNorm2d(decode_channels),\n nn.ReLU(inplace=True))\n self.conv2 = nn.Sequential(nn.Conv2d(in_channels=48, out_channels=decode_channels, kernel_size=1, bias=False),\n nn.BatchNorm2d(decode_channels),\n nn.ReLU(inplace=True))\n self.conv3 = nn.Sequential(nn.Conv2d(in_channels=192, out_channels=decode_channels, kernel_size=1, bias=False),\n nn.BatchNorm2d(decode_channels),\n nn.ReLU(inplace=True))\n self.conv4 = nn.Sequential(nn.Conv2d(in_channels=192, out_channels=decode_channels, kernel_size=1, bias=False),\n nn.BatchNorm2d(decode_channels),\n nn.ReLU(inplace=True))\n self.conv5 = nn.Sequential(nn.Conv2d(in_channels=192, out_channels=decode_channels, kernel_size=1, bias=False),\n nn.BatchNorm2d(decode_channels),\n nn.ReLU(inplace=True))\n\n self.conv_add1 = nn.Sequential(\n nn.Conv2d(in_channels=decode_channels, out_channels=decode_channels, kernel_size=3, padding=1),\n nn.BatchNorm2d(decode_channels),\n nn.ReLU(inplace=True))\n\n self.conv_cls = nn.Conv2d(in_channels=decode_channels, out_channels=num_classes, kernel_size=3, padding=1,\n bias=False)\n\n def forward(self, x0, x1, x2, x3, x4, x5):\n x0 = self.conv0(x0)\n x1 = F.interpolate(self.conv1(x1), x0.size()[2:], mode='bilinear', align_corners=True)\n x2 = F.interpolate(self.conv2(x2), x0.size()[2:], mode='bilinear', align_corners=True)\n x3 = F.interpolate(self.conv3(x3), x0.size()[2:], mode='bilinear', align_corners=True)\n x4 = F.interpolate(self.conv4(x4), x0.size()[2:], mode='bilinear', align_corners=True) # conv4 handles x4 so conv5 is applied only to x5\n x5 = F.interpolate(self.conv5(x5), x0.size()[2:], mode='bilinear', align_corners=True)\n\n x_shallow = self.conv_add1(x0 + x1 + x2)\n\n x = self.conv_cls(x_shallow + x3 + x4 + x5)\n x = F.interpolate(x, scale_factor=4, mode='bilinear', align_corners=True)\n return x\n\n\nclass DFANet(nn.Module):\n def __init__(self, channel_cfg, decoder_channel, num_classes):\n super(DFANet, self).__init__()\n self.encoder = DFA_Encoder(channel_cfg)\n self.decoder = DFA_Decoder(decoder_channel, num_classes)\n\n def forward(self, x):\n x0, x1, x2, x3, x4, x5 = self.encoder(x)\n x = self.decoder(x0, x1, x2, x3, x4, x5)\n return x\n\n\ndef dfaNet(num_classes=21):\n ch_cfg = [[8, 48, 96],\n [240, 144, 288],\n [240, 144, 288]]\n\n model = DFANet(ch_cfg, 64, num_classes)\n return model\n\n\nif __name__ == '__main__':\n device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n\n ch_cfg = [[8, 48, 96],\n [240, 144, 288],\n [240, 144, 288]]\n\n model = DFANet(ch_cfg, 64, 19)\n model.eval()\n image = torch.randn(1, 3, 224, 224)\n out = model(image)\n print(out.shape)\n" } ]
14
wh-forker/city-brain-challenge
https://github.com/wh-forker/city-brain-challenge
6ba7bf592ab92628f217ba5de1e705988cf3f4e0
f3e122ae291fdadfd9cc35254cd5e2daca8f7db9
ad1dc0067a8c1b58489c1579b7503dfeac16b6f0
refs/heads/master
2023-07-02T00:58:19.116621
2021-08-09T19:35:44
2021-08-09T19:35:44
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7595419883728027, "alphanum_fraction": 0.7748091816902161, "avg_line_length": 51.400001525878906, "blob_id": "d47e1d15952dc5c22426f2c8aaf9fc12f6b99320", "content_id": "e640079686cf5957ab46e7fbd1b97ebb0932787a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 262, "license_type": "no_license", "max_line_length": 198, "num_lines": 5, "path": "/baselines/presslight/train.sh", "repo_name": "wh-forker/city-brain-challenge", "src_encoding": "UTF-8", "text": "cd starter-kit\n\npip install 'ray[tune]' mlflow 'ray[default]'\n\npython3 /starter-kit/baselines/presslight/train_presslight.py --input_dir /starter-kit/baselines/presslight --output_dir /starter-kit/out --sim_cfg /starter-kit/cfg/simulator.cfg --metric_period 200\n" }, { "alpha_fraction": 0.4475407302379608, "alphanum_fraction": 0.45656341314315796, "avg_line_length": 43.414894104003906, "blob_id": "33fe51a717e197a0ef36940b89e3a056cd3b473f", "content_id": "0dc681121fbdf0b3d8231e4d319723ff90d6198f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 12524, "license_type": "no_license", "max_line_length": 126, "num_lines": 282, "path": "/agent/colight/CBEngine_round3.py", "repo_name": "wh-forker/city-brain-challenge", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\nimport numpy as np\nimport citypb\nfrom ray import tune\nimport os\nfrom CBEngine_rllib.CBEngine_rllib import CBEngine_rllib as CBEngine_rllib_class\nimport argparse\nfrom queue import Queue\n\n\nclass CBEngine_round3(CBEngine_rllib_class):\n \"\"\"See CBEngine_rllib_class in /CBEngine_env/env/CBEngine_rllib/CBEngine_rllib.py\n\n Need to implement reward.\n\n implementation of observation is optional\n\n \"\"\"\n\n def __init__(self, config):\n super(CBEngine_round3, self).__init__(config)\n self.observation_features = self.gym_dict['observation_features']\n self.custom_observation = self.gym_dict['custom_observation']\n self.observation_dimension = self.gym_dict['observation_dimension']\n self.adj_neighbors = self.gym_dict['adj_neighbors']\n self.agent_adjacency = {}\n def _get_observations(self):\n\n obs = {}\n features = self.observation_features\n lane_vehicle = self.eng.get_lane_vehicles()\n if (self.custom_observation == False):\n obs = super(CBEngine_round3, self)._get_observations()\n return obs\n else:\n self.adj_dict = self.create_intersection_adj_list(self.intersections, self.agents)\n self.intersection_dict = self.create_intersection_dict(self.intersections)\n adj = []\n for agent_id, roads in self.agent_signals.items():\n result_obs = []\n for feature in features:\n if feature == 'lane_vehicle_num':\n for lane in self.intersections[agent_id]['lanes']:\n # -1 indicates empty roads in 'signal' of roadnet file\n if lane == -1:\n result_obs.append(-1)\n else:\n # -2 indicates there's no vehicle on this lane\n if lane not in lane_vehicle.keys():\n result_obs.append(0)\n else:\n # the vehicle number of this lane\n result_obs.append(len(lane_vehicle[lane]))\n if feature == 'action_one_hot':\n cur_phase = self.agent_curphase[agent_id]\n phase_map = [\n [-1, -1],\n [0, 4],\n [1, 5],\n [2, 6],\n [3, 7],\n [0, 1],\n [2, 3],\n [4, 5],\n [6, 7]\n ]\n one_hot_phase = [0] * 8\n one_hot_phase[phase_map[cur_phase][0]] = 1\n one_hot_phase[phase_map[cur_phase][1]] = 1\n result_obs += one_hot_phase\n if feature == 'neighbor_adj':\n if agent_id not in self.agent_adjacency:\n nn, visited, level, parent = self.breadth_first_search(agent_id)\n 
order = {k: v for v, k in enumerate(nn)}\n nn = list(set(nn).intersection(self.adj_dict.keys()))\n nn.sort(key=order.get)\n self.agent_adjacency[agent_id] = nn\n adj = self.agent_adjacency[agent_id][:self.adj_neighbors]\n if feature == 'neighbors' and adj:\n result_obs = [result_obs]\n adj = [adj]\n for neighbor in adj[0][1:]:\n neighbor_obs = []\n for lane in self.intersections[neighbor]['lanes']:\n # -1 indicates empty roads in 'signal' of roadnet file\n if lane == -1:\n neighbor_obs.append(-1)\n else:\n # -2 indicates there's no vehicle on this lane\n if lane not in lane_vehicle.keys():\n neighbor_obs.append(0)\n else:\n # the vehicle number of this lane\n neighbor_obs.append(len(lane_vehicle[lane]))\n\n cur_phase = self.agent_curphase[neighbor]\n phase_map = [\n [-1, -1],\n [0, 4],\n [1, 5],\n [2, 6],\n [3, 7],\n [0, 1],\n [2, 3],\n [4, 5],\n [6, 7]\n ]\n one_hot_phase = [0] * 8\n one_hot_phase[phase_map[cur_phase][0]] = 1\n one_hot_phase[phase_map[cur_phase][1]] = 1\n neighbor_obs += one_hot_phase\n result_obs.append(neighbor_obs)\n if neighbor not in self.agent_adjacency:\n nn, visited, level, parent = self.breadth_first_search(neighbor)\n order = {k: v for v, k in enumerate(nn)}\n nn = list(set(nn).intersection(self.adj_dict.keys()))\n nn.sort(key=order.get)\n self.agent_adjacency[neighbor] = nn\n neighbor_adj = self.agent_adjacency[neighbor][:self.adj_neighbors]\n adj.append(neighbor_adj)\n obs[agent_id] = {'observation': np.array(result_obs),\n 'adj': np.array(self.adjacency_index2matrix(adj)).squeeze(axis=0)}\n int_agents = list(obs.keys())\n for k in int_agents:\n obs[str(k)] = obs[k]\n obs.pop(k)\n\n return obs\n\n\n def _get_reward(self):\n\n rwds = {}\n\n ##################\n ## Example : pressure as reward.\n lane_vehicle = self.eng.get_lane_vehicles()\n for agent_id, roads in self.agent_signals.items():\n result_obs = []\n for lane in self.intersections[agent_id]['lanes']:\n # -1 indicates empty roads in 'signal' of roadnet file\n if (lane == -1):\n result_obs.append(-1)\n else:\n # -2 indicates there's no vehicle on this lane\n if (lane not in lane_vehicle.keys()):\n result_obs.append(0)\n else:\n # the vehicle number of this lane\n result_obs.append(len(lane_vehicle[lane]))\n pressure = (np.sum(result_obs[12: 24]) - np.sum(result_obs[0: 12]))\n rwds[agent_id] = pressure\n ##################\n\n ##################\n ## Example : queue length as reward.\n # v_list = self.eng.get_vehicles()\n # for agent_id in self.agent_signals.keys():\n # rwds[agent_id] = 0\n # for vehicle in v_list:\n # vdict = self.eng.get_vehicle_info(vehicle)\n # if(float(vdict['speed'][0])<0.5 and float(vdict['distance'][0]) > 1.0):\n # if(int(vdict['road'][0]) in self.road2signal.keys()):\n # agent_id = self.road2signal[int(vdict['road'][0])]\n # rwds[agent_id]-=1\n # normalization for qlength reward\n # for agent_id in self.agent_signals.keys():\n # rwds[agent_id] /= 10\n\n ##################\n\n ##################\n ## Default reward, which can't be used in rllib\n ## self.lane_vehicle_state is dict. 
keys are agent_id(int), values are sets which maintain the vehicles of each lanes.\n\n # def get_diff(pre,sub):\n # in_num = 0\n # out_num = 0\n # for vehicle in pre:\n # if(vehicle not in sub):\n # out_num +=1\n # for vehicle in sub:\n # if(vehicle not in pre):\n # in_num += 1\n # return in_num,out_num\n #\n # lane_vehicle = self.eng.get_lane_vehicles()\n #\n # for agent_id, roads in self.agents.items():\n # rwds[agent_id] = []\n # for lane in self.intersections[agent_id]['lanes']:\n # # -1 indicates empty roads in 'signal' of roadnet file\n # if (lane == -1):\n # rwds[agent_id].append(-1)\n # else:\n # if(lane not in lane_vehicle.keys()):\n # lane_vehicle[lane] = set()\n # rwds[agent_id].append(get_diff(self.lane_vehicle_state[lane],lane_vehicle[lane]))\n # self.lane_vehicle_state[lane] = lane_vehicle[lane]\n ##################\n # Change int keys to str keys because agent_id in actions must be str\n int_agents = list(rwds.keys())\n for k in int_agents:\n rwds[str(k)] = rwds[k]\n rwds.pop(k)\n return rwds\n\n def create_intersection_adj_list(self, intersections, agents):\n adj_dict = {}\n for idx, inter in intersections.items():\n if str(idx) in agents:\n adj_dict[idx] = [idx]\n for road in inter['start_roads'] or road in inter['end_roads']:\n for neigh_idx, neigh in intersections.items():\n if str(neigh_idx) in agents:\n if road in neigh['end_roads'] or road in neigh['start_roads']:\n adj_dict[idx].append(neigh_idx)\n return adj_dict\n\n def create_intersection_dict(self, intersections):\n dict = {}\n for idx, inter in intersections.items():\n dict[idx] = []\n for road in inter['start_roads'] or road in inter['end_roads']:\n for neigh_idx, neigh in intersections.items():\n if road in neigh['end_roads'] or road in neigh['start_roads']:\n dict[idx].append(neigh_idx)\n return dict\n\n def breadth_first_search(self, start_id):\n visited = {}\n level = {}\n parent = {}\n traversal_output = []\n queue = Queue()\n for node in self.intersection_dict.keys():\n visited[node] = False\n parent[node] = None\n level[node] = -1\n s = start_id\n visited[s] = True\n level[s] = 0\n queue.put(s)\n while not queue.empty():\n u = queue.get()\n traversal_output.append(u)\n for v in self.intersection_dict[u]:\n if not visited[v]:\n visited[v] = True\n parent[v] = u\n level[v] = level[u] + 1\n queue.put(v)\n return traversal_output, visited, level, parent\n\n def adjacency_index2matrix(self, adjacency_index):\n #for idx, adjacency in enumerate(adjacency_index):\n # adjacency_index[idx] = np.array([np.array(i) for i in adjacency])\n # adjacency_index[idx] = np.sort(adjacency_index[idx])\n # adjacency_index = np.array([np.array(i) for i in adjacency_index])\n # adjacency_index_new = np.sort(adjacency_index)\n m = self.to_categorical(adjacency_index, num_classes=self.adj_neighbors)\n return m\n\n def to_categorical(self, y, num_classes, dtype='float32'):\n \"\"\" 1-hot encodes a tensor \"\"\"\n in_cat = []\n for idx, agent in enumerate(y):\n y[idx] = [agent.index(n) for n in agent]\n b_cat = np.array(y, dtype='int')\n input_shape = b_cat.shape\n if input_shape and input_shape[-1] == 1 and len(input_shape) > 1:\n input_shape = tuple(input_shape[:-1])\n b_cat = b_cat.ravel()\n if not num_classes:\n num_classes = np.max(y) + 1\n n = b_cat.shape[0]\n categorical = np.zeros((n, num_classes), dtype=dtype)\n categorical[np.arange(n), b_cat] = 1\n output_shape = input_shape + (num_classes,)\n categorical = np.reshape(categorical, output_shape)\n in_cat.append(categorical.tolist())\n return in_cat" }, { "alpha_fraction": 
0.7142857313156128, "alphanum_fraction": 0.7428571581840515, "avg_line_length": 27.200000762939453, "blob_id": "70f423573d6208694f0dba6c8c2548ab70d4e030", "content_id": "63721623f566d3960f90cf579cb377ed59ec899b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 140, "license_type": "no_license", "max_line_length": 111, "num_lines": 5, "path": "/oldmain/train.sh", "repo_name": "wh-forker/city-brain-challenge", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\ncd starter-kit\n\npython3 train_dqn_example.py --input_dir agent --output_dir out --sim_cfg cfg/simulator.cfg --metric_period 200" }, { "alpha_fraction": 0.6992481350898743, "alphanum_fraction": 0.7293233275413513, "avg_line_length": 25.600000381469727, "blob_id": "28d2ab39975e7751943e3946553bd3e5d56a8f40", "content_id": "5164816c4f4390b8bc69192b528e8c72e622d051", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 133, "license_type": "no_license", "max_line_length": 102, "num_lines": 5, "path": "/oldmain/run.sh", "repo_name": "wh-forker/city-brain-challenge", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\ncd starter-kit \n\npython3 evaluate.py --input_dir agent --output_dir out --sim_cfg cfg/simulator.cfg --metric_period 200\n" }, { "alpha_fraction": 0.6394366025924683, "alphanum_fraction": 0.6591549515724182, "avg_line_length": 40.70588302612305, "blob_id": "9f0220a8f2b530b271f3e1859c5cf916f1cd0f53", "content_id": "06651e44fa498558aafacbd5da0aa99eead5d874", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 710, "license_type": "no_license", "max_line_length": 261, "num_lines": 17, "path": "/colight_evaluate.sh", "repo_name": "wh-forker/city-brain-challenge", "src_encoding": "UTF-8", "text": "#!/bin/bash\ncfg_array=('/starter-kit/cfg/simulator_warm_up.cfg')\nalgorithm=\"APEX\"\nfoldername=\"train_result\"\niteration_array=(735)\nnum_agents=22\n# Don't open lots of evaluating processes in parallel. 
It would cause the cloud server shutdown!!!!\nfor cfg in ${cfg_array[*]}\ndo\n for iteration in ${iteration_array[*]}\n do\n echo \"==========================\"\n echo \"now test ${algorithm} ${cfg} iteration${iteration}\"\n nohup python3 colight_test.py --sim_cfg ${cfg} --iteration ${iteration} --algorithm ${algorithm} --foldername ${foldername} --metric_period 200 --thread_num 4 --agents ${num_agents} > ./bash_result/${cfg:0-9}_iteration${iteration}_${foldername}.log 2>&1 &\n done\n wait\ndone\n\n" }, { "alpha_fraction": 0.4344388246536255, "alphanum_fraction": 0.4448113739490509, "avg_line_length": 41.1529426574707, "blob_id": "0d1e437b1be826d201a11941873baae31334b435", "content_id": "9382d984513fd8e2cd0b97de1c623b74c6ec12ae", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 21499, "license_type": "no_license", "max_line_length": 165, "num_lines": 510, "path": "/CBEngine_rllib/CBEngine_rllib.py", "repo_name": "wh-forker/city-brain-challenge", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\nimport numpy as np\nimport citypb\nfrom ray import tune\nimport os\nimport gym\nfrom ray.rllib.env.multi_agent_env import MultiAgentEnv\nimport argparse\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--num_workers\", type=int, default=4, help=\"rllib num workers\")\nparser.add_argument(\n \"--stop-iters\",\n type=int,\n default=5,\n help=\"Number of iterations to train.\")\n\n\nclass CBEngine_rllib(MultiAgentEnv):\n \"\"\"See MultiAgentEnv\n\n This environment will want a specific configuration:\n config: a dictionary with the environment configuration\n simulator_cfg_file :\n a str of the path of simulator.cfg\n gym_dict :\n a dictionary of gym configuration. It contains \"observation_features\" and \"reward_feature\"\n thread_num :\n a int of thread number\n metric_period :\n interval to log 'info_step *.json'\n\n \"\"\"\n def __init__(self,config):\n super(CBEngine_rllib,self).__init__()\n self.simulator_cfg_file = config['simulator_cfg_file']\n self.gym_dict = config['gym_dict']\n self.observation_features = self.gym_dict['observation_features']\n self.thread_num = config['thread_num']\n self.metric_period = config['metric_period']\n self.vehicle_info_path = config['vehicle_info_path']\n self.__num_per_action = 10\n self.eng = citypb.Engine(self.simulator_cfg_file,self.thread_num)\n self.vehicles = {}\n # CFG FILE MUST HAVE SPACE\n with open(self.simulator_cfg_file, 'r') as f:\n lines = f.readlines()\n for line in lines:\n line = line.rstrip('\\n').split(' ')\n if(line[0] == 'start_time_epoch'):\n self.now_step = int(line[-1])\n if(line[0] == 'max_time_epoch'):\n self.max_step = int(line[-1])\n if(line[0] == 'road_file_addr'):\n self.roadnet_file = line[-1]\n if(line[0] == 'report_log_rate'):\n self.log_interval = int(line[-1])\n if(line[0] == 'report_log_addr'):\n self.log_path = line[-1]\n # here agent is those intersections with signals\n self.intersections = {}\n self.roads = {}\n self.agent_signals = {}\n self.lane_vehicle_state = {}\n self.log_enable = 0\n self.warning_enable = 0\n self.ui_enable = 0\n self.info_enable = 0\n self.road2signal = {}\n self.agent_curphase = {}\n with open(self.roadnet_file,'r') as f:\n lines = f.readlines()\n cnt = 0\n pre_road = 0\n is_obverse = 0\n for line in lines:\n line = line.rstrip('\\n').split(' ')\n if('' in line):\n line.remove('')\n if(len(line) == 1):\n if(cnt == 0):\n self.agent_num = int(line[0])\n cnt+=1\n elif(cnt == 1):\n self.road_num = int(line[0])*2\n cnt +=1\n 
elif(cnt == 2):\n self.signal_num = int(line[0])\n cnt+=1\n else:\n if(cnt == 1):\n self.intersections[int(line[2])] = {\n 'latitude':float(line[0]),\n 'longitude':float(line[1]),\n 'have_signal':int(line[3]),\n 'end_roads':[],\n 'start_roads':[]\n }\n elif(cnt == 2):\n if(len(line)!=8):\n road_id = pre_road[is_obverse]\n self.roads[road_id]['lanes'] = {}\n for i in range(self.roads[road_id]['num_lanes']):\n self.roads[road_id]['lanes'][road_id*100+i] = list(map(int,line[i*3:i*3+3]))\n self.lane_vehicle_state[road_id*100+i] = set()\n\n is_obverse ^= 1\n else:\n self.roads[int(line[-2])]={\n 'start_inter':int(line[0]),\n 'end_inter':int(line[1]),\n 'length':float(line[2]),\n 'speed_limit':float(line[3]),\n 'num_lanes':int(line[4]),\n 'inverse_road':int(line[-1])\n }\n self.roads[int(line[-1])] = {\n 'start_inter': int(line[1]),\n 'end_inter': int(line[0]),\n 'length': float(line[2]),\n 'speed_limit': float(line[3]),\n 'num_lanes': int(line[5]),\n 'inverse_road':int(line[-2])\n }\n self.intersections[int(line[0])]['end_roads'].append(int(line[-1]))\n self.intersections[int(line[1])]['end_roads'].append(int(line[-2]))\n self.intersections[int(line[0])]['start_roads'].append(int(line[-2]))\n self.intersections[int(line[1])]['start_roads'].append(int(line[-1]))\n pre_road = (int(line[-2]),int(line[-1]))\n else:\n # 4 out-roads\n signal_road_order = list(map(int,line[1:]))\n now_agent = int(line[0])\n in_roads = []\n for road in signal_road_order:\n if(road != -1):\n in_roads.append(self.roads[road]['inverse_road'])\n self.road2signal[self.roads[road]['inverse_road']] = now_agent\n else:\n in_roads.append(-1)\n in_roads += signal_road_order\n self.agent_signals[now_agent] = in_roads\n\n # 4 in-roads\n # self.agent_signals[int(line[0])] = self.intersections[int(line[0])]['end_roads']\n # 4 in-roads plus 4 out-roads\n # self.agent_signals[int(line[0])] += self.intersections[int(line[0])]['start_roads']\n for agent,agent_roads in self.agent_signals.items():\n self.intersections[agent]['lanes'] = []\n self.agent_curphase[agent] = 1\n for road in agent_roads:\n ## here we treat road -1 have 3 lanes\n if(road == -1):\n for i in range(3):\n self.intersections[agent]['lanes'].append(-1)\n else:\n for lane in self.roads[road]['lanes'].keys():\n self.intersections[agent]['lanes'].append(lane)\n ####################################\n # self.intersections\n # - a dict\n # - key is intersection_id (int), value is intersection_info\n # - intersection_info : {\n # 'latitude': float value of latitude.\n # 'longitude': float value of longitude.\n # 'have_signal': 0 for no signal, 1 for signal.\n # 'end_roads': roads that end at this intersection.\n # 'start_roads': roads that start at this intersection.\n # 'lanes': optional. If this intersection is signalized, then it has 'lanes'. 24 dimension list with the same order as 'lane_vehicle_num' observation\n # }\n\n # self.roads\n # - a dict\n # - key is road_id (int), value is road_info\n # - road_info : {\n # 'start_inter': intersection this road starts with.\n # 'end_inter': intersection this road ends with.\n # 'length': length of this road.\n # 'speed_limit': speed limit of this road.\n # 'num_lanes': number of lanes of this road.\n # 'inverse_road': the inverse road of this road.\n # }\n\n # self.agent_signals\n # - a dict\n # - key is agent (int), value is signal_info\n # - signal_info is a list of 8 road_id. First 4 roads is in roads. 
Last 4 roads is out roads.\n\n # self.agent_curphase\n # - a dict\n # - key is agent_id (int), value is current phase\n ####################################\n\n ############ rllib api start here\n self.agents = list(map(str,self.agent_signals.keys()))\n self.n_obs = self.gym_dict['observation_dimension']\n\n def set_log(self,flg):\n self.log_enable = flg\n\n def set_warning(self,flg):\n self.warning_enable = flg\n\n def set_ui(self,flg):\n self.ui_enable = flg\n\n def set_info(self,flg):\n self.info_enable = flg\n\n def reset(self):\n del self.eng\n self.eng = citypb.Engine(self.simulator_cfg_file, self.thread_num)\n self.now_step = 0\n self.vehicles.clear()\n obs = self._get_observations()\n\n return obs\n\n def step(self, actions):\n # here action is a dict {agent_id:phase}\n # agent_id must be str\n\n for agent_id,phase in actions.items():\n result = self.eng.set_ttl_phase(int(agent_id),phase)\n if(result == -1 and self.warning_enable):\n print('Warnning: at step {} , agent {} switch to phase {} . Maybe empty road'.format(self.now_step,agent_id,phase))\n for cur in range(self.__num_per_action):\n self.eng.next_step()\n self.now_step+=1\n if((self.now_step +1)% self.log_interval == 0 and self.ui_enable==1):\n self.eng.log_info(os.path.join(self.log_path,'time{}.json'.format(self.now_step//self.log_interval)))\n\n # if((self.now_step+1) % self.log_interval ==0 and self.log_enable == 1):\n # # replay file\n # # vehicle info file\n # vlist = self.eng.get_vehicles()\n # for vehicle in vlist:\n # if(vehicle not in self.vehicles.keys()):\n # self.vehicles[vehicle] = {}\n # for k,v in self.eng.get_vehicle_info(vehicle).items():\n # self.vehicles[vehicle][k] = v\n # self.vehicles[vehicle]['step'] = [self.now_step]\n if((self.now_step + 1) % self.metric_period == 0 and self.log_enable == 1):\n self.eng.log_vehicle_info(os.path.join(self.vehicle_info_path,'info_step {}.log'.format(self.now_step)))\n # with open(os.path.join(self.log_path,'info_step {}.log'.format(self.now_step)),'w+') as f:\n # f.write(\"{}\\n\".format(self.eng.get_vehicle_count()))\n # for vehicle in self.vehicles.keys():\n # # if(self.vehicles[vehicle]['step'][0] <= self.now_step - self.metric_period):\n # # continue\n # f.write(\"for vehicle {}\\n\".format(vehicle))\n # for k,v in self.vehicles[vehicle].items():\n # # f.write(\"{}:{}\\n\".format(k,v))\n # if(k != 'step'):\n # f.write(\"{} :\".format(k))\n # for val in v:\n # f.write(\" {}\".format(val))\n # f.write(\"\\n\")\n # f.write('step :')\n # for val in self.vehicles[vehicle]['step']:\n # f.write(\" {}\".format(val))\n # f.write(\"\\n\")\n # f.write(\"-----------------\\n\")\n\n\n reward = self._get_reward()\n dones = self._get_dones()\n obs = self._get_observations()\n info = self._get_info()\n for agent_id,phase in actions.items():\n self.agent_curphase[int(agent_id)] = phase\n return obs, reward, dones , info\n\n\n def _get_info(self):\n info = {}\n if(self.info_enable == 0):\n return info\n else:\n v_list = self.eng.get_vehicles()\n for vehicle in v_list:\n info[vehicle] = self.eng.get_vehicle_info(vehicle)\n return info\n def _get_reward(self):\n raise NotImplementedError\n # in number\n # def get_diff(pre,sub):\n # in_num = 0\n # out_num = 0\n # for vehicle in pre:\n # if(vehicle not in sub):\n # out_num +=1\n # for vehicle in sub:\n # if(vehicle not in pre):\n # in_num += 1\n # return in_num,out_num\n #\n # rwds = {}\n # # return every\n # lane_vehicle = self.eng.get_lane_vehicles()\n #\n # for agent_id, roads in self.agent_signals.items():\n # rwds[agent_id] = 
0\n # result_reward = []\n # for lane in self.intersections[agent_id]['lanes']:\n # # -1 indicates empty roads in 'signal' of roadnet file\n # if (lane == -1):\n # result_reward.append(-1)\n # else:\n # if(lane not in lane_vehicle.keys()):\n # lane_vehicle[lane] = set()\n # result_reward.append(get_diff(self.lane_vehicle_state[lane],lane_vehicle[lane]))\n # self.lane_vehicle_state[lane] = lane_vehicle[lane]\n # for i, res in enumerate(result_reward):\n # if(isinstance(res,int) == False):\n # rwds[agent_id] += res[0]\n\n\n # pressure\n\n\n\n # rwds = {}\n # if(self.reward_feature == 'pressure'):\n # lane_vehicle = self.eng.get_lane_vehicles()\n # for agent_id, roads in self.agent_signals.items():\n # result_obs = []\n # for lane in self.intersections[agent_id]['lanes']:\n # # -1 indicates empty roads in 'signal' of roadnet file\n # if (lane == -1):\n # result_obs.append(-1)\n # else:\n # # -2 indicates there's no vehicle on this lane\n # if (lane not in lane_vehicle.keys()):\n # result_obs.append(0)\n # else:\n # # the vehicle number of this lane\n # result_obs.append(len(lane_vehicle[lane]))\n # pressure = (np.sum(result_obs[12: 24]) - np.sum(result_obs[0: 12]))\n # rwds[agent_id] = pressure\n # if(self.reward_feature == 'qlength'):\n # v_list = self.eng.get_vehicles()\n # for agent_id in self.agent_signals.keys():\n # rwds[agent_id] = 0\n # for vehicle in v_list:\n # vdict = self.eng.get_vehicle_info(vehicle)\n # if(float(vdict['speed'][0])<0.5 and float(vdict['distance'][0]) > 1.0):\n # if(int(vdict['road'][0]) in self.road2signal.keys()):\n # agent_id = self.road2signal[int(vdict['road'][0])]\n # rwds[agent_id]-=1\n # # normalization for qlength reward\n # for agent_id in self.agent_signals.keys():\n # rwds[agent_id] /= 10\n # int_agents = list(rwds.keys())\n # for k in int_agents:\n # rwds[str(k)] = rwds[k]\n # rwds.pop(k)\n # return rwds\n def _get_observations(self):\n # return self.eng.get_lane_vehicle_count()\n obs = {}\n lane_vehicle = self.eng.get_lane_vehicles()\n vehicle_speed = self.eng.get_vehicle_speed()\n\n features = self.observation_features\n\n # add 1 dimension to give current step for fixed time agent\n for agent_id, roads in self.agent_signals.items():\n result_obs = []\n for feature in features:\n if(feature == 'lane_speed'):\n for lane in self.intersections[agent_id]['lanes']:\n # -1 indicates empty roads in 'signal' of roadnet file\n if(lane == -1):\n result_obs.append(-1)\n else:\n # -2 indicates there's no vehicle on this lane\n if(lane not in lane_vehicle.keys()):\n result_obs.append(-2)\n else:\n # the average speed of this lane\n speed_total = 0.0\n for vehicle in lane_vehicle[lane]:\n speed_total += vehicle_speed[vehicle]\n result_obs.append(speed_total / len(lane_vehicle[lane]))\n\n if(feature == 'lane_vehicle_num'):\n for lane in self.intersections[agent_id]['lanes']:\n # -1 indicates empty roads in 'signal' of roadnet file\n if(lane == -1):\n result_obs.append(-1)\n else:\n # -2 indicates there's no vehicle on this lane\n if(lane not in lane_vehicle.keys()):\n result_obs.append(0)\n else:\n # the vehicle number of this lane\n result_obs.append(len(lane_vehicle[lane]))\n if(feature == 'classic'):\n # first 8 lanes\n for id, lane in enumerate(self.intersections[agent_id]['lanes']):\n if(id > 11):\n break\n if(lane%100 == 2):\n continue\n if(lane == -1):\n if(self.intersections[agent_id]['lanes'][id:id+3] == [-1,-1,-1]):\n result_obs.append(0)\n result_obs.append(0)\n else:\n if (lane not in lane_vehicle.keys()):\n result_obs.append(0)\n else:\n # the vehicle 
number of this lane\n result_obs.append(len(lane_vehicle[lane]))\n # onehot phase\n cur_phase = self.agent_curphase[agent_id]\n phase_map = [\n [-1,-1],\n [0,4],\n [1,5],\n [2,6],\n [3,7],\n [0,1],\n [2,3],\n [4,5],\n [6,7]\n ]\n one_hot_phase = [0]*8\n one_hot_phase[phase_map[cur_phase][0]] = 1\n one_hot_phase[phase_map[cur_phase][1]] = 1\n result_obs += one_hot_phase\n obs[agent_id] = {'observation':result_obs}\n int_agents = list(obs.keys())\n for k in int_agents:\n obs[str(k)] = obs[k]\n obs.pop(k)\n return obs\n\n def _get_dones(self):\n #\n dones = {}\n for agent_id in self.agent_signals.keys():\n dones[str(agent_id)] = self.now_step >= self.max_step\n dones[\"__all__\"] = self.now_step >= self.max_step\n return dones\n\n\nif __name__ == \"__main__\":\n args = parser.parse_args()\n # order is important\n \"\"\"\n simulator_cfg_file :\n a str of the path of simulator.cfg\n gym_dict :\n a dictionary of gym configuration. Now there's only 'observation_features', which is a list of str.\n thread_num :\n a int of thread number\n \"\"\"\n env_config = {\n \"simulator_cfg_file\": 'cfg/simulator.cfg',\n \"thread_num\": 8,\n \"gym_dict\": {\n 'observation_features':['classic'],\n 'reward_feature':'qlength'\n },\n \"metric_period\": 3600\n }\n ACTION_SPACE = gym.spaces.Discrete(9)\n OBSERVATION_SPACE = gym.spaces.Dict({\n \"observation\": gym.spaces.Box(low=-1e10, high=1e10, shape=(48,))\n })\n stop = {\n \"training_iteration\": args.stop_iters\n }\n tune_config = {\n \"env\":CBEngine_rllib,\n \"env_config\" : env_config,\n \"multiagent\": {\n \"policies\": {\n \"default_policy\": (None, OBSERVATION_SPACE, ACTION_SPACE, {},)\n }\n },\n\n \"lr\": 1e-4,\n \"log_level\": \"WARN\",\n \"lambda\": 0.95\n }\n\n tune.run(\"A3C\",config = tune_config,stop = stop)\n\n\n\n\n\n\n # env = CBEngine_malib(env_config)\n # obs = env.reset()\n # while True:\n # act_dict = {}\n # for i, aid in enumerate(env.agents):\n # act_dict[aid] = 1\n # print('act_dict',act_dict)\n # print('obs',obs)\n # next_obs, rew, done, info = env.step(act_dict)\n # print('rwd', rew)\n # print('done', done)\n # print('info', info)\n # obs = next_obs\n # if all(done.values()):\n # break\n # print()\n\n" }, { "alpha_fraction": 0.6355932354927063, "alphanum_fraction": 0.6694915294647217, "avg_line_length": 22.700000762939453, "blob_id": "966749a0d4684eea632f79f29877ec39603881bf", "content_id": "1ee9a92f954feb21ad36758c07bb31d1b014947a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 236, "license_type": "no_license", "max_line_length": 52, "num_lines": 10, "path": "/presslight_train.sh", "repo_name": "wh-forker/city-brain-challenge", "src_encoding": "UTF-8", "text": "#!/bin/sh\ncd /starter-kit\n\npython3 presslight_train.py \\\n --gym_cfg_dir /starter-kit/agent \\\n --sim_cfg /starter-kit/cfg/simulator_warm_up.cfg \\\n --stop-iters 30000 \\\n --foldername train_result \\\n --num_workers 4 \\\n --thread_num 4" }, { "alpha_fraction": 0.6438848972320557, "alphanum_fraction": 0.6726618409156799, "avg_line_length": 24.363636016845703, "blob_id": "6d1a06403e0340495a6992d2eb0f61ee209409fb", "content_id": "a53f2df4d27b3a70981134516418e806f5d3257b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 278, "license_type": "no_license", "max_line_length": 52, "num_lines": 11, "path": "/train_qmix_warm_up.sh", "repo_name": "wh-forker/city-brain-challenge", "src_encoding": "UTF-8", "text": "#!/bin/sh\ncd starter-kit\n\npython3 qmix_train.py 
\\\n --sim_cfg /starter-kit/cfg/simulator_warm_up.cfg \\\n --roadnet /starter-kit/data/roadnet_warm_up.txt \\\n --stop-iters 20000 \\\n --foldername train_result_qmix \\\n --num_workers 3 \\\n --thread_num 3 \\\n --gym_cfg_dir agent/qmix" }, { "alpha_fraction": 0.5656894445419312, "alphanum_fraction": 0.5728555917739868, "avg_line_length": 32.369564056396484, "blob_id": "051e90dfda5302df90889a7e249fc0d05863952a", "content_id": "0eecc7d53ebf4f4ea6584daa737324f45df52e0a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4605, "license_type": "no_license", "max_line_length": 139, "num_lines": 138, "path": "/MP_exp_gen.py", "repo_name": "wh-forker/city-brain-challenge", "src_encoding": "UTF-8", "text": "import gym\nimport numpy as np\nimport os\nimport argparse\n\nimport ray._private.utils\n\nfrom ray.rllib.models.preprocessors import get_preprocessor\nfrom ray.rllib.evaluation.sample_batch_builder import SampleBatchBuilder\nfrom ray.rllib.offline.json_writer import JsonWriter\n\nfrom agent.CBEngine_round3 import CBEngine_round3\nfrom agent.agent_MP import MPAgent\nfrom agent import gym_cfg as gym_cfg_submission\n\nparser = argparse.ArgumentParser()\n\nif __name__ == \"__main__\":\n batch_builder = SampleBatchBuilder() # or MultiAgentSampleBatchBuilder\n writer = JsonWriter(\n os.path.join(ray._private.utils.get_user_temp_dir(), \"demo-out\"))\n print('target folder', os.path.join(ray._private.utils.get_user_temp_dir(), \"demo-out\"))\n # You normally wouldn't want to manually create sample batches if a\n # simulator is available, but let's do it anyways for example purposes:\n\n # some argument\n parser.add_argument(\n \"--num_workers\",\n type=int,\n default=30,\n help=\"rllib num workers\"\n )\n parser.add_argument(\n \"--multiflow\",\n '-m',\n action=\"store_true\",\n default=False,\n help=\"use multiple flow file in training\"\n )\n parser.add_argument(\n \"--stop-iters\",\n type=int,\n default=10,\n help=\"Number of iterations to train.\")\n parser.add_argument(\n \"--algorithm\",\n type=str,\n default=\"A3C\",\n help=\"algorithm for rllib\"\n )\n parser.add_argument(\n \"--sim_cfg\",\n type=str,\n default=\"/starter-kit/cfg/simulator_round3_flow0.cfg\",\n help=\"simulator file for CBEngine\"\n )\n parser.add_argument(\n \"--metric_period\",\n type=int,\n default=3600,\n help=\"simulator file for CBEngine\"\n )\n parser.add_argument(\n \"--thread_num\",\n type=int,\n default=8,\n help=\"thread num for CBEngine\"\n )\n\n # find the submission path to import gym_cfg\n args = parser.parse_args()\n\n gym_cfg_instance = gym_cfg_submission.gym_cfg()\n gym_dict = gym_cfg_instance.cfg\n\n env_config = {\n \"simulator_cfg_file\": args.sim_cfg,\n \"thread_num\": args.thread_num,\n \"gym_dict\": gym_dict,\n \"metric_period\": args.metric_period,\n \"vehicle_info_path\": \"/starter-kit/log/\"\n }\n env = CBEngine_round3(env_config)\n agent = MPAgent()\n\n ACTION_SPACE = gym.spaces.Discrete(9)\n OBSERVATION_SPACE = gym.spaces.Box(low=-1e10, high=1e10, shape=(env.observation_dimension,))\n # RLlib uses preprocessors to implement transforms such as one-hot encoding\n # and flattening of tuple and dict observations. 
For CartPole a no-op\n # preprocessor is used, but this may be relevant for more complex envs.\n prep = get_preprocessor(OBSERVATION_SPACE)(OBSERVATION_SPACE)\n print(\"The preprocessor is\", prep)\n\n for eps_id in range(200):\n observations = env.reset()\n infos = {'step': 0}\n\n prev_action = np.zeros_like(ACTION_SPACE.sample())\n prev_reward = 0\n done = False\n t = 0\n while not done:\n all_info = {\n 'observations': observations,\n 'info': infos\n }\n action = agent.act(all_info)\n new_obs, rew, done, infos = env.step(action)\n\n for agent_id, current_obs, next_obs, reward in zip(observations.keys(), observations.values(), new_obs.values(), rew.values()):\n current_obs = current_obs[\"observation\"]\n next_obs = next_obs[\"observation\"]\n batch_builder.add_values(\n agent_index=int(agent_id),\n t=t,\n eps_id=eps_id,\n #obs=prep.transform(observations),\n obs=prep.transform(current_obs),\n # obs=observations,\n # obs=current_obs,\n actions=action,\n action_prob=1.0, # put the true action probability here\n action_logp=0.0,\n rewards=reward,\n prev_actions=prev_action,\n prev_rewards=prev_reward,\n dones=done,\n infos=infos,\n new_obs=prep.transform(next_obs)\n # new_obs=prep.transform(new_obs)\n # new_obs = new_obs\n # new_obs=next_obs\n )\n observations = new_obs\n prev_action = action\n prev_reward = rew\n t += 1\n writer.write(batch_builder.build_and_reset())\n" }, { "alpha_fraction": 0.5808189511299133, "alphanum_fraction": 0.5915948152542114, "avg_line_length": 45.400001525878906, "blob_id": "15547d871976720559496b5084a57358e2f98cb6", "content_id": "6b695e1491cfb4b45527960b58c2c46c74ff07da", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 928, "license_type": "no_license", "max_line_length": 107, "num_lines": 20, "path": "/agent/qmix/gym_cfg.py", "repo_name": "wh-forker/city-brain-challenge", "src_encoding": "UTF-8", "text": "class gym_cfg():\n def __init__(self):\n \"\"\"\n 'custom_observation': If 'True', use custom observation feature in CBEngine_round3.py of agent.zip.\n If 'False', use 'observation_features'\n\n 'observation_features' : Same as round2. Add 'classic' observation feature,\n which has dimension of 16.\n\n 'observation_dimension' : The dimension of observation. Need to be correct both custom observation\n and default observation. 
Remember to add size of one-hot-encoding vector for agent_id.\n Size of one-hot-encoding vector for agent_id = 5 (warm_up), 10 (round2)\n \"\"\"\n\n self.cfg = {\n 'observation_features': ['lane_vehicle_num', 'classic'],\n # Add the length of one-hot encoded agent_id vector (ref above)\n 'observation_dimension': 45,\n 'custom_observation': True\n }\n" }, { "alpha_fraction": 0.5417713522911072, "alphanum_fraction": 0.5548052787780762, "avg_line_length": 30.053659439086914, "blob_id": "c03d28149a8c4a9af83bb41b671d63388f6c3ba2", "content_id": "4053b8aa05abf701d1b742e89804f70645714821", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6368, "license_type": "no_license", "max_line_length": 126, "num_lines": 205, "path": "/colight_train.py", "repo_name": "wh-forker/city-brain-challenge", "src_encoding": "UTF-8", "text": "from copy import deepcopy\nimport gym\nfrom agent.colight.CBEngine_round3 import CBEngine_round3\nfrom baselines.colight.colight import Colight\nimport ray\nimport os\nimport numpy as np\nimport argparse\nimport sys\nfrom ray.rllib.agents.dqn.dqn_torch_policy import DQNTorchPolicy\nfrom ray.rllib.models import ModelCatalog\nfrom ray.tune.integration.mlflow import MLflowLoggerCallback\nparser = argparse.ArgumentParser()\n\n\n\nif __name__ == \"__main__\":\n # some argument\n parser.add_argument(\n \"--num_workers\",\n type=int,\n default=30,\n help=\"rllib num workers\"\n )\n parser.add_argument(\n \"--multiflow\",\n '-m',\n action=\"store_true\",\n default = False,\n help=\"use multiple flow file in training\"\n )\n parser.add_argument(\n \"--stop-iters\",\n type=int,\n default=10,\n help=\"Number of iterations to train.\")\n parser.add_argument(\n \"--algorithm\",\n type=str,\n default=\"A3C\",\n help=\"algorithm for rllib\"\n )\n parser.add_argument(\n \"--sim_cfg\",\n type=str,\n default=\"/starter-kit/cfg/simulator_round3_flow0.cfg\",\n help = \"simulator file for CBEngine\"\n )\n parser.add_argument(\n \"--metric_period\",\n type=int,\n default=3600,\n help = \"simulator file for CBEngine\"\n )\n parser.add_argument(\n \"--thread_num\",\n type=int,\n default=8,\n help = \"thread num for CBEngine\"\n )\n parser.add_argument(\n \"--gym_cfg_dir\",\n type = str,\n default=\"agent\",\n help = \"gym_cfg (observation, reward) for CBEngine\"\n )\n parser.add_argument(\n \"--checkpoint_freq\",\n type = int,\n default = 5,\n help = \"frequency of saving checkpoint\"\n )\n\n parser.add_argument(\n \"--foldername\",\n type = str,\n default = 'train_result',\n help = 'The result of the training will be saved in ./model/$algorithm/$foldername/. 
Foldername can\\'t have any space'\n )\n\n parser.add_argument(\n \"--agents\",\n type = int,\n required =True,\n help = 'The number of agents of the provided roadnet'\n )\n\n # find the submission path to import gym_cfg\n args = parser.parse_args()\n for dirpath, dirnames, file_names in os.walk(args.gym_cfg_dir):\n for file_name in [f for f in file_names if f.endswith(\".py\")]:\n if file_name == \"gym_cfg.py\":\n cfg_path = dirpath\n sys.path.append(str(cfg_path))\n import gym_cfg as gym_cfg_submission\n gym_cfg_instance = gym_cfg_submission.gym_cfg()\n gym_dict = gym_cfg_instance.cfg\n simulator_cfg_files=[]\n\n # if set '--multiflow', then the CBEngine will utilize flows in 'simulator_cfg_files'\n if(args.multiflow):\n simulator_cfg_files = [\n '/starter-kit/cfg/simulator_round3_flow0.cfg'\n ]\n else:\n simulator_cfg_files = [args.sim_cfg]\n print('The cfg files of this training ',format(simulator_cfg_files))\n class MultiFlowCBEngine(CBEngine_round3):\n def __init__(self, env_config):\n env_config[\"simulator_cfg_file\"] = simulator_cfg_files[(env_config.worker_index - 1) % len(simulator_cfg_files)]\n super(MultiFlowCBEngine, self).__init__(config=env_config)\n\n\n # some configuration\n env_config = {\n \"simulator_cfg_file\": args.sim_cfg,\n \"thread_num\": args.thread_num,\n \"gym_dict\": gym_dict,\n \"metric_period\":args.metric_period,\n \"vehicle_info_path\":\"/starter-kit/log/\"\n }\n obs_size = gym_dict['observation_dimension']\n OBSERVATION_SPACE = gym.spaces.Dict({\n \"observation\": gym.spaces.Box(low=-1e10, high=1e10, shape=(args.agents,obs_size,), dtype=np.float32),\n 'adj': gym.spaces.Box(low=-1e10, high=1e10, shape=(args.agents,args.agents,args.agents), dtype=np.float32)\n })\n ACTION_SPACE = gym.spaces.Discrete(9)\n stop = {\n \"training_iteration\": args.stop_iters\n }\n ################################\n ModelCatalog.register_custom_model(\n \"colight\", Colight)\n if args.algorithm == \"APEX\":\n from ray.rllib.agents.dqn.apex import APEX_DEFAULT_CONFIG\n config = deepcopy(APEX_DEFAULT_CONFIG['model'])\n config.update({\n 'custom_model': \"colight\",\n 'custom_model_config': {\n 'num_neighbors': args.agents,\n 'num_agents': args.agents,\n 'num_lanes': 24,\n 'mlp_layer': [32, 32],\n 'cnn_layer': [[32, 32], [32, 32]],\n 'cnn_heads': [8],\n },\n 'fcnet_hiddens': [8, 8],\n })\n elif args.algorithm == \"DQN\":\n from ray.rllib.agents.dqn.dqn import DEFAULT_CONFIG\n config = deepcopy(DEFAULT_CONFIG['model'])\n config.update({\n 'custom_model': \"colight\",\n 'custom_model_config': {\n 'num_neighbors': args.agents,\n 'num_agents': args.agents,\n 'num_lanes': 24,\n 'mlp_layer': [32, 32],\n 'cnn_layer': [[32, 32], [32, 32]],\n 'cnn_heads': [8],\n },\n 'fcnet_hiddens': [8, 8],\n })\n\n # modify this\n tune_config = {\n # env config\n \"framework\": \"torch\",\n \"env\":MultiFlowCBEngine,\n \"env_config\" : env_config,\n \"multiagent\": {\n \"policies\": {\n \"default_policy\": (DQNTorchPolicy, OBSERVATION_SPACE, ACTION_SPACE, {},)\n }\n },\n \"num_cpus_per_worker\":args.thread_num,\n \"num_workers\":args.num_workers,\n \"num_gpus\": 1,\n \"model\": config,\n \"n_step\": 5\n\n\n\n # add your training config\n\n }\n ########################################\n #ray.init(address = \"auto\")\n #ray.init(local_mode=True)\n local_path = 'model'\n \n\n\n def name_creator(self=None):\n return args.foldername\n\n\n # train model\n ray.tune.run(args.algorithm, config=tune_config, local_dir=local_path, stop=stop,\n checkpoint_freq=args.checkpoint_freq,trial_dirname_creator = 
name_creator,\n callbacks=[MLflowLoggerCallback(\n tracking_uri=\"http://10.195.1.7:5000\",\n experiment_name=\"colight_APEX_rllib\",\n save_artifact=True)]\n )\n\n\n" }, { "alpha_fraction": 0.5222734212875366, "alphanum_fraction": 0.5271889567375183, "avg_line_length": 37.29411697387695, "blob_id": "12bf2e4c86d36838dbca556296345d42d974f885", "content_id": "983399bc88038bc14ed440020342c60e729202bb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3255, "license_type": "no_license", "max_line_length": 80, "num_lines": 85, "path": "/agent/qmix/CBEngine_qmix.py", "repo_name": "wh-forker/city-brain-challenge", "src_encoding": "UTF-8", "text": "import numpy as np\n\nfrom CBEngine_rllib.CBEngine_rllib import CBEngine_rllib as CBEngine_rllib_class\n\n\nclass CBEngine_qmix(CBEngine_rllib_class):\n\n def __init__(self, config):\n super(CBEngine_qmix, self).__init__(config)\n self.observation_features = self.gym_dict['observation_features']\n self.custom_observation = self.gym_dict['custom_observation']\n self.observation_dimension = self.gym_dict['observation_dimension']\n self.agent_N_mapping = {}\n self.agent_id_one_hot_mapping = {}\n\n def _get_observations(self):\n obs = super(CBEngine_qmix, self)._get_observations()\n # RLLib QMix expects obs with key 'obs' unlike 'observation'\n # provided by CBEngine env\n for agent_id, obs_dict in obs.items():\n obs[agent_id] = {'obs': obs_dict['observation']}\n\n if self.custom_observation == False:\n return obs\n else:\n # Custom observation for QMix\n # Obs from env + one hot encoding of agent_id\n\n if len(self.agent_N_mapping) == 0:\n for i, agent_id in enumerate(self.agents):\n self.agent_N_mapping[agent_id] = i\n\n self.agent_id_one_hot_mapping = self.get_one_hot_encoding(\n self.agent_N_mapping\n )\n for agent_id, agent_obs in obs.items():\n obs[agent_id]['obs'] = agent_obs['obs'] \\\n + self.agent_id_one_hot_mapping[agent_id]\n\n return obs\n\n def _get_reward(self):\n\n rwds = {}\n\n ##################\n ## Pressure as reward.\n lane_vehicle = self.eng.get_lane_vehicles()\n for agent_id, roads in self.agent_signals.items():\n result_obs = []\n for lane in self.intersections[agent_id]['lanes']:\n # -1 indicates empty roads in 'signal' of roadnet file\n if (lane == -1):\n result_obs.append(-1)\n else:\n # -2 indicates there's no vehicle on this lane\n if (lane not in lane_vehicle.keys()):\n result_obs.append(0)\n else:\n # the vehicle number of this lane\n result_obs.append(len(lane_vehicle[lane]))\n pressure = (np.sum(result_obs[12: 24]) - np.sum(result_obs[0: 12]))\n rwds[agent_id] = pressure\n\n int_agents = list(rwds.keys())\n for k in int_agents:\n rwds[str(k)] = rwds[k]\n rwds.pop(k)\n return rwds\n\n def get_one_hot_encoding(self, agent_id_n_mapping):\n agent_one_hot_mapping = {}\n n = len(agent_id_n_mapping)\n one_hot_n = [int(i) for i in bin(n)[2:]]\n for agent_id, agent_n in agent_id_n_mapping.items():\n binary_agent_n = [\n int(i) for i in bin(agent_n)[2:]\n ]\n prefix_vec = [\n 0 for i in range(len(one_hot_n) - len(binary_agent_n))\n ]\n agent_one_hot_id = prefix_vec + binary_agent_n\n assert len(agent_one_hot_id) == len(one_hot_n)\n agent_one_hot_mapping[agent_id] = agent_one_hot_id\n return agent_one_hot_mapping\n" }, { "alpha_fraction": 0.6505746841430664, "alphanum_fraction": 0.6528735756874084, "avg_line_length": 31.22222137451172, "blob_id": "ef37ea1452589fcc10cb54e66d6208e3c245a6e7", "content_id": "293551263222a0d8b93cf32a1d48d00898746684", "detected_licenses": [], 
"is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 870, "license_type": "no_license", "max_line_length": 97, "num_lines": 27, "path": "/baselines/presslight/presslight.py", "repo_name": "wh-forker/city-brain-challenge", "src_encoding": "UTF-8", "text": "import torch.nn as nn\nimport os\nimport sys\n\npath = os.path.split(os.path.realpath(__file__))[0]\nsys.path.append(path)\n\n# contains all of the intersections\nclass Presslight(nn.Module):\n\n def __init__(self, ob_length, action_space, num_hidden_nodes, num_hidden_layers):\n # Torch version\n super(Presslight, self).__init__()\n\n layers = [nn.Linear(in_features=ob_length, out_features=num_hidden_nodes)]\n\n # number of hidden layers specified in hp_config\n for _ in range(num_hidden_layers - 2):\n layers.append(nn.ReLU())\n layers.append(nn.Linear(in_features=num_hidden_nodes, out_features=num_hidden_nodes))\n\n layers.append(nn.ReLU())\n layers.append(nn.Linear(num_hidden_nodes, out_features=action_space))\n self.net = nn.Sequential(*layers)\n\n def forward(self, x):\n return self.net(x)\n" }, { "alpha_fraction": 0.4047619104385376, "alphanum_fraction": 0.6428571343421936, "avg_line_length": 13, "blob_id": "9a24980ec252932683fc9dd0081fbd6c55435e05", "content_id": "803e054ee5c16ae4286f890d0f3f22f1884a3b90", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 42, "license_type": "no_license", "max_line_length": 13, "num_lines": 3, "path": "/oldmain/requirements.txt", "repo_name": "wh-forker/city-brain-challenge", "src_encoding": "UTF-8", "text": "pytest==6.2.3\npylint==2.7.4\nflake8==3.9.1\n" }, { "alpha_fraction": 0.7192581295967102, "alphanum_fraction": 0.7446646094322205, "avg_line_length": 43.202247619628906, "blob_id": "fa2883d8939475535db0418b20a6c427de459876", "content_id": "f5455ec7a11bf6d49c26dc951cf6d77ad8748d9e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 3936, "license_type": "no_license", "max_line_length": 296, "num_lines": 89, "path": "/README.md", "repo_name": "wh-forker/city-brain-challenge", "src_encoding": "UTF-8", "text": "# KDDCup 2021: City Brain Challenge - Praktikum Big Data Science, SoSe 2021\n## Introduction\n\nChallenge documentation: [KDDCup 2021 City-Brain-Challenge](https://kddcup2021-citybrainchallenge.readthedocs.io/en/latest/city-brain-challenge.html). \n\nEach directory contains a baseline and its respective files to conduct training. The training metrics and artefacts are logged to an MLFlow dashboard. The resulting checkpoints could be evaluated in order to obtain the total amount of served vehicles, the delay index and the average travel time.\n\nMLFlow dashboard: [http://10.195.1.7:5000/#](http://10.195.1.7:5000/#)\n\n## How to train the baselines\n### For local training \nFor local training, run the docker container:\n```\ndocker run -it -v <path to code repository>:/starter-kit --shm-size=20gb citybrainchallenge/cbengine:0.1.3 bash\n```\nWhen the docker container is running and the shell is accessible, proceed to model specific instructions below. \n\n\n#### Presslight\nTo run Presslight training locally, in the bash shell inside docker container, execute:\n```\n$ cd starter-kit\n$ ./presslight_train.sh\n```\nSet the required params according to your choice. \n**Note:** Change the values for **sim_cfg** and **roadnet** path based on which challenge stage (_warm_up_, _round2_, _round3_) you wish to train for. 
MLFlow logging can be activated in the presslight_train.py file. The checkpoints will be saved in model/presslight/ directory. \n\nPaper: http://personal.psu.edu/hzw77/publications/presslight-kdd19.pdf\n\n#### QMix\nTo run QMix training locally, in the bash shell inside docker container, execute:\n```\n$ cd starter-kit\n$ python3 qmix_train.py --sim_cfg /starter-kit/cfg/simulator_warm_up.cfg --roadnet /starter-kit/data/roadnet_warm_up.txt --stop-iters 20000 --foldername train_result_qmix_warm_up_20000_iters --num_workers 3 --thread_num 3\n```\nSet the required params according to your choice. \n**Note:** Change the values for **sim_cfg** and **roadnet** path based on which challenge stage (_warm_up_, _round2_, _round3_) you wish to train for. \n**Important:** Change the \"observation_dimension\" in agent/qmix/gym_cfg.py depending upon the challenge stage chosen, before training. Set it to 45 for _warm_up_ and 50 for _round2_.\n\nPaper: https://arxiv.org/abs/1803.11485\n\n#### Colight\nTo run Colight training locally, in the bash shell inside docker container, execute:\n```\n$ cd starter-kit\n$ bash colight_train.sh\n```\nSet the required params according to your choice. \n**Note:** Change the values for **sim_cfg**, **roadnet** path based on which challenge stage (_warm_up_, _round2_, _round3_) and **agents** to the you wish to train for. \n**Important:** Change the \"adj_neighbors\" in agent/colight/gym_cfg.py depending upon the challenge stage chosen, before training. It has to match the number of agents. _warm_up_: 22, _round2_: 859, _round3_: 1004\n\nPaper: http://personal.psu.edu/hzw77/publications/colight-cikm19.pdf\n\n## How to evaluate training\n\nFor local evaluation, run the docker container:\n```\ndocker run -it -v <path to code repository>:/starter-kit --shm-size=20gb citybrainchallenge/cbengine:0.1.3 bash\n```\nWhen the docker container is running and the shell is accessible, proceed to model specific instructions below. \n\n### Presslight\n\nTo run the evaluation, in the bash shell inside docker container, execute:\n```\n$ cd starter-kit\n$ ./presslight_evaluate.sh\n```\n\n### Colight\n\nTo run the evaluation, in the bash shell inside docker container, execute:\n```\n$ cd starter-kit\n$ bash colight_evaluate.sh\n```\n\n**Important:** Change the \"iteration_array\" to the checkpoints you want to evaluate, Change \"num_agents\" according to the configuration of the trained model.\n\n\n## Generate Max Pressure Experience\n\nTo generate max pressure experience as batches, please execute the ```MP_exp_gen.py```. 
The resulting JSON-file will be in directory \"demo-out\".\n\n## Team \nNiklas StrauรŸ (supervisor) \nCharlotte Vaessen \nFaheem Zunjani \nMaximilian Gawlick \n" }, { "alpha_fraction": 0.6870588064193726, "alphanum_fraction": 0.7529411911964417, "avg_line_length": 59.57143020629883, "blob_id": "60de94fcc05b3afd658bea9f5f508a44cab5464d", "content_id": "18b22a41e5a040fa7628cdff2701ef6aa42fb429", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 425, "license_type": "no_license", "max_line_length": 220, "num_lines": 7, "path": "/colight_train.sh", "repo_name": "wh-forker/city-brain-challenge", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\ncd starter-kit\npip install mlflow\npip install ray[rllib] -U\npip3 install torch==1.9.0+cu111 torchvision==0.10.0+cu111 torchaudio==0.9.0 -f https://download.pytorch.org/whl/torch_stable.html\npython3 colight_train.py --sim_cfg /starter-kit/cfg/simulator_warm_up.cfg --gym_cfg_dir /starter-kit/agent/colight --algorithm APEX --stop-iters 30000 --foldername train_result --num_workers 16 --thread_num 4 --agents 22\n\n" }, { "alpha_fraction": 0.6413043737411499, "alphanum_fraction": 0.6775362491607666, "avg_line_length": 24.18181800842285, "blob_id": "d844e4423393b7a6bf0c04b279803b7d11398bae", "content_id": "e335977fd1192be08a16c91ef337bee7bd00779f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 276, "license_type": "no_license", "max_line_length": 51, "num_lines": 11, "path": "/train_qmix_round2.sh", "repo_name": "wh-forker/city-brain-challenge", "src_encoding": "UTF-8", "text": "#!/bin/sh\ncd starter-kit\n\npython3 qmix_train.py \\\n --sim_cfg /starter-kit/cfg/simulator_round2.cfg \\\n --roadnet /starter-kit/data/roadnet_round2.txt \\\n --stop-iters 20000 \\\n --foldername train_result_qmix \\\n --num_workers 3 \\\n --thread_num 3 \\\n --gym_cfg_dir agent/qmix" }, { "alpha_fraction": 0.4792167544364929, "alphanum_fraction": 0.49476125836372375, "avg_line_length": 33.24117660522461, "blob_id": "9daef310782fc4d22cd65554480893f7398b26af", "content_id": "127bf3f0021b1834048ebe68b637a3c5bc9747b9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 11644, "license_type": "no_license", "max_line_length": 153, "num_lines": 340, "path": "/qmix_train.py", "repo_name": "wh-forker/city-brain-challenge", "src_encoding": "UTF-8", "text": "import argparse\nimport os\nimport sys\nfrom pathlib import Path\nimport gym\nimport ray\nfrom ray import tune\nfrom ray.tune.integration.mlflow import MLflowLoggerCallback\n\nfrom agent.qmix.CBEngine_qmix import CBEngine_qmix as CBEngine_rllib_class\n\nparser = argparse.ArgumentParser()\n\n\ndef process_roadnet(roadnet_file):\n # intersections[key_id] = {\n # 'have_signal': bool,\n # 'end_roads': list of road_id. Roads that end at this intersection. The order is random.\n # 'start_roads': list of road_id. Roads that start at this intersection. The order is random.\n # 'lanes': list, contains the lane_id in. The order is explained in Docs.\n # }\n # roads[road_id] = {\n # 'start_inter':int. Start intersection_id.\n # 'end_inter':int. End intersection_id.\n # 'length': float. Road length.\n # 'speed_limit': float. Road speed limit.\n # 'num_lanes': int. Number of lanes in this road.\n # 'inverse_road': Road_id of inverse_road.\n # 'lanes': dict. roads[road_id]['lanes'][lane_id] = list of 3 int value. 
Contains the Steerability of lanes.\n # lane_id is road_id*100 + 0/1/2... For example, if road 9 have 3 lanes, then their id are 900, 901, 902\n # }\n # agents[agent_id] = list of length 8. contains the inroad0_id, inroad1_id, inroad2_id,inroad3_id, outroad0_id, outroad1_id, outroad2_id, outroad3_id\n\n intersections = {}\n roads = {}\n agents = {}\n\n agent_num = 0\n road_num = 0\n signal_num = 0\n with open(roadnet_file, 'r') as f:\n lines = f.readlines()\n cnt = 0\n pre_road = 0\n is_obverse = 0\n for line in lines:\n line = line.rstrip('\\n').split(' ')\n if ('' in line):\n line.remove('')\n if (len(line) == 1):\n if (cnt == 0):\n agent_num = int(line[0])\n cnt += 1\n elif (cnt == 1):\n road_num = int(line[0]) * 2\n cnt += 1\n elif (cnt == 2):\n signal_num = int(line[0])\n cnt += 1\n else:\n if (cnt == 1):\n intersections[int(line[2])] = {\n 'have_signal': int(line[3]),\n 'end_roads': [],\n 'start_roads': [],\n 'lanes':[]\n }\n elif (cnt == 2):\n if (len(line) != 8):\n road_id = pre_road[is_obverse]\n roads[road_id]['lanes'] = {}\n for i in range(roads[road_id]['num_lanes']):\n roads[road_id]['lanes'][road_id * 100 + i] = list(map(int, line[i * 3:i * 3 + 3]))\n is_obverse ^= 1\n else:\n roads[int(line[-2])] = {\n 'start_inter': int(line[0]),\n 'end_inter': int(line[1]),\n 'length': float(line[2]),\n 'speed_limit': float(line[3]),\n 'num_lanes': int(line[4]),\n 'inverse_road': int(line[-1])\n }\n roads[int(line[-1])] = {\n 'start_inter': int(line[1]),\n 'end_inter': int(line[0]),\n 'length': float(line[2]),\n 'speed_limit': float(line[3]),\n 'num_lanes': int(line[5]),\n 'inverse_road': int(line[-2])\n }\n intersections[int(line[0])]['end_roads'].append(int(line[-1]))\n intersections[int(line[1])]['end_roads'].append(int(line[-2]))\n intersections[int(line[0])]['start_roads'].append(int(line[-2]))\n intersections[int(line[1])]['start_roads'].append(int(line[-1]))\n pre_road = (int(line[-2]), int(line[-1]))\n else:\n # 4 out-roads\n signal_road_order = list(map(int, line[1:]))\n now_agent = int(line[0])\n in_roads = []\n for road in signal_road_order:\n if (road != -1):\n in_roads.append(roads[road]['inverse_road'])\n else:\n in_roads.append(-1)\n in_roads += signal_road_order\n agents[now_agent] = in_roads\n for agent, agent_roads in agents.items():\n intersections[agent]['lanes'] = []\n for road in agent_roads:\n ## here we treat road -1 have 3 lanes\n if (road == -1):\n for i in range(3):\n intersections[agent]['lanes'].append(-1)\n else:\n for lane in roads[road]['lanes'].keys():\n intersections[agent]['lanes'].append(lane)\n\n return intersections, roads, agents\n\nif __name__ == \"__main__\":\n # some argument\n parser.add_argument(\n \"--num_workers\",\n type=int,\n default=30,\n help=\"rllib num workers\"\n )\n parser.add_argument(\n \"--multiflow\",\n '-m',\n action=\"store_true\",\n default = False,\n help=\"use multiple flow file in training\"\n )\n parser.add_argument(\n \"--stop-iters\",\n type=int,\n default=10,\n help=\"Number of iterations to train.\")\n parser.add_argument(\n \"--algorithm\",\n type=str,\n default=\"QMIX\",\n help=\"algorithm for rllib\"\n )\n parser.add_argument(\n \"--sim_cfg\",\n type=str,\n default=\"/starter-kit/cfg/simulator_round3_flow0.cfg\",\n help = \"simulator file for CBEngine\"\n )\n parser.add_argument(\n \"--metric_period\",\n type=int,\n default=3600,\n help = \"simulator file for CBEngine\"\n )\n parser.add_argument(\n \"--thread_num\",\n type=int,\n default=8,\n help = \"thread num for CBEngine\"\n )\n parser.add_argument(\n 
\"--gym_cfg_dir\",\n type = str,\n default=\"agent\",\n help = \"gym_cfg (observation, reward) for CBEngine\"\n )\n parser.add_argument(\n \"--checkpoint_freq\",\n type = int,\n default = 5,\n help = \"frequency of saving checkpoint\"\n )\n\n parser.add_argument(\n \"--foldername\",\n type = str,\n default = 'train_result',\n help = 'The result of the training will be saved in ./model/$algorithm/$foldername/. Foldername can\\'t have any space'\n )\n\n parser.add_argument(\n \"--roadnet\",\n type=str,\n default='/starter-kit/data/roadnet_warm_up.txt',\n help='Specify the roadnet file path'\n )\n\n # find the submission path to import gym_cfg\n args = parser.parse_args()\n for dirpath, dirnames, file_names in os.walk(args.gym_cfg_dir):\n for file_name in [f for f in file_names if f.endswith(\".py\")]:\n if file_name == \"gym_cfg.py\":\n cfg_path = dirpath\n sys.path.append(str(cfg_path))\n import gym_cfg as gym_cfg_submission\n gym_cfg_instance = gym_cfg_submission.gym_cfg()\n gym_dict = gym_cfg_instance.cfg\n simulator_cfg_files=[]\n\n # if set '--multiflow', then the CBEngine will utilize flows in 'simulator_cfg_files'\n if(args.multiflow):\n simulator_cfg_files = [\n '/starter-kit/cfg/simulator_round3_flow0.cfg'\n ]\n else:\n simulator_cfg_files = [args.sim_cfg]\n print('The cfg files of this training ',format(simulator_cfg_files))\n class MultiFlowCBEngine(CBEngine_rllib_class):\n def __init__(self, env_config):\n env_config[\"simulator_cfg_file\"] = simulator_cfg_files[0]\n super(MultiFlowCBEngine, self).__init__(config=env_config)\n\n\n # some configuration\n env_config = {\n \"simulator_cfg_file\": args.sim_cfg,\n \"thread_num\": args.thread_num,\n \"gym_dict\": gym_dict,\n \"metric_period\": args.metric_period,\n \"vehicle_info_path\": \"/starter-kit/log/\"\n }\n\n obs_size = gym_dict['observation_dimension']\n\n stop = {\n \"training_iteration\": args.stop_iters\n }\n\n roadnet_path = Path(args.roadnet)\n intersections, roads, agents = process_roadnet(roadnet_path)\n agent_group = {\n \"group1\": [str(agent_id) for agent_id, obsv in agents.items()]\n }\n\n OBSERVATION_SPACE = gym.spaces.Tuple(\n [\n gym.spaces.Dict({\n \"obs\": gym.spaces.Box(low=-1e10, high=1e10, shape=(obs_size,))\n }) for i in range(len(agents))\n ]\n )\n\n ACTION_SPACE = gym.spaces.Tuple(\n [gym.spaces.Discrete(9) for i in range(len(agents))]\n )\n\n ################################\n # modify this\n tune_config = {\n # env config\n \"env_config\": env_config,\n \"num_cpus_per_worker\": args.thread_num,\n \"num_workers\": args.num_workers,\n # \"num_gpus\": 1,\n\n # === QMix ===\n # Mixing network\n \"mixer\": \"qmix\",\n # Size of the mixing network embedding\n \"mixing_embed_dim\": 32,\n # Optimize over complete episodes by default\n \"batch_mode\": \"complete_episodes\",\n\n # === Exploration Settings ===\n \"exploration_config\": {\n # The Exploration class to use.\n \"type\": \"EpsilonGreedy\",\n # Config for the Exploration class' constructor:\n \"initial_epsilon\": 1.0,\n \"final_epsilon\": 0.02,\n \"epsilon_timesteps\": 10000, # Timesteps over which to anneal epsilon.\n },\n\n # Number of env steps to optimize for before returning\n \"timesteps_per_iteration\": 1000,\n \"target_network_update_freq\": 20,\n\n # === Replay buffer ===\n \"buffer_size\": 2000,\n\n # RMPProp Optimization\n \"lr\": 0.005,\n \"optim_alpha\": 0.99,\n \"optim_eps\": 0.00001,\n \"learning_starts\": 2000,\n \"train_batch_size\": 32,\n\n # === Model ===\n # Presslight as agent DQN\n # \"model\": {\n # \"fcnet_hiddens\": [20],\n # 
\"fcnet_activation\": \"relu\",\n # },\n # RNN as agent network\n \"model\": {\n \"lstm_cell_size\": 64,\n \"max_seq_len\": 999999,\n },\n\n # Only torch supported so far.\n \"framework\": \"torch\",\n }\n tune.register_env(\n \"grouped_multiagent\",\n lambda config: MultiFlowCBEngine(config).with_agent_groups(\n agent_group, obs_space=OBSERVATION_SPACE, act_space=ACTION_SPACE))\n tune_config = dict(tune_config, **{\n \"env\": \"grouped_multiagent\",\n })\n\n ########################################\n # ray.init(address=\"auto\") # Use for challenge submission\n # ray.init(local_mode=True) # Use for local debugging\n local_path = './model'\n\n\n def name_creator(self=None):\n return args.foldername\n\n\n # train model\n ray.tune.run(args.algorithm,\n config=tune_config,\n local_dir=local_path,\n stop=stop,\n checkpoint_freq=args.checkpoint_freq,\n trial_dirname_creator=name_creator,\n callbacks=[\n MLflowLoggerCallback(\n tracking_uri=\"http://10.195.1.7:5000\",\n experiment_name=\"qmix-rllib-lstm-cc-warm_up-20000-iters\",\n save_artifact=True\n )\n ],\n )\n\n\n" }, { "alpha_fraction": 0.42524683475494385, "alphanum_fraction": 0.531734824180603, "avg_line_length": 36.342105865478516, "blob_id": "250156b4ff9a2ff8671a5c66454153da2264006c", "content_id": "0be94e4944805b75899030c2f9260480fc051ae7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1418, "license_type": "no_license", "max_line_length": 88, "num_lines": 38, "path": "/oldmain/baselines/presslight/hp_config.py", "repo_name": "wh-forker/city-brain-challenge", "src_encoding": "UTF-8", "text": "from ray import tune\nfrom collections import deque\n\nrt_hp_config = {\n 'learning_rate': tune.grid_search([0.005, 0.01, 0.015, 0.001, 0.0001, 0.00001]),\n 'epsilon': tune.grid_search([0.005, 0.01, 0.015, 0.1, 0.2]),\n 'epsilon_decay': tune.grid_search([0.85, 0.95, 0.99, 0.995]),\n 'epsilon_min': tune.grid_search([0.1]),\n 'gamma': tune.grid_search([0.95, 0.99, 0.9999]),\n 'num_hidden_nodes': tune.grid_search([20, 128, 1024, 8192]),\n 'num_hidden_layers': tune.grid_search([2, 3, 4, 5, 6, 7, 8]),\n 'batch_size': tune.grid_search([32, 128, 1024]),\n 'memory': tune.grid_search([2000]),\n #'learning_start': tune.grid_search([deque(maxlen=2000)]),\n 'update_model_freq': tune.grid_search([1]),\n 'update_target_model_freq': tune.grid_search([20]),\n # MLFlow config has to be passed\n \"mlflow\": {\n \"experiment_name\": \"presslight_3\",\n \"tracking_uri\": \"http://10.195.1.7:5000\"\n }\n }\n\nhp_config = {\n 'learning_rate': 0.015,\n 'epsilon': 0.1,\n 'epsilon_decay': 0.995,\n 'epsilon_min': 0.1,\n 'gamma': 0.95,\n 'num_hidden_nodes': 20,\n 'num_hidden_layers': 2,\n 'batch_size': 32,\n # MLFlow config has to be passed\n \"mlflow\": {\n \"experiment_name\": \"presslight_3\",\n \"tracking_uri\": \"http://10.195.1.7:5000\"\n }\n }" }, { "alpha_fraction": 0.6244897842407227, "alphanum_fraction": 0.6380952596664429, "avg_line_length": 46.165775299072266, "blob_id": "76fe1abc5f94108b67ba87131ad6fdd928829a5c", "content_id": "5877dd70ddc1dcee29c28f8eb369538d367f69a8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8820, "license_type": "no_license", "max_line_length": 149, "num_lines": 187, "path": "/baselines/colight/colight.py", "repo_name": "wh-forker/city-brain-challenge", "src_encoding": "UTF-8", "text": "\nimport numpy as np\nimport ray\nfrom ray import tune\nfrom ray.rllib.models.torch.torch_modelv2 import TorchModelV2\nimport 
torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nclass Colight(TorchModelV2, nn.Module):\n def __init__(self, obs_space, action_space, num_outputs, model_config, name, **customized_model_kwargs):\n TorchModelV2.__init__(self, obs_space, action_space, num_outputs,\n model_config, name)\n nn.Module.__init__(self)\n #super(Colight, self).__init__()\n # neighbor have to be min(num_agents, num_neighbors) if neighbors should be adjusted for test purposes\n self.num_neighbors = model_config['custom_model_config']['num_neighbors']\n self.num_agents = model_config['custom_model_config']['num_agents']\n self.num_lanes = model_config['custom_model_config']['num_lanes']\n self.num_actions = action_space.n - 1\n\n\n\n\n # dimension oriented at official CoLight implementation\n self.dimension = model_config['custom_model_config']['mlp_layer'][-1]\n self.cnn_layer = model_config['custom_model_config']['cnn_layer']\n self.cnn_heads = model_config['custom_model_config']['cnn_heads'] * len(self.cnn_layer)\n self.mlp_layer = model_config['custom_model_config']['mlp_layer']\n\n self.mham_layers = nn.ModuleList()\n # MLP, feature, dimension\n self.mlp = MLP(self.num_lanes + self.num_actions, self.mlp_layer)\n # num of intersections, neighbor representation\n neighbor = torch.Tensor(self.num_agents, self.num_neighbors, self.num_agents)\n\n for CNN_layer_index, CNN_layer_size in enumerate(self.cnn_layer):\n mham = MHAM(self.num_agents, neighbor, self.num_actions, self.cnn_layer, self.num_neighbors, self.num_lanes,\n self.dimension, CNN_layer_size[0], self.cnn_heads[CNN_layer_index],\n CNN_layer_size[1])\n self.mham_layers.append(mham)\n\n # self.mham = MHAM(self.num_agents, neighbor, self.cnn_layer, num_lanes,\n # self.dimension, self.head_dim, self.num_heads, self.output_dim)\n self.out_hidden_layer = nn.Linear(self.cnn_layer[-1][1], self.num_actions)\n\n\n #def forward(self, nei, nei_actions, agent, actions):\n def forward(self, input_dict, state, seq_lens):\n adj = input_dict['obs']['adj']\n agent = input_dict['obs']['observation']\n batch_size = agent.shape[0]\n att_record = []\n #agent = torch.from_numpy(agent).float()\n x = self.mlp(agent)\n att_record_all_layers = []\n for i, mham in enumerate(self.mham_layers):\n x, att_record = mham(x, adj)\n att_record_all_layers.append(att_record)\n if len(self.cnn_layer) > 1:\n att_record_all_layers = torch.cat(att_record_all_layers, dim=1)\n else:\n att_record_all_layers = att_record_all_layers[0]\n att_record = torch.reshape(att_record_all_layers, (batch_size, len(self.cnn_layer), self.num_agents, self.cnn_heads[-1], self.num_neighbors))\n x = self.out_hidden_layer(x)\n x = x[:,0,:]\n return x, [] #att_record\n\n# LambdaLayer for mimic Keras.Lambda layer\nclass LambdaLayer(nn.Module):\n def __init__(self, lambd):\n super(LambdaLayer, self).__init__()\n self.lambd = lambd\n\n def forward(self, x):\n return self.lambd(x)\n\n\n# see CoLight 4.1 (https://dl.acm.org/doi/10.1145/3357384.3357902)\nclass MLP(nn.Module):\n def __init__(self, input_shape, layer):\n super(MLP, self).__init__()\n layers = []\n for layer_index, layer_size in enumerate(layer):\n if layer_index == 0:\n layers.append(nn.Linear(input_shape, layer_size))\n layers.append(nn.ReLU())\n else:\n layers.append(nn.Linear(layer[layer_index - 1], layer_size))\n layers.append(nn.ReLU())\n\n self.seq = nn.Sequential(*layers)\n\n def forward(self, ob):\n x = self.seq(ob)\n return x\n\n\n# see CoLight 4.2 (https://dl.acm.org/doi/10.1145/3357384.3357902)\nclass MHAM(nn.Module):\n\n def 
__init__(self, num_agents, neighbor, action_space, cnn_layer, num_neighbors, input_shape=24, dimension=128, dv=16, nv=8, dout=128):\n super(MHAM, self).__init__()\n self.num_agents = num_agents\n self.num_neighbors = num_neighbors\n self.dimension = dimension\n self.dv = dv\n self.nv = nv\n #self.neighbor = neighbor\n self.feature_length = input_shape\n self.dout = dout\n self.action_space = action_space\n\n # [agent,1,dim]->[agent,1,dv*nv], since representation of specific agent=1\n self.agent_head_hidden_layer = nn.Linear(self.dimension, self.dv*self.nv)\n self.agent_head_lambda_layer = LambdaLayer((lambda x: x.permute(0,1,4,2,3)))\n\n # self.neighbor_repr_3D = RepeatVector3D(num_agents)\n # [agent,neighbor,agent]x[agent,agent,dim]->[agent,neighbor,dim]\n #self.neighbor_repr_lambda_layer = LambdaLayer((lambda x: torch.einsum('ana, aad -> and', x[0], x[1])))\n self.neighbor_repr_lambda_layer = LambdaLayer((lambda x: torch.matmul(x[0], x[1])))\n\n # representation for all neighbors\n self.neighbor_repr_head_hidden_layer = nn.Linear(in_features=self.feature_length + self.action_space, out_features=dv*nv)\n self.neighbor_repr_head_lambda_layer = LambdaLayer((lambda x: x.permute(0,1,4,2,3)))\n\n # [batch,agent,nv,1,dv]x[batch,agent,nv,neighbor,dv]->[batch,agent,nv,1,neighbor]\n self.attention_layer = LambdaLayer((lambda x: F.softmax(torch.einsum('bancd, baned -> bance', x[0], x[1]))))\n\n # self embedding\n self.neighbor_hidden_repr_head_hidden_layer = nn.Linear(self.feature_length + self.action_space, dv*nv)\n self.neighbor_hidden_repr_head_lambda_layer = LambdaLayer((lambda x: x.permute(0,1,4,2,3)))\n # mean values, preserving tensor shape\n self.out_lambda_layer = LambdaLayer((lambda x: torch.mean(torch.matmul(x[0], x[1]), 2)))\n self.out_hidden_layer = nn.Linear(dv, dout)\n\n def forward(self, agent, nei):\n batch_size = agent.size()[0]\n agent_repr = torch.reshape(agent, (batch_size, self.num_agents, 1, self.dimension))\n neighbor_repr = torch.reshape(agent, (batch_size, 1, self.num_agents, self.dimension))\n neighbor_repr = torch.tile(neighbor_repr, (1, self.num_agents,1,1))\n # nei = torch.FloatTensor(nei)\n #neighbor_repr = nei #self.neighbor_repr_lambda_layer([nei, neighbor_repr])\n neighbor_repr = self.neighbor_repr_lambda_layer([nei, neighbor_repr])\n\n agent_repr_head = self.agent_head_hidden_layer(agent_repr)\n agent_repr_head = F.relu(agent_repr_head)\n agent_repr_head = torch.reshape(agent_repr_head, (batch_size, self.num_agents, 1, self.dv, self.nv))\n\n agent_repr_head = self.agent_head_lambda_layer(agent_repr_head)\n neighbor_repr_head = self.neighbor_repr_head_hidden_layer(neighbor_repr)\n neighbor_repr_head = F.relu(neighbor_repr_head)\n # second num_agents could be replaced with num_neighbors if min(num_agents, num_neighbors)\n neighbor_repr_head = torch.reshape(neighbor_repr_head, (batch_size, self.num_agents, self.num_neighbors, self.dv, self.nv))\n neighbor_repr_head = self.neighbor_repr_head_lambda_layer(neighbor_repr_head)\n\n # agent_repr_head = agent_repr_head.reshape(-1, self.nv, 1, self.dv)\n # neighbor_repr_head = neighbor_repr_head.reshape(self.num_agents, self.nv, -1, self.dv)\n\n att = self.attention_layer([agent_repr_head, neighbor_repr_head])\n # second num_agents could be replaced with num_neighbors if min(num_agents, num_neighbors)\n att_record = torch.reshape(att, (batch_size, self.num_agents, self.nv, self.num_neighbors))\n\n neighbor_hidden_repr_head = self.neighbor_hidden_repr_head_hidden_layer(neighbor_repr)\n neighbor_hidden_repr_head = 
F.relu(neighbor_hidden_repr_head)\n neighbor_hidden_repr_head = torch.reshape(neighbor_hidden_repr_head, (batch_size, self.num_agents, self.num_neighbors, self.dv, self.nv))\n neighbor_hidden_repr_head = self.neighbor_hidden_repr_head_lambda_layer(neighbor_hidden_repr_head)\n out = self.out_lambda_layer([att, neighbor_hidden_repr_head])\n out = torch.reshape(out, (batch_size,self.num_agents, self.dv))\n out = self.out_hidden_layer(out)\n out = F.relu(out)\n return out, att_record\n\n\n# Repeat vector x times\nclass RepeatVector3D(nn.Module):\n\n def __init__(self, times):\n super(RepeatVector3D, self).__init__()\n self.times = times\n\n def forward(self, x):\n x = torch.tile(torch.unsqueeze(x, 0), (1, self.times, 1, 1))\n return x\n\n\n\nColight = Colight" } ]
20
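The `process_roadnet` comments in the training script above describe the lane-ID convention (`lane_id = road_id * 100 + lane_index`) only in prose. As a minimal, self-contained illustration of that convention, here is a sketch of the round trip; the helper names and example values are invented for this sketch and are not part of the starter kit.

```python
# Sketch of the lane-ID convention documented in process_roadnet above.
# lane_ids/split_lane_id are hypothetical helpers, not starter-kit code.

def lane_ids(road_id, num_lanes):
    """E.g. road 9 with 3 lanes -> [900, 901, 902]."""
    return [road_id * 100 + i for i in range(num_lanes)]

def split_lane_id(lane_id):
    """Recover (road_id, lane_index) from a lane id."""
    return lane_id // 100, lane_id % 100

assert lane_ids(9, 3) == [900, 901, 902]
assert split_lane_id(901) == (9, 1)
```

This also makes explicit why the script pads missing roads (`road == -1`) with three `-1` lane slots: each agent then contributes a fixed-width lane list to the grouped observation regardless of how many roads actually meet at its intersection.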
jcwayd/chihuoServerEnd
https://github.com/jcwayd/chihuoServerEnd
f98df144c8bf42b7331cba60f5851a449ab6d8c8
97ffa74b255340ad9d27e263f888748a0e87e494
ad519aecf228850d512a8451a5d14495391c4657
refs/heads/master
2018-09-03T18:02:23.029747
2018-06-04T08:03:59
2018-06-04T08:03:59
126,955,113
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6010977625846863, "alphanum_fraction": 0.6058781743049622, "avg_line_length": 31.45977020263672, "blob_id": "07585af30d47f94377c80b12bbd2dbebdff9ad7f", "content_id": "ee1fbbe48288d86f007bf534b1f1f463a73eaebf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5794, "license_type": "no_license", "max_line_length": 96, "num_lines": 174, "path": "/chihuo/views/aboutUser.py", "repo_name": "jcwayd/chihuoServerEnd", "src_encoding": "UTF-8", "text": "# coding=utf-8\nimport os, re\n\nimport datetime\nfrom chihuo.views import SERVER_IP\nfrom flask import Blueprint, request, jsonify, render_template, json, Response\nfrom aboutCookbook import getRandFilename\nfrom ..dbConnect import db_session\nfrom ..dbModels import share, user, action\nfrom sqlalchemy import func\n\ndatetimeRegx = '%Y-%m-%d %H:%M:%S'\n\naboutUser = Blueprint('aboutUser', __name__)\n\n\n@aboutUser.route(\"/uploadVideo\", methods=['POST'])\ndef uploadVideo():\n try:\n file = request.files['video']\n fn = getRandFilename()\n tn = os.path.splitext(file.filename)[1]\n print tn\n fullFileName = fn + tn\n if not os.path.isdir('chihuo/static/videosUpload/'):\n os.mkdir('chihuo/static/videosUpload')\n if (tn == \".mp4\") or (tn == \".mpeg\") or (tn == \".ogg\"):\n file.save(os.path.join('chihuo/static/videosUpload/', fullFileName))\n msg = SERVER_IP + \"static/videosUpload/\" + fullFileName\n else:\n msg = \"err\"\n except Exception, e:\n print e\n msg = \"err\"\n res = Response(msg)\n res.headers[\"Content-Type\"] = \"text/plain\"\n res.headers[\"Charset\"] = \"utf-8\"\n res.headers[\"Access-Control-Allow-Origin\"] = \"*\"\n return res\n\n\n@aboutUser.route(\"/uploadImage\", methods=['POST'])\ndef uploadImg():\n file = request.files['image']\n print file.filename\n print os.path.splitext(file.filename)[1]\n if file is None or \\\n (os.path.splitext(file.filename)[1] != \".jpg\"\n and os.path.splitext(file.filename)[1] != \".gif\"\n and os.path.splitext(file.filename)[1] != \".png\"):\n jsonInfo = {\n \"status\": 0,\n \"msg\": \"ไธŠไผ ๅคฑ่ดฅ,่ฏท็กฎไฟไฝ ไธŠไผ ็š„ๆ˜ฏๅ›พ็‰‡ๆ–‡ไปถjpg/gif\"\n }\n return jsonify(jsonInfo)\n else:\n try:\n filename = getRandFilename() + '.jpg'\n if not os.path.isdir('chihuo/static/imgsUpload/'):\n os.mkdir('chihuo/static/imgsUpload')\n file.save(os.path.join('chihuo/static/imgsUpload/', filename))\n jsonInfo = {\n \"status\": 1,\n \"url\": SERVER_IP + \"static/imgsUpload/\" + filename + \"\"\n }\n return jsonify(jsonInfo)\n except Exception, e:\n jsonInfo = {\n \"status\": 0,\n \"msg\": \"error|\" + str(e)\n }\n return jsonify(jsonInfo)\n\n\n@aboutUser.route(\"/shareEdit\", methods=['GET'])\ndef shareEdit():\n return render_template('shareEdit.html', serverip=SERVER_IP)\n\n\n@aboutUser.route(\"/pubShare\", methods=['POST'])\ndef pubShare():\n try:\n shareData = json.loads(request.form.get('data'))\n shareTitle = shareData['shareTitle']\n shareAuthorId = shareData['shareAuthorId']\n shareDetail = shareData['shareDetail']\n print shareTitle\n print shareAuthorId\n print shareDetail\n pubtime = datetime.datetime.now().strftime(datetimeRegx)\n newShare = share(int(shareAuthorId), shareDetail, pubtime, shareTitle)\n db_session.add(newShare)\n db_session.commit()\n try:\n newid = db_session.query(func.max(share.shareId)).first()[0]\n newaction = action(2, shareAuthorId, newid, pubtime)\n db_session.add(newaction)\n db_session.commit()\n except Exception, e1:\n print e1\n db_session.close()\n msg = \"0\"\n res = Response(msg)\n 
res.headers[\"Content-Type\"] = \"text/plain\"\n res.headers[\"Charset\"] = \"utf-8\"\n return res\n except Exception, e:\n print e\n msg = \"-1\"\n res = Response(msg)\n res.headers[\"Content-Type\"] = \"text/plain\"\n res.headers[\"Charset\"] = \"utf-8\"\n return res\n\n\ndef share2json(s):\n shareAuthor = db_session.query(user).filter(user.userId == s.shareAuthorId).first().nickName\n print s.shareDetail\n # ไฝฟ็”จๆญฃๅˆ™่กจ่พพๅผ่Žทๅ–ๅ†…ๅฎนไธญ็ฌฌไธ€ไธชๅ›พ็‰‡็š„ๅœฐๅ€.ๅฆ‚ๆžœๆฒกๆœ‰ๅ›พ็‰‡,้ป˜่ฎคไฝฟ็”จ็จ‹ๅบๅ›พๆ ‡ไฝœไธบๅˆ†ไบซ็š„็ผฉ็•ฅๅ›พ\n imgSrcList = re.findall(r\"<img src=\\\"(.*?)\\.jpg\", s.shareDetail)\n if imgSrcList == []:\n imgsrc = SERVER_IP + \"static/imgsUpload/chihuo.png\"\n else:\n imgsrc = imgSrcList[0] + \".jpg\"\n return {\n \"shareId\": s.shareId,\n \"shareTitle\": s.shareTitle,\n \"shareAuthor\": shareAuthor,\n \"pubTimeStr\": str(s.pubTime),\n \"shareTitleImg\": imgsrc\n }\n\n\n# ็Œœไฝ ๅ–œๆฌข้ƒจๅˆ†็š„็ฝ‘็ปœ่ฏทๆฑ‚่ฟ”ๅ›ž็ป“ๆžœ\n@aboutUser.route('/getGuessList')\ndef getGuessList():\n try:\n shareInfoList = db_session.query(share).order_by(func.rand()).limit(10).all()\n except Exception, e:\n print e\n shareInfoList = []\n shareJsonList = []\n for s in shareInfoList:\n shareJsonList.append(share2json(s))\n print json.dumps(shareJsonList)\n return json.dumps(shareJsonList)\n\n\n@aboutUser.route('/getShareInfoList<authorId>')\ndef getShareInfoList(authorId):\n shareInfoList = []\n try:\n shareInfoList = db_session.query(share).filter(share.shareAuthorId == authorId).all()\n except Exception, e:\n print e\n shareInfoList = []\n shareJsonList = []\n for s in shareInfoList:\n shareJsonList.append(share2json(s))\n print json.dumps(shareJsonList)\n return json.dumps(shareJsonList)\n\n\n@aboutUser.route(\"/getShareInfo<shareId>\")\ndef getShareInfo(shareId):\n shareInfo = db_session.query(share).filter(share.shareId == shareId).first()\n if shareInfo is None:\n pass\n else:\n authorId = shareInfo.shareAuthorId\n authorName = db_session.query(user).filter(user.userId == authorId).first().nickName\n shareInfo.hotIndex += 10\n db_session.commit()\n return render_template('shareInfoShow.html', shareInfo=shareInfo, authorName=authorName)\n" }, { "alpha_fraction": 0.7600554823875427, "alphanum_fraction": 0.7656033039093018, "avg_line_length": 39.05555725097656, "blob_id": "197def94dfdf311ac8bb144a6df9c231c443919b", "content_id": "7e1ef6f33dcbda9eb7329b14c9c1c00a80119c65", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1536, "license_type": "no_license", "max_line_length": 110, "num_lines": 36, "path": "/chihuo/__init__.py", "repo_name": "jcwayd/chihuoServerEnd", "src_encoding": "UTF-8", "text": "# coding: utf-8\nfrom flask import Flask, Config\n\nfrom chihuo.dbViews import userView, foodView, foodTypeView, foodStarView, shareView, actionView, watchView, \\\n shareTypeView\nfrom views.aboutHome import aboutHome\nfrom views.aboutCookbook import aboutCookbook\nfrom views.aboutFriends import aboutFriends\nfrom views.aboutMe import aboutMe\nfrom views.aboutUser import aboutUser\nfrom flask_admin import Admin\nfrom dbConnect import db_session\nfrom dbModels import *\n\napp = Flask(__name__)\napp.config['SECRET_KEY'] = '123456'\napp.config['UPLOAD_FOLDER'] = '/static/imgsUpload'\n# app.config.from_object('chihuo.config')\n# 
app.config.from_object(Config())\napp.register_blueprint(aboutHome)\napp.register_blueprint(aboutCookbook)\napp.register_blueprint(aboutFriends)\napp.register_blueprint(aboutMe)\napp.register_blueprint(aboutUser)\nadmin = Admin(app, name='ๅƒ่ดงAPPๅŽๅฐๆ•ฐๆฎๅบ“็ฎก็†็ณป็ปŸ', template_mode='bootstrap3')\n\n\ndef models_import_admin():\n admin.add_view(userView(user, db_session, name='็”จๆˆท็ฎก็†'))\n admin.add_view(foodView(food, db_session, name='่œๅ“็ฎก็†'))\n admin.add_view(foodTypeView(foodType, db_session, name='่œ็ณป็ฎก็†'))\n admin.add_view(foodStarView(foodStar, db_session, name='่œ็ณป็‚น่ตž็ฎก็†'))\n admin.add_view(shareView(share, db_session, name='ๅˆ†ไบซ็ฎก็†'))\n admin.add_view(actionView(action, db_session, name='ๅŠจๆ€็ฎก็†'))\n admin.add_view(watchView(watch, db_session, name='ๅ…ณๆณจ็ฎก็†'))\n admin.add_view(shareTypeView(shareType, db_session,name='ๅˆ†ไบซ็ฑปๅž‹็ฎก็†'))\n" }, { "alpha_fraction": 0.6620111465454102, "alphanum_fraction": 0.6815642714500427, "avg_line_length": 34.849998474121094, "blob_id": "42ec332b68c0cbb9bb6db08df7ed2e7444fc5e46", "content_id": "241a2e9b1c9c511d5ae4d800e7476e4b1bf4ae2d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 816, "license_type": "no_license", "max_line_length": 71, "num_lines": 20, "path": "/chihuo/dbConnect.py", "repo_name": "jcwayd/chihuoServerEnd", "src_encoding": "UTF-8", "text": "#coding: utf-8\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.orm import sessionmaker,scoped_session\nfrom sqlalchemy.ext.declarative import declarative_base\n\nBase = declarative_base()\ndbEngine = create_engine(\n 'mysql://root:123456@localhost:3306/chihuo?charset=utf8',\n pool_size=20\n )\ndbSession = sessionmaker(bind = dbEngine)\ndb_session = scoped_session(sessionmaker(autocommit=False,\n autoflush=False,\n bind=dbEngine))\nBase.query = db_session.query_property()\n\ndef init_db():\n # ๅœจ่ฟ™้‡Œๅฏผๅ…ฅๅฎšไน‰ๆจกๅž‹ๆ‰€้œ€่ฆ็š„ๆ‰€ๆœ‰ๆจกๅ—๏ผŒ่ฟ™ๆ ทๅฎƒไปฌๅฐฑไผšๆญฃ็กฎ็š„ๆณจๅ†Œๅœจๅ…ƒๆ•ฐๆฎไธŠใ€‚ๅฆๅˆ™ไฝ ๅฐฑๅฟ…้กปๅœจ่ฐƒ็”จ init_db() ไน‹ๅ‰ๅฏผๅ…ฅๅฎƒไปฌ????\n import chihuo.dbModels\n Base.metadata.create_all(bind=dbEngine)" }, { "alpha_fraction": 0.5904958844184875, "alphanum_fraction": 0.5952479243278503, "avg_line_length": 31.9251708984375, "blob_id": "a2f01fb09e05032c8eca249c8d71bc710ad54094", "content_id": "a4b8d66c41cd77316abcba8b44700d79b645357e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4852, "license_type": "no_license", "max_line_length": 120, "num_lines": 147, "path": "/chihuo/views/aboutFriends.py", "repo_name": "jcwayd/chihuoServerEnd", "src_encoding": "UTF-8", "text": "# coding=utf-8\nimport re\nfrom operator import or_, and_\n\nfrom flask import Blueprint, json\nfrom chihuo.views import SERVER_IP\nfrom ..dbModels import action, user, food, share, watch\nfrom ..dbConnect import db_session\n\n\naboutFriends = Blueprint('aboutFriends', __name__)\n\n\ndef getShrImgStr(shrId):\n try:\n shr = db_session.query(share).filter(share.shareId == shrId).first()\n imgList = re.findall(r\"<img src=\\\"(.*?)\\.jpg\", shr.shareDetail)\n if imgList == []:\n return str(SERVER_IP + \"static/imgsUpload/chihuo.png\")\n else:\n return str(imgList[0] + \".jpg\")\n except Exception, e:\n return str(SERVER_IP + \"static/imgsUpload/chihuo.png\")\n\n\ndef action2json(a):\n subject = db_session.query(user).filter(user.userId == a.subjectId).first()\n subjectName = subject.nickName\n subjectId = subject.userId\n if a.actionType 
== 1 or a.actionType == 3:\n actionob = db_session.query(food).filter(food.foodId == a.objectId).first()\n objectName = actionob.foodName\n objectId = actionob.foodId\n imgList = re.findall(r\"/imgsUpload/(.*?)\\.jpg\", actionob.foodDetail)\n if imgList == []:\n titleImg = SERVER_IP + \"static/imgsUpload/chihuo.png\"\n else:\n titleImg = SERVER_IP + \"static/imgsUpload/\" + imgList[0] + \".jpg\"\n elif a.actionType == 2:\n actionob = db_session.query(share).filter(share.shareId == a.objectId).first()\n objectName = actionob.shareTitle\n objectId = actionob.shareId\n imgList = re.findall(r\"<img src=\\\"(.*?)\\.jpg\", actionob.shareDetail)\n if imgList == []:\n titleImg = SERVER_IP + \"static/imgsUpload/chihuo.png\"\n else:\n titleImg = imgList[0] + \".jpg\"\n return {\n \"subjectName\": subjectName,\n \"objectName\": objectName,\n \"titleImg\": titleImg,\n \"actionType\": a.actionType,\n \"actionTime\": str(a.actionTime),\n \"subjectId\": subjectId,\n \"objectId\": objectId\n }\n\n\n@aboutFriends.route(\"/getHomeActionList\")\ndef getHomeActionList():\n aJsonList = []\n try:\n actionList = db_session.query(action).filter(or_(action.actionType == 1, action.actionType == 2)).limit(5).all()\n actionList.sort(key=lambda x: x.actionTime, reverse=True)\n for a in actionList:\n aJsonList.append(action2json(a))\n except Exception, e:\n print e\n print json.dumps(aJsonList)\n return json.dumps(aJsonList)\n\n\n@aboutFriends.route(\"/getActionList<usrid>\")\ndef getActionList(usrid):\n aJsonList = []\n try:\n actionList = []\n watchs = db_session.query(watch).filter(watch.userId == usrid).all()\n userIds = []\n for w in watchs:\n userIds.append(w.watchedId)\n for uid in userIds:\n actionList.extend(db_session.query(action).filter(action.subjectId == uid).all())\n actionList.extend(db_session.query(action).filter(action.subjectId == usrid))\n actionList.sort(key=lambda x: x.actionTime, reverse=True)\n for a in actionList:\n aJsonList.append(action2json(a))\n except Exception, e:\n aJsonList = []\n print e\n print \"ๅŠจๆ€ๅˆ—่กจไธบ๏ผš\" + json.dumps(aJsonList)\n return json.dumps(aJsonList)\n\n\n@aboutFriends.route(\"/watchStatus<watchedId>/<userId>\")\ndef watchStatus(watchedId, userId):\n try:\n status = db_session.query(watch).filter(watch.watchedId == watchedId, watch.userId == userId).first()\n if status == None:\n return \"0\"\n else:\n return \"1\"\n except Exception, e:\n print e\n return \"0\"\n\n\n@aboutFriends.route(\"/watchOrCancel<watchedId>/<userId>\")\ndef watchOrCancel(watchedId, userId):\n try:\n status = db_session.query(watch).filter(watch.watchedId == watchedId, watch.userId == userId).first()\n if status == None:\n newwatch = watch(watchedId, userId)\n db_session.add(newwatch)\n db_session.commit()\n db_session.close()\n print \"watch\"\n else:\n try:\n db_session.delete(status)\n db_session.commit()\n db_session.close()\n print \"cancel\"\n except Exception, e1:\n print e1\n except Exception, e2:\n print e2\n return \"\"\n\n\n@aboutFriends.route(\"/getimgs/<userId>\")\ndef getimgs(userId):\n imglist = []\n try:\n shrlist = db_session.query(share).filter(share.shareAuthorId == userId).all()\n if len(shrlist) > 3:\n shrlist = shrlist[1: 3]\n for s in shrlist:\n imglist.append(\n {\n \"imgid\": s.shareId,\n \"imgstr\": getShrImgStr(s.shareId)\n })\n except Exception, e:\n print e\n print json.dumps(imglist)\n return json.dumps(imglist)\n" }, { "alpha_fraction": 0.5453006029129028, "alphanum_fraction": 0.562235414981842, "avg_line_length": 27.829267501831055, "blob_id": 
"d17b1141b42838d2dd5f5d65ccad22b302063c31", "content_id": "291cef4b8c60316f70dd0d23c9cfddb9349dd68f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "HTML", "length_bytes": 1217, "license_type": "no_license", "max_line_length": 111, "num_lines": 41, "path": "/chihuo/templates/shareInfoShow.html", "repo_name": "jcwayd/chihuoServerEnd", "src_encoding": "UTF-8", "text": "<!DOCTYPE html>\n<html lang=\"en\">\n<head>\n <meta charset=\"UTF-8\">\n <link href=\"https://maxcdn.bootstrapcdn.com/bootstrap/3.3.7/css/bootstrap.min.css\" rel=\"stylesheet\">\n <meta name=\"viewport\" content=\"width=device-width, initial-scale=1.0, maximum-scale=1.0, user-scalable=no\"/>\n <title>ๆŸฅ็œ‹ๅˆ†ไบซ๏ผš{{ shareInfo.shareTitle }}</title>\n</head>\n <style >\n #contentshow img,video{\n\t\t\twidth: 100%;\n\t\t\theight: auto;\n\t\t\tbox-sizing: border-box;\n\t\t}\n </style>\n<body>\n\n <div class=\"page-header\" >\n <h1>\n &nbsp;{{ shareInfo.shareTitle }}\n </h1>\n <h6 style=\"color: steelblue;\" onclick=\"callJs2And({{ shareInfo.shareAuthorId }})\">\n &nbsp;&nbsp;&nbsp;&nbsp;{{ authorName }}\n <script >\n function callJs2And(authorId){\n document.location = \"js://webview?userId=\"+authorId+\"&nickName={{ authorName }}\";\n }\n </script>\n </h6>\n <h6>\n &nbsp;&nbsp;&nbsp;&nbsp;{{ shareInfo.pubTime}}\n </h6>\n </div>\n <div class=\"panel-body\" id=\"contentshow\">\n {#ไธ‹้ข่ฟ™ไธชsafeๆ˜ฏๆ”พ็ฝฎflask jinja2่‡ชๅŠจ่ฝฌไน‰ๅญ—็ฌฆ#}\n {{ shareInfo.shareDetail | safe }}\n </div>\n\n\n</body>\n</html>" }, { "alpha_fraction": 0.552830159664154, "alphanum_fraction": 0.5580188632011414, "avg_line_length": 31.121212005615234, "blob_id": "f91a4629a9b6692a6cc5579a1906ac4de9f497af", "content_id": "758041aa62e539e6b83ca46081e0ccf550b54af1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2120, "license_type": "no_license", "max_line_length": 108, "num_lines": 66, "path": "/chihuo/views/aboutHome.py", "repo_name": "jcwayd/chihuoServerEnd", "src_encoding": "UTF-8", "text": "from operator import or_\n\nimport re\nfrom flask import Blueprint, json,render_template\n\nfrom chihuo.views import SERVER_IP\nfrom ..dbModels import user, action, food, share\nfrom ..dbConnect import db_session\n\naboutHome = Blueprint('aboutHome', __name__)\n\n\n@aboutHome.route(\"/\")\ndef index():\n return render_template('index.html')\n\n@aboutHome.route(\"/getHotUserList\", methods=['GET'])\ndef getHotUserList():\n uJsonList = []\n try:\n userList = db_session.query(user).all()\n for u in userList:\n uJsonList.append(hotUser2Json(u))\n except Exception, e:\n print e\n uJsonList = []\n print json.dumps(uJsonList)\n return json.dumps(uJsonList)\n\n\ndef hotUser2Json(u):\n return {\n \"userid\": u.userId,\n \"nickname\": u.nickName,\n \"imgList\": getUserImgs(u.userId)\n }\n\n\ndef getUserImgs(id):\n imgList = []\n try:\n actions = db_session.query(action).filter(action.subjectId == id,\n or_(action.actionType == 1, action.actionType == 2)).all()\n print len(actions)\n if len(actions) > 3:\n print \"???\"\n actions = actions[1:3]\n for a in actions:\n if a.actionType == 1:\n actionob = db_session.query(food).filter(food.foodId == a.objectId).first()\n jpglist = re.findall(r\"/imgsUpload/(.*?)\\.jpg\", actionob.foodDetail)\n if jpglist == []:\n imgList.append(SERVER_IP + \"static/imgsUpload/chihuo.png\")\n else:\n imgList.append(SERVER_IP + \"static/imgsUpload/\" + jpglist[0] + \".jpg\")\n elif a.actionType == 2:\n actionob = db_session.query(share).filter(share.shareId == 
a.objectId).first()\n jpglist = re.findall(r\"src=\\\"(.*?)\\.jpg\", actionob.shareDetail)\n if jpglist == []:\n imgList.append(SERVER_IP + \"static/imgsUpload/chihuo.png\")\n else:\n imgList.append(jpglist[0] + \".jpg\")\n except Exception, e:\n print e\n imgList = []\n return imgList\n" }, { "alpha_fraction": 0.6705882549285889, "alphanum_fraction": 0.699999988079071, "avg_line_length": 21.600000381469727, "blob_id": "bcb13dac52457e653dbc6dac66af07de061867bc", "content_id": "16839c4a331adb3e4b0540157ff4328354b4cae4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 414, "license_type": "no_license", "max_line_length": 55, "num_lines": 15, "path": "/runServer.py", "repo_name": "jcwayd/chihuoServerEnd", "src_encoding": "UTF-8", "text": "# coding:utf-8\nfrom chihuo import app\nfrom chihuo.dbConnect import init_db\nfrom chihuo import models_import_admin\n\n\"\"\"่ฝฌๆขๅญ—็ฌฆ๏ผŒ็กฎไฟไธญๆ–‡ๅ‚ๆ•ฐๅฏไปฅไผ ๅˆฐๅ‰็ซฏ้กต้ข,ๅญ—็ฌฆ็ผ–็ ็›ธๅ…ณ๏ผŒๅฆๅˆ™ไผšๆœ‰ไธญๆ–‡็ผ–็ ้”™่ฏฏใ€‚\"\"\"\nimport sys\n\nreload(sys)\nsys.setdefaultencoding('utf8')\n\nif __name__ == '__main__':\n init_db()\n models_import_admin()\n app.run('0.0.0.0', 5000, debug=True, threaded=True)\n\n" }, { "alpha_fraction": 0.6333262324333191, "alphanum_fraction": 0.6388770341873169, "avg_line_length": 31.641115188598633, "blob_id": "e6fe2712f19f9b2dbcc104e534f3710b025dde08", "content_id": "5e1a340f4cb5ef546827e3aa7cc21c5ebd8ad3e0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9440, "license_type": "no_license", "max_line_length": 113, "num_lines": 287, "path": "/chihuo/views/aboutCookbook.py", "repo_name": "jcwayd/chihuoServerEnd", "src_encoding": "UTF-8", "text": "# coding=utf-8\nimport os\n\nimport datetime\nimport random\n\nimport re\nfrom flask import Blueprint, render_template, request, Response, json, jsonify\n\nfrom chihuo.views import SERVER_IP\nfrom ..dbModels import food, user, foodType, foodStar, action\nfrom ..dbConnect import db_session\nfrom sqlalchemy import func\n\ndatetimeRegx = '%Y-%m-%d %H:%M:%S'\naboutCookbook = Blueprint('aboutCookbook', __name__)\n\n\n@aboutCookbook.route('/editCookbook', methods=['GET'])\ndef editCookbook():\n foodTypeList = getFoodTypeLIst()\n return render_template('cookbookEdit.html', title='็ผ–่พ‘่œ่ฐฑ', foodTypeList=foodTypeList, serverip=SERVER_IP)\n\n\ndef getRandFilename():\n today = datetime.date.today()\n randomstr = ''.join(random.sample('zyxwvutsrqponmlkjihgfedcba987654321', 10))\n return str(today.year) + str(today.month) + str(today.day) + randomstr\n\n\n@aboutCookbook.route(\"/upload\", methods=[\"POST\"])\ndef GetImage():\n file = request.files['wangEditorMobileFile']\n if file == None:\n return \"error|file is not exist\"\n else:\n try:\n filename = getRandFilename() + '.jpg'\n print os.path.isdir('chihuo/static/imgsUpload/')\n if not os.path.isdir('chihuo/static/imgsUpload/'):\n os.mkdir('chihuo/static/imgsUpload')\n file.save(os.path.join('chihuo/static/imgsUpload/', filename))\n result = os.path.join('/static', 'imgsUpload/', filename)\n res = Response(result)\n res.headers[\"ContentType\"] = \"text/html\"\n res.headers[\"Charset\"] = \"utf-8\"\n return res\n except Exception, e:\n print(e)\n return \"error|\" + str(e)\n\n\ndef getFoodTypeLIst():\n foodTypeList = db_session.query(foodType).all()\n return foodTypeList\n\n\n@aboutCookbook.route('/createNewFood', methods=['POST'])\ndef createNewFood():\n foodName = request.form['foodNameInput']\n foodDetail = 
request.form['foodDetailInput']\n foodAuthorId = int(request.form['foodAuthorIdInput'])\n foodTypeId = request.form['foodTypeIdInput']\n print foodName\n print foodDetail\n print type(foodAuthorId)\n print foodTypeId\n newfood = food(foodName, foodAuthorId, foodTypeId, foodDetail)\n actiontime = datetime.datetime.now().strftime(datetimeRegx)\n try:\n db_session.add(newfood)\n db_session.commit()\n try:\n newid = db_session.query(func.max(food.foodId)).first()[0]\n print newid\n newAction = action(1, foodAuthorId, newid, actiontime)\n db_session.add(newAction)\n db_session.commit()\n except Exception, e1:\n print e1\n db_session.close()\n # foodTypeList = getFoodTypeLIst()\n # return render_template('cookbookEdit.html', title='็ผ–่พ‘่œ่ฐฑ',foodTypeList = foodTypeList)\n return \"<script>alert('ๅˆ›ๅปบๆˆๅŠŸ๏ผŒ่ฏท่ฟ”ๅ›žไธŠไธ€ๅฑ‚');</script>\"\n except Exception, e:\n print e\n return \"<script>alert(\" + e + \");</script>\"\n\n\n@aboutCookbook.route(\"/getFoodInfo<foodId>\")\ndef getfoodInfo(foodId):\n foodInfo = db_session.query(food).filter(food.foodId == foodId).first()\n if foodInfo == None:\n return \"<script>alert('่ฏทๆฑ‚ๅคฑ่ดฅ,่ฏท้‡่ฏ•');</script>\"\n else:\n authorId = foodInfo.foodAuthorId\n authorName = db_session.query(user).filter(user.userId == authorId).first().nickName\n foodInfo.hotIndex += 10\n db_session.commit()\n return render_template('foodInfoShow.html', foodInfo=foodInfo, authorName=authorName)\n\n\n@aboutCookbook.route(\"/addNewFoodType\", methods=['POST'])\ndef addNewFoodType():\n foodTypeInput = request.form['foodTypeInput']\n foodDescInput = request.form['foodDescInput']\n foodCoverPath = request.form['foodCoverPath']\n print foodTypeInput\n print foodDescInput\n newFoodType = foodType(foodTypeInput, foodDescInput, foodCoverPath)\n try:\n db_session.add(newFoodType)\n db_session.commit()\n db_session.close()\n return \"0\"\n except Exception, e:\n print e\n return \"-1\"\n\n\ndef getfoodCoverimg(f):\n imgList = re.findall(r\"/imgsUpload/(.*?)\\.jpg\", f.foodDetail)\n if imgList == []:\n return SERVER_IP + \"static/imgsUpload/chihuo.png\"\n else:\n return SERVER_IP + \"static/imgsUpload/\" + imgList[0] + \".jpg\"\n\n\ndef foodtype2json(ft):\n return {\n \"foodTypeId\": ft.foodTypeId,\n \"foodTypeName\": ft.foodTypeName,\n \"foodTypeDesc\": ft.foodTypeDesc,\n \"coverImg\": ft.coverPath\n }\n\n\ndef food2json(f):\n foodAuthor = db_session.query(user).filter(user.userId == f.foodAuthorId).first().nickName\n return {\n \"foodId\": f.foodId,\n \"foodAuthor\": foodAuthor,\n \"foodName\": f.foodName,\n \"foodAuthorId\": f.foodAuthorId,\n \"foodImgSrc\": getfoodCoverimg(f),\n \"starCount\": f.starCount\n }\n\n\n@aboutCookbook.route(\"/getNewFoodList\")\ndef getNewFoodList():\n foodlist = db_session.query(food).limit(10).all()\n foodlist.sort(key=lambda x: x.foodId, reverse=True)\n foodJsonList = []\n for f in foodlist:\n foodJsonList.append(food2json(f))\n print json.dumps(foodJsonList)\n return json.dumps(foodJsonList)\n\n\n@aboutCookbook.route(\"/getHotFoodList\")\ndef getHotFoodList():\n foodlist = db_session.query(food).limit(10).all()\n foodlist.sort(key=lambda x: x.starCount, reverse=True)\n foodJsonList = []\n for f in foodlist:\n foodJsonList.append(food2json(f))\n print json.dumps(foodJsonList)\n return json.dumps(foodJsonList)\n\n\n@aboutCookbook.route(\"/getDesignList<authorId>\")\ndef getDesignList(authorId):\n foodList = db_session.query(food).filter(food.foodAuthorId == authorId).all()\n foodJsonList = []\n for f in foodList:\n 
foodJsonList.append(food2json(f))\n print json.dumps(foodJsonList)\n return json.dumps(foodJsonList)\n\n\n@aboutCookbook.route(\"/getFavoList<authorId>\")\ndef getFavoList(authorId):\n foodList = db_session.query(food).filter(foodStar.foodId == food.foodId, foodStar.userId == authorId).all()\n foodJsonList = []\n for f in foodList:\n foodJsonList.append(food2json(f))\n print json.dumps(foodJsonList)\n return json.dumps(foodJsonList)\n\n\n@aboutCookbook.route(\"/getFoodList<typeId>\")\ndef getFoodList(typeId):\n foodList = db_session.query(food).filter(food.foodTypeId == typeId).all()\n foodJsonList = []\n for f in foodList:\n foodJsonList.append(food2json(f))\n print json.dumps(foodJsonList)\n return json.dumps(foodJsonList)\n\n\n@aboutCookbook.route(\"/getFoodTypeList\")\ndef foodTypeList():\n foodTypeList = getFoodTypeLIst()\n typeJsonList = []\n for type in foodTypeList:\n typeJsonList.append(foodtype2json(type))\n print json.dumps(typeJsonList)\n print typeJsonList\n return json.dumps(typeJsonList)\n\n\n@aboutCookbook.route(\"/getStarCount<foodId>\", methods=['GET'])\ndef getStarCount(foodId):\n return None\n\n\n@aboutCookbook.route(\"/starStatus<foodId>/<userId>\", methods=['GET'])\ndef starStatus(foodId, userId):\n try:\n status = db_session.query(foodStar).filter(foodStar.userId == userId, foodStar.foodId == foodId).first()\n if status == None:\n print 0\n return \"0\"\n else:\n print 1\n return \"1\"\n except Exception, e:\n print e\n return \"0\"\n\n\n@aboutCookbook.route(\"/starOrCancel<foodId>/<userId>\")\ndef starOrCancel(foodId, userId):\n try:\n status = db_session.query(foodStar).filter(foodStar.userId == userId, foodStar.foodId == foodId).first()\n if status == None:\n fs = foodStar(userId, foodId)\n actiontime = datetime.datetime.now().strftime(datetimeRegx)\n newaction = action(3, userId, foodId, actiontime)\n db_session.query(food).filter(food.foodId == foodId).update({food.starCount: food.starCount + 1})\n db_session.add(fs)\n db_session.add(newaction)\n db_session.commit()\n db_session.close()\n print 1\n return \"1\"\n else:\n try:\n db_session.query(food).filter(food.foodId == foodId).update({food.starCount: food.starCount - 1})\n delaction = db_session.query(action) \\\n .filter(action.subjectId == userId, action.objectId == foodId, action.actionType == 3) \\\n .first()\n db_session.delete(delaction)\n except Exception, e1:\n print e1\n db_session.delete(status)\n db_session.commit()\n db_session.close()\n print 0\n return \"0\"\n except Exception, e:\n print e\n return \"-1\"\n\n\n@aboutCookbook.route(\"/uploadCoverImg\", methods=['POST'])\ndef uploadCoverImg():\n try:\n file = request.files['coverImg']\n print \"ๅฐ้ขๅ็งฐ:\" + file.filename\n fn = getRandFilename()\n tn = os.path.splitext(file.filename)[1]\n print tn\n fullFileName = fn + tn\n if not os.path.isdir('chihuo/static/imgsUpload/'):\n os.mkdir('chihuo/static/imgsUpload')\n file.save(os.path.join('chihuo/static/imgsUpload/', fullFileName))\n msg = SERVER_IP + \"static/imgsUpload/\" + fullFileName\n except Exception, e:\n print e\n msg = \"ๅฐ้ขไธŠไผ ๅคฑ่ดฅ\"\n res = Response(msg)\n res.headers[\"Content-Type\"] = \"text/plain\"\n res.headers[\"Charset\"] = \"utf-8\"\n res.headers[\"Access-Control-Allow-Origin\"] = \"*\"\n return res\n" }, { "alpha_fraction": 0.6432027220726013, "alphanum_fraction": 0.6521528959274292, "avg_line_length": 28.52857208251953, "blob_id": "f014506b119855b149ec98953ef6cbdf7e9e9db0", "content_id": "afe08cdb4913d5b3c0982f6a80754c0db13289f8", "detected_licenses": [], 
"is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4238, "license_type": "no_license", "max_line_length": 103, "num_lines": 140, "path": "/chihuo/dbModels.py", "repo_name": "jcwayd/chihuoServerEnd", "src_encoding": "UTF-8", "text": "# coding:utf-8\nfrom sqlalchemy import Column, String, Integer, DateTime, SmallInteger, ForeignKey\nfrom sqlalchemy.dialects.mysql import LONGTEXT\nfrom chihuo.dbConnect import Base\n\n\"\"\"่ฟ™ไธช็ฑปๅฎšไน‰ไบ†็”จๆˆท็ฑปๅž‹๏ผŒไน‹ๅŽไผšๅœจๆ•ฐๆฎๅบ“ๅˆ›ๅปบ็›ธๅบ”็š„่กจ\"\"\"\n\n\nclass user(Base):\n __tablename__ = 'user'\n\n userId = Column(Integer(), primary_key=True)\n userName = Column(String(16), unique=True, nullable=False)\n password = Column(String(16), nullable=False)\n nickName = Column(String(30), nullable=False, unique=True)\n emailAddress = Column(String(20), unique=True, default=None)\n phoneNumber = Column(String(11), unique=True, default=None)\n followerCount = Column(Integer, default=0)\n selfIntroduction = Column(String(255), nullable=True, default=None)\n headIcon = Column(String(127), nullable=True, default=None)\n\n # ๅˆๅง‹ๅŒ–\n def __init__(self, username=None, psw=None, nn=None):\n self.userName = username\n self.password = psw\n self.nickName = nn\n\n\nclass food(Base):\n __tablename__ = 'food'\n\n foodId = Column(Integer, primary_key=True)\n foodName = Column(String(16), unique=True, nullable=False)\n foodAuthorId = Column(Integer, nullable=False)\n foodTypeId = Column(Integer)\n foodDetail = Column(LONGTEXT)\n starCount = Column(Integer, default=0)\n hotIndex = Column(Integer, default=0)\n\n def __init__(self, name=None, authorId=None, typeId=None, detail=None, hotindex=None):\n self.foodName = name\n self.foodAuthorId = authorId\n self.foodTypeId = typeId\n self.foodDetail = detail\n self.hotIndex = hotindex\n\n\nclass foodType(Base):\n __tablename__ = 'foodType'\n\n foodTypeId = Column(Integer, primary_key=True)\n foodTypeName = Column(String(16), unique=True, nullable=False)\n foodTypeDesc = Column(LONGTEXT)\n coverPath = Column(String(80), unique=False, nullable=False)\n\n def __init__(self, name=None, desc=None, path=None):\n self.foodTypeName = name\n self.foodTypeDesc = desc\n self.coverPath = path\n\n\nclass foodStar(Base):\n __tablename__ = 'foodStar'\n\n foodStarId = Column(Integer, primary_key=True)\n foodId = Column(Integer)\n userId = Column(Integer)\n\n def __init__(self, userid=None, foodid=None):\n self.foodId = foodid\n self.userId = userid\n\n\nclass share(Base):\n __tablename__ = 'share'\n\n shareId = Column(Integer, primary_key=True)\n shareTitle = Column(String(16), nullable=False)\n shareAuthorId = Column(Integer, nullable=False)\n shareDetail = Column(LONGTEXT)\n pubTime = Column(DateTime, nullable=False)\n hotIndex = Column(Integer, default=0)\n typeName = Column(String(16), nullable=True)\n\n def __init__(self, authotid=None, detail=None, pubtime=None, title=None, hotindex=None, type=None):\n self.shareAuthorId = authotid\n self.shareDetail = detail\n self.pubTime = pubtime\n self.shareTitle = title\n self.hotIndex = hotindex\n self.typeName = type\n\n\nclass shareType(Base):\n __tablename__ = 'shareType'\n\n typeId = Column(Integer, primary_key=True)\n typeName = Column(String(16), nullable=False, unique=True)\n\n def __init__(self, id=None, name=None):\n self.typeId = id\n self.typeName = name\n\n\nclass action(Base):\n __tablename__ = 'action'\n\n actionId = Column(Integer, primary_key=True)\n \"\"\"\n TypeไธๅŒ็š„ๆ•ฐๅญ—่กจ็คบ:\n 1:ๅˆ›ๅปบ่œๅ“\n 2:ๅ‘่กจๅˆ†ไบซ\n 3:ๆ”ถ่—่œๅ“\n 
4:....\n \"\"\"\n actionType = Column(SmallInteger, nullable=False)\n # ไธป่ฏญ\n subjectId = Column(Integer, nullable=False)\n # ๅฎพ่ฏญ\n objectId = Column(Integer, nullable=False)\n # ๆ—ถ้—ด\n actionTime = Column(DateTime, nullable=False)\n\n def __init__(self, type=None, sid=None, oid=None, atime=None):\n self.actionType = type\n self.subjectId = sid\n self.objectId = oid\n self.actionTime = atime\n\n\nclass watch(Base):\n __tablename__ = \"watch\"\n\n watchId = Column(Integer, primary_key=True)\n userId = Column(Integer, nullable=False)\n watchedId = Column(Integer, nullable=False)\n\n def __init__(self, wid=None, uid=None):\n self.userId = uid\n self.watchedId = wid\n" }, { "alpha_fraction": 0.8066298365592957, "alphanum_fraction": 0.8066298365592957, "avg_line_length": 25, "blob_id": "660961ecd63639a87fc0e38d49f897bb13dfe7e1", "content_id": "b46ff78799d5897eaf0274f6bbe901c9d814a918", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 217, "license_type": "no_license", "max_line_length": 88, "num_lines": 7, "path": "/README.md", "repo_name": "jcwayd/chihuoServerEnd", "src_encoding": "UTF-8", "text": "# chihuoServerEndfood\n\n่ฟ™ไธชๆ˜ฏchihuoApp็š„ๅŽๅฐ้กต้ข,App็š„ๅฎ‰ๅ“่ฝฏไปถๆ˜พ็คบ่ฏทๆŸฅ็œ‹\n\nThis is the server end of the App Chihuo,and if you want to see the app end,please goto:\n\nhttps://github.com/jcwayd/chihuoApp" }, { "alpha_fraction": 0.5898305177688599, "alphanum_fraction": 0.5952542424201965, "avg_line_length": 27.640777587890625, "blob_id": "87b382e8237e5db78a1c3b5757d01928b4994867", "content_id": "817806cb337608ba9e293d8fd166666eb5418bda", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2978, "license_type": "no_license", "max_line_length": 83, "num_lines": 103, "path": "/chihuo/views/aboutMe.py", "repo_name": "jcwayd/chihuoServerEnd", "src_encoding": "UTF-8", "text": "# coding:utf-8\nfrom threading import Thread\nimport time\nfrom flask import Blueprint, request, json\n\nfrom ..dbModels import user\nfrom ..dbConnect import db_session\nfrom sqlalchemy import func\n\naboutMe = Blueprint('aboutMe', __name__)\n\n\n@aboutMe.route('/login', methods=['POST'])\ndef login():\n username = request.form['username']\n password = request.form['password']\n users = db_session.query(user).filter(user.userName == username).all()\n if users == []:\n # ็”จๆˆทๅไธๅญ˜ๅœจไธบ-1\n return \"-1\"\n elif users[0].password != password:\n # ๅฏ†็ ไธๆญฃ็กฎ่ฟ”ๅ›ž-2\n return \"-2\"\n else:\n return str(users[0].userId)\n\n\n@aboutMe.route('/reg', methods=['POST'])\ndef reg():\n username = request.form['username']\n password = request.form['password']\n nickname = request.form['nickname']\n users_username = db_session.query(user).filter(user.userName == username).all()\n users_nickname = db_session.query(user).filter(user.nickName == nickname).all()\n if not users_username:\n if not users_nickname:\n try:\n newUser = user(username, password, nickname)\n db_session.add(newUser)\n db_session.commit()\n db_session.close()\n return \"0\"\n except(BaseException):\n return \"-3\"\n else:\n return \"-1\"\n else:\n return \"-2\"\n\n\n@aboutMe.route('/getCurrentUserInfo', methods=['POST'])\ndef getCurrentUserInfo():\n getId = request.form['currentUserId']\n currentUserId = int(getId)\n users = db_session.query(user).filter(user.userId == currentUserId).all()\n if users == []:\n return \"error\"\n else:\n currentUser = users[0]\n return json.dumps(currentUser, default=user2json)\n\n\ndef 
user2json(u):\n return {\n \"userId\": u.userId,\n \"username\": u.userName,\n \"password\": u.password,\n \"nickname\": u.nickName,\n \"emailAddress\": u.emailAddress,\n \"phoneNumber\": u.phoneNumber,\n \"selfIntroduction\": u.selfIntroduction,\n \"headIcon\": u.headIcon\n }\n\n\n@aboutMe.route('/modifyMyInfo', methods=['POST'])\ndef modifyMyInfo():\n currentId = int(request.form['currentUserId'])\n print currentId\n nickname = request.form['nickname']\n print nickname\n emailAddress = request.form['emailAddress']\n print emailAddress\n phoneNumber = request.form['phoneNumber']\n print phoneNumber\n selfIntro = request.form['selfIntro']\n print selfIntro\n try:\n db_session.query(user).filter(user.userId == currentId).update(\n {\n 'userId': currentId,\n 'nickName': nickname,\n 'emailAddress': emailAddress,\n 'phoneNumber': phoneNumber,\n 'selfIntroduction': selfIntro\n }\n )\n db_session.commit()\n db_session.close()\n return \"1\"\n except Exception, ecp:\n print ecp\n return \"-2\"\n" }, { "alpha_fraction": 0.5099999904632568, "alphanum_fraction": 0.5102631449699402, "avg_line_length": 23.35897445678711, "blob_id": "b0bb7ec77cdbce8f12d5086567c118255893ae60", "content_id": "d064debfbff69bdd9c64e000cda4c53b38e6fde7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4072, "license_type": "no_license", "max_line_length": 73, "num_lines": 156, "path": "/chihuo/dbViews.py", "repo_name": "jcwayd/chihuoServerEnd", "src_encoding": "UTF-8", "text": "# coding:utf-8\nfrom flask_admin.contrib.sqla import ModelView\n\n\nclass userView(ModelView):\n column_list = ('userId',\n 'userName',\n 'nickName',\n # 'password',\n # 'emailAddress',\n # 'phoneNumber',\n # 'followerCount',\n # 'selfIntroduction',\n # 'headIcon'\n )\n column_labels = {\n 'userId': u'็”จๆˆทID',\n 'userName': u'็”จๆˆทๅ',\n 'password': u'็”จๆˆทๅฏ†็ ',\n 'nickName': u'็”จๆˆทๆ˜ต็งฐ',\n 'emailAddress': u'็”ตๅญ้‚ฎไปถ',\n 'phoneNumber': u'็”ต่ฏๅท็ ',\n 'followerCount': u'็ฒ‰ไธๆ•ฐ้‡',\n 'selfIntroduction': u'่‡ชๆˆ‘ไป‹็ป',\n 'headIcon': u'็”จๆˆทๅคดๅƒ'\n }\n\n def __init__(self, modelType, session, **kwargs):\n super(userView, self).__init__(modelType, session, **kwargs)\n\n\nclass foodView(ModelView):\n column_list = (\n 'foodId',\n 'foodName',\n 'foodAuthorId',\n 'foodTypeId'\n )\n column_labels = {\n 'foodId': u'่œๅ“ID',\n 'foodName': u'่œๅ“ๅ็งฐ',\n 'foodAuthorId': u'่œๅ“ไฝœ่€…็š„ID',\n 'foodTypeId': u'ๆ‰€ๅฑž่œ็ณปID',\n 'foodDetail': u'่œๅ“่ฏฆ็ป†ไฟกๆฏ',\n 'starCount': u'็‚น่ตžไบบๆ•ฐ',\n 'hotIndex': u'่œๅ“็ƒญๅบฆ'\n }\n\n def __init__(self, modelType, session, **kwargs):\n super(foodView, self).__init__(modelType, session, **kwargs)\n\n\nclass foodTypeView(ModelView):\n column_list = (\n 'foodTypeId',\n 'foodTypeName',\n 'foodTypeDesc',\n 'coverPath'\n )\n column_labels = {\n 'foodTypeId': u'่œ็ณปID',\n 'foodTypeName': u'่œ็ณปๅ็งฐ',\n 'foodTypeDesc': u'่œ็ณปไป‹็ป',\n 'coverPath':u'ๅฐ้ข่ทฏๅพ„'\n }\n\n def __init__(self, modelType, session, **kwargs):\n super(foodTypeView, self).__init__(modelType, session, **kwargs)\n\n\nclass foodStarView(ModelView):\n column_list = (\n 'foodStarId',\n 'foodId',\n 'userId'\n )\n column_labels = {\n 'foodStarId': u'็‚น่ตžID',\n 'foodId': u'่œๅ“ID',\n 'userId': u'็”จๆˆทID'\n }\n\n def __init__(self, modelType, session, **kwargs):\n super(foodStarView, self).__init__(modelType, session, **kwargs)\n\n\nclass shareView(ModelView):\n column_list = (\n 'shareId',\n 'shareTitle',\n 'shareAuthorId',\n 'shareDetail',\n 
'pubTime',\n 'typeName',\n )\n column_labels = {\n 'shareId': u'ๅˆ†ไบซID',\n 'shareTitle': u'ๅˆ†ไบซๆ ‡้ข˜',\n 'shareAuthorId': u'ไฝœ่€…ID',\n 'shareDetail': u'ๅˆ†ไบซ่ฏฆๆƒ…',\n 'pubTime': u'ๅ‘่กจๆ—ถ้—ด',\n 'hotIndex': u'ๅˆ†ไบซ็ƒญๅบฆ',\n 'typeName':u'ๅˆ†ไบซ็ฑปๅž‹ๅ็งฐ',\n }\n\n def __init__(self, modelType, session, **kwargs):\n super(shareView, self).__init__(modelType, session, **kwargs)\n\n\nclass shareTypeView(ModelView):\n column_list = (\n 'typeName',\n )\n column_labels = {\n 'typeId': u\"ๅˆ†ไบซ็ฑปๅž‹ID\",\n 'typeName': u'ๅˆ†ไบซ็ฑปๅž‹ๅ็งฐ'\n }\n\n def __init__(self, modelType, session, **kwargs):\n super(shareTypeView, self).__init__(modelType, session, **kwargs)\n\n\nclass actionView(ModelView):\n column_list = (\n 'actionId',\n 'actionType',\n 'subjectId',\n 'objectId',\n 'actionTime'\n )\n column_labels = {\n 'actionId': 'ๅŠจๆ€Id',\n 'actionType': 'ๅŠจๆ€็ฑปๅž‹',\n 'subjectId': 'ไธป่ฏญId',\n 'objectId': 'ๅฎพ่ฏญId',\n 'actionTime': 'ๅŠจๆ€ๆ—ถ้—ด'\n }\n\n def __init__(self, modelType, session, **kwargs):\n super(actionView, self).__init__(modelType, session, **kwargs)\n\n\nclass watchView(ModelView):\n column_list = {\n 'watchId',\n 'userId',\n 'watchedId'\n }\n column_labels = {\n 'watchId': 'ๅ…ณๆณจId',\n 'userId': '็”จๆˆทId',\n 'watchedId': '่ขซๅ…ณๆณจ่€…Id'\n }\n\n def __init__(self, modelType, session, **kwargs):\n super(watchView, self).__init__(modelType, session, **kwargs)\n" }, { "alpha_fraction": 0.375, "alphanum_fraction": 0.6428571343421936, "avg_line_length": 27, "blob_id": "48ee44106941e3357791f2f0fba5a68bd1d45c95", "content_id": "99bde5ae6074fe8105c8747e8fb2b9980eb8bdda", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 56, "license_type": "no_license", "max_line_length": 40, "num_lines": 2, "path": "/chihuo/views/__init__.py", "repo_name": "jcwayd/chihuoServerEnd", "src_encoding": "UTF-8", "text": "# coding=utf-8\nSERVER_IP = \"http://192.168.1.101:5000/\"\n" } ]
13
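The Flask views in the snapshot above hand-roll one model-to-dict converter per table (`food2json`, `share2json`, `user2json`, `action2json`). A generic serializer is one way to collapse that repetition. The sketch below is an assumption rather than project code: it targets Python 3 and a current SQLAlchemy, unlike the Python 2 code in this record, and `row_to_dict` is a hypothetical helper.

```python
# Generic SQLAlchemy row -> dict serializer (hypothetical; not part of chihuoServerEnd).
from sqlalchemy import inspect

def row_to_dict(obj, exclude=("password",)):
    """Serialize a mapped object's plain column attributes, skipping sensitive fields."""
    state = inspect(obj)  # InstanceState of the mapped instance
    return {
        attr.key: getattr(obj, attr.key)
        for attr in state.mapper.column_attrs
        if attr.key not in exclude
    }
```

Per-model converters would still be needed wherever the JSON mixes in derived data (author nicknames looked up by ID, cover-image URLs scraped out of `foodDetail`), so a helper like this only replaces the mechanical column copying.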
gitter-badger/open-ods
https://github.com/gitter-badger/open-ods
dfbe66efbe9200cd688ca06236882bd35871ca86
c1f6dd29a017ce1379803f8b6ebe7d7398c91d57
46aa22af1c693fa0977c396ed48983e6ed60a2a0
refs/heads/develop
2021-01-21T02:20:40.681010
2015-12-06T08:58:34
2015-12-06T08:58:34
47,580,872
0
0
null
2015-12-07T21:39:01
2015-12-03T09:44:52
2015-12-07T21:05:25
null
[ { "alpha_fraction": 0.48500001430511475, "alphanum_fraction": 0.6949999928474426, "avg_line_length": 15.666666984558105, "blob_id": "1218c17f065f20dad8274a9b108069c7a0d8a477", "content_id": "246463d5921c55736413f8921448a027cfb293a4", "detected_licenses": [ "LicenseRef-scancode-unknown-license-reference", "MIT", "OGL-UK-2.0" ], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 400, "license_type": "permissive", "max_line_length": 26, "num_lines": 24, "path": "/requirements.txt", "repo_name": "gitter-badger/open-ods", "src_encoding": "UTF-8", "text": "dicttoxml==1.6.6\nFlask==0.10.1\nFlask-Autodoc==0.1.2\nFlask-Cache==0.13.1\nFlask-Cors==2.1.2\nFlask-Heroku-Cacheify==1.5\ngunicorn==19.3.0\nitsdangerous==0.24\nJinja2==2.8\nlxml==3.4.4\nMako==1.0.3\nMarkupSafe==0.23\nnewrelic==2.58.2.45\nnose==1.3.7\npsycopg2==2.6.1\npylibmc==1.5.0\npython-editor==0.4\npython-status==1.0.1\nPyYAML==3.11\nredis==2.10.5\nsix==1.10.0\nSQLAlchemy==1.0.9\nWerkzeug==0.10.4\nwheel==0.24.0\n" }, { "alpha_fraction": 0.6179507374763489, "alphanum_fraction": 0.6846703886985779, "avg_line_length": 27.613636016845703, "blob_id": "d32117afa9a2d15fcf7e3d8e8da00808e7f8e9e8", "content_id": "0d6b9021345dce0c9b13b57b15f8944567a5ec46", "detected_licenses": [ "LicenseRef-scancode-unknown-license-reference", "MIT", "OGL-UK-2.0" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1259, "license_type": "permissive", "max_line_length": 170, "num_lines": 44, "path": "/docs/data_import.md", "repo_name": "gitter-badger/open-ods", "src_encoding": "UTF-8", "text": "# Importing ODS Data Into OpenODS\n\n### Pre-requisites\n* Runningn instance of PostgreSQL\n* A way of running SQL queries against PostgreSQL (I use psql or [pgAdmin](http://www.pgadmin.org/download/macosx.php))\n* All setup steps in the main README must have been completed\n\n### Steps\n\n1. Execute the SQL scripts, which can be found in the sql sub-directory, from the root folder of your repository as follows:\n\n ```bash\n $ psql -f sql/create_user_and_database.sql -U postgres\n \n $ psql -d openods -f sql/run_migration_scripts.sql -U postgres\n ```\n\n2. In the terminal, navigate to the data sub-directory of the repository and ensure that both `odsfull.xml.zip` and `import_ods_xml.py` files are present in the directory\n\n ```bash\n $ ls -l\n \n -rw-r--r--@ 1 matt staff 19885585 3 Nov 14:56 odsfull.xml.zip\n -rw-r--r-- 1 matt staff 6930 7 Nov 17:53 import_ods_xml.py\n ```\n\n3. 
Run the import script:\n\n ```bash\n $ python import_ods_xml.py\n\n Starting data import\n Connected to database\n New Version Ref is: 11c4f5ed-e0b7-4b3e-9e3f-6b156333d0a6\n 202637\n 0.0.0.1\n 2015-10-08\n 116\n Full\n A full file of all organisations\n Starting import\n Import took 363.49907088279724s\n Import Complete.\n ```\n" }, { "alpha_fraction": 0.7514705657958984, "alphanum_fraction": 0.783823549747467, "avg_line_length": 29.954545974731445, "blob_id": "860e7329e1d0df30621e412b2d02b7c59951bbbb", "content_id": "96dd89425ff8c5e85f983476c0f7547b86798629", "detected_licenses": [ "LicenseRef-scancode-unknown-license-reference", "MIT", "OGL-UK-2.0" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 680, "license_type": "permissive", "max_line_length": 103, "num_lines": 22, "path": "/RELEASE.md", "repo_name": "gitter-badger/open-ods", "src_encoding": "UTF-8", "text": "0.3 - 10th November 2015\n---\n* Added remaining metadata for roles and relationships\n* Added a simple landing page for users arriving at the site\n* Added fake /organisations/<ods_code>/endpoints route to demonstrate endpoint repository functionality\n\n0.2 - 6th November 2015\n---\n* Updated docs to try and help people get started / contribute to the project\n* Added some parameters to some routes\n* Slightly improved HATEOAS compliance\n\n\n0.1 - 5th November 2015\n---\nInitial release of prototype API containing following features:\n\n* View Basic API Documentation\n* Search For Organisation By Name\n* Get Specific Organisation By ODS Code\n* Get List Of Roles\n* Basic HATEOAS compliance" }, { "alpha_fraction": 0.7182555198669434, "alphanum_fraction": 0.7271323800086975, "avg_line_length": 27.788888931274414, "blob_id": "9307a80016bbfcb6f3254f2d0babd87b1ecbf5a7", "content_id": "b39bbd5d768f606a33ecc176ae5b5e4c4b89632f", "detected_licenses": [ "LicenseRef-scancode-unknown-license-reference", "MIT", "OGL-UK-2.0" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2591, "license_type": "permissive", "max_line_length": 153, "num_lines": 90, "path": "/README.md", "repo_name": "gitter-badger/open-ods", "src_encoding": "UTF-8", "text": "# open-ods\n\n**This is an unofficial personal project making use of open data and is unrelated to the NHS and associated organisations**\n\n**The data is regularly changing and so there is absolutely no guarantee on stability or accuracy.**\n\n### Source Data Attribution\nOrganisation Data Service, Health and Social Care Information Centre, licenced under the Open Government Licence v2.0 - Open Government Licence\n\nMore information on the Organisation Data Service can be found [on the HSCIC website](http://systems.hscic.gov.uk/data/ods)\n\n\n## Issue Tracker\n[![Stories in Ready](https://badge.waffle.io/mattstibbs/open-ods.png?label=1%20-%20Ready&title=Ready)](http://waffle.io/mattstibbs/open-ods)\n\nYou can view a board for issues using either: [Waffle.io](https://waffle.io/mattstibbs/open-ods) or [Huboard](https://huboard.com/mattstibbs/open-ods/#/)\n\n## Continuous Integration\nBuilds are handled by Travis CI at [https://travis-ci.org/mattstibbs/open-ods](https://travis-ci.org/mattstibbs/open-ods)\n\n[![Build Status](https://travis-ci.org/mattstibbs/open-ods.svg?branch=develop)](https://travis-ci.org/mattstibbs/open-ods) develop\n\n[![Build Status](https://travis-ci.org/mattstibbs/open-ods.svg?branch=master)](https://travis-ci.org/mattstibbs/open-ods) master\n\n\n## Getting Started\n\n### Pre-requisites\n* Python 3.4+\n* 
Virtualenv `pip install -g virtualenv`\n* PostgreSQL ([Postgres.app](http://postgresapp.com) is good for OSX development)\n* [Heroku Toolbelt](https://toolbelt.heroku.com) (Not mandatory but helpful if you're going to interact with Heroku)\n\n### Steps\n\n1. Clone this repository to your local machine\n\n ```bash\n git clone https://github.com/mattstibbs/open-ods.git\n ```\n \n \n2. In the terminal, navigate to the directory of the repository e.g.\n\n ```bash\n cd ~/Source/open-ods\n ```\n\n\n3. Create a Python3 Virtualenv\n\n ```bash\n virtualenv -p python3 env\n ```\n\n Check that python3 is installed properly by running `python` and checking the version.\n\n\n\n4. Activate the virtualenv\n\n ```bash\n source env/bin/activate\n ```\n\n\n5. Install libmemcached (for caching using [flask-heroku-cacheify](http://rdegges.github.io/flask-heroku-cacheify/))\n\n ```bash\n brew install libmemcached\n ```\n\n\n6. Do a pip install\n\n ```bash\n pip install -r requirements.txt\n ```\n\n\n7. Now go import the ODS data into your OpenODS database -> [Instructions for importing the ODS data into your PostgreSQL database](docs/data_import.md)\n\n\n## License\n\nThis project is licensed under MIT License.\n\nCopyright (c) 2015 Matt Stibbs\n\nSee [LICENSE.md](LICENSE.md).\n" }, { "alpha_fraction": 0.696866512298584, "alphanum_fraction": 0.7132152318954468, "avg_line_length": 40.97142791748047, "blob_id": "b2f9bdc42430668275c534303d6eb646aad6fe86", "content_id": "8659b8ad92ad8b1f85a1a161efcfbd55c337a553", "detected_licenses": [ "LicenseRef-scancode-unknown-license-reference", "MIT", "OGL-UK-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1468, "license_type": "permissive", "max_line_length": 93, "num_lines": 35, "path": "/test/test_openods_api_auth.py", "repo_name": "gitter-badger/open-ods", "src_encoding": "UTF-8", "text": "import unittest\nfrom openods_api import app\nimport openods_api.auth as auth\n\n\nclass AuthTests(unittest.TestCase):\n\n def test_check_auth_returns_false_for_invalid_credentials(self):\n self.assertFalse(auth.check_auth('incorrect_user', 'incorrect_password'))\n\n def test_check_auth_returns_true_for_valid_credentials(self):\n self.assertTrue(auth.check_auth('env_test_user','env_test_pass'))\n\n\nclass RouteAuthTests(unittest.TestCase):\n\n def test_organisations_request_with_no_auth_returns_403_response(self):\n tester = app.test_client(self)\n response = tester.get('/organisations/', content_type='application/json')\n self.assertEqual(response.status_code, 401)\n\n def test_organisations_search_request_with_no_auth_returns_403_response(self):\n tester = app.test_client(self)\n response = tester.get('/organisations/search/test/', content_type='application/json')\n self.assertEqual(response.status_code, 401)\n\n def test_organisations_single_request_with_no_auth_returns_403_response(self):\n tester = app.test_client(self)\n response = tester.get('/organisations/RFF/', content_type='application/json')\n self.assertEqual(response.status_code, 401)\n\n def test_roles_request_with_no_auth_returns_403_response(self):\n tester = app.test_client(self)\n response = tester.get('/role-types/', content_type='application/json')\n self.assertEqual(response.status_code, 401)" }, { "alpha_fraction": 0.7485954761505127, "alphanum_fraction": 0.7485954761505127, "avg_line_length": 26.423076629638672, "blob_id": "9b9f4ff1522a9dc32f45158a4afe6f37ff8122bc", "content_id": "d6802051e4596090f2c2a710f3db43e4a5112826", "detected_licenses": [ 
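The authentication tests above exercise `auth.check_auth` and the 401 responses returned for unauthenticated requests (note the test method names say 403 while the assertions check 401). A hedged sketch of the kind of credential check those tests imply — not the repository's actual `openods_api/auth.py`, whose source is not shown here — reading `API_USER`/`API_PASS` the same way `config.py` does:

```python
# A sketch of a basic credential check consistent with the tests above.
# hmac.compare_digest gives a constant-time comparison; the env-var
# defaults mirror openods_api/config.py.
import hmac
import os

API_USER = os.environ.get('API_USER', 'user')
API_PASS = os.environ.get('API_PASS', 'pass')


def check_auth(username, password):
    # Both comparisons run regardless, to avoid leaking which part failed.
    user_ok = hmac.compare_digest(username, API_USER)
    pass_ok = hmac.compare_digest(password, API_PASS)
    return user_ok and pass_ok


print(check_auth('user', 'pass'))  # True with the default config
print(check_auth('user', 'nope'))  # False
```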
"LicenseRef-scancode-unknown-license-reference", "MIT", "OGL-UK-2.0" ], "is_generated": false, "is_vendor": false, "language": "SQL", "length_bytes": 712, "license_type": "permissive", "max_line_length": 70, "num_lines": 26, "path": "/sql/003-create_primary_role_view.sql", "repo_name": "gitter-badger/open-ods", "src_encoding": "UTF-8", "text": "-- Column: primary_role\n\n-- ALTER TABLE roles DROP COLUMN primary_role;\n\nALTER TABLE roles ADD COLUMN primary_role boolean;\nALTER TABLE roles ALTER COLUMN primary_role SET NOT NULL;\n\n\n-- View: active_organisations_primary_roles\n\n-- DROP VIEW active_organisations_primary_roles;\n\nCREATE OR REPLACE VIEW organisations_primary_roles AS\n SELECT o.org_name,\n o.org_odscode,\n r.role_code,\n cs.codesystem_displayname,\n o.org_recordclass\n FROM roles r\n JOIN codesystems cs ON r.role_code::text = cs.codesystem_id::text\n JOIN organisations o ON r.organisation_ref = o.organisation_ref\n WHERE r.primary_role IS TRUE\n ORDER BY o.org_name;\n\nALTER TABLE organisations_primary_roles\n OWNER TO openods;" }, { "alpha_fraction": 0.6775956153869629, "alphanum_fraction": 0.6775956153869629, "avg_line_length": 22, "blob_id": "26c4ea8ee2ef7a5e77921d152d16f1f51bec7606", "content_id": "59c8c3f75685811f8b893d619465342c776e5f99", "detected_licenses": [ "LicenseRef-scancode-unknown-license-reference", "MIT", "OGL-UK-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 183, "license_type": "permissive", "max_line_length": 53, "num_lines": 8, "path": "/test/test_openods_api_startup.py", "repo_name": "gitter-badger/open-ods", "src_encoding": "UTF-8", "text": "# import unittest\n#\n#\n# class ApplicationTests(unittest.TestCase):\n#\n# def test_application_can_start_correctly(self):\n# import openods_api\n# self.assertTrue(True)" }, { "alpha_fraction": 0.581190288066864, "alphanum_fraction": 0.581190288066864, "avg_line_length": 22.465517044067383, "blob_id": "e1c083f96638214eb8d8b4214cc83484cbbc5365", "content_id": "511aedb9821f606bd7c6725daa5e219592924eb5", "detected_licenses": [ "LicenseRef-scancode-unknown-license-reference", "MIT", "OGL-UK-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1361, "license_type": "permissive", "max_line_length": 64, "num_lines": 58, "path": "/test/test_ODSFileManager.py", "repo_name": "gitter-badger/open-ods", "src_encoding": "UTF-8", "text": "# from distutils import file_util\n# from lxml import etree as xml_tree_parser\n# import logging\n# import os.path\n# import sys\n# import unittest\n#\n# sys.path.append(os.path.join(os.path.dirname(__file__), '..'))\n#\n# from controller.ODSFileManager import ODSFileManager\n#\n# # Logging Setup\n# log = logging.getLogger('import_ods_xml')\n# log.setLevel(logging.DEBUG)\n#\n#\n# File_manager = ODSFileManager()\n# Src_file = 'data/odsfull.xml.zip'\n# Dst_file = 'controller/odsfull.xml.zip'\n#\n#\n# class ods_file_manager_test(unittest.TestCase):\n#\n# __ods_xml_data = None\n#\n# def setUp(self):\n# self.__ods_xml_data = None\n#\n# def test_local_file_available(self):\n#\n# file_util.copy_file(Src_file,\n# Dst_file, update=True)\n#\n# self.__ods_xml_data = File_manager.get_latest_xml()\n# self.assertTrue(self.__ods_xml_data)\n# log.info(self.__ods_xml_data\n# .find('./Manifest/Version')\n# .attrib.get('value'))\n#\n# def test_newer_remote_file_available(self):\n# pass\n#\n# def test_not_zip_file(self):\n# pass\n#\n# def test_zip_file_invalid(self):\n# pass\n#\n# def test_schema_invalid(self):\n# pass\n#\n# def 
tearDownClass():\n# # os.remove(Dst_file)\n# pass\n#\n#\n# if __name__ == '__main__':\n# unittest.main()\n" }, { "alpha_fraction": 0.6863270998001099, "alphanum_fraction": 0.7104557752609253, "avg_line_length": 45.6875, "blob_id": "c7613facf2441cc6524e5d56f00521d59b5170e5", "content_id": "265771b8e834523790f24b6b658775cb941bed7a", "detected_licenses": [ "LicenseRef-scancode-unknown-license-reference", "MIT", "OGL-UK-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 746, "license_type": "permissive", "max_line_length": 100, "num_lines": 16, "path": "/openods_api/config.py", "repo_name": "gitter-badger/open-ods", "src_encoding": "UTF-8", "text": "import os\n\nTARGET_SCHEMA_VERSION = '007'\nDATABASE_URL = os.environ.get('DATABASE_URL', 'postgresql://openods:openods@localhost:5432/openods')\nCACHE_TIMEOUT = int(os.environ.get('CACHE_TIMEOUT', '30'))\nAPP_HOSTNAME = os.environ.get('APP_HOSTNAME', 'localhost:5000/api')\nAPI_USER = os.environ.get('API_USER', 'user')\nAPI_PASS = os.environ.get('API_PASS', 'pass')\nLIVE_DEPLOYMENT = os.environ.get('LIVE_DEPLOYMENT', 'FALSE')\nINSTANCE_NAME = os.environ.get('INSTANCE_NAME', 'Development')\n\nprint(str.format(\"Database URL: {0}\", DATABASE_URL))\nprint(str.format(\"Cache Timeout: {0}\", CACHE_TIMEOUT))\nprint(str.format(\"App Hostname: {0}\", APP_HOSTNAME))\nprint(str.format(\"App User: {0}\", API_USER))\nprint(str.format(\"App Password: {0}\", API_PASS))" }, { "alpha_fraction": 0.5736623406410217, "alphanum_fraction": 0.5785487294197083, "avg_line_length": 28.659420013427734, "blob_id": "04f1fae06396a1874317ff30b56fd0afd8f2dc03", "content_id": "8d59fd3e6b7ec3ba7d9717126be3e4db272f2733", "detected_licenses": [ "LicenseRef-scancode-unknown-license-reference", "MIT", "OGL-UK-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4093, "license_type": "permissive", "max_line_length": 87, "num_lines": 138, "path": "/controller/ODSFileManager.py", "repo_name": "gitter-badger/open-ods", "src_encoding": "UTF-8", "text": "from lxml import etree as xml_tree_parser\nimport lxml\nimport os.path\nimport sys\nimport zipfile\nimport logging\nimport time\n\nlog = logging.getLogger('import_ods_xml')\nlog.setLevel(logging.DEBUG)\nch = logging.StreamHandler()\nch.setLevel(logging.INFO)\nlog.addHandler(ch)\n\n\nclass ODSFileManager(object):\n\n __ods_xml_data = None\n __ods_schema = None\n\n def __init__(self):\n pass\n\n def __return_attribute(self, attribute_name):\n pass\n\n def __retrieve_latest_datafile(self):\n \"\"\"The purpose of this function is to retrieve the latest\n published file from a public published location\n\n Parameters\n ----------\n None\n Returns\n -------\n String: Filename if found\n \"\"\"\n\n # TODO: Retrieve latest file from the local directory until\n # such time it is published and retrievable\n if os.path.isfile('data/HSCOrgRefData_Full_20151130.xml.zip'):\n return 'data/HSCOrgRefData_Full_20151130.xml.zip'\n # if os.path.isfile('data/test.xml.zip'):\n # return 'data/test.xml.zip'\n else:\n raise ValueError('unable to locate the data file')\n\n\n def retrieve_latest_schema(self):\n \"\"\"Get the latest XSD for the ODS XML data and return it as an XMLSchema object\n\n Parameters\n ----------\n None\n\n Returns\n -------\n xml_schema: the ODS XSD as an XMLSchema object\n \"\"\"\n # TODO: Retrieve latest schema file from the local directory until\n # such time it is published and retrievable\n try:\n with open('data/HSCOrgRefData.xsd') as f:\n doc = 
xml_tree_parser.parse(f)\n return xml_tree_parser.XMLSchema(doc)\n\n except Exception as e:\n raise\n\n # The purpose of this function is to determine if we have a zip\n # for or xml file, check it is valid\n # and then populate an etree object for us to parse\n # TODO: validate the xml file against a schema\n def __import_latest_datafile(self, data_filename):\n \"\"\"The purpose of this function is to determine if we have a zip\n for or xml file, check it is valid\n\n Parameters\n ----------\n String: filename of the zip file containing the xml\n Returns\n -------\n None\n \"\"\"\n\n try:\n with zipfile.ZipFile(data_filename) as local_zipfile:\n # get to the name of the actual zip file\n # TODO: this is a horrible impementation\n data_filename = data_filename.replace('.zip', '')\n data_filename = data_filename.split('/', 1)\n data_filename = data_filename[1]\n\n with local_zipfile.open(data_filename) as local_datafile:\n self.__ods_xml_data = xml_tree_parser.parse(local_datafile)\n\n except:\n print('Unexpected error:', sys.exc_info()[0])\n raise\n\n def __validate_xml_against_schema(self):\n try:\n doc = self.__ods_xml_data\n schema = self.__ods_schema\n valid = schema.validate(doc)\n\n if not valid:\n raise Exception(\"XML file is not valid against the schema\")\n\n else:\n return valid\n\n except Exception as e:\n raise\n sys.exit(1)\n\n def get_latest_xml(self):\n \"\"\"The purpose of this function is to check if we have odsxml data\n if we don't it should retrieve the latest version available and\n explode it from zip format into a xmltree object\n\n Parameters\n ----------\n None\n Returns\n -------\n xml_tree_parser: containing the entire xml dataset\n \"\"\"\n\n if self.__ods_schema is None:\n self.__ods_schema = self.retrieve_latest_schema()\n\n if self.__ods_xml_data is None:\n data_filename = self.__retrieve_latest_datafile()\n self.__import_latest_datafile(data_filename)\n self.__validate_xml_against_schema()\n\n return self.__ods_xml_data\n" }, { "alpha_fraction": 0.6182456016540527, "alphanum_fraction": 0.6238596439361572, "avg_line_length": 30.66666603088379, "blob_id": "099d0a21468f3cb084ce22b8d4c56a92ba2b4aee", "content_id": "1a1877150d1db201d5c087a824d2602765383ef8", "detected_licenses": [ "LicenseRef-scancode-unknown-license-reference", "MIT", "OGL-UK-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1425, "license_type": "permissive", "max_line_length": 68, "num_lines": 45, "path": "/models/Addresses.py", "repo_name": "gitter-badger/open-ods", "src_encoding": "UTF-8", "text": "from sqlalchemy import Column, ForeignKey, Integer, String, DateTime\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.orm import relationship\nimport sys\nimport os.path\n\n# setup path so we can import our own models and controllers\nsys.path.append(os.path.join(os.path.dirname(__file__), '..'))\n\nfrom models.base import Base\n\n\nclass Addresses(Base):\n \"\"\"\n Addresses class that keeps track of information about a\n particular Addresses. 
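The `__validate_xml_against_schema` method above is a straightforward use of lxml's `XMLSchema.validate`. A self-contained sketch of the same pattern against a toy schema and document (the XSD and XML below are illustrative, not the real ODS schema):

```python
# Validate a parsed document against an XSD with lxml, as
# ODSFileManager.__validate_xml_against_schema does above.
from io import BytesIO
from lxml import etree

TOY_XSD = b"""<xs:schema xmlns:xs="http://www.w3.org/2001/XMLSchema">
  <xs:element name="Organisation" type="xs:string"/>
</xs:schema>"""

schema = etree.XMLSchema(etree.parse(BytesIO(TOY_XSD)))
doc = etree.parse(BytesIO(b"<Organisation>Test</Organisation>"))

if not schema.validate(doc):
    raise Exception("XML file is not valid against the schema")

print("document is valid against the schema")
```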
This class uses SQLAlchemy as an ORM\n\n \"\"\"\n __tablename__ = 'addresses'\n\n addresses_ref = Column(Integer, primary_key=True)\n organisation_ref = Column(Integer)\n org_odscode = Column(String(10))\n street_address_line1 = Column(String)\n street_address_line2 = Column(String)\n street_address_line3 = Column(String)\n town = Column(String)\n county = Column(String)\n postal_code = Column(String)\n location_id = Column(String)\n\n # Returns a printable version of the objects contents\n def __repr__(self):\n return \"<Addresses(%s %s %s %s %s %s %s %s %s %s\\)>\" \\\n % (\n self.addresses_ref,\n self.organisation_ref,\n self.org_odscode,\n self.street_address_line1,\n self.street_address_line2,\n self.street_address_line3,\n self.town,\n self.county,\n self.postal_code,\n self.location_id)\n" }, { "alpha_fraction": 0.5263158082962036, "alphanum_fraction": 0.5263158082962036, "avg_line_length": 19, "blob_id": "03e7cee2a2a7aef5b484d5b7a708aa8ae5ec96fb", "content_id": "a7b159c224e19671a2bda7ea7fee8b03fcf346c8", "detected_licenses": [ "LicenseRef-scancode-unknown-license-reference", "MIT", "OGL-UK-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 19, "license_type": "permissive", "max_line_length": 19, "num_lines": 1, "path": "/openods_api/database/__init__.py", "repo_name": "gitter-badger/open-ods", "src_encoding": "UTF-8", "text": "__author__ = 'matt'" }, { "alpha_fraction": 0.687635600566864, "alphanum_fraction": 0.6919739842414856, "avg_line_length": 26.117647171020508, "blob_id": "fdbba2d1b3a8f1aff1baf2aecb43dfa20d9a126d", "content_id": "c374d98f93e861bbffb478bf08a79dfca2e8e39c", "detected_licenses": [ "LicenseRef-scancode-unknown-license-reference", "MIT", "OGL-UK-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 461, "license_type": "permissive", "max_line_length": 52, "num_lines": 17, "path": "/test/test_openods_api_config.py", "repo_name": "gitter-badger/open-ods", "src_encoding": "UTF-8", "text": "import unittest\nimport openods_api.config as config\n\n\nclass ConfigTests(unittest.TestCase):\n\n def test_app_hostname_is_not_none(self):\n value = config.APP_HOSTNAME\n self.assertIsNotNone(value)\n\n def test_cache_timeout_is_greater_equal_0(self):\n value = config.CACHE_TIMEOUT\n self.assertGreaterEqual(value, 0)\n\n def test_database_url_is_not_none(self):\n value = config.DATABASE_URL\n self.assertIsNotNone(value)\n" }, { "alpha_fraction": 0.7350835204124451, "alphanum_fraction": 0.7438345551490784, "avg_line_length": 32.97297286987305, "blob_id": "3689f52f73880b36a5c28bf8170548bdafc8c8d0", "content_id": "fd28fb7286606bbc3f4d5487982c9026fac57be5", "detected_licenses": [ "LicenseRef-scancode-unknown-license-reference", "MIT", "OGL-UK-2.0" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1257, "license_type": "permissive", "max_line_length": 119, "num_lines": 37, "path": "/docs/deploying_to_heroku.md", "repo_name": "gitter-badger/open-ods", "src_encoding": "UTF-8", "text": "# Deploying To Heroku [IN PROGRESS]\n\nThe following instructions will get you set up with a Heroku app running open-ods building automatically when you do a \n`git push heroku master` from your local git repository.\n\n## Pre-requisites\n\n* You already have the open-ods repository cloned onto your dev machine\n* You already have open-ods running locally on your dev machine\n* You have the Heroku Toolbelt CLI installed and working on your dev machine\n\n## Creating a Heroku deployment of 
open-ods\n\n1. In your Heroku dashboard, create a new app and give it a meaningful name (e.g. openods-test)\n\n2. Link your local repository to your Heroku app with:\n\n heroku git:remote -a <heroku_app_name>\n\n3. Next step...\n\n\n### Migrating your Postgres database to your Heroku app\n\n1. Export the data from your local PostgreSQL database\n\n ```bash\n pg_dump -Fc --no-acl --no-owner -h localhost -U postgres openods > openods.dump\n ```\n\n2. Upload the dump file to a web server which can be accessed from Heroku (e.g. Amazon S3)\n\n3. Import the data to the remote PostgreSQL database from the URL using the Heroku Toolbelt CLI\n\n ```bash\n heroku pg:backups restore 'https://s3.amazonaws.com/openods-assets/database_backups/openods006.dump' DATABASE_URL\n ```\n" }, { "alpha_fraction": 0.645283043384552, "alphanum_fraction": 0.645283043384552, "avg_line_length": 30.547618865966797, "blob_id": "963134c7737a409222f2524327a0a36fbf4fefa7", "content_id": "819b6358b7a10912a86751035b93dd6ac49a0402", "detected_licenses": [ "LicenseRef-scancode-unknown-license-reference", "MIT", "OGL-UK-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1325, "license_type": "permissive", "max_line_length": 68, "num_lines": 42, "path": "/models/Versions.py", "repo_name": "gitter-badger/open-ods", "src_encoding": "UTF-8", "text": "from sqlalchemy import Column, ForeignKey, Integer, String\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy.orm import relationship\nimport sys\nimport os.path\n\n# setup path so we can import our own models and controllers\nsys.path.append(os.path.join(os.path.dirname(__file__), '..'))\n\nfrom models.base import Base\n\n\nclass Versions(Base):\n \"\"\"Versions class that keeps track of information about a\n particular ods file update. 
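Models like `Versions` and `Addresses` above are plain SQLAlchemy declarative classes, so they can be exercised in isolation. A hedged sketch using an in-memory SQLite database (the model here is a trimmed stand-in redeclared locally, not an import of the repository's `models` package):

```python
# Round-trip a cut-down Versions-style model through an in-memory
# SQLite database to show the declarative pattern used above.
from sqlalchemy import Column, Integer, String, create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker

Base = declarative_base()


class Versions(Base):
    __tablename__ = 'versions'
    version_ref = Column(Integer, primary_key=True)
    file_version = Column(String)
    publication_date = Column(String)


engine = create_engine('sqlite:///:memory:')
Base.metadata.create_all(engine)
session = sessionmaker(bind=engine)()

session.add(Versions(file_version='0.0.0.1', publication_date='2015-10-08'))
session.commit()

print(session.query(Versions).one().file_version)  # 0.0.0.1
```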
This class uses SQLAlchemy as an ORM\n\n \"\"\"\n __tablename__ = 'versions'\n\n version_ref = Column(Integer, primary_key=True)\n import_timestamp = Column(String)\n file_version = Column(String)\n publication_seqno = Column(String)\n publication_date = Column(String)\n publication_type = Column(String)\n\n # Returns a printable version of the objects contents\n def __repr__(self):\n return \"<Version(ref='%s',\\\n import_timestamp='%s',\\\n file_version='%s',\\\n publication_seqno='%s',\\\n publication_date='%s',\\\n publication_type='%s'\\\n )>\" % (\n self.version_ref,\n self.import_timestamp,\n self.file_version,\n self.publication_seqno,\n self.publication_date,\n self.publication_type)\n" }, { "alpha_fraction": 0.5534279942512512, "alphanum_fraction": 0.5561054944992065, "avg_line_length": 32.674861907958984, "blob_id": "26fdc3dc821c6257ef1559c8c6ce515169f2004c", "content_id": "00c7b3719772d9e479cb3f9efeefe451822d2e2a", "detected_licenses": [ "LicenseRef-scancode-unknown-license-reference", "MIT", "OGL-UK-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 12325, "license_type": "permissive", "max_line_length": 108, "num_lines": 366, "path": "/controller/DataBaseSetup.py", "repo_name": "gitter-badger/open-ods", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n\n\nfrom lxml import etree as xml_tree_parser\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.orm import sessionmaker\nimport logging\nimport os.path\nimport sys\nimport datetime\nimport time\nimport zipfile\nimport psycopg2\n\n# setup path so we can import our own models and controllers\nsys.path.append(os.path.join(os.path.dirname(__file__), '..'))\n\n\n# import controllers\nfrom controller.ODSFileManager import ODSFileManager\n\n# import models\nfrom models.Addresses import Addresses\nfrom models.base import Base\nfrom models.CodeSystem import CodeSystem\nfrom models.Organisation import Organisation\nfrom models.Relationship import Relationship\nfrom models.Role import Role\nfrom models.Settings import Settings\nfrom models.Versions import Versions\n\n# Logging Setup\nlog = logging.getLogger('import_ods_xml')\nlog.setLevel(logging.DEBUG)\n\n# We need a filemanager to bring the xml data tree structure in\nFile_manager = ODSFileManager()\n\n# SQLAlchemy objects\n#engine = create_engine('sqlite:///openods.sqlite', echo=False)\nengine = create_engine(\"postgresql://openods:openods@localhost/openods\", isolation_level=\"READ UNCOMMITTED\")\nmetadata = Base.metadata\nSession = sessionmaker(bind=engine)\nsession = Session()\n\n\nclass DataBaseSetup(object):\n\n __ods_xml_data = None\n __version = Versions()\n __code_system_dict = {}\n\n def __init__(self):\n # Creates the tables of all objects derived from our Base object\n metadata.create_all(engine)\n\n def __create_settings(self):\n\n codesystem = CodeSystem()\n\n session.add(codesystem)\n\n def __create_codesystems(self):\n \"\"\"Loops through all the code systems in an organisation and adds them\n to the database\n\n Parameters\n ----------\n None\n Returns\n -------\n None\n \"\"\"\n\n # these are all code systems, we have a DRY concept here as so much of\n # this code is common, it doesn't make sense to do it 3 times, lets\n # loop\n code_system_types = [\n './CodeSystems/CodeSystem[@name=\"OrganisationRelationship\"]',\n './CodeSystems/CodeSystem[@name=\"OrganisationRecordClass\"]',\n './CodeSystems/CodeSystem[@name=\"OrganisationRole\"]']\n\n for code_system_type in code_system_types:\n # we are going to need to append a 
lot of data into this array\n codesystems = {}\n\n relationships = self.__ods_xml_data.find(code_system_type)\n relationship_types = {}\n\n # enumerate the iter as it doesn't provide an index which we need\n for idx, relationship in enumerate(relationships.iter('concept')):\n\n codesystems[idx] = CodeSystem()\n\n relationship_id = relationship.attrib.get('id')\n display_name = relationship.attrib.get('displayName')\n relationship_types[relationship_id] = display_name\n\n code_system_type_name = code_system_type\n code_system_type_name = code_system_type_name.replace(\n './CodeSystems/CodeSystem[@name=\"', '').replace('\"]', '')\n\n codesystems[idx].id = relationship_id\n codesystems[idx].name = code_system_type_name\n codesystems[idx].displayname = display_name\n\n # pop these in a global dictionary, we will use these later in\n # __create_organisations\n self.__code_system_dict[relationship_id] = display_name\n\n # append this instance of code system to the session\n session.add(codesystems[idx])\n\n codesystems = None\n\n def __create_organisations(self):\n \"\"\"Creates the organisations and adds them to the session\n\n Parameters\n ----------\n None\n\n Returns\n -------\n None\n\n \"\"\"\n\n organisations = {}\n\n for idx, organisation in enumerate(self.__ods_xml_data.findall(\n '.Organisations/Organisation')):\n\n organisations[idx] = Organisation()\n\n organisations[idx].odscode = organisation.find(\n 'OrgId').attrib.get('extension')\n\n organisations[idx].name = organisation.find('Name').text\n\n organisations[idx].status = organisation.find(\n 'Status').attrib.get('value')\n\n organisations[idx].record_class = self.__code_system_dict[\n organisation.attrib.get('orgRecordClass')]\n\n organisations[idx].last_changed = organisation.find(\n 'LastChangeDate').attrib.get('value')\n\n for date in organisation.iter('Date'):\n if date.find('Type').attrib.get('value') == 'Legal':\n\n try:\n organisations[idx].legal_start_date = date.find('Start').attrib.get('value')\n except:\n pass\n\n try:\n organisations[idx].legal_end_date = date.find('End').attrib.get('value')\n except:\n pass\n\n elif date.find('Type').attrib.get('value') == 'Operational':\n try:\n organisations[idx].operational_start_date = date.find('Start').attrib.get('value')\n except:\n pass\n\n try:\n organisations[idx].operational_end_date = date.find('End').attrib.get('value')\n except:\n pass\n\n session.add(organisations[idx])\n\n self.__create_roles(organisations[idx], organisation)\n\n self.__create_relationships(organisations[idx], organisation)\n\n organisations = None\n\n def __create_roles(self, organisation, organisation_xml):\n \"\"\"Creates the roles, this should only be called from\n __create_organisations()\n\n Parameters\n ----------\n organisation = xml element of the full organisation\n\n Returns\n -------\n None\n \"\"\"\n roles_xml = organisation_xml.find('Roles')\n roles = {}\n\n for idx, role in enumerate(roles_xml):\n\n roles[idx] = Role()\n\n roles[idx].organisation_ref = organisation.ref\n roles[idx].org_odscode = organisation.odscode\n roles[idx].code = role.attrib.get('id')\n roles[idx].primary_role = bool(role.attrib.get('primaryRole'))\n roles[idx].status = role.find('Status').attrib.get('value')\n roles[idx].unique_id = role.attrib.get('uniqueRoleId')\n\n # Add Operational and Legal start/end dates if present\n for date in role.iter('Date'):\n if date.find('Type').attrib.get('value') == 'Legal':\n try:\n roles[idx].legal_start_date = date.find('Start').attrib.get('value')\n except:\n pass\n try:\n 
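The nested `Date` handling used for organisations (and repeated for roles and relationships below) follows one pattern: read the `Type` value, then pull optional `Start`/`End` attributes. A standalone rerun of that pattern on a toy fragment, using an explicit `None` check instead of the bare `try/except` blocks above:

```python
# Extract Legal/Operational start and end dates from <Date> elements,
# mirroring the parsing loops in DataBaseSetup above. The XML is a toy
# fragment, not real ODS data.
from io import BytesIO
from lxml import etree

XML = b"""<Organisation>
  <Date><Type value="Legal"/><Start value="2012-01-01"/></Date>
  <Date><Type value="Operational"/>
        <Start value="2012-01-01"/><End value="2014-01-01"/></Date>
</Organisation>"""

org = etree.parse(BytesIO(XML)).getroot()
dates = {}
for date in org.iter('Date'):
    kind = date.find('Type').attrib.get('value')
    for edge in ('Start', 'End'):
        element = date.find(edge)
        if element is not None:  # explicit check beats a bare except: pass
            dates[(kind, edge)] = element.attrib.get('value')

print(dates)
```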
roles[idx].legal_end_date = date.find('End').attrib.get('value')\n except:\n pass\n elif date.find('Type').attrib.get('value') == 'Operational':\n try:\n roles[idx].operational_start_date = date.find('Start').attrib.get('value')\n except:\n pass\n try:\n roles[idx].operational_end_date = date.find('End').attrib.get('value')\n except:\n pass\n\n session.add(roles[idx])\n\n roles = None\n\n def __create_relationships(self, organisation, organisation_xml):\n \"\"\"Creates the relationships, this should only be called from\n __create_organisations()\n\n Parameters\n ----------\n organisation = xml element of the full organisation\n\n Returns\n -------\n None\n \"\"\"\n relationships_xml = organisation_xml.find('Rels')\n relationships = {}\n\n for idx, relationship in enumerate(relationships_xml):\n\n relationships[idx] = Relationship()\n\n relationships[idx].organisation_ref = organisation.ref\n relationships[idx].org_odscode = organisation.odscode\n relationships[idx].code = relationship.attrib.get('id')\n relationships[idx].target_odscode = relationship.find(\n 'Target/OrgId').attrib.get('extension')\n relationships[idx].status = relationship.find('Status').attrib.get('value')\n relationships[idx].unique_id = relationship.attrib.get('uniqueRelId')\n\n for date in relationship.iter('Date'):\n if date.find('Type').attrib.get('value') == 'Legal':\n try:\n relationships[idx].legal_start_date = date.find('Start').attrib.get('value')\n except:\n pass\n try:\n relationships[idx].legal_end_date = date.find('End').attrib.get('value')\n except:\n pass\n elif date.find('Type').attrib.get('value') == 'Operational':\n try:\n relationships[idx].operational_start_date = date.find('Start').attrib.get('value')\n except:\n pass\n try:\n relationships[idx].operational_end_date = date.find('End').attrib.get('value')\n except:\n pass\n\n # self.__code_system_dict[]\n\n session.add(relationships[idx])\n\n relationships = None\n\n def __create_addresses(self):\n\n pass\n # address = Addresses()\n\n # address.organisation_ref = 123\n # address.org_odscode = '123test'\n # address.street_address_line1 = '123test'\n # address.street_address_line2 = '123test'\n # address.street_address_line3 = '123test'\n # address.town = '123test'\n # address.county = '123test'\n # address.postal_code = '123test'\n # address.location_id = '123test'\n\n # session.add(address)\n\n def __create_version(self):\n \"\"\"adds all the version information to the versions table\n\n Parameters\n ----------\n None\n Returns\n -------\n None\n \"\"\"\n # TODO: Change to local variable from private class variable\n self.__version.file_version = self.__ods_xml_data.find(\n './Manifest/Version').attrib.get('value')\n self.__version.publication_date = self.__ods_xml_data.find(\n './Manifest/PublicationDate').attrib.get('value')\n self.__version.publication_type = self.__ods_xml_data.find(\n './Manifest/PublicationType').attrib.get('value')\n self.__version.publication_seqno = self.__ods_xml_data.find(\n './Manifest/PublicationSeqNum').attrib.get('value')\n self.__version.import_timestamp = datetime.datetime.now()\n\n session.add(self.__version)\n\n def create_database(self, ods_xml_data):\n \"\"\"creates a sqlite database in the current path with all the data\n\n Parameters\n ----------\n ods_xml_data: xml_tree_parser object required that is valid\n TODO: check validity here\n Returns\n -------\n None\n \"\"\"\n\n self.__ods_xml_data = ods_xml_data\n if self.__ods_xml_data is not None:\n try:\n self.__create_addresses()\n self.__create_version()\n 
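`create_database` above wraps the whole import in a single transaction: commit on success, roll back on any failure, close either way. A minimal sketch of that commit-or-rollback shape against a throwaway in-memory session (the table and values are illustrative, not the import's real work):

```python
# The commit/rollback/close discipline used by create_database above,
# demonstrated against an in-memory SQLite database.
from sqlalchemy import create_engine, text
from sqlalchemy.orm import sessionmaker

session = sessionmaker(bind=create_engine('sqlite:///:memory:'))()

try:
    session.execute(text("CREATE TABLE settings (key TEXT, value TEXT)"))
    session.execute(text("INSERT INTO settings VALUES ('schema_version', '007')"))
    session.commit()
except Exception:
    session.rollback()  # never leave a half-finished import committed
    raise
finally:
    session.close()

print("import committed")
```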
self.__create_codesystems()\n self.__create_organisations()\n\n session.commit()\n\n except Exception as e:\n # If anything fails, let's not commit anything\n session.rollback()\n print(\"Unexpected error:\", sys.exc_info()[0])\n log.error(e)\n raise\n\n finally:\n session.close()\n\nif __name__ == '__main__':\n start_time = time.time()\n log.info('Starting data import...')\n\n ods_xml_data = File_manager.get_latest_xml()\n DataBaseSetup().create_database(ods_xml_data)\n\n log.info('Data Import Time = %s', time.strftime(\n \"%H:%M:%S\", time.gmtime(time.time() - start_time)))\n" }, { "alpha_fraction": 0.6543685793876648, "alphanum_fraction": 0.6672815680503845, "avg_line_length": 26.35321044921875, "blob_id": "1eb90ff9df49a9a555f8bb41b60f71f2a1d0177d", "content_id": "ed3a1a68a9e6294c50fb777872c91c73ff235649", "detected_licenses": [ "LicenseRef-scancode-unknown-license-reference", "MIT", "OGL-UK-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5963, "license_type": "permissive", "max_line_length": 116, "num_lines": 218, "path": "/openods_api/routes.py", "repo_name": "gitter-badger/open-ods", "src_encoding": "UTF-8", "text": "import logging\nimport status\nimport dicttoxml\nfrom flask import jsonify, Response, request, render_template\nfrom flask.ext.autodoc import Autodoc\n\nfrom openods_api import app, config, sample_data\nimport openods_api.cache as ocache\nfrom openods_api.database import db, schema_check\nfrom openods_api.auth import requires_auth\n\nlog = logging.getLogger('openods')\n\nauto = Autodoc(app)\n\nschema_check.check_schema_version()\n\n\n@app.route('/loaderio-65382ad6fe5e607ac92df47b82787e88/')\ndef verify():\n return \"loaderio-65382ad6fe5e607ac92df47b82787e88\"\n\n\n@app.route('/')\ndef landing_page():\n \"\"\"\n\n Returns API documentation as HTML\n \"\"\"\n return render_template('index.html', instance_name=config.INSTANCE_NAME, live_deployment=config.LIVE_DEPLOYMENT)\n\n\n@app.route('/try/')\ndef tryit_page():\n return render_template('tryit.html')\n\n\n@app.route('/documentation/')\ndef documentation():\n \"\"\"\n\n Returns API documentation as HTML\n \"\"\"\n return auto.html()\n\n\n@auto.doc()\n@app.route(\"/api/\", methods=['GET'])\n@ocache.cache.cached(timeout=3600, key_prefix=ocache.generate_cache_key)\ndef get_root():\n root_resource = {\n 'organisations': str.format('http://{0}/organisations/', config.APP_HOSTNAME),\n 'role-types': str.format('http://{0}/role-types/', config.APP_HOSTNAME)\n }\n return jsonify(root_resource)\n\n\n@auto.doc()\n@app.route(\"/api/organisations/\", methods=['GET'])\n@ocache.cache.cached(timeout=config.CACHE_TIMEOUT, key_prefix=ocache.generate_cache_key)\ndef get_organisations():\n\n \"\"\"\n\n Returns a list of ODS organisations\n\n Params:\n - offset=x (Offset start of results [0])\n - limit=y (Limit number of results [1000])\n - recordclass=HSCOrg/HSCSite/both (filter results by recordclass [both])\n - primaryRoleCode=xxxx (filter results to only those with a specific primaryRole)\n - roleCode=xxxx (filter result to only those with a specific role)\n \"\"\"\n\n log.debug(str.format(\"Cache Key: {0}\", ocache.generate_cache_key()))\n offset = request.args.get('offset') if request.args.get('offset') else 0\n limit = request.args.get('limit') if request.args.get('limit') else 1000\n record_class = request.args.get('recordclass') if request.args.get('recordclass') else 'both'\n primary_role_code = request.args.get('primaryRoleCode' if request.args.get('primaryRoleCode') else None)\n role_code = 
request.args.get('roleCode' if request.args.get('roleCode') else None)\n log.debug(offset)\n log.debug(limit)\n log.debug(record_class)\n log.debug(primary_role_code)\n log.debug(role_code)\n data = db.get_org_list(offset, limit, record_class, primary_role_code, role_code)\n\n if data:\n result = {'organisations': data}\n return jsonify(result)\n else:\n return Response(\"404: Not Found\", status.HTTP_404_NOT_FOUND )\n\n\n@auto.doc()\n@app.route(\"/api/organisations/<ods_code>/\", methods=['GET'])\n@ocache.cache.cached(timeout=config.CACHE_TIMEOUT, key_prefix=ocache.generate_cache_key)\ndef get_organisation(ods_code):\n\n \"\"\"\n\n Returns a specific organisation resource\n\n Params:\n - format=xml/json (Return the data in specified format - defaults to json)\n \"\"\"\n\n format_type = request.args.get('format')\n log.debug(format_type)\n\n data = db.get_organisation_by_odscode(ods_code)\n\n if data:\n\n try:\n del data['org_lastchanged']\n\n except Exception as e:\n pass\n\n if format_type == 'xml':\n log.debug(\"Returning xml\")\n result = dicttoxml.dicttoxml(data, attr_type=False, custom_root='organisation')\n # log.debug(result)\n return Response(result, mimetype='text/xml')\n\n elif format_type == 'json':\n log.debug(\"Returning json\")\n result = jsonify(data)\n # log.debug(result)\n return result\n\n else:\n log.debug(\"Returning json\")\n result = jsonify(data)\n # log.debug(result)\n return result\n\n else:\n return \"Not found\", status.HTTP_404_NOT_FOUND\n\n\n@auto.doc()\n@app.route(\"/api/organisations/search/<search_text>/\", methods=['GET'])\n@ocache.cache.cached(timeout=config.CACHE_TIMEOUT, key_prefix=ocache.generate_cache_key)\ndef search_organisations(search_text):\n\n \"\"\"\n\n Returns a list of organisations\n\n Params:\n - offset=x (Offset start of results by x)\n - limit=y (Retrieve y results)\n \"\"\"\n\n log.debug(str.format(\"Cache Key: {0}\", ocache.generate_cache_key()))\n offset = request.args.get('offset') if request.args.get('offset') else 0\n limit = request.args.get('limit') if request.args.get('limit') else 1000\n log.debug(offset)\n log.debug(limit)\n orgs = db.search_organisation(search_text)\n\n if orgs:\n result = {'organisations': orgs}\n return jsonify(result)\n\n else:\n return \"Not found\", status.HTTP_404_NOT_FOUND\n\n\n@auto.doc()\n@app.route(\"/api/role-types/\", methods=['GET'])\n@ocache.cache.cached(timeout=config.CACHE_TIMEOUT, key_prefix=ocache.generate_cache_key)\ndef route_role_types():\n\n \"\"\"\n\n Returns the list of available OrganisationRole types\n \"\"\"\n\n roles_list = db.get_role_types()\n\n result = {\n 'role-types': roles_list\n }\n\n return jsonify(result)\n\n\n\n@auto.doc()\n@app.route(\"/api/role-types/<role_code>/\", methods=['GET'])\n@ocache.cache.cached(timeout=config.CACHE_TIMEOUT, key_prefix=ocache.generate_cache_key)\ndef route_role_type_by_code(role_code):\n\n \"\"\"\n\n Returns the list of available OrganisationRole types\n \"\"\"\n\n result = db.get_role_type_by_id(role_code)\n\n return jsonify(result)\n\n\n@auto.doc()\n@app.route(\"/api/organisations/<ods_code>/endpoints/\", methods=['GET'])\n@ocache.cache.cached(timeout=config.CACHE_TIMEOUT, key_prefix=ocache.generate_cache_key)\ndef organisation_endpoints(ods_code):\n\n \"\"\"\n FAKE ENDPOINT\n\n Returns a list of endpoints for a specific Organisation.\n \"\"\"\n\n return jsonify(sample_data.endpoint_data)\n" }, { "alpha_fraction": 0.3987792432308197, "alphanum_fraction": 0.40183112025260925, "avg_line_length": 31.799999237060547, "blob_id": 
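One detail in `get_organisations` above looks like a bug: the conditional defaults for `primaryRoleCode` and `roleCode` are written inside the `request.args.get(...)` call, so the closing parenthesis sits in the wrong place (compare the `offset`/`limit` lines). As written it happens to behave — when the parameter is absent it ends up calling `args.get(None)`, which returns `None` — but the intent is clearly the form below. A corrected, self-contained version of that parameter-parsing pattern, with a plain dict standing in for `request.args`:

```python
# Corrected conditional-default parsing for the query parameters read in
# get_organisations above; a dict stands in for Flask's request.args.
def parse_org_list_args(args):
    offset = args.get('offset') if args.get('offset') else 0
    limit = args.get('limit') if args.get('limit') else 1000
    record_class = args.get('recordclass') if args.get('recordclass') else 'both'
    # The closing parenthesis belongs after the first get(), not at the end:
    primary_role_code = args.get('primaryRoleCode') if args.get('primaryRoleCode') else None
    role_code = args.get('roleCode') if args.get('roleCode') else None
    return offset, limit, record_class, primary_role_code, role_code


print(parse_org_list_args({}))
print(parse_org_list_args({'roleCode': 'RO177', 'limit': '10'}))
```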
"3119487e2b88bbc27b380e00929a37ec6ee2014c", "content_id": "d7bc9cda8bef96b3bb65c2af067551420066cb73", "detected_licenses": [ "LicenseRef-scancode-unknown-license-reference", "MIT", "OGL-UK-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 983, "license_type": "permissive", "max_line_length": 75, "num_lines": 30, "path": "/openods_api/sample_data.py", "repo_name": "gitter-badger/open-ods", "src_encoding": "UTF-8", "text": "endpoint_data = \\\n {\n 'notes': 'This is currently dummy data',\n 'endpoints': [\n {\n 'usage': 'For general communication with the organisation',\n 'type': 'email',\n 'correspondenceType': 'administrative',\n 'value': 'email.address@nhs.net',\n 'acceptsPid': False,\n 'orderNo': 1\n },\n {\n 'usage': 'For patient-identifiable CDA messages',\n 'type': 'itk',\n 'correspondenceType': 'clinical',\n 'value': 'http://itk.endpoint.nhs.uk/ITK',\n 'acceptsPid': True,\n 'orderNo': 2\n },\n {\n 'usage': 'For patient-identifiable test results',\n 'type': 'dts',\n 'correspondenceType': 'clinical',\n 'value': 'test.results@dts.nhs.uk',\n 'acceptsPid': True,\n 'orderNo': 3\n }\n ]\n }" }, { "alpha_fraction": 0.8024691343307495, "alphanum_fraction": 0.8101851940155029, "avg_line_length": 31.399999618530273, "blob_id": "5bacca48d3e26f0b4703921a2387fd35fd5ae6f2", "content_id": "8ee6b1016291ebacaaa376b406604c70fa4eadfe", "detected_licenses": [ "LicenseRef-scancode-unknown-license-reference", "MIT", "OGL-UK-2.0" ], "is_generated": false, "is_vendor": false, "language": "SQL", "length_bytes": 1296, "license_type": "permissive", "max_line_length": 73, "num_lines": 40, "path": "/sql/006-add_more_metadata.sql", "repo_name": "gitter-badger/open-ods", "src_encoding": "UTF-8", "text": "ALTER TABLE roles RENAME role_start_date TO role_operational_start_date;\nALTER TABLE roles RENAME role_end_date TO role_operational_end_date;\nALTER TABLE roles\n ADD COLUMN role_legal_start_date date;\nALTER TABLE roles\n ADD COLUMN role_legal_end_date date;\n\nALTER TABLE organisations\n ADD COLUMN organisation_legal_start_date date;\nALTER TABLE organisations\n ADD COLUMN organisation_legal_end_date date;\nALTER TABLE organisations\n ADD COLUMN organisation_operational_start_date date;\nALTER TABLE organisations\n ADD COLUMN organisation_operational_end_date date;\n\nALTER TABLE relationships\n ADD COLUMN relationship_legal_start_date date;\nALTER TABLE relationships\n ADD COLUMN relationship_legal_end_date date;\nALTER TABLE relationships\n ADD COLUMN relationship_operational_start_date date;\nALTER TABLE relationships\n ADD COLUMN relationship_operational_end_date date;\nALTER TABLE relationships\n ADD COLUMN relationship_unique_id character varying(10);\nALTER TABLE relationships\n ADD COLUMN relationship_status character varying(10);\n\n\nALTER TABLE relationships DROP COLUMN target_ref;\n\n\nALTER TABLE addresses\n ADD COLUMN streetAddressLine3 text;\nALTER TABLE addresses\n ADD COLUMN \"LocationId\" character varying(12);\n\n\nUPDATE settings SET value = '006' WHERE key = 'schema_version';\n" }, { "alpha_fraction": 0.7931034564971924, "alphanum_fraction": 0.7931034564971924, "avg_line_length": 43, "blob_id": "852079814b63020433eea0dc8362d611bcb6a689", "content_id": "d9f971a80e75af45217a70bc1ddd842ee0b6de44", "detected_licenses": [ "LicenseRef-scancode-unknown-license-reference", "MIT", "OGL-UK-2.0" ], "is_generated": false, "is_vendor": false, "language": "SQL", "length_bytes": 87, "license_type": "permissive", "max_line_length": 43, "num_lines": 2, "path": 
"/sql/001-install_extensions.sql", "repo_name": "gitter-badger/open-ods", "src_encoding": "UTF-8", "text": "CREATE EXTENSION IF NOT EXISTS \"uuid-ossp\";\nCREATE EXTENSION IF NOT EXISTS \"adminpack\";" }, { "alpha_fraction": 0.7425742745399475, "alphanum_fraction": 0.7656765580177307, "avg_line_length": 32.66666793823242, "blob_id": "22694484b1414035f1e1368c46a809fb12564754", "content_id": "843c35d6a4ceb842d55180cc492154568e736b65", "detected_licenses": [ "LicenseRef-scancode-unknown-license-reference", "MIT", "OGL-UK-2.0" ], "is_generated": false, "is_vendor": false, "language": "SQL", "length_bytes": 303, "license_type": "permissive", "max_line_length": 63, "num_lines": 9, "path": "/sql/005-add_role_metadata.sql", "repo_name": "gitter-badger/open-ods", "src_encoding": "UTF-8", "text": "ALTER TABLE roles\n ADD COLUMN role_unique_id character varying(10);\nALTER TABLE roles\n ADD COLUMN role_status character varying(10);\nALTER TABLE roles\n ADD COLUMN role_start_date date;\nALTER TABLE roles\n ADD COLUMN role_end_date date;\nUPDATE settings SET value = '005' WHERE key = 'schema_version';\n" }, { "alpha_fraction": 0.6845637559890747, "alphanum_fraction": 0.6868008971214294, "avg_line_length": 23.83333396911621, "blob_id": "c5a66eaa8c50383a5b8e7b238bf9fbf7d42775e8", "content_id": "448788b15ef42235c72ad4ccf2cbabda95e33f8a", "detected_licenses": [ "LicenseRef-scancode-unknown-license-reference", "MIT", "OGL-UK-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 447, "license_type": "permissive", "max_line_length": 69, "num_lines": 18, "path": "/openods_api/cache.py", "repo_name": "gitter-badger/open-ods", "src_encoding": "UTF-8", "text": "import urllib.parse\nimport logging\nfrom flask import request\nfrom openods_api import app\nfrom flask.ext.cacheify import init_cacheify\n\nlog = logging.getLogger('openods')\n\ncache = init_cacheify(app)\n\n\ndef generate_cache_key():\n args = request.args\n key = request.path + '?' 
+ urllib.parse.urlencode([\n (k, v) for k in sorted(args) for v in sorted(args.getlist(k))\n ])\n log.debug(str.format(\"Cache Key: {0}\", key))\n return key\n" }, { "alpha_fraction": 0.6169354915618896, "alphanum_fraction": 0.625, "avg_line_length": 28.760000228881836, "blob_id": "ee01de7c3d403a449e1f110d648e11dd0b2c4ea5", "content_id": "81b7df3f12a05fb8abf4baa96c4b42a1a3ad2a35", "detected_licenses": [ "LicenseRef-scancode-unknown-license-reference", "MIT", "OGL-UK-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1488, "license_type": "permissive", "max_line_length": 95, "num_lines": 50, "path": "/openods_api/database/schema_check.py", "repo_name": "gitter-badger/open-ods", "src_encoding": "UTF-8", "text": "import psycopg2, psycopg2.extras\nfrom urllib.parse import urlparse as urlparse\nimport logging\nimport openods_api.config as config\nimport sys\n\nlog = logging.getLogger('openods')\n\nurl = urlparse(config.DATABASE_URL)\n\n\ndef check_schema_version():\n try:\n conn = psycopg2.connect(\n database=url.path[1:],\n user=url.username,\n password=url.password,\n host=url.hostname,\n port=url.port\n )\n log.info(\"Connected to database\")\n\n except psycopg2.Error as e:\n log.error(\"Unable to connect to the database\")\n sys.exit(1)\n\n try:\n cur = conn.cursor(cursor_factory=psycopg2.extras.RealDictCursor)\n sql = \"SELECT value from settings WHERE key = 'schema_version';\"\n cur.execute(sql)\n result = cur.fetchone()\n db_schema_version = result['value']\n\n except TypeError as e:\n log.error(\"Unable to read schema version from the database\")\n log.error(\"Exception: %s\" % e)\n sys.exit(1)\n\n except psycopg2.Error as e:\n log.error(\"Error retrieving schema_version from database\")\n raise\n\n if not (config.TARGET_SCHEMA_VERSION == db_schema_version):\n raise RuntimeError(str.format(\"Incorrect database schema version. 
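The cache key builder in `cache.py` above sorts both the parameter names and each parameter's values before URL-encoding, so equivalent query strings collapse to a single cache entry. A standalone rerun of that normalisation (a plain dict-of-lists stands in for Flask's `request.args`):

```python
# Rerun of the generate_cache_key normalisation from cache.py above:
# sorted names and sorted multi-values make parameter order irrelevant.
import urllib.parse


def generate_cache_key(path, args):
    # args: mapping of parameter name -> list of values, standing in
    # for Flask's request.args in this sketch.
    return path + '?' + urllib.parse.urlencode(
        [(k, v) for k in sorted(args) for v in sorted(args[k])])


print(generate_cache_key('/organisations/', {'offset': ['5'], 'limit': ['10']}))
print(generate_cache_key('/organisations/', {'limit': ['10'], 'offset': ['5']}))
# Both lines print: /organisations/?limit=10&offset=5
```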
Wanted {0}, Got {1}\",\n config.TARGET_SCHEMA_VERSION, db_schema_version))\n\n else:\n log.info(str.format(\"Database schema version is {0}\", db_schema_version))\n\n return True\n" }, { "alpha_fraction": 0.37940025329589844, "alphanum_fraction": 0.6362451314926147, "avg_line_length": 90.95999908447266, "blob_id": "203837e346ce4074936c25d7a465d29698409d47", "content_id": "ce5fbd0235d0bd642a8b27fae124342a25986bb1", "detected_licenses": [ "LicenseRef-scancode-unknown-license-reference", "MIT", "OGL-UK-2.0" ], "is_generated": false, "is_vendor": false, "language": "SQL", "length_bytes": 2301, "license_type": "permissive", "max_line_length": 132, "num_lines": 25, "path": "/sql/import_test_data.sql", "repo_name": "gitter-badger/open-ods", "src_encoding": "UTF-8", "text": "INSERT into organisations (org_odscode, org_name, org_status, org_recordclass, org_lastchanged,\n organisation_legal_start_date, organisation_legal_end_date,\n organisation_operational_start_date, organisation_operational_end_date) VALUES\n('TSITE1', 'Test Site 1', 'ACTIVE', 'HSCSite', '2015-05-01', '2012-01-01', '2013-01-01', '2012-01-01','2014-01-01'),\n('TSITE2', 'Test Site 2', 'INACTIVE', 'HSCSite', '2015-05-02', '2012-01-01', '2013-01-01', '2012-01-01','2014-01-01'),\n('TORG1', 'Test Org 1', 'ACTIVE', 'HSCOrg', '2015-06-01', '2012-01-01', '2013-01-01', '2012-01-01','2014-01-01'),\n('TORG2', 'Test Org 2', 'INACTIVE', 'HSCOrg', '2015-06-02', '2012-01-01', '2013-01-01', '2012-01-01','2014-01-01');\n\n\nINSERT into relationships (organisation_ref, org_odscode, target_odscode, relationship_code,\n relationship_legal_start_date, relationship_legal_end_date,\n relationship_operational_start_date, relationship_operational_end_date) VALUES\n('9821509c-ecfd-4d2e-a0a8-d65bb327cb6e', 'TSITE1', 'TORG1', 'RE2', '2012-01-01', '2013-01-01', '2012-01-01','2014-01-01'),\n('aededa1f-740a-468b-9208-fe6404b73bbd', 'TSITE2', 'TORG2', 'RE1', '2012-01-01', '2013-01-01', '2012-01-01','2014-01-01'),\n('9c544acb-8c7c-4f7f-bb0d-9bcb32599d60', 'TORG1', 'TSITE1', 'RE1', '2012-01-01', '2013-01-01', '2012-01-01','2014-01-01'),\n('1e8b85f0-823c-4e39-b2f3-d616cf89aa3d', 'TORG2', 'TSITE2', 'RE1', '2012-01-01', '2013-01-01', '2012-01-01','2014-01-01');\n\n\nINSERT into roles (organisation_ref, org_odscode, role_code, primary_role, role_status,\n role_legal_start_date, role_legal_end_date,\n role_operational_start_date, role_operational_end_date ) VALUES\n('9821509c-ecfd-4d2e-a0a8-d65bb327cb6e', 'TSITE1', 'RO198', TRUE, 'ACTIVE' , '2012-01-01', '2013-01-01', '2012-01-01','2014-01-01'),\n('aededa1f-740a-468b-9208-fe6404b73bbd', 'TSITE2', 'RO108', TRUE, 'ACTIVE' , '2012-01-01', '2013-01-01', '2012-01-01','2014-01-01'),\n('9c544acb-8c7c-4f7f-bb0d-9bcb32599d60', 'TORG1', 'RO177', TRUE, 'ACTIVE' , '2012-01-01', '2013-01-01', '2012-01-01','2014-01-01'),\n('1e8b85f0-823c-4e39-b2f3-d616cf89aa3d', 'TORG2', 'RO57', TRUE, 'ACTIVE' , '2012-01-01', '2013-01-01', '2012-01-01','2014-01-01')\n\n\n" }, { "alpha_fraction": 0.6983606815338135, "alphanum_fraction": 0.7245901823043823, "avg_line_length": 17, "blob_id": "c173d4f358587d369354c266412c2797017e9078", "content_id": "58dbdb4c43d15f685c95a5c78806897204d74ce8", "detected_licenses": [ "LicenseRef-scancode-unknown-license-reference", "MIT", "OGL-UK-2.0" ], "is_generated": false, "is_vendor": false, "language": "SQL", "length_bytes": 305, "license_type": "permissive", "max_line_length": 54, "num_lines": 17, "path": "/sql/004-create_settings_table.sql", "repo_name": "gitter-badger/open-ods", "src_encoding": "UTF-8", "text": 
"-- Table: settings\n\n-- DROP TABLE settings;\n\nCREATE TABLE settings\n(\n key character varying(20) NOT NULL,\n value character varying(200),\n CONSTRAINT settings_pkey PRIMARY KEY (key)\n)\nWITH (\n OIDS=FALSE\n);\nALTER TABLE settings\n OWNER TO openods;\n\nINSERT INTO settings VALUES ('schema_version', '004');" }, { "alpha_fraction": 0.7099697589874268, "alphanum_fraction": 0.7160120606422424, "avg_line_length": 21.066667556762695, "blob_id": "ea9c9283706ff496c2447c0dc10b204d2643678b", "content_id": "2f0a050fe44a5b2d6e442584de0f100cd423d71c", "detected_licenses": [ "LicenseRef-scancode-unknown-license-reference", "MIT", "OGL-UK-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 331, "license_type": "permissive", "max_line_length": 50, "num_lines": 15, "path": "/openods_api/__init__.py", "repo_name": "gitter-badger/open-ods", "src_encoding": "UTF-8", "text": "from flask import Flask\nfrom flask.ext.cors import CORS\nimport logging\n__version__ = '0.3'\n\nlog = logging.getLogger('openods')\nlog.setLevel(logging.DEBUG)\nch = logging.StreamHandler()\nch.setLevel(logging.DEBUG)\nlog.addHandler(ch)\n\napp = Flask(__name__)\nCORS(app, resources={r\"/api/*\": {\"origins\": \"*\"}})\n\nimport openods_api.routes\n" }, { "alpha_fraction": 0.6349413394927979, "alphanum_fraction": 0.6453715562820435, "avg_line_length": 21.58823585510254, "blob_id": "8f7838699f008a6e3585c12c3302045e11ec13be", "content_id": "859734e807fa346f502df21a87d66d9befc00b71", "detected_licenses": [ "LicenseRef-scancode-unknown-license-reference", "MIT", "OGL-UK-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 767, "license_type": "permissive", "max_line_length": 68, "num_lines": 34, "path": "/openods_api/database/connection.py", "repo_name": "gitter-badger/open-ods", "src_encoding": "UTF-8", "text": "import psycopg2, psycopg2.pool, psycopg2.extras\nfrom urllib.parse import urlparse as urlparse\nimport logging\nimport openods_api.config as config\nimport sys\n\nlog = logging.getLogger('openods')\n\n\nurl = urlparse(config.DATABASE_URL)\n\n\ndef get_connection():\n try:\n conn = psycopg2.connect(\n database=url.path[1:],\n user=url.username,\n password=url.password,\n host=url.hostname,\n port=url.port\n )\n log.info(\"Connected to database\")\n\n except psycopg2.Error as e:\n log.warning(\"I am unable to connect to the database\")\n sys.exit(1)\n\n return conn\n\n\ndef get_cursor():\n conn = get_connection()\n cur = conn.cursor(cursor_factory=psycopg2.extras.RealDictCursor)\n return cur" }, { "alpha_fraction": 0.7335923910140991, "alphanum_fraction": 0.7487046718597412, "avg_line_length": 19.864864349365234, "blob_id": "c1e93dfb7ab1fd0b1fcad434d1e35ca3696931d6", "content_id": "8d94bcefc853d1c66c330d5e61630c8d41276e2e", "detected_licenses": [ "LicenseRef-scancode-unknown-license-reference", "MIT", "OGL-UK-2.0" ], "is_generated": false, "is_vendor": false, "language": "SQL", "length_bytes": 2316, "license_type": "permissive", "max_line_length": 69, "num_lines": 111, "path": "/sql/002-create_tables.sql", "repo_name": "gitter-badger/open-ods", "src_encoding": "UTF-8", "text": "-- Table: roles\n\n-- DROP TABLE roles;\n\nCREATE TABLE roles\n(\n role_ref uuid NOT NULL DEFAULT uuid_generate_v4(),\n organisation_ref uuid NOT NULL,\n org_odscode character varying(10),\n role_code character varying(10) NOT NULL,\n CONSTRAINT roles_pk PRIMARY KEY (role_ref)\n)\nWITH (\n OIDS=FALSE\n);\nALTER TABLE roles\n OWNER TO openods;\n\nCREATE INDEX role_code_idx ON 
roles (role_code);\n\n\n\n-- Table: relationships\n\n-- DROP TABLE relationships;\n\nCREATE TABLE relationships\n(\n relationship_ref uuid NOT NULL DEFAULT uuid_generate_v4(),\n organisation_ref uuid NOT NULL,\n target_ref uuid,\n relationship_code character varying(10),\n target_odscode character varying(10),\n org_odscode character varying(10),\n CONSTRAINT relationships_pk PRIMARY KEY (relationship_ref)\n)\nWITH (\n OIDS=FALSE\n);\nALTER TABLE relationships\n OWNER TO openods;\n\n\n\n-- Table: organisations\n\n-- DROP TABLE organisations;\n\nCREATE TABLE organisations\n(\n organisation_ref uuid NOT NULL DEFAULT uuid_generate_v4(),\n org_odscode character varying(10),\n org_name character varying(200),\n org_status character varying(10),\n org_recordclass character varying(10),\n org_lastchanged date,\n CONSTRAINT organisations_pk PRIMARY KEY (organisation_ref)\n)\nWITH (\n OIDS=FALSE\n);\nALTER TABLE organisations\n OWNER TO openods;\n\nCREATE INDEX org_name_idx ON organisations (org_name);\nCREATE INDEX org_odscode_idx ON organisations (org_odscode);\n\n\n\n-- Table: codesystems\n\n-- DROP TABLE codesystems;\n\nCREATE TABLE codesystems\n(\n codesystem_name character varying(50),\n codesystem_ref uuid NOT NULL DEFAULT uuid_generate_v4(),\n codesystem_id character varying(10),\n codesystem_displayname character varying(200),\n CONSTRAINT codesystems_pk PRIMARY KEY (codesystem_ref)\n)\nWITH (\n OIDS=FALSE\n);\nALTER TABLE codesystems\n OWNER TO openods;\n\nCREATE UNIQUE INDEX codesystem_id_idx ON codesystems (codesystem_id);\n\n-- Table: addresses\n\n-- DROP TABLE addresses;\n\nCREATE TABLE addresses\n(\n address_ref uuid NOT NULL DEFAULT uuid_generate_v4(),\n organisation_ref uuid NOT NULL,\n org_odscode character varying(10),\n \"streetAddressLine1\" text,\n \"streetAddressLine2\" text,\n town text,\n county text,\n postal_code text,\n country text,\n CONSTRAINT addresses_pk PRIMARY KEY (address_ref)\n)\nWITH (\n OIDS=FALSE\n);\nALTER TABLE addresses\n OWNER TO openods;\n" }, { "alpha_fraction": 0.5558192729949951, "alphanum_fraction": 0.5592649579048157, "avg_line_length": 33.3684196472168, "blob_id": "7eca0dcc6137e5412b10615296e121b865e6364f", "content_id": "2c48c6aa498000088f5761fadd958cd3004ecab8", "detected_licenses": [ "LicenseRef-scancode-unknown-license-reference", "MIT", "OGL-UK-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 13060, "license_type": "permissive", "max_line_length": 135, "num_lines": 380, "path": "/openods_api/database/db.py", "repo_name": "gitter-badger/open-ods", "src_encoding": "UTF-8", "text": "import psycopg2, psycopg2.pool, psycopg2.extras\nimport logging\n\nimport openods_api.database.connection as connect\nimport openods_api.config as config\n\nlog = logging.getLogger('openods')\n\n\ndef remove_none_values_from_dictionary(dirty_dict):\n clean_dict = dict((k, v) for k, v in dirty_dict.items() if v is not None)\n return clean_dict\n\n\n# TODO: Is this method even needed any more?\ndef get_latest_org():\n conn = connect.get_connection()\n cur = conn.cursor(cursor_factory=psycopg2.extras.RealDictCursor)\n cur.execute(\"SELECT * from organisations order by lastchanged desc limit 1;\")\n rows = cur.fetchall()\n\n for row in rows:\n print(row)\n return row\n\n\ndef get_org_list(offset=0, limit=1000, recordclass='both', primary_role_code=None, role_code=None):\n log.debug(str.format(\"Offset: {0} Limit: {1}, RecordClass: {2}\", offset, limit, recordclass))\n conn = connect.get_connection()\n cur = 
conn.cursor(cursor_factory=psycopg2.extras.RealDictCursor)\n record_class_param = '%' if recordclass == 'both' else recordclass\n\n if role_code:\n sql = \"SELECT odscode, name, record_class from organisations \" \\\n \"WHERE record_class LIKE %s AND odscode in \" \\\n \"(SELECT org_odscode from roles \" \\\n \"WHERE status = 'Active' \" \\\n \"AND code = %s)\" \\\n \"order by name OFFSET %s LIMIT %s;\"\n data = (record_class_param, role_code, offset, limit)\n\n elif primary_role_code:\n sql = \"SELECT odscode, name, record_class from organisations \" \\\n \"WHERE record_class LIKE %s AND odscode in \" \\\n \"(SELECT org_odscode from roles WHERE primary_role = TRUE \" \\\n \"AND status = 'Active' \" \\\n \"AND code = %s)\" \\\n \"order by name OFFSET %s LIMIT %s;\"\n data = (record_class_param, primary_role_code, offset, limit)\n\n else:\n sql = \"SELECT odscode, name, record_class from organisations \" \\\n \"WHERE record_class LIKE %s \" \\\n \"order by name OFFSET %s LIMIT %s;\"\n data = (record_class_param, offset, limit)\n\n log.debug(sql)\n cur.execute(sql, data)\n rows = cur.fetchall()\n log.debug(str.format(\"{0} rows in result\", len(rows)))\n result = []\n\n for row in rows:\n link_self_href = str.format('http://{0}/organisations/{1}', config.APP_HOSTNAME, row['odscode'])\n item = {\n 'odsCode': row['odscode'],\n 'name': row['name'],\n 'recordClass': row['record_class'],\n 'links': [{\n 'rel':'self',\n 'href': link_self_href\n }]\n }\n result.append(item)\n\n return result\n\n\ndef get_organisation_by_odscode(odscode):\n\n # Get a database connection\n conn = connect.get_connection()\n\n # Use the RealDictCursor to return data as a python dictionary type\n cur = conn.cursor(cursor_factory=psycopg2.extras.RealDictCursor)\n\n # Try and retrieve the organisation record for the provided ODS code\n try:\n sql = \"SELECT * from organisations \" \\\n \"WHERE odscode = %s \"\\\n \"limit 1;\"\n data = (odscode,)\n\n cur.execute(sql, data)\n row_org = cur.fetchone()\n log.debug(str.format(\"Organisation Record: {0}\", row_org))\n\n # Raise an exception if the organisation record is not found\n if row_org is None:\n raise Exception(\"Record Not Found\")\n\n row_org = remove_none_values_from_dictionary(row_org)\n\n # Get the organisation_ref from the retrieved record\n organisation_odscode = row_org['odscode']\n\n # Retrieve the roles for the organisation\n sql = \"SELECT r.code, csr.displayname, r.unique_id, r.status, \" \\\n \"r.operational_start_date, r.operational_end_date, r.legal_start_date, \" \\\n \"r.legal_end_date, r.primary_role from roles r \" \\\n \"left join codesystems csr on r.code = csr.id \" \\\n \"WHERE r.org_odscode = %s; \"\n data = (organisation_odscode,)\n\n cur.execute(sql, data)\n rows_roles = cur.fetchall()\n log.debug(rows_roles)\n\n # Retrieve the relationships for the organisation\n sql = \"SELECT rs.code, csr.displayname, rs.target_odscode, rs.status, \" \\\n \"rs.operational_start_date, rs.operational_end_date, rs.legal_start_date, \" \\\n \"rs.legal_end_date, o.name from relationships rs \" \\\n \"left join codesystems csr on rs.code = csr.id \" \\\n \"left join organisations o on rs.target_odscode = o.odscode \" \\\n \"WHERE rs.org_odscode = %s; \"\n data = (organisation_odscode,)\n\n cur.execute(sql, data)\n rows_relationships = cur.fetchall()\n log.debug(rows_relationships)\n\n # Create an object from the returned organisation record to hold the data to be returned\n result_data = row_org\n\n # Add the retrieved relationships data to the object\n 
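Most of the shaping work in `db.py` above is the same two moves repeated: drop `None` values from a row dictionary, then attach HATEOAS-style links built from `config.APP_HOSTNAME`. A self-contained sketch of that shaping on a made-up row (the hostname constant is a stand-in for the config module; `TORG1` matches the test data in `sql/import_test_data.sql`):

```python
# The row-shaping pattern used throughout db.py above: strip Nones,
# then attach a HATEOAS self link. The row below is illustrative only.
APP_HOSTNAME = 'localhost:5000/api'  # stand-in for config.APP_HOSTNAME


def remove_none_values_from_dictionary(dirty_dict):
    return dict((k, v) for k, v in dirty_dict.items() if v is not None)


row = {'odscode': 'TORG1', 'name': 'Test Org 1', 'legal_end_date': None}
row = remove_none_values_from_dictionary(row)
row['links'] = [{
    'rel': 'self',
    'href': str.format('http://{0}/organisations/{1}',
                       APP_HOSTNAME, row['odscode']),
}]

print(row)
```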
relationships = []\n\n for relationship in rows_relationships:\n\n relationship = remove_none_values_from_dictionary(relationship)\n\n link_target_href = str.format('http://{0}/organisations/{1}',\n config.APP_HOSTNAME, relationship['target_odscode'])\n\n relationship['relatedOdsCode'] = relationship.pop('target_odscode')\n relationship['relatedOrganisationName'] = relationship.pop('name')\n relationship['description'] = relationship.pop('displayname')\n relationship['status'] = relationship.pop('status')\n\n try:\n relationship['operationalStartDate'] = relationship.pop('operational_start_date').isoformat()\n except:\n pass\n\n try:\n relationship['legalEndDate'] = relationship.pop('legal_end_date').isoformat()\n except:\n pass\n\n try:\n relationship['legalStartDate'] = relationship.pop('legal_start_date').isoformat()\n except:\n pass\n\n try:\n relationship['operationalEndDate'] = relationship.pop('operational_end_date').isoformat()\n except:\n pass\n\n relationship['links'] = [{\n 'rel': 'target',\n 'href': link_target_href\n }]\n\n relationships.append({'relationship': relationship})\n\n result_data['relationships'] = relationships\n\n # Add the retrieved roles data to the object\n roles = []\n\n for role in rows_roles:\n\n role = remove_none_values_from_dictionary(role)\n\n link_role_href = str.format('http://{0}/role-types/{1}',\n config.APP_HOSTNAME, role['code'])\n\n role['code'] = role.pop('code')\n role['description'] = role.pop('displayname')\n role['primaryRole'] = role.pop('primary_role')\n\n try:\n role['status'] = role.pop('status')\n except:\n pass\n\n try:\n role['uniqueId'] = role.pop('unique_id')\n except:\n pass\n\n try:\n role['operationalStartDate'] = role.pop('operational_start_date').isoformat()\n except Exception as e:\n pass\n\n try:\n role['legalEndDate'] = role.pop('legal_end_date').isoformat()\n except Exception as e:\n pass\n\n try:\n role['legalStartDate'] = role.pop('legal_start_date').isoformat()\n except Exception as e:\n pass\n\n try:\n role['operationalEndDate'] = role.pop('operational_end_date').isoformat()\n except Exception as e:\n pass\n\n role['links'] = [{\n 'rel': 'role-type',\n 'href': link_role_href\n }]\n\n roles.append({'role': role})\n\n # Tidy up the field names etc. 
in the organisation dictionary before it's returned\n result_data['roles'] = roles\n # result_data['name'] = result_data.pop('name')\n result_data['odsCode'] = result_data.pop('odscode')\n result_data['recordClass'] = result_data.pop('record_class')\n # result_data['status'] = result_data.pop('status')\n result_data.pop('ref')\n\n link_self_href = str.format('http://{0}/organisations/{1}', config.APP_HOSTNAME, result_data['odsCode'])\n result_data['links'] = [\n {'rel': 'self',\n 'href': link_self_href\n }]\n\n try:\n result_data['operationalStartDate'] = result_data.pop('operational_start_date').isoformat()\n except:\n pass\n\n try:\n result_data['legalEndDate'] = result_data.pop('legal_end_date').isoformat()\n except:\n pass\n\n try:\n result_data['legalStartDate'] = result_data.pop('legal_start_date').isoformat()\n except:\n pass\n\n try:\n result_data['operationalEndDate'] = result_data.pop('operational_end_date').isoformat()\n except:\n pass\n\n return result_data\n\n except psycopg2.DatabaseError as e:\n log.error(str.format(\"Error {0}\", e))\n\n except Exception as e:\n log.error(e)\n\n\ndef search_organisation(search_text):\n\n # Get a database connection\n conn = connect.get_connection()\n\n # Use the RealDictCursor to return data as a python dictionary type\n cur = conn.cursor(cursor_factory=psycopg2.extras.RealDictCursor)\n\n try:\n search_term = str.format(\"%{0}%\", search_text)\n sql = \"SELECT * from organisations \" \\\n \"WHERE name like UPPER(%s) and status = 'Active';\"\n data = (search_term,)\n\n cur.execute(sql, data)\n rows = cur.fetchall()\n print(rows)\n\n # Raise an exception if the organisation record is not found\n if rows == []:\n raise Exception(\"Record Not Found\")\n\n result = []\n\n for row in rows:\n link_self_href = str.format('http://{0}/organisations/{1}', config.APP_HOSTNAME, row['odscode'])\n item = {\n 'odsCode': row['odscode'],\n 'name': row['name'],\n 'recordClass': row['record_class'],\n 'links': [{\n 'rel': 'self',\n 'href': link_self_href\n }]\n }\n result.append(item)\n\n return result\n\n except Exception as e:\n log.error(e)\n\n\ndef get_role_types():\n conn = connect.get_connection()\n cur = conn.cursor(cursor_factory=psycopg2.extras.RealDictCursor)\n cur.execute(\"SELECT displayname, id from codesystems \"\n \"where name = 'OrganisationRole' \"\\\n \"order by displayname;\")\n rows = cur.fetchall()\n result = []\n\n for row in rows:\n role_code = row['id']\n role_display_name = row['displayname']\n link_self_href = str.format('http://{0}/role-types/{1}', config.APP_HOSTNAME, role_code)\n link_search_primary_role_code_href = str.format('http://{0}/organisations?primaryRoleCode={1}', config.APP_HOSTNAME, role_code)\n link_search_role_code_href = str.format('http://{0}/organisations?roleCode={1}', config.APP_HOSTNAME, role_code)\n result.append({\n 'name': role_display_name,\n 'code': role_code,\n 'links': [{\n 'rel':'self',\n 'href': link_self_href\n }, {\n 'rel':'organisations.searchByPrimaryRoleCode',\n 'href': link_search_primary_role_code_href\n }, {\n 'rel':'organisations.searchByRoleCode',\n 'href': link_search_role_code_href\n }]\n })\n\n return result\n\n\ndef get_role_type_by_id(role_id):\n\n sql = \"SELECT displayname, id from codesystems \" \\\n \"where name = 'OrganisationRole' AND id = %s;\"\n data = (role_id,)\n\n cur = connect.get_cursor()\n cur.execute(sql, data)\n\n returned_row = cur.fetchone()\n\n role_code = returned_row['id']\n role_display_name = returned_row['displayname']\n link_self_href = 
str.format('http://{0}/role-types/{1}', config.APP_HOSTNAME, role_code)\n link_search_primary_role_code_href = str.format('http://{0}/organisations?primaryRoleCode={1}', config.APP_HOSTNAME, role_code)\n link_search_role_code_href = str.format('http://{0}/organisations?roleCode={1}', config.APP_HOSTNAME, role_code)\n result = {\n 'name': role_display_name,\n 'code': role_code,\n 'links': [{\n 'rel':'self',\n 'href': link_self_href\n }, {\n 'rel':'searchOrganisationsWithThisPrimaryRoleType',\n 'href': link_search_primary_role_code_href\n }, {\n 'rel':'searchOrganisationsWithThisRoleType',\n 'href': link_search_role_code_href\n }]\n }\n\n return result\n" }, { "alpha_fraction": 0.7518247961997986, "alphanum_fraction": 0.7591241002082825, "avg_line_length": 12.699999809265137, "blob_id": "dca008574f77f819a3bf2be85ba96329a29ccd30", "content_id": "d206fd94960a6723292679e7b31730633e0203ab", "detected_licenses": [ "LicenseRef-scancode-unknown-license-reference", "MIT", "OGL-UK-2.0" ], "is_generated": false, "is_vendor": false, "language": "Makefile", "length_bytes": 137, "license_type": "permissive", "max_line_length": 33, "num_lines": 10, "path": "/Makefile", "repo_name": "gitter-badger/open-ods", "src_encoding": "UTF-8", "text": ".PHONY: init clean build\n\nbuilddata:\n\t./controller/DataBaseSetup.py\n\ninit:\n\tpip3 install -r requirements.txt\n\nclean:\n\t-rm openods.sqlite\n" } ]
30
zafirr31/sijambumuda-group-3
https://github.com/zafirr31/sijambumuda-group-3
ddb934f570ff20b693d5e463cce95ff042ac744f
ab88033936db8ef7a4b70dab2bde62b69b0a323c
5c7032e442bd91e364775f8aa9a0bb7c8be19947
refs/heads/master
2022-12-15T12:24:45.770634
2019-05-10T07:07:42
2019-05-10T07:07:42
224,801,899
0
0
null
2019-11-29T07:39:48
2019-11-29T07:41:01
2022-11-22T03:37:12
JavaScript
[ { "alpha_fraction": 0.5342902541160583, "alphanum_fraction": 0.5693780183792114, "avg_line_length": 26.2608699798584, "blob_id": "f37ed25c3973cf8315652ba1134f45c758ac50c9", "content_id": "b7a2493c222348a47dc7d66b8a0af34fc751f434", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 627, "license_type": "no_license", "max_line_length": 114, "num_lines": 23, "path": "/About_dan_Testimoni/migrations/0001_initial.py", "repo_name": "zafirr31/sijambumuda-group-3", "src_encoding": "UTF-8", "text": "# Generated by Django 2.2a1 on 2019-04-30 11:52\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='Testimoni',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('Username', models.CharField(max_length=250)),\n ('Pesan', models.CharField(max_length=5000)),\n ('Tanggal_Pesan', models.DateField(auto_now_add=True)),\n ],\n ),\n ]\n" }, { "alpha_fraction": 0.496971070766449, "alphanum_fraction": 0.571236252784729, "avg_line_length": 47.978023529052734, "blob_id": "60db6030930c9ab45ccd16c05e368f152d387d04", "content_id": "c6f5535ea3c13180171bc295acee79f95eff6e32", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 4457, "license_type": "no_license", "max_line_length": 70, "num_lines": 91, "path": "/staticfiles/js/effect_buku.js", "repo_name": "zafirr31/sijambumuda-group-3", "src_encoding": "UTF-8", "text": "$(document).ready(function () {\n let currentbg = $(\".container-fluid\").css(\"background-color\");\n let currenttopborder = $(\".border\").css(\"border-top-color\");\n let currentleftborder = $(\".border\").css(\"border-left-color\");\n let currentrightborder = $(\".border\").css(\"border-right-color\");\n let currentbottomborder = $(\".border\").css(\"border-bottom-color\");\n let currentbutton = $(\".border\").css(\"background-color\");\n let currentswitchbutton = $(\"#switch\").css(\"background-color\");\n let currentnamebutton = $(\"#name\").css(\"background-color\");\n\n let expectbg = \"rgb(255, 108, 99)\";\n let expecttopborder = \"rgb(255, 108, 99)\";\n let expectleftborder = \"rgb(255, 108, 99)\";\n let expectrightborder = \"rgb(255, 108, 99)\";\n let expectbottomborder = \"rgb(255, 108, 99)\";\n let expectbutton = \"rgb(255, 108, 99)\";\n let expectswitchbutton = \"rgb(255, 108, 99)\";\n let expectnamebutton = \"rgb(255, 108, 99)\";\n\n // $(\".loader\")\n // .delay(600)\n // .fadeOut();\n\n $(\"#switch\").mouseenter(function () {\n $(\".container-fluid\").css(\"background-color\", expectbg);\n $(\".border\").css(\"border-top-color\", expecttopborder);\n $(\".border\").css(\"border-left-color\", expectleftborder);\n $(\".border\").css(\"border-right-color\", expectrightborder);\n $(\".border\").css(\"border-bottom-color\", expectbottomborder);\n $(\".btn-primary\").css(\"background-color\", expectbutton);\n $(\"#switch\").css(\"background-color\", expectswitchbutton);\n $(\"#name\").css(\"background-color\", expectnamebutton);\n });\n\n $(\"#switch\").mouseleave(function () {\n $(\".container-fluid\").css(\"background-color\", currentbg);\n $(\".border\").css(\"border-top-color\", currenttopborder);\n $(\".border\").css(\"border-left-color\", currentleftborder);\n $(\".border\").css(\"border-right-color\", currentrightborder);\n $(\".border\").css(\"border-bottom-color\", 
currentbottomborder);\n $(\".btn-primary\").css(\"background-color\", currentbutton);\n $(\"#switch\").css(\"background-color\", currentswitchbutton);\n $(\"#name\").css(\"background-color\", currentnamebutton);\n });\n\n $(\"#switch\").click(function () {\n $(\".container-fluid\").css(\"background-color\", expectbg);\n $(\".border\").css(\"border-top-color\", expecttopborder);\n $(\".border\").css(\"border-left-color\", expectleftborder);\n $(\".border\").css(\"border-right-color\", expectrightborder);\n $(\".border\").css(\"border-bottom-color\", expectbottomborder);\n $(\".btn-primary\").css(\"background-color\", expectbutton);\n $(\"#switch\").css(\"background-color\", expectswitchbutton);\n $(\"#name\").css(\"background-color\", expectnamebutton);\n if (expectbg == \"rgb(255, 108, 99)\") {\n expectbg = \"rgb(108, 99, 255)\";\n expecttopborder = \"rgb(108, 99, 255)\";\n expectleftborder = \"rgb(108, 99, 255)\";\n expectrightborder = \"rgb(108, 99, 255)\";\n expectbottomborder = \"rgb(108, 99, 255)\";\n expectbutton = \"rgb(108, 99, 255)\";\n expectswitchbutton = \"rgb(108, 99, 255)\";\n expectnamebutton = \"rgb(108, 99, 255)\";\n currentbg = \"rgb(255, 108, 99)\";\n currenttopborder = \"rgb(255, 108, 99)\";\n currentleftborder = \"rgb(255, 108, 99)\";\n currentrightborder = \"rgb(255, 108, 99)\";\n currentbottomborder = \"rgb(255, 108, 99)\";\n currentbutton = \"rgb(255, 108, 99)\";\n currentswitchbutton = \"rgb(255, 108, 99)\";\n currentnamebutton = \"rgb(255, 108, 99)\";\n } else {\n currentbg = \"rgb(108, 99, 255)\";\n currenttopborder = \"rgb(108, 99, 255)\";\n currentleftborder = \"rgb(108, 99, 255)\";\n currentrightborder = \"rgb(108, 99, 255)\";\n currentbottomborder = \"rgb(108, 99, 255)\";\n currentbutton = \"rgb(108, 99, 255)\";\n currentswitchbutton = \"rgb(108, 99, 255)\";\n currentnamebutton = \"rgb(108, 99, 255)\";\n expectbg = \"rgb(255, 108, 99)\";\n expecttopborder = \"rgb(255, 108, 99)\";\n expectleftborder = \"rgb(255, 108, 99)\";\n expectrightborder = \"rgb(255, 108, 99)\";\n expectbottomborder = \"rgb(255, 108, 99)\";\n expectbutton = \"rgb(255, 108, 99)\";\n expectswitchbutton = \"rgb(255, 108, 99)\";\n expectnamebutton = \"rgb(255, 108, 99)\";\n }\n });\n});\n" }, { "alpha_fraction": 0.6872881650924683, "alphanum_fraction": 0.6927965879440308, "avg_line_length": 26.126436233520508, "blob_id": "8e6c33cb280a6cbea225c85025b65d4889fb1d8b", "content_id": "8d0ad54630965e1261d460a5232d4a4937485343", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2360, "license_type": "no_license", "max_line_length": 62, "num_lines": 87, "path": "/form_anggota/forms.py", "repo_name": "zafirr31/sijambumuda-group-3", "src_encoding": "UTF-8", "text": "from django import forms\nfrom django.contrib.auth.models import User\nfrom .models import *\nimport re\n\n\nclass RegisterMember(forms.Form):\n\n\tusername = forms.CharField()\n\temail = forms.EmailField()\n\tpassword = forms.CharField(widget=forms.PasswordInput())\n\tre_password = forms.CharField(widget=forms.PasswordInput())\n\n\tdef clean(self):\n\t\t# Cleans all fields\n\n\t\tdef clean_username(self):\n\t\t\t# Checks if username is already in database\n\t\t\tcleaned_data = super(RegisterMember, self).clean()\n\n\t\t\tusername = cleaned_data.get('username')\n\t\t\ttry:\n\t\t\t\tuser = User.objects.get(username=username)\n\t\t\texcept:\n\t\t\t\treturn username\n\t\t\traise forms.ValidationError(\n\t\t\t\t\t'Username already taken!'\n\t\t\t\t)\n\n\t\tdef clean_email(self):\n\t\t\t# 
Checks if email is already in database\n\t\t\tcleaned_data = super(RegisterMember, self).clean()\n\t\n\t\t\temail = cleaned_data.get('email')\n\t\t\ttry:\n\t\t\t\temail_taken = User.objects.get(email=email)\n\t\t\texcept:\n\t\t\t\treturn email\n\t\t\traise forms.ValidationError(\n\t\t\t\t\t'Email already taken!'\n\t\t\t\t\t)\n\t\tdef clean_password(self):\n\t\t\t# Cleans password inputted, synchronously\n\t\t\tcleaned_data = super(RegisterMember, self).clean()\n\n\t\t\tpassword = cleaned_data.get('password')\n\t\t\tre_password = cleaned_data.get('re_password')\n\n\t\t\t# password length is greater than or equal to 8 \n\t\t\tvalidation_1 = (len(password) >= 8)\n\n\t\t\t# Password is equal to re_password \n\t\t\tvalidation_2 = (password == re_password)\n\n\t\t\t# Password contains at least one number\n\t\t\tvalidation_3 = bool(re.search(r'\\d+', password))\n\n\t\t\t# Password contains at least one uppercase character\n\t\t\tvalidation_4 = bool(re.search(r'[A-Z]+', password))\n\n\t\t\t# Password contains at least one lowercase character\n\t\t\tvalidation_5 = bool(re.search(r'[a-z]+', password))\t\t\n\n\t\t\tif not validation_1:\n\t\t\t\traise forms.ValidationError(\n\t\t\t\t\t\t'Password must be at least 8 characters long!'\n\t\t\t\t\t)\n\t\t\tif not validation_2:\n\t\t\t\traise forms.ValidationError(\n\t\t\t\t\t\t'Passwords do not match!'\n\t\t\t\t\t)\n\t\t\tif not validation_3:\n\t\t\t\traise forms.ValidationError(\n\t\t\t\t\t\t'Password must contain at least one number!'\n\t\t\t\t\t)\n\t\t\tif not validation_4:\n\t\t\t\traise forms.ValidationError(\n\t\t\t\t\t\t'Password must contain at least one uppercase character!'\n\t\t\t\t\t)\n\t\t\tif not validation_5:\n\t\t\t\traise forms.ValidationError(\n\t\t\t\t\t\t'Password must contain at least one lowercase character!'\n\t\t\t\t\t)\n\n\t\tclean_username(self)\n\t\tclean_email(self)\n\t\tclean_password(self)\n" }, { "alpha_fraction": 0.5322777032852173, "alphanum_fraction": 0.5627284049987793, "avg_line_length": 30.576923370361328, "blob_id": "047203b03cd0614e514757efece989d6db070e03", "content_id": "4f2c7e4259c5ccd0ba8086f8659eda2c982e603a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 821, "license_type": "no_license", "max_line_length": 114, "num_lines": 26, "path": "/shafiya_pinjam/migrations/0001_initial.py", "repo_name": "zafirr31/sijambumuda-group-3", "src_encoding": "UTF-8", "text": "# Generated by Django 2.1.1 on 2019-03-22 08:08\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n    initial = True\n\n    dependencies = [\n    ]\n\n    operations = [\n        migrations.CreateModel(\n            name='PinjamModel',\n            fields=[\n                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n                ('username', models.CharField(max_length=20)),\n                ('email', models.EmailField(max_length=50)),\n                ('nomor_buku', models.IntegerField()),\n                ('tanggal_pinjam', models.DateField(auto_now_add=True)),\n                ('nama_peminjam', models.CharField(max_length=100)),\n                ('buku_dipinjam', models.CharField(max_length=100)),\n            ],\n        ),\n    ]\n" }, { "alpha_fraction": 0.7125890851020813, "alphanum_fraction": 0.7363420724868774, "avg_line_length": 34.08333206176758, "blob_id": "96e9afe036c8f69585d769bf124e838ece8668af", "content_id": "66871ff3a39c529fd4bbfb038477b408273e5999", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 421, "license_type": "no_license", "max_line_length": 68, "num_lines": 12, "path": 
"/shafiya_pinjam/models.py", "repo_name": "zafirr31/sijambumuda-group-3", "src_encoding": "UTF-8", "text": "from django.db import models\n\n# Create your models here.\n\n\nclass PinjamModel(models.Model):\n username = models.CharField(max_length=20)\n email = models.EmailField(max_length=50, null=True, blank=True)\n nomor_buku = models.IntegerField()\n tanggal_pinjam = models.DateField(auto_now_add=True, blank=True)\n nama_peminjam = models.CharField(max_length=100)\n buku_dipinjam = models.CharField(max_length=100)\n" }, { "alpha_fraction": 0.6810699701309204, "alphanum_fraction": 0.6844993233680725, "avg_line_length": 30.69565200805664, "blob_id": "f699d9dfe28f6acdde21a86f57588d30727370e9", "content_id": "debef02e0cdbe4e32708ed261168cf6587be135e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1458, "license_type": "no_license", "max_line_length": 100, "num_lines": 46, "path": "/About_dan_Testimoni/tests.py", "repo_name": "zafirr31/sijambumuda-group-3", "src_encoding": "UTF-8", "text": "from django.test import TestCase, Client\nfrom django.urls import resolve\nfrom django.http import HttpRequest\nfrom About_dan_Testimoni.apps import AboutDanTestimoniConfig\n\nfrom .models import Testimoni\nfrom .views import *\nimport datetime\n\n\n\nclass AboutDanTestimoni(TestCase):\n def test_AboutDanTestimoni_url_ada(self):\n response = Client().get('/about-dan-testimoni/')\n self.assertEqual(response.status_code, 200)\n\n def test_AboutDanTestimoni_fungsi_renderpage(self):\n found = resolve('/about-dan-testimoni/')\n self.assertEqual(found.func, about)\n\n #def test_AboutDanTestimoni_isi_html(self):\n #request = HttpRequest()\n #response = about(request)\n #html_response = response.content.decode('utf8')\n #self.assertIn('Tentang SijambuMuda', html_response)\n\n def test_AboutDanTestimoni_model(self):\n #time = datetime.datetime.now()\n testMember = Testimoni.objects.create(\n Username = \"somedude\",\n Pesan = \"website ini keren\",\n Tanggal_Pesan = datetime.datetime.now(),\n )\n\n jumlah_testimoni = Testimoni.objects.all().count()\n self.assertEqual(jumlah_testimoni, 1 )\n\n class ConfigTest(TestCase):\n def test_apps(self):\n self.assertEqual(AboutDanTestimoniConfig.name, 'About_dan_Testimoni')\n self.assertEqual(apps.get_app_config('About_dan_Testimoni').name, 'About_dan_Testimoni')\n\n\n\n\n# Create your tests here.\n" }, { "alpha_fraction": 0.47568854689598083, "alphanum_fraction": 0.47568854689598083, "avg_line_length": 30.29787254333496, "blob_id": "3220282692760a228a824fdb11a50bd0fec2e9bf", "content_id": "e2d096246cbd6248d5d20f11908834dd733d4b5f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 2941, "license_type": "no_license", "max_line_length": 150, "num_lines": 94, "path": "/form_anggota/static/js/checkRegisterForms.js", "repo_name": "zafirr31/sijambumuda-group-3", "src_encoding": "UTF-8", "text": "var csrftoken = $('input[name=\"csrfmiddlewaretoken\"]').val();\nvar username = $(\"#id_username\").val()\n\n$(document).ready(function () {\n $(\"#id_username\").addClass(\"form-control\");\n $(\"#id_email\").addClass(\"form-control\");\n $(\"#id_password\").addClass(\"form-control\");\n $(\"#id_re_password\").addClass(\"form-control\");\n});\n\n$(\"#id_username\").change(function () {\n username = $(\"#id_username\").val()\n $.ajaxSetup({\n headers: {\n \"X-CSRFToken\": csrftoken,\n }\n });\n $.ajax({\n url: '/check-username/',\n type: 'POST',\n data: { 'username': 
username },\n success: function (data) {\n if (data == \"True\") {\n $(\"#usernameSuccess\").remove();\n $(\"#id_username\").removeClass(\"has-error\");\n $(\"#id_username\").addClass(\"has-success\").after(`<small id=\\\"usernameSuccess\\\" class=\\\"text-success\\\">Username available</small>`);;\n $(\"#usernameError\").remove();\n }\n else {\n $(\"#usernameError\").remove();\n $(\"#id_username\").removeClass(\"has-success\")\n $(\"#id_username\").addClass(\"has-error\").after(`<small id=\\\"usernameError\\\" class=\\\"text-danger\\\">Username already taken</small>`);\n $(\"#usernameSuccess\").remove();\n }\n }\n });\n});\n\n$(\"#id_email\").change(function () {\n email = $(\"#id_email\").val()\n $.ajaxSetup({\n headers: {\n \"X-CSRFToken\": csrftoken,\n }\n });\n $.ajax({\n url: '/check-email/',\n type: 'POST',\n data: { 'email': email },\n success: function (data) {\n if (data == \"True\") {\n $(\"#id_email\").removeClass(\"has-error\");\n $(\"#emailError\").remove();\n\n }\n else {\n $(\"#emailError\").remove();\n $(\"#id_email\").addClass(\"has-error\").after(`<small id=\\\"emailError\\\" class=\\\"text-danger\\\">Email already taken</small>`);\n }\n }\n });\n});\n\n$(\"#id_password\").change(function () {\n checkPass();\n});\n$(\"#id_re_password\").change(function () {\n checkPass();\n});\n\nfunction checkPass() {\n password = $(\"#id_password\").val()\n re_password = $(\"#id_re_password\").val()\n $.ajaxSetup({\n headers: {\n \"X-CSRFToken\": csrftoken,\n }\n });\n $.ajax({\n url: '/check-password/',\n type: 'POST',\n data: { 'password': password, 're_password': re_password },\n success: function (data) {\n if (data == \"True\") {\n $(\"#id_re_password\").removeClass(\"has-error\");\n $(\"#passwordError\").remove();\n }\n else {\n $(\"#passwordError\").remove();\n $(\"#id_re_password\").addClass(\"has-error\").after(`<small id=\\\"passwordError\\\" class=\\\"text-danger\\\">Passwords do not match!</small>`);\n }\n }\n });\n}" }, { "alpha_fraction": 0.7045454382896423, "alphanum_fraction": 0.7045454382896423, "avg_line_length": 26.625, "blob_id": "64398102ac063d46e6d663d81a76bfc9bc225e14", "content_id": "59b724a76b7080f05111f4162ae018ee7c6b7bec", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 220, "license_type": "no_license", "max_line_length": 53, "num_lines": 8, "path": "/show_buku/views.py", "repo_name": "zafirr31/sijambumuda-group-3", "src_encoding": "UTF-8", "text": "from django.shortcuts import render\nfrom .models import Buku\n\n# Create your views here.\ndef buku(request):\n db = Buku.objects.all()\n content = {'data_base': db}\n return render(request, \"show_buku.html\", content)" }, { "alpha_fraction": 0.5988583564758301, "alphanum_fraction": 0.6071614027023315, "avg_line_length": 35.377357482910156, "blob_id": "945fb22999724affa0e2aebc6200ae4ca4b89a31", "content_id": "117bdde971e1d0a1f91eb6bf5686f3a1a5e1f1ce", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1927, "license_type": "no_license", "max_line_length": 88, "num_lines": 53, "path": "/data_peminjaman/tests.py", "repo_name": "zafirr31/sijambumuda-group-3", "src_encoding": "UTF-8", "text": "from django.test import TestCase\nfrom django.test import Client\nfrom data_peminjaman.apps import DataPeminjamanConfig\nfrom django.apps import apps\nfrom django.contrib.auth.models import User\n\nfrom shafiya_pinjam.models import PinjamModel\nfrom show_buku.models import Buku\nimport datetime\n\nclass 
DataPage(TestCase):\n\n def test_data_page_add(self):\n time = datetime.datetime.now()\n before = PinjamModel.objects.all().count()\n buku = Buku.objects.create(\n nomor_buku = \"4\",\n judul_buku = \"Test Judul\",\n pengarang = \"Test Pengarang\",\n kategori = \"Test Kategori\",\n penerbit = \"Test Penerbit\",\n sinopsis = \"Test Sinopsis\",\n )\n peminjaman = PinjamModel.objects.create(\n nomor_buku = \"4\",\n tanggal_pinjam = time,\n nama_peminjam = \"Test Nama\",\n buku_dipinjam = \"Test Judul\",\n )\n after = PinjamModel.objects.all().count()\n self.assertEqual(before + 1, after)\n\n def test_login(self):\n User.objects.create_user('test123', 'test@test.com', 'TestPassword123', first_name=\"Test\")\n buku = Buku.objects.create(\n nomor_buku = 1,\n judul_buku = \"Test Judul\",\n pengarang = \"Test Pengarang\",\n kategori = \"Test Kategori\",\n penerbit = \"Test Penerbit\",\n sinopsis = \"Test Sinopsis\",\n kuota = 1,\n )\n test_client = Client()\n test_client.login(username=\"test123\", password=\"TestPassword123\")\n test_client.post('/form-pinjam/', {'nomor_buku': 1})\n response = test_client.get('/datapeminjaman/')\n self.assertIn('test123', response.content.decode('utf-8'))\n\nclass ConfigTest(TestCase):\n def test_apps(self):\n self.assertEqual(DataPeminjamanConfig.name, 'data_peminjaman')\n self.assertEqual(apps.get_app_config('data_peminjaman').name, 'data_peminjaman')" }, { "alpha_fraction": 0.523754358291626, "alphanum_fraction": 0.5504055619239807, "avg_line_length": 30.962963104248047, "blob_id": "e36531019c8383c9dcfce885d18c93b5805fcbd0", "content_id": "0c87753431f803ed82e11b070b4f8f8c75eeaa88", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 863, "license_type": "no_license", "max_line_length": 114, "num_lines": 27, "path": "/show_buku/migrations/0001_initial.py", "repo_name": "zafirr31/sijambumuda-group-3", "src_encoding": "UTF-8", "text": "# Generated by Django 2.1.1 on 2019-03-22 08:00\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='Buku',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('nomor_buku', models.IntegerField()),\n ('judul_buku', models.CharField(max_length=50)),\n ('pengarang', models.CharField(max_length=50)),\n ('kategori', models.CharField(max_length=50)),\n ('penerbit', models.CharField(max_length=50)),\n ('cover', models.ImageField(blank=True, upload_to='gallery')),\n ('sinopsis', models.TextField()),\n ],\n ),\n ]\n" }, { "alpha_fraction": 0.6801242232322693, "alphanum_fraction": 0.6894409656524658, "avg_line_length": 25.83333396911621, "blob_id": "145e31a8b60fe68de96f90e32f9ec6637b441d7a", "content_id": "e05f366ad745a3df3ff042ae88eb791602d93284", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 322, "license_type": "no_license", "max_line_length": 62, "num_lines": 12, "path": "/form_anggota/models.py", "repo_name": "zafirr31/sijambumuda-group-3", "src_encoding": "UTF-8", "text": "from django.db import models\nfrom django.contrib.auth.models import User\n\n\nclass Profile(models.Model):\n user = models.OneToOneField(\n User,\n on_delete=models.CASCADE,\n primary_key=True,\n )\n profile_picture = models.ImageField()\n alamat_rumah = models.CharField(null=True, max_length=500)\n" }, { "alpha_fraction": 0.6954314708709717, 
"alphanum_fraction": 0.6954314708709717, "avg_line_length": 20.88888931274414, "blob_id": "47cc139225d2531b6b081c1608311a57d3515710", "content_id": "8b326f186da9b540a28da3048ca1f4c8a64625e7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 197, "license_type": "no_license", "max_line_length": 52, "num_lines": 9, "path": "/history_pinjaman/forms.py", "repo_name": "zafirr31/sijambumuda-group-3", "src_encoding": "UTF-8", "text": "from django import forms\nfrom form_anggota.models import Profile\n\n\nclass ProfileForm(forms.ModelForm):\n\n class Meta:\n model = Profile\n fields = ['profile_picture', 'alamat_rumah']\n" }, { "alpha_fraction": 0.7830188870429993, "alphanum_fraction": 0.7830188870429993, "avg_line_length": 20.200000762939453, "blob_id": "35176b446fc38464875dd623b5e1fd7cd56f866b", "content_id": "cb95dcddd8ff94000d07171bd6d35b3ca72ea2a1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 106, "license_type": "no_license", "max_line_length": 39, "num_lines": 5, "path": "/history_pinjaman/apps.py", "repo_name": "zafirr31/sijambumuda-group-3", "src_encoding": "UTF-8", "text": "from django.apps import AppConfig\n\n\nclass HistoryPinjamanConfig(AppConfig):\n name = 'history_pinjaman'\n" }, { "alpha_fraction": 0.5220588445663452, "alphanum_fraction": 0.5980392098426819, "avg_line_length": 21.66666603088379, "blob_id": "ef1a44703c6a5f9a18866177f30f69e912544a5c", "content_id": "63d961e58c9d65779709ed528a71e3fecbdc2642", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 408, "license_type": "no_license", "max_line_length": 57, "num_lines": 18, "path": "/form_anggota/migrations/0004_auto_20190505_2028.py", "repo_name": "zafirr31/sijambumuda-group-3", "src_encoding": "UTF-8", "text": "# Generated by Django 2.1.1 on 2019-05-05 13:28\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('form_anggota', '0003_auto_20190505_2007'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='profile',\n name='profile_picture',\n field=models.ImageField(upload_to='gallery'),\n ),\n ]\n" }, { "alpha_fraction": 0.5285714268684387, "alphanum_fraction": 0.6023809313774109, "avg_line_length": 22.33333396911621, "blob_id": "ad5a060cad78fd6f302af6a94d8a82091c931404", "content_id": "ac11fb382298092934e4be4af8f509812567d395", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 420, "license_type": "no_license", "max_line_length": 69, "num_lines": 18, "path": "/form_anggota/migrations/0003_auto_20190505_2007.py", "repo_name": "zafirr31/sijambumuda-group-3", "src_encoding": "UTF-8", "text": "# Generated by Django 2.1.1 on 2019-05-05 13:07\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('form_anggota', '0002_auto_20190427_1306'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='profile',\n name='profile_picture',\n field=models.ImageField(blank=True, upload_to='gallery'),\n ),\n ]\n" }, { "alpha_fraction": 0.6517857313156128, "alphanum_fraction": 0.6517857313156128, "avg_line_length": 17.66666603088379, "blob_id": "6bb735331719a98237f16a19b35e9e52bf0d67b4", "content_id": "e28a3d6b9ac6bd24e7b978f71b0b280a40620e7e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 112, "license_type": "no_license", 
"max_line_length": 38, "num_lines": 6, "path": "/shafiya_pinjam/urls.py", "repo_name": "zafirr31/sijambumuda-group-3", "src_encoding": "UTF-8", "text": "from django.conf.urls import url\nfrom .views import *\n\nurlpatterns = [\n url(r'^$', pinjam, name='pinjam'),\n]\n" }, { "alpha_fraction": 0.4426807761192322, "alphanum_fraction": 0.4444444477558136, "avg_line_length": 36.86666488647461, "blob_id": "28e42a5af1fde5231e6fedd278732365238c2580", "content_id": "d8b2ac95c14adfbae77ddc9198fba64a8d61ce1a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 567, "license_type": "no_license", "max_line_length": 117, "num_lines": 15, "path": "/history_pinjaman/static/janscript.js", "repo_name": "zafirr31/sijambumuda-group-3", "src_encoding": "UTF-8", "text": "$(document).ready(function() {\n $.ajax({\n url: \"json/\",\n type: \"GET\",\n dataType: \"json\",\n success: function(data) {\n var innerHTML = \"<p>Jumlah buku yang pernah dipinjam : \" + data.length + \"</p>\";\n for (var i = 0; i < data.length; i++) {\n innerHTML += \"<li>\" + data[i].nomor_buku + \" \" + data[i].judul_buku + \" \" + data[i].pengarang + \" \" +\n data[i].penerbit + \" \" + data[i].tanggal_pinjam + \"</li>\";\n }\n $(\"#history_buku\").html(innerHTML);\n }\n })\n});" }, { "alpha_fraction": 0.684303343296051, "alphanum_fraction": 0.684303343296051, "avg_line_length": 32.35293960571289, "blob_id": "2df3b87accb51001ffc02c5ac159dae9f03e3fd3", "content_id": "42fd3be332411a56ff8f1ba808ddd20181b3ebbc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1134, "license_type": "no_license", "max_line_length": 118, "num_lines": 34, "path": "/About_dan_Testimoni/views.py", "repo_name": "zafirr31/sijambumuda-group-3", "src_encoding": "UTF-8", "text": "from django.shortcuts import render\nimport datetime\nfrom .models import Testimoni\nfrom .forms import IsiTestimoni\nfrom django.http import HttpResponseRedirect, JsonResponse\n\ndef about(request):\n if request.user.is_authenticated:\n formTestimoni = IsiTestimoni()\n return render(request, 'about_dan_testimoni.html', {'form' : formTestimoni})\n return render(request, 'about_dan_testimoni.html')\n\ndef tampilkan(request):\n isiTestimoni = Testimoni.objects.all()\n listTestimoni = []\n\n for data in isiTestimoni :\n listTestimoni.append({\"Username\" : data.Username, \"Pesan\" : data.Pesan, \"Tanggal_Pesan\" : data.Tanggal_Pesan})\n\n return JsonResponse(listTestimoni, safe=False)\n\ndef create(request):\n if request.method == \"POST\":\n username = request.user.username\n pesan = request.POST[\"pesan\"]\n tanggal_pesan = datetime.datetime.now()\n\n newTestimoni = Testimoni.objects.create(\n Username = username,\n Pesan = pesan,\n Tanggal_Pesan = tanggal_pesan\n )\n newTestimoni.save()\n return HttpResponseRedirect('/about-dan-testimoni/')\n" }, { "alpha_fraction": 0.75, "alphanum_fraction": 0.75, "avg_line_length": 17.399999618530273, "blob_id": "7d0a3b2b07c2937e749013301ac09443c566967c", "content_id": "d398c38a46d4cb60803f09077406a8cb512c115c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 92, "license_type": "no_license", "max_line_length": 33, "num_lines": 5, "path": "/show_buku/apps.py", "repo_name": "zafirr31/sijambumuda-group-3", "src_encoding": "UTF-8", "text": "from django.apps import AppConfig\n\n\nclass ShowBukuConfig(AppConfig):\n name = 'show_buku'\n" }, { "alpha_fraction": 0.7962962985038757, 
"alphanum_fraction": 0.800000011920929, "avg_line_length": 44, "blob_id": "b396e2558d4116d89d143b48139a29fb573ba12a", "content_id": "389c41e83030056604c011e06f81ed587b4a7b12", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 270, "license_type": "no_license", "max_line_length": 86, "num_lines": 6, "path": "/data_peminjaman/views.py", "repo_name": "zafirr31/sijambumuda-group-3", "src_encoding": "UTF-8", "text": "from django.shortcuts import render\nfrom shafiya_pinjam.models import PinjamModel\n\ndef datapeminjaman(request):\n\tdatapeminjaman_context = {\"datapeminjaman\": PinjamModel.objects.all().values()[::-1]}\n\treturn render(request, \"datapeminjaman.html\", datapeminjaman_context)\n" }, { "alpha_fraction": 0.5828729271888733, "alphanum_fraction": 0.5923879742622375, "avg_line_length": 33.294734954833984, "blob_id": "7664a4eebd7cd0c37460073e35f1c7564558106a", "content_id": "e4483197f3a7522088e5969a8f4265d03fe05764", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3258, "license_type": "no_license", "max_line_length": 74, "num_lines": 95, "path": "/shafiya_pinjam/tests.py", "repo_name": "zafirr31/sijambumuda-group-3", "src_encoding": "UTF-8", "text": "from django.test import TestCase\nfrom django.test import Client\nfrom django.urls import resolve\nfrom django.http import HttpRequest\nfrom shafiya_pinjam.apps import ShafiyaPinjamConfig\nfrom django.apps import apps\nfrom django.contrib.auth.models import User\n\nfrom .views import *\nfrom .models import PinjamModel\nfrom .forms import PinjamForm\nimport datetime\n\n\nclass SampleTest(TestCase):\n\n def test_form_pinjam_using_index_func(self):\n found = resolve('/form-pinjam/')\n self.assertEqual(found.func, pinjam)\n\n def test_pinjam_models_created(self):\n time = datetime.datetime.now()\n dummy_pinjam = PinjamModel.objects.create(\n username=\"shafiya123\",\n email=\"shafiya123@gmail.com\",\n nomor_buku=\"1\",\n tanggal_pinjam=time,\n )\n total_pinjam = PinjamModel.objects.all().count()\n self.assertEqual(total_pinjam, 1)\n\n def test_form_nobuku_validated(self):\n form = PinjamForm(data={'nomor_buku': ''})\n self.assertFalse(form.is_valid())\n self.assertEqual(\n form.errors['nomor_buku'],\n ['This field is required.']\n )\n\n def test_buku_no_kuota(self):\n User.objects.create_user(\n 'test123', 'test@test.com', 'TestPassword123', first_name=\"Test\")\n buku = Buku.objects.create(\n nomor_buku=1,\n judul_buku=\"Test Judul\",\n pengarang=\"Test Pengarang\",\n kategori=\"Test Kategori\",\n penerbit=\"Test Penerbit\",\n sinopsis=\"Test Sinopsis\",\n kuota=0,\n )\n test_client = Client()\n test_client.login(username=\"test123\", password=\"TestPassword123\")\n test_client.post('/form-pinjam/', {'nomor_buku': 1})\n response = test_client.get('/datapeminjaman/')\n self.assertNotIn('Test Judul', response.content.decode('utf-8'))\n\n def test_not_logged_in(self):\n User.objects.create_user(\n 'test123', 'test@test.com', 'TestPassword123', first_name=\"Test\")\n buku = Buku.objects.create(\n nomor_buku=1,\n judul_buku=\"Test Judul\",\n pengarang=\"Test Pengarang\",\n kategori=\"Test Kategori\",\n penerbit=\"Test Penerbit\",\n sinopsis=\"Test Sinopsis\",\n kuota=0,\n )\n test_client = Client()\n response = test_client.post('/form-pinjam/', {'nomor_buku': 1})\n\n def test_with_get(self):\n User.objects.create_user(\n 'test123', 'test@test.com', 'TestPassword123', first_name=\"Test\")\n buku = Buku.objects.create(\n 
nomor_buku=1,\n judul_buku=\"Test Judul\",\n pengarang=\"Test Pengarang\",\n kategori=\"Test Kategori\",\n penerbit=\"Test Penerbit\",\n sinopsis=\"Test Sinopsis\",\n kuota=0,\n )\n test_client = Client()\n test_client.login(username=\"test123\", password=\"TestPassword123\")\n response = test_client.get('/form-pinjam/', {'nomor_buku': 1})\n self.assertIn(\"Peminjaman Buku\", response.content.decode('utf-8'))\n\n\nclass ConfigTest(TestCase):\n def test_apps(self):\n self.assertEqual(ShafiyaPinjamConfig.name, 'shafiya_pinjam')\n self.assertEqual(apps.get_app_config(\n 'shafiya_pinjam').name, 'shafiya_pinjam')\n" }, { "alpha_fraction": 0.6666666865348816, "alphanum_fraction": 0.6666666865348816, "avg_line_length": 16.5, "blob_id": "a59859e88b3f5562842eeb2d92272d603f231e9a", "content_id": "40abd578a854acf533b3eeab0319e0ce10a3d0db", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 105, "license_type": "no_license", "max_line_length": 32, "num_lines": 6, "path": "/show_buku/urls.py", "repo_name": "zafirr31/sijambumuda-group-3", "src_encoding": "UTF-8", "text": "from .views import buku\nfrom django.urls import path\n\nurlpatterns = [\n path('', buku, name='buku'),\n]\n" }, { "alpha_fraction": 0.6178861856460571, "alphanum_fraction": 0.6298102736473083, "avg_line_length": 35.91999816894531, "blob_id": "7a173cdc6b0f376477b0cc8631c471d1e550675a", "content_id": "5ccef71b89485ae9da9462b307d448d0176124ac", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1845, "license_type": "no_license", "max_line_length": 87, "num_lines": 50, "path": "/history_pinjaman/tests.py", "repo_name": "zafirr31/sijambumuda-group-3", "src_encoding": "UTF-8", "text": "from django.test import TestCase\nfrom django.test import Client\nfrom django.urls import resolve\nfrom django.http import HttpRequest\nfrom show_buku.models import Buku\nfrom .views import show_history, history_json, profile\nfrom django.contrib.auth.models import User\n\n\nclass HistoryPinjamTest(TestCase):\n\n def test_history_url_no_login(self):\n response = Client().get('/history/')\n self.assertEqual(response.status_code, 302)\n\n def test_history_fungsi_show_history(self):\n found = resolve('/history/')\n self.assertEqual(found.func, show_history)\n\n def test_history_json_url_no_login(self):\n response = Client().get('/history/json/')\n self.assertEqual(response.status_code, 302)\n\n def test_history_json_fungsi_show_history(self):\n found = resolve('/history/json/')\n self.assertEqual(found.func, history_json)\n\n def test_logged_in(self):\n User.objects.create_user('test123', 'test@test.com', 'TestPassword123', first_name=\"Test\")\n test_client = Client()\n test_client.login(username=\"test123\", password=\"TestPassword123\")\n test_client.get('/history/')\n buku = Buku.objects.create(\n nomor_buku = 1,\n judul_buku = \"Test Judul\",\n pengarang = \"Test Pengarang\",\n kategori = \"Test Kategori\",\n penerbit = \"Test Penerbit\",\n sinopsis = \"Test Sinopsis\",\n kuota = 1,\n )\n test_client.post('/form-pinjam/', {'nomor_buku': 1})\n test_client.get('/history/json/')\n\n def test_logged_in2(self):\n User.objects.create_user('test123', 'test@test.com', 'TestPassword123', first_name=\"Test\")\n test_client = Client()\n test_client.login(username=\"test123\", password=\"TestPassword123\")\n test_client.get('/history/profile/')\n test_client.post('/history/profile/', {})" }, { "alpha_fraction": 0.6500290036201477, "alphanum_fraction": 
0.6540917158126831, "avg_line_length": 35.680850982666016, "blob_id": "1c63442c59014793363b8621a55a4cbc2a47a10d", "content_id": "9ea9f78fc0a1928044fefb48c4b4783cb97d5f1a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1723, "license_type": "no_license", "max_line_length": 232, "num_lines": 47, "path": "/show_buku/tests.py", "repo_name": "zafirr31/sijambumuda-group-3", "src_encoding": "UTF-8", "text": "from django.test import TestCase\nfrom django.test import Client\nfrom django.urls import resolve\nfrom django.http import HttpRequest\nfrom show_buku.apps import ShowBukuConfig\nfrom django.apps import apps\n\nfrom .views import buku\nfrom .models import Buku\n\nclass LandingPage(TestCase):\n\n def test_show_buku_url_ada(self):\n response = Client().get('/buku/')\n self.assertEqual(response.status_code, 200)\n\n def test_show_buku_fungsi_show_buku(self):\n found = resolve('/buku/')\n self.assertEqual(found.func, buku)\n\n def test_show_buku_isi_html(self):\n request = HttpRequest()\n response = buku(request)\n html_response = response.content.decode('utf8')\n self.assertIn('Daftar Buku', html_response)\n\n def test_book_models_created(self):\n dummy_book = Buku.objects.create(\n nomor_buku = 1,\n judul_buku = \"Sapiens\",\n pengarang = \"Yuval Noah Harari\",\n kategori = \"Ilmiah\",\n penerbit = \"Gramed\",\n cover = \"image.jpg\",\n sinopsis = \"Sinopsis : Di Sapiens, Dr Yuval Noah Harari mencakup seluruh sejarah manusia, dari manusia pertama yang berjalan di bumi hingga terobosan radikal - dan terkadang menghancurkan - Revolusi Kognitif, Pertanian, dan Ilmiah\"\n )\n total_buku = Buku.objects.all().count()\n self.assertEqual(total_buku, 1)\n request = HttpRequest()\n response = buku(request)\n html_response = response.content.decode('utf8')\n self.assertIn('Sapiens', html_response)\n\nclass ConfigTest(TestCase):\n def test_apps(self):\n self.assertEqual(ShowBukuConfig.name, 'show_buku')\n self.assertEqual(apps.get_app_config('show_buku').name, 'show_buku')" }, { "alpha_fraction": 0.6410044431686401, "alphanum_fraction": 0.6470750570297241, "avg_line_length": 55.640625, "blob_id": "68148a1bcf48f64ef7afdbbbba090fb5e91fa9d7", "content_id": "9049011c0b6d6c1880a6dc2b3fe8a4fffd35cf50", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3624, "license_type": "no_license", "max_line_length": 144, "num_lines": 64, "path": "/form_anggota/tests.py", "repo_name": "zafirr31/sijambumuda-group-3", "src_encoding": "UTF-8", "text": "from django.test import TestCase\nfrom django.test import Client\nfrom django.urls import resolve\nfrom django.http import HttpRequest\nfrom form_anggota.apps import FormAnggotaConfig\nfrom django.apps import apps\n\nfrom .models import *\nfrom .views import *\nfrom datetime import datetime\n\nclass FormAnggota(TestCase):\n def test_form_member_url_ada(self):\n response = Client().get('/register/')\n self.assertEqual(response.status_code, 200)\n\n def test_username_already_exists(self):\n Client().post(\"/register/\", {\"username\": \"test\", \"email\": \"test@test.com\", \"password\": \"abcde1ABC\", \"re_password\": \"abcde1ABC\"})\n response = Client().post(\"/register/\", {\"username\": \"test\", \"email\": \"test@test.com\", \"password\": \"abcde1ABC\", \"re_password\": \"abcde1ABC\"})\n self.assertIn(\"Username already taken!\", response.content.decode('utf-8'))\n\n def test_email_already_exists(self):\n Client().post(\"/register/\", {\"username\": \"test\", 
\"email\": \"test@test.com\", \"password\": \"abcde1ABC\", \"re_password\": \"abcde1ABC\"})\n response = Client().post(\"/register/\", {\"username\": \"testt\", \"email\": \"test@test.com\", \"password\": \"abcde1ABC\", \"re_password\": \"abcde1ABC\"})\n self.assertIn(\"Email already taken!\", response.content.decode('utf-8'))\n\n def test_password_invalid_length(self):\n response = Client().post(\"/register/\", {\"username\": \"test\", \"email\": \"test@test.com\", \"password\": \"abcde\", \"re_password\": \"abcde\"})\n self.assertIn(\"Password must be atleast 8 characters long!\", response.content.decode('utf-8'))\n\n def test_password_doesnt_match(self):\n response = Client().post(\"/register/\", {\"username\": \"test\", \"email\": \"test@test.com\", \"password\": \"abcdefgh\", \"re_password\": \"abcdefgg\"})\n self.assertIn(\"Passwords do not match!\", response.content.decode('utf-8'))\n\n def test_password_doesnt_contain_number(self):\n response = Client().post(\"/register/\", {\"username\": \"test\", \"email\": \"test@test.com\", \"password\": \"abcdefgH\", \"re_password\": \"abcdefgH\"})\n self.assertIn(\"Password must contain atleast one number!\", response.content.decode('utf-8'))\n\n def test_password_doesnt_contain_uppercase(self):\n response = Client().post(\"/register/\", {\"username\": \"test\", \"email\": \"test@test.com\", \"password\": \"abcdefg1\", \"re_password\": \"abcdefg1\"})\n self.assertIn(\"Password must contain atleast one uppercase character!\", response.content.decode('utf-8'))\n\n def test_password_doesnt_contain_lowercase(self):\n response = Client().post(\"/register/\", {\"username\": \"test\", \"email\": \"test@test.com\", \"password\": \"ABCDEFG1\", \"re_password\": \"ABCDEFG1\"})\n self.assertIn(\"Password must contain atleast one lowercase character!\", response.content.decode('utf-8'))\n\n def test_create_user_and_see_if_user_is_made(self):\n before = jumlahMember = User.objects.all().count()\n Client().post(\"/register/\", {\"username\": \"test\", \"email\": \"test@test.com\", \"password\": \"abcde1ABC\", \"re_password\": \"abcde1ABC\"})\n after = jumlahMember = User.objects.all().count()\n self.assertEqual(before + 1, after)\n\n def test_login(self):\n User.objects.create_user('test123', 'test@test.com', 'TestPassword123', first_name=\"Test\")\n test_client = Client()\n test_client.login(username=\"test123\", password=\"TestPassword123\")\n response = test_client.get('/')\n self.assertIn(\"test123\", response.content.decode('utf-8'))\n\n \nclass ConfigTest(TestCase):\n def test_apps(self):\n self.assertEqual(FormAnggotaConfig.name, 'form_anggota')\n self.assertEqual(apps.get_app_config('form_anggota').name, 'form_anggota')" }, { "alpha_fraction": 0.7788461446762085, "alphanum_fraction": 0.7788461446762085, "avg_line_length": 19.799999237060547, "blob_id": "da3d0a82cc1797e26c8596fc49578634242575d0", "content_id": "f7ada17fbaab8e5e12bfb1c7c2ed2803cd07186c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 104, "license_type": "no_license", "max_line_length": 38, "num_lines": 5, "path": "/data_peminjaman/apps.py", "repo_name": "zafirr31/sijambumuda-group-3", "src_encoding": "UTF-8", "text": "from django.apps import AppConfig\n\n\nclass DataPeminjamanConfig(AppConfig):\n name = 'data_peminjaman'\n" }, { "alpha_fraction": 0.5694143176078796, "alphanum_fraction": 0.5710412263870239, "avg_line_length": 41.88372039794922, "blob_id": "b932b25b47a39e7bdc1d0cbdd246e94fa95f0e8e", "content_id": 
"199ef74e80734f3e5951f755dba882200e13e16d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1844, "license_type": "no_license", "max_line_length": 129, "num_lines": 43, "path": "/shafiya_pinjam/views.py", "repo_name": "zafirr31/sijambumuda-group-3", "src_encoding": "UTF-8", "text": "from django.shortcuts import render\nfrom .forms import PinjamForm\nfrom .models import PinjamModel\nfrom show_buku.models import Buku\nfrom django.http import HttpResponseRedirect\nfrom django.contrib.auth.decorators import login_required\nimport datetime\nfrom django.contrib import messages\nfrom django.db.models import F\n\n# Create your views here.\n\n\ndef pinjam(request):\n if request.user.is_authenticated:\n if request.method == 'POST':\n pinjam_form = PinjamForm(request.POST)\n if pinjam_form.is_valid():\n username = request.user.username\n nomor_buku = int(request.POST.get('nomor_buku'))\n tanggal_pinjam = datetime.datetime.now()\n buku = Buku.objects.get(nomor_buku=nomor_buku)\n if buku.kuota > 0:\n buku.kuota = F('kuota') - 1\n buku.save()\n judul_buku = Buku.objects.filter(nomor_buku=nomor_buku).values()[\n 0]['judul_buku']\n pinjam_model = PinjamModel.objects.create(username=username, buku_dipinjam=judul_buku, nomor_buku=nomor_buku,\n tanggal_pinjam=tanggal_pinjam)\n pinjam_model.save()\n messages.success(\n request, \"Terima kasih!\\n Peminjaman Anda akan segera diproses.\")\n return HttpResponseRedirect('/form-pinjam')\n else:\n messages.info(\n request, \"Maaf, buku habis\")\n return HttpResponseRedirect('/form-pinjam')\n else:\n pinjam_form = PinjamForm()\n else:\n alert = None\n return HttpResponseRedirect('/login/')\n return render(request, 'page/form-pinjam.html', {'form': pinjam_form})\n" }, { "alpha_fraction": 0.5291262269020081, "alphanum_fraction": 0.5631067752838135, "avg_line_length": 25.869565963745117, "blob_id": "43a09a4456fb91b4a3824cfef8acf3123524f853", "content_id": "cbaf3af3731a6080b3f8f2ebd955f43dbd6cb72f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 618, "license_type": "no_license", "max_line_length": 114, "num_lines": 23, "path": "/landingpage/migrations/0001_initial.py", "repo_name": "zafirr31/sijambumuda-group-3", "src_encoding": "UTF-8", "text": "# Generated by Django 2.1.1 on 2019-03-20 14:06\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='Peminjaman',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('book_title', models.CharField(max_length=100)),\n ('borrower_name', models.CharField(max_length=100)),\n ('borrow_time', models.DateField()),\n ],\n ),\n ]\n" }, { "alpha_fraction": 0.4664310812950134, "alphanum_fraction": 0.6325088143348694, "avg_line_length": 19.214284896850586, "blob_id": "7c88a2ece6eab541fdd809ec90ece6b5fae48c45", "content_id": "c6d96755818d534831bd5ca290a4339271d470ab", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 283, "license_type": "no_license", "max_line_length": 52, "num_lines": 14, "path": "/form_anggota/migrations/0005_merge_20190506_1911.py", "repo_name": "zafirr31/sijambumuda-group-3", "src_encoding": "UTF-8", "text": "# Generated by Django 2.1.1 on 2019-05-06 12:11\n\nfrom django.db import migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n 
('form_anggota', '0003_auto_20190502_1052'),\n ('form_anggota', '0004_auto_20190505_2028'),\n ]\n\n operations = [\n ]\n" }, { "alpha_fraction": 0.6581145524978638, "alphanum_fraction": 0.6581145524978638, "avg_line_length": 31.86274528503418, "blob_id": "b5cfb0f96fead26b8b87f77e0f4658c76ddced7d", "content_id": "0428c9c5e3ca7984f31dd08ff7535e793adbbacf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1676, "license_type": "no_license", "max_line_length": 90, "num_lines": 51, "path": "/form_anggota/views.py", "repo_name": "zafirr31/sijambumuda-group-3", "src_encoding": "UTF-8", "text": "from django.shortcuts import render\nfrom .forms import *\nfrom .models import *\nfrom django import forms\nfrom django.contrib.auth.models import User\nfrom django.http import HttpResponseRedirect, JsonResponse, HttpResponse\nimport re\nimport datetime\n\n\ndef register_member(request):\n if request.method == 'POST':\n register_form = RegisterMember(request.POST)\n if register_form.is_valid():\n user = User.objects.create_user(\n username=register_form.cleaned_data['username'],\n email=register_form.cleaned_data['email'],\n password=register_form.cleaned_data['password'],\n )\n return HttpResponseRedirect('/login/')\n else:\n register_form = RegisterMember()\n return render(request, 'registration/register.html', {'register_form': register_form})\n\n\ndef check_username(request):\n username = request.POST.get('username')\n username = re.compile(f\"^{username}$\")\n all_usernames = User.objects.all().values('username')\n not_taken = True\n for i in all_usernames:\n if re.search(username, i['username']):\n not_taken = False\n return HttpResponse(str(not_taken))\n\n\ndef check_email(request):\n email = request.POST.get('email')\n email = re.compile(f\"^{email}$\")\n all_emails = User.objects.all().values('email')\n not_taken = True\n for i in all_emails:\n if re.search(email, i['email']):\n not_taken = False\n return HttpResponse(str(not_taken))\n\n\ndef check_password(request):\n password = request.POST.get('password')\n re_password = request.POST.get('re_password')\n return HttpResponse(str(password == re_password))\n" }, { "alpha_fraction": 0.795918345451355, "alphanum_fraction": 0.795918345451355, "avg_line_length": 18.799999237060547, "blob_id": "f62f9779a0f86e7aff5898661bcf5e5f47fa3914", "content_id": "afbac293cfc72a9e3c35e831089ed0f1df57d1e2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 98, "license_type": "no_license", "max_line_length": 40, "num_lines": 5, "path": "/history_pinjaman/models.py", "repo_name": "zafirr31/sijambumuda-group-3", "src_encoding": "UTF-8", "text": "from django.db import models\n\n# Create your models here.\n\n# didefinisikan menunggu fitur yang lain" }, { "alpha_fraction": 0.5136612057685852, "alphanum_fraction": 0.6994535326957703, "avg_line_length": 17.33333396911621, "blob_id": "ed451dc9948deca474ec0cd516027ec2ad4ec25f", "content_id": "c0a6e504e51e0bcef0d71b50513848e56590b7a4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 549, "license_type": "no_license", "max_line_length": 29, "num_lines": 30, "path": "/requirements.txt", "repo_name": "zafirr31/sijambumuda-group-3", "src_encoding": "UTF-8", "text": 
"astroid==2.0.4\ncertifi==2018.8.24\nchardet==3.0.4\ncolorama==0.3.9\ncoverage==4.4.1\ndj-database-url==0.4.2\nDjango==2.1.1\ndjango-cors-middleware==1.3.1\ndjango-environ==0.4.4\ndjango-oauth-toolkit==1.2.0\ngunicorn==19.7.1\nidna==2.6\nisort==4.2.15\nlazy-object-proxy==1.3.1\nmccabe==0.6.1\noauthlib==3.0.1\nPillow==5.4.1\npylint==1.7.2\npython-social-auth==0.3.6\npytz==2017.2\nrequests==2.18.4\nselenium==3.5.0\nsix==1.10.0\nsocial-auth-app-django==3.1.0\nsocial-auth-core==3.1.0\ntyped-ast==1.1.0\nurllib3==1.22\nwhitenoise==3.3.0\nwrapt==1.10.11\npython-social-auth==0.3.6" }, { "alpha_fraction": 0.682539701461792, "alphanum_fraction": 0.682539701461792, "avg_line_length": 30.5, "blob_id": "bf6ae5734f0eb4191029f20487234acae9a33517", "content_id": "8d226d4853806d58e6b66de816d53313ad18e230", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 252, "license_type": "no_license", "max_line_length": 54, "num_lines": 8, "path": "/history_pinjaman/urls.py", "repo_name": "zafirr31/sijambumuda-group-3", "src_encoding": "UTF-8", "text": "from .views import show_history, history_json, profile\nfrom django.urls import path\n\nurlpatterns = [\n path('', show_history, name='show_history'),\n path('json/', history_json, name='history_json'),\n path('profile/', profile, name='profile')\n]\n" }, { "alpha_fraction": 0.5115830302238464, "alphanum_fraction": 0.5357142686843872, "avg_line_length": 26.263158798217773, "blob_id": "9d9a60fe062f4f5951c4ce17d26c139e01b61021", "content_id": "240e46f40c6a18ab8d8f07e5e829a032d4cece81", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1036, "license_type": "no_license", "max_line_length": 76, "num_lines": 38, "path": "/form_anggota/migrations/0002_auto_20190427_1306.py", "repo_name": "zafirr31/sijambumuda-group-3", "src_encoding": "UTF-8", "text": "# Generated by Django 2.1.1 on 2019-04-27 13:06\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('form_anggota', '0001_initial'),\n ]\n\n operations = [\n migrations.RenameField(\n model_name='profile',\n old_name='user',\n new_name='member',\n ),\n migrations.RemoveField(\n model_name='profile',\n name='nomor_identitas',\n ),\n migrations.AddField(\n model_name='profile',\n name='profile_picture',\n field=models.ImageField(default='', upload_to=''),\n preserve_default=False,\n ),\n migrations.AlterField(\n model_name='member',\n name='email',\n field=models.EmailField(max_length=254, null=True, unique=True),\n ),\n migrations.AlterField(\n model_name='profile',\n name='alamat_rumah',\n field=models.CharField(max_length=500, null=True),\n ),\n ]\n" }, { "alpha_fraction": 0.6171779036521912, "alphanum_fraction": 0.6257668733596802, "avg_line_length": 32.91666793823242, "blob_id": "9ddb36681ef96b74c9b441955381e0f7d55dec59", "content_id": "5f3560f8e12e7cdadffc6235c6c71abdc8da03d1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "HTML", "length_bytes": 815, "license_type": "no_license", "max_line_length": 94, "num_lines": 24, "path": "/About_dan_Testimoni/templates/about_dan_testimoni.html", "repo_name": "zafirr31/sijambumuda-group-3", "src_encoding": "UTF-8", "text": "{% extends 'base.html' %} {% block content %}\n{% load static %}\n<div class=\"dalam-border\" style=\"margin-bottom: 5px;\">\n <div id = \"about\">\n <h1>Tentang SijambuMuda</h1>\n <p>SiJambuMuda Merupakan sebuah website peminjaman buku</p>\n </div>\n {% if 
user.is_authenticated %}\n <form method=\"POST\">\n {% csrf_token %}\n {{ form }}\n <button id ='submit' type=\"submit\" onclick=\"buat()\"> Post Komentar</button>\n </form>\n</div>\n{% endif %}\n\n<div class=\"dalam-border\">\n<div id=\"tampilkanTestimoni\"></div>\n</div>\n{% load static %}\n<script src=\"https://ajax.googleapis.com/ajax/libs/jquery/3.3.1/jquery.min.js\"></script>\n<script src=\"{% static 'tampilkantestimoni.js' %}\"></script>\n<script src={% static 'js/effect_about.js' %} type=\"text/javascript\" charset=\"utf-8\"></script>\n{% endblock %}\n\n" }, { "alpha_fraction": 0.6111111044883728, "alphanum_fraction": 0.6111111044883728, "avg_line_length": 23.75, "blob_id": "0a9900939f47978e55564aa874a26c1a76d7f11c", "content_id": "c31eab8c3a3bd706c32e18c4385cea6c1d33b97d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 198, "license_type": "no_license", "max_line_length": 48, "num_lines": 8, "path": "/About_dan_Testimoni/urls.py", "repo_name": "zafirr31/sijambumuda-group-3", "src_encoding": "UTF-8", "text": "from django.urls import path\nfrom .views import *\n\nurlpatterns = [\n path('', about, name = \"about\"),\n path('tampil/', tampilkan, name = \"tampil\"),\n path('buat/', create, name = 'create')\n]\n" }, { "alpha_fraction": 0.5855855941772461, "alphanum_fraction": 0.5855855941772461, "avg_line_length": 28.600000381469727, "blob_id": "ddb26809194a966a12a04b439d9e1113543d887c", "content_id": "3c566e4af74b3fbef4cc618bf5425586c3df4cd1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 444, "license_type": "no_license", "max_line_length": 62, "num_lines": 15, "path": "/About_dan_Testimoni/forms.py", "repo_name": "zafirr31/sijambumuda-group-3", "src_encoding": "UTF-8", "text": "from django import forms\nfrom .models import Testimoni\n\n\nclass IsiTestimoni(forms.Form):\n model = Testimoni\n Pesan = forms.CharField(widget=forms.TextInput(attrs={\n 'class':'form-control',\n 'required':'True',\n 'placeholder':'Testimoni Anda',\n }))\n\n def clean(self):\n cleaned_data = super(IsiTestimoni, self).clean()\n check_pesan = cleaned_data.get('Pesan')\n" }, { "alpha_fraction": 0.717131495475769, "alphanum_fraction": 0.7450199127197266, "avg_line_length": 30.375, "blob_id": "8f78e4aa6ceb7b5a91ef8b93dd66b50b2058950d", "content_id": "bf4b7d3c7d7d68fe1a4469ed66c3a5337849a64a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 251, "license_type": "no_license", "max_line_length": 67, "num_lines": 8, "path": "/About_dan_Testimoni/models.py", "repo_name": "zafirr31/sijambumuda-group-3", "src_encoding": "UTF-8", "text": "from django.db import models\n\nclass Testimoni(models.Model):\n Username = models.CharField(max_length=250)\n Pesan = models.CharField(max_length=5000)\n Tanggal_Pesan = models.DateField(auto_now_add=True, blank=True)\n\n# Create your models here.\n" }, { "alpha_fraction": 0.5117647051811218, "alphanum_fraction": 0.6117647290229797, "avg_line_length": 19, "blob_id": "ff6b1318edd25222d1476f58b3c7bab5e5a43317", "content_id": "1280f469d38dec7823d877bafe374852fe6d2d6d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 340, "license_type": "no_license", "max_line_length": 47, "num_lines": 17, "path": "/landingpage/migrations/0002_delete_peminjaman.py", "repo_name": "zafirr31/sijambumuda-group-3", "src_encoding": "UTF-8", "text": "# Generated by Django 2.1.1 on 
2019-03-21 10:49\n# Generated by Django 2.1.1 on 2019-03-21 10:43\n\nfrom django.db import migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('landingpage', '0001_initial'),\n ]\n\n operations = [\n migrations.DeleteModel(\n name='Peminjaman',\n ),\n ]\n" }, { "alpha_fraction": 0.5174999833106995, "alphanum_fraction": 0.5924999713897705, "avg_line_length": 21.22222137451172, "blob_id": "daf8696b712fea260544f785d11704309e96ed54", "content_id": "ba820c8d1397ea18a6f7b0a05903cd45b153bcab", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 400, "license_type": "no_license", "max_line_length": 53, "num_lines": 18, "path": "/form_anggota/migrations/0006_auto_20190507_0927.py", "repo_name": "zafirr31/sijambumuda-group-3", "src_encoding": "UTF-8", "text": "# Generated by Django 2.2 on 2019-05-07 09:27\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('form_anggota', '0005_merge_20190506_1911'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='profile',\n name='profile_picture',\n field=models.ImageField(upload_to=''),\n ),\n ]\n" }, { "alpha_fraction": 0.6273938417434692, "alphanum_fraction": 0.6273938417434692, "avg_line_length": 38.37704849243164, "blob_id": "d0ac48fcb9bd569b2f4e7ed410a4965b9dbba2ed", "content_id": "b229b94ec46a7e3214e87bd8ada5e1c1421b9569", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2402, "license_type": "no_license", "max_line_length": 94, "num_lines": 61, "path": "/history_pinjaman/views.py", "repo_name": "zafirr31/sijambumuda-group-3", "src_encoding": "UTF-8", "text": "from django.shortcuts import render\nfrom shafiya_pinjam.models import PinjamModel\nfrom show_buku.models import Buku\nfrom form_anggota.models import Profile\nfrom django.contrib.auth.models import User\nfrom .forms import ProfileForm\nfrom django.http import HttpResponseRedirect, JsonResponse\n\n# Create your views here.\n# @login_required\n\n\ndef show_history(request):\n if request.user.is_authenticated:\n username = request.user.username\n user = User.objects.get(username=username)\n if Profile.objects.filter(user=user).exists():\n profile = Profile.objects.get(user=user)\n return render(request, 'history.html', {'profile': profile, 'username': username})\n else:\n return render(request, 'history.html')\n return HttpResponseRedirect(\"/login/\")\n\n\ndef history_json(request):\n if request.user.is_authenticated:\n username = request.user.username\n db_pinjam = PinjamModel.objects.filter(username=username)\n response_data = []\n for pinjam in db_pinjam:\n attr = dict()\n buku = Buku.objects.get(nomor_buku=pinjam.nomor_buku)\n attr[\"nomor_buku\"] = buku.nomor_buku\n attr[\"judul_buku\"] = buku.judul_buku\n attr[\"pengarang\"] = buku.pengarang\n attr[\"penerbit\"] = buku.penerbit\n attr[\"tanggal_pinjam\"] = pinjam.tanggal_pinjam\n response_data.append(attr)\n return JsonResponse(response_data, safe=False)\n return HttpResponseRedirect(\"/\")\n\n\ndef profile(request):\n if request.user.is_authenticated:\n if request.method == \"POST\":\n form = ProfileForm(request.POST, request.FILES)\n if form.is_valid():\n user = User.objects.get(username=request.user.username)\n propic = request.FILES['profile_picture']\n address = form.cleaned_data[\"alamat_rumah\"]\n if Profile.objects.filter(user=user).exists():\n query = Profile.objects.get(user=user)\n query.delete()\n profile = Profile(\n 
user=user, profile_picture=propic, alamat_rumah=address)\n profile.save()\n return HttpResponseRedirect(\"/history/\")\n else:\n form = ProfileForm()\n return render(request, \"proform.html\", {\"form\": form})\n return HttpResponseRedirect(\"/login/\")\n" }, { "alpha_fraction": 0.681506872177124, "alphanum_fraction": 0.681506872177124, "avg_line_length": 25.545454025268555, "blob_id": "ab7e1f05802a33d8a360dbc33ed06785d3523cf0", "content_id": "e92a9b49abd62fae967741d4fb3ae900e8d491e6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 292, "license_type": "no_license", "max_line_length": 45, "num_lines": 11, "path": "/landingpage/views.py", "repo_name": "zafirr31/sijambumuda-group-3", "src_encoding": "UTF-8", "text": "from django.shortcuts import render, redirect\nfrom django.template import RequestContext\n\n\ndef index(request):\n if request.user.is_authenticated:\n user = request.user.username\n else:\n user = \"\"\n # index_context = {'user': user}\n return render(request, \"index.html\")\n" }, { "alpha_fraction": 0.6936936974525452, "alphanum_fraction": 0.6936936974525452, "avg_line_length": 24.227272033691406, "blob_id": "9d09e6f6d06212f2367164b32a454844aa8c0707", "content_id": "ab8ecdc5a0f2ea227292b6186469b50b972b1323", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 555, "license_type": "no_license", "max_line_length": 63, "num_lines": 22, "path": "/shafiya_pinjam/forms.py", "repo_name": "zafirr31/sijambumuda-group-3", "src_encoding": "UTF-8", "text": "from django import forms\nfrom .models import PinjamModel\nfrom show_buku.models import Buku\n\n\nclass PinjamForm(forms.Form):\n\tnomor_buku = forms.IntegerField(widget=forms.TextInput(attrs={\n\t\t'class': 'form-control',\n\t\t'required': 'True',\n\t\t'placeholder': 'Nomor Buku',\n\t}))\n\n\tdef clean(self):\n\t\tcleaned_data = super(PinjamForm, self).clean()\n\t\tbuku = cleaned_data.get('nomor_buku')\n\n\t\tdb_buku = []\n\t\tfor i in Buku.objects.all().values('nomor_buku'):\n\t\t\tdb_buku.append(i['nomor_buku'])\n\n\t\tif buku not in db_buku:\n\t\t\traise forms.ValidationError(\"Buku tidak ada\")\n" }, { "alpha_fraction": 0.7653061151504517, "alphanum_fraction": 0.7653061151504517, "avg_line_length": 18.600000381469727, "blob_id": "4549959c3af7494efe6a70283c021dc7d68a7c7f", "content_id": "d32d469419c314dacf60785e6363ea20883ccf02", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 98, "license_type": "no_license", "max_line_length": 35, "num_lines": 5, "path": "/form_anggota/apps.py", "repo_name": "zafirr31/sijambumuda-group-3", "src_encoding": "UTF-8", "text": "from django.apps import AppConfig\n\n\nclass FormAnggotaConfig(AppConfig):\n name = 'form_anggota'\n" }, { "alpha_fraction": 0.7032967209815979, "alphanum_fraction": 0.7208791375160217, "avg_line_length": 31.5, "blob_id": "9b45f3f4f77e6000337897fc9dbfb53e3ee250f4", "content_id": "d33e2b0d22ce5f81a81d5bafc920e400ab7b3ac1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 455, "license_type": "no_license", "max_line_length": 62, "num_lines": 14, "path": "/show_buku/models.py", "repo_name": "zafirr31/sijambumuda-group-3", "src_encoding": "UTF-8", "text": "from django.db import models\n\n# Create your models here.\n\n\nclass Buku(models.Model):\n nomor_buku = models.IntegerField()\n judul_buku = models.CharField(max_length=50)\n pengarang = 
models.CharField(max_length=50)\n kategori = models.CharField(max_length=50)\n penerbit = models.CharField(max_length=50)\n cover = models.ImageField(upload_to=\"gallery\", blank=True)\n sinopsis = models.TextField()\n kuota = models.IntegerField(null=True)\n" }, { "alpha_fraction": 0.5257452726364136, "alphanum_fraction": 0.574525773525238, "avg_line_length": 19.5, "blob_id": "04c09d80980bcec2ad0341920488e998500597a6", "content_id": "3098025cc2b8e883923d1b93c1507697366b7e41", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 369, "license_type": "no_license", "max_line_length": 49, "num_lines": 18, "path": "/show_buku/migrations/0002_buku_kuota.py", "repo_name": "zafirr31/sijambumuda-group-3", "src_encoding": "UTF-8", "text": "# Generated by Django 2.2 on 2019-05-07 02:57\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('show_buku', '0001_initial'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='buku',\n name='kuota',\n field=models.IntegerField(null=True),\n ),\n ]\n" }, { "alpha_fraction": 0.791304349899292, "alphanum_fraction": 0.791304349899292, "avg_line_length": 18.16666603088379, "blob_id": "d56e352f60ab65a0047872cb0da6c1231e54242d", "content_id": "143ba6015015c65eee9f62d9fbc6fba1f94113e4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 115, "license_type": "no_license", "max_line_length": 32, "num_lines": 6, "path": "/show_buku/admin.py", "repo_name": "zafirr31/sijambumuda-group-3", "src_encoding": "UTF-8", "text": "from django.contrib import admin\n\n# Register your models here.\nfrom .models import Buku\n\nadmin.site.register(Buku)\n" }, { "alpha_fraction": 0.6797900199890137, "alphanum_fraction": 0.6797900199890137, "avg_line_length": 37.099998474121094, "blob_id": "6d80558b6c74e8b6d220db3bdd29d2d13a1b0748", "content_id": "6f23c775a6ab783e28e6a7ad2478bebab4a789e5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 381, "license_type": "no_license", "max_line_length": 67, "num_lines": 10, "path": "/form_anggota/urls.py", "repo_name": "zafirr31/sijambumuda-group-3", "src_encoding": "UTF-8", "text": "from django.urls import path, include\nfrom .views import *\n\nurlpatterns = [\n path('register/', register_member, name=\"register\"),\n path('', include('django.contrib.auth.urls')),\n path('check-username/', check_username, name=\"check-username\"),\n path('check-email/', check_email, name=\"check-email\"),\n path('check-password/', check_password, name=\"check-password\"),\n]\n" }, { "alpha_fraction": 0.7028571367263794, "alphanum_fraction": 0.7097142934799194, "avg_line_length": 29.20689582824707, "blob_id": "6e11461011dc3516608ec2128faed1a3762d7a2a", "content_id": "052562394f5821e71f1b25d73854891a1c18f7c6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 875, "license_type": "no_license", "max_line_length": 80, "num_lines": 29, "path": "/landingpage/tests.py", "repo_name": "zafirr31/sijambumuda-group-3", "src_encoding": "UTF-8", "text": "from django.test import TestCase\nfrom django.test import Client\nfrom django.urls import resolve\nfrom django.http import HttpRequest\nfrom landingpage.apps import LandingpageConfig\nfrom django.apps import apps\n\nfrom .views import *\nfrom .models import *\nimport datetime\n\nclass LandingPage(TestCase):\n\n def 
test_landing_page_url_ada(self):\n response = Client().get('/')\n self.assertEqual(response.status_code, 200)\n\n def test_landing_page_fungsi_index(self):\n found = resolve('/')\n self.assertEqual(found.func, index)\n \n def test_dummy_page(self):\n response = Client().get(\"/test\")\n self.assertEqual(response.status_code,404)\n\nclass ConfigTest(TestCase):\n def test_apps(self):\n self.assertEqual(LandingpageConfig.name, 'landingpage')\n self.assertEqual(apps.get_app_config('landingpage').name, 'landingpage')" }, { "alpha_fraction": 0.7334235310554504, "alphanum_fraction": 0.7523680925369263, "avg_line_length": 35.95000076293945, "blob_id": "23734185d041194070fdfb0d862702085056bd39", "content_id": "1848130cfa35c9a887caa19266408c4d4a3a04a7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 739, "license_type": "no_license", "max_line_length": 164, "num_lines": 20, "path": "/README.md", "repo_name": "zafirr31/sijambumuda-group-3", "src_encoding": "UTF-8", "text": "# SIJAMBUMUDA Kelompok 3\n\nNama Anggota :\n\n1. Jahns Michael\n2. Rendya Yuschak\n3. Shafiya Ayu A.\n4. Zafir Rasyidi Taufik \n\nPembagian Tugas 2 :\n1. Dashboard Buku yang Pernah Dipinjam [history_pinjaman] : Jahns Michael\n2. Login Django-auth dan Google OAuth [form_anggota] : Zafir Rasyidi Taufik\n3. Update Form Peminjaman [shafiya_pinjam] : Shafiya Ayu A.\n4. Testimoni dan About [About-dan-Testimoni] : Rendya Yuschak\n\n[![pipeline status](https://gitlab.com/Haskucy/sijambumuda-kelompok-3/badges/master/pipeline.svg)](https://gitlab.com/Haskucy/sijambumuda-kelompok-3/commits/master)\n\n[![coverage report](https://gitlab.com/Haskucy/sijambumuda-kelompok-3/badges/master/coverage.svg)](https://gitlab.com/Haskucy/sijambumuda-kelompok-3/commits/master)\n\nLink Heroku: https://sijambu-tiga.herokuapp.com/\n" } ]
50
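Editor's note on the `pinjam` view in shafiya_pinjam/views.py above: it reads `buku.kuota`, tests `> 0` in Python, and only then decrements, so two concurrent borrowers can both pass the check for the last copy (the `F('kuota') - 1` write avoids lost updates, but not the stale check). Below is a minimal sketch of a race-free variant, assuming the `Buku` model from show_buku; `borrow_one` is a hypothetical helper name, not code from the repository:

```python
from django.db.models import F

from show_buku.models import Buku


def borrow_one(nomor_buku):
    # Single conditional UPDATE: "SET kuota = kuota - 1 WHERE nomor_buku = %s
    # AND kuota > 0". The check and the decrement happen in one statement,
    # so the last copy can only be taken once even under concurrency.
    updated = Buku.objects.filter(nomor_buku=nomor_buku, kuota__gt=0).update(
        kuota=F('kuota') - 1)
    return updated == 1  # True iff a copy was actually reserved
```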
ejones44/NCB_Stats
https://github.com/ejones44/NCB_Stats
83a7ff559b63e47026db8b5689f3d2ceb4277a92
a9f9cd11458c8394f826000a35ea1279595846d2
79686cd6521e07dec3caadfe84a99f7d2dc1a3a0
refs/heads/master
2021-01-18T20:10:50.571654
2015-04-06T02:53:08
2015-04-06T02:53:08
33,462,106
0
0
null
2015-04-06T01:49:27
2015-04-06T01:49:12
2015-04-06T01:47:37
null
[ { "alpha_fraction": 0.6456456184387207, "alphanum_fraction": 0.6756756901741028, "avg_line_length": 29.272727966308594, "blob_id": "18d7dc66d3a40194c216592ef77c494daa10dd19", "content_id": "c4b61948bf340faa2fcde9699f99eefa0b792ac3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 333, "license_type": "no_license", "max_line_length": 59, "num_lines": 11, "path": "/Main.py", "repo_name": "ejones44/NCB_Stats", "src_encoding": "UTF-8", "text": "__author__ = 'Ryan'\nimport FBB_League\nfrom Scrape_espn_league import *\nimport pandas as pd\n#RYAN IS A BADDY TEST \n\ndef main():\n NCB = FBB_league('123478', '2015')\n hitters = pd.read_csv('Data/Hitters_projections.csv')\n pitchers = pd.read_csv('Data/Pitchers_projections.csv')\n teams = pd.read_csv('Data/NCB_teams.csv')\n" }, { "alpha_fraction": 0.45213866233825684, "alphanum_fraction": 0.4559330940246582, "avg_line_length": 39.26736068725586, "blob_id": "725fed6f5613d63dbb77879230981bf6de451887", "content_id": "be2fe4cf2b3f3ab5404164ba746bb7ba2bf8e09b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 11596, "license_type": "no_license", "max_line_length": 119, "num_lines": 288, "path": "/FBB_League.py", "repo_name": "ejones44/NCB_Stats", "src_encoding": "UTF-8", "text": "__author__ = 'Ryan'\n\nimport pandas as pd\nfrom Scrape_espn_league import *\nimport pickle\n\n\nclass FBB_League:\n def __init__(self, leagueID, year):\n self.leagueID = leagueID\n self.year = year\n # data frame containing\n # [teamID, teamName, wins, losses, draws]\n self.teams = pd.DataFrame()\n # data frame containing all of the matchups\n # [weekID, gameID, teamID, H/A]\n self.matchups = pd.DataFrame()\n #data frame containing all of te results for each weeks matchups\n #[weekID, gameID, teamID, H, R, 2B, 3B, HR, XBH, RBI, BB, SB, AVG, OBP, SLG,\n # K, QS, CG, SO, W, L, SV, HD, BAA, ERA, WHIP, K/9, Wins, Losses, Ties, H/A]\n self.matchUpResults = pd.DataFrame()\n #data frame containing all of the batters and their year to date stats\n # [playerID, Name, Team, catcher, first base, second base, third base, shortstop,\n # left field, center field, right field, designated hitter\n # H, AB, R, 2B, 3B, HR, XBH, RBI, BB, SB, AVG, OBP, SLG]\n self.batters = pd.DataFrame()\n #data frame containing all of the batters and their projections\n # [playerID, Name, Team, catcher, first base, second base, third base, shortstop,\n # left field, center field, right field, designated hitter\n # H, AB, R, 2B, 3B, HR, XBH, RBI, BB, SB, AVG, OBP, SLG]\n self.batterProjections = pd.DataFrame()\n #data frame containing all of the batters and what FBB team their are on\n # [playerID, TeamID]\n self.batterRosters = pd.DataFrame()\n #data frame containing each batters week as they played for a team\n #[playerID, Name, FBBteamID, H, AB, R, 2B, 3B, HR, XBH, RBI, BB, SB, AVG, OBP, SLG]\n self.matchUpBatters = pd.DataFrame()\n #data frame containing all of the pitchers and their year to data stats\n #[playerID, Name, Team, Starting Pitcher, Relief Pitcher, IP, K, QS, CG, SO, W, L, SV, HD, BAA, ERA, WHIP, K/9]\n self.pitchers = pd.DataFrame()\n #data frame containing all of the pitchers and their projections\n #[playerID, Name, Team, Starting Pitcher, Relief Pitcher, IP, K, QS, CG, SO, W, L, SV, HD, BAA, ERA, WHIP, K/9]\n self.pitcherProjections = pd.DataFrame()\n #data frame containing all of the pitcher and what FBB team their are on\n # [playerID, FBBTeamID]\n self.pitcherRosters = 
pd.DataFrame()\n        #data frame containing each pitcher's week as they played for a team\n        #[playerID, Name, FBBteamID, gameID, H, AB, R, 2B, 3B, HR, XBH, RBI, BB, SB, AVG, OBP, SLG]\n        self.matchUpPitchers = pd.DataFrame()\n        #data frame containing all of the teamIDs and their ELO rating\n        #[teamID, ELO, Init, week 1 ELO, , week 2 ELO, ... ]\n        self.ELO = pd.DataFrame()\n        #data frame containing all of the information for how much each roster can hold\n        # [Roster Position, Num Starters, Min, Max]\n        # note bench and DL will be roster positions\n        self.leagueInfo = pd.DataFrame()\n        # data frame containing the season stats for each team\n        #[Name, teamID, ...Scoring Stats...]\n        self.seasonStats = pd.DataFrame()\n    # ############################################################################\n    # #\n    # #\n    # League Functions #\n    # NOTE* No Scraping is done by this class #\n    # #\n    #############################################################################\n\n    #############################################################################\n    # #\n    # ELO #\n    # #\n    #############################################################################\n\n    def createELO(self):\n        teams = list(self.teams['teamID'])\n        for t in teams:\n            self.ELO = self.ELO.append(pd.Series([t, 1500.0, 1500.0]), ignore_index=True)\n        self.ELO.columns = ['teamID', 'ELO', 'Init']\n\n    def updateELO(self, weekID):\n        if weekID not in self.ELO.columns:\n            games = list(self.matchups[(self.matchups['weekID'] == weekID)]['gameID'])\n            for g in games:\n                self.calcELO(g)\n            self.ELO[weekID] = pd.Series(list(self.ELO['ELO']))\n\n    def calcELO(self, gameID):\n        teamsMatch = self.matchups[(self.matchups['gameID'] == gameID)]\n        weekID = list(teamsMatch['weekID'])[0]\n        teams = list(teamsMatch['teamID'])\n        teamA = self.ELO[(self.ELO['teamID'] == teams[0])]['ELO']\n        teamB = self.ELO[(self.ELO['teamID'] == teams[1])]['ELO']\n        # NB: ELOMath returns the expected scores (values in 0..1), not\n        # K-factor-adjusted ratings\n        teamA_new, teamB_new = self.ELOMath(teamA, teamB)\n        self.ELO.loc[self.ELO.teamID == teams[0], 'ELO'] = teamA_new\n        self.ELO.loc[self.ELO.teamID == teams[1], 'ELO'] = teamB_new\n\n    def ELOMath(self, teamA, teamB):\n        A = 1 / (1 + 10 ** ((teamB - teamA) / 400))\n        B = 1 / (1 + 10 ** ((teamA - teamB) / 400))\n        return A, B\n\n    #############################################################################\n    # #\n    # Analysis #\n    # #\n    #############################################################################\n\n    def calculateZScores(self):\n        # TODO: standardize each scoring stat into z-scores; not yet implemented\n        pass\n\n\n    #############################################################################\n    # #\n    # End of Week Analysis #\n    # team of the week, player of the week, next week predictions #\n    #############################################################################\n\n\n\n\n    #############################################################################\n    # #\n    # #\n    # GETTERS, SETTERS, UPDATERS #\n    # #\n    # #\n    #############################################################################\n\n    #############################################################################\n    # #\n    # Getters #\n    # #\n    #############################################################################\n\n\n\n    def getLeagueID(self):\n        return self.leagueID\n\n    def getYear(self):\n        return self.year\n\n    def getELO(self):\n        return self.ELO\n\n    def getTeams(self):\n        return self.teams\n\n    def getMatchups(self):\n        return self.matchups\n\n    def getMatchUpResults(self):\n        return self.matchUpResults\n\n    def getBatters(self):\n        return self.batters\n\n    def getBatterProjections(self):\n        return self.batterProjections\n\n    def getBatterRosters(self):\n        return 
self.batterRosters\n\n    def getMatchUpBatters(self):\n        return self.matchUpBatters\n\n    def getPitchers(self):\n        return self.pitchers\n\n    def getPitcherProjections(self):\n        return self.pitcherProjections\n\n    def getPitcherRosters(self):\n        return self.pitcherRosters\n\n    def getMatchUpPitchers(self):\n        return self.matchUpPitchers\n\n    def getLeagueInfo(self):\n        return self.leagueInfo\n\n    def getSeasonStats(self):\n        return self.seasonStats\n\n    #############################################################################\n    # #\n    # Setters #\n    # #\n    #############################################################################\n\n    def setLeagueID(self, leagueID):\n        self.leagueID = leagueID\n\n    def setYear(self, year):\n        self.year = year\n\n    def setELO(self, ELO):\n        self.ELO = ELO\n\n    def setTeams(self, teams):\n        self.teams = teams\n\n    def setMatchups(self, matchups):\n        self.matchups = matchups\n\n    def setMatchUpResults(self, matchUpResults):\n        self.matchUpResults = matchUpResults\n\n    def setBatters(self, batters):\n        self.batters = batters\n\n    def setBatterProjections(self, batterProjections):\n        self.batterProjections = batterProjections\n\n    def setBatterRosters(self, batterRosters):\n        self.batterRosters = batterRosters\n\n    def setMatchUpBatters(self, matchUpBatters):\n        self.matchUpBatters = matchUpBatters\n\n    def setPitchers(self, pitchers):\n        self.pitchers = pitchers\n\n    def setPitcherProjections(self, pitcherProjections):\n        self.pitcherProjections = pitcherProjections\n\n    def setPitcherRosters(self, pitcherRosters):\n        self.pitcherRosters = pitcherRosters\n\n    def setMatchUpPitchers(self, matchUpPitchers):\n        self.matchUpPitchers = matchUpPitchers\n\n    def setLeagueInfo(self, leagueInfo):\n        self.leagueInfo = leagueInfo\n\n    def setSeasonStats(self, seasonStats):\n        self.seasonStats = seasonStats\n\n    \"\"\"\n    #############################################################################\n    # #\n    # Updaters #\n    # #\n    #############################################################################\n\n    def updateELO(self, ELO):\n        self.ELO = ELO\n\n    def updateTeams(self, teams):\n        self.teams = teams\n\n    def updateMatchups(self, matchups):\n        self.matchups = matchups\n\n    def updateMatchUpResults(self, matchUpResults):\n        self.matchUpResults = matchUpResults\n\n    def updateBatters(self, batters):\n        self.batters = batters\n\n    def updateBatterProjections(self, batterProjections):\n        self.batterProjections = batterProjections\n\n    def updateBatterRosters(self, batterRosters):\n        self.batterRosters = batterRosters\n\n    def updateMatchUpBatters(self, matchUpBatters):\n        self.matchUpBatters = matchUpBatters\n\n    def updatePitchers(self, pitchers):\n        self.pitchers = pitchers\n\n    def updatePitcherProjections(self, pitcherProjections):\n        self.pitcherProjections = pitcherProjections\n\n    def updatePitcherRosters(self, pitcherRosters):\n        self.pitcherRosters = pitcherRosters\n\n    def updateMatchUpPitchers(self, matchUpPitchers):\n        self.matchUpPitchers = matchUpPitchers\n\n    def updateLeagueInfo(self, leagueInfo):\n        self.leagueInfo = leagueInfo\n\n    def updateSeasonStats(self, seasonStats):\n        self.seasonStats = seasonStats\n\n    \"\"\"" }, { "alpha_fraction": 0.5925925970077515, "alphanum_fraction": 0.5925925970077515, "avg_line_length": 23.545454025268555, "blob_id": "2da32d03a3307f720558db2c735447ce47565274", "content_id": "735739ea6212282ffc645210801f28f9761a1b17", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 270, "license_type": "no_license", "max_line_length": 47, "num_lines": 
11, "path": "/FBB_Team.py", "repo_name": "ejones44/NCB_Stats", "src_encoding": "UTF-8", "text": "__author__ = 'Ryan'\nimport pandas as pd\n\n\nclass FBB_Team:\n def __init__(self, leagueID, year, teamId):\n self.leagueID = leagueID\n self.year = year\n self.teamId = teamId\n self.batters = pd.DataFrame()\n self.pitchers = pd.DataFrame()\n" }, { "alpha_fraction": 0.5592188835144043, "alphanum_fraction": 0.5737511515617371, "avg_line_length": 29.586111068725586, "blob_id": "e727124f71f4a2e22bcd8f9582ff3acf85509733", "content_id": "abe5417d0f62435e45a2f567b37752795f9850d1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 11010, "license_type": "no_license", "max_line_length": 219, "num_lines": 360, "path": "/Scrape_espn_league.py", "repo_name": "ejones44/NCB_Stats", "src_encoding": "UTF-8", "text": "__author__ = 'Ryan'\nimport time\nfrom bs4 import BeautifulSoup\nimport pandas as pd\nimport requests\nimport pickle\nfrom robobrowser import RoboBrowser\nfrom espn_login import *\n\n\nespn_header = {'1/0': 'H/AB', '1/0': 'H/AB', }\n\ndef loginToESPN(leagueID, year):\n link = 'http://games.espn.go.com/flb/leagueoffice?leagueId='+str(leagueID)+'&seasonId='+str(year)\n br = RoboBrowser(history=True)\n br.open(link)\n try:\n form = br.get_form(action=\"https://r.espn.go.com/espn/memberservices/pc/login\")\n form['username'].value = login.username.value\n form['password'].value = login.password.value\n br.submit_form(form)\n print('\\nLogging In\\n')\n except:\n print('\\nNo need to login!\\n')\n\n return br\n\ndef is_number(s):\n try:\n float(s)\n return True\n except ValueError:\n return False\n\ndef nameToBatPos(d):\n #BatPos = ['Catcher', 'First Base', 'Second Base', 'Third Base', 'Shortstop', 'Left Field', 'Center Field', 'Right Field', 'Designated Hitter']\n s = d.text.format('ascii')\n name = getPlayerName(s)\n s = s[s.find(',')+2:]\n pID = getPlayerID(d)\n team = s[:s.find('\\xa0')]\n pos = s[s.find('\\xa0')+1:]\n posOut = getBatPositions(pos)\n return [pID, name, team] + posOut\n\n\ndef getPlayerName(s):\n return s[:s.find(',')]\n\n\ndef getPlayerID(d):\n return d.find_all('a')[0]['playerid']\n\n\ndef getBatPositions(s):\n posOut = [None]*9\n if 'SSPD' in s:\n s = s.replace('SSPD', '')\n if '1B' in s:\n posOut[1] = 1\n s = s.replace('1B', '')\n if '2B' in s:\n posOut[2] = 1\n s = s.replace('2B', '')\n if '3B' in s:\n posOut[3] = 1\n s = s.replace('3B', '')\n if 'SS' in s:\n posOut[4] = 1\n s = s.replace('SS', '')\n if 'LF' in s:\n posOut[5] = 1\n s = s.replace('LF', '')\n if 'CF' in s:\n posOut[6] = 1\n s = s.replace('CF', '')\n if 'RF' in s:\n posOut[7] = 1\n s = s.replace('RF', '')\n if 'DH' in s:\n posOut[8] = 1\n s = s.replace('DH', '')\n if 'C' in s:\n posOut[0] = 1\n s = s.replace('C', '')\n return posOut\n\ndef splitHAB(s):\n hits = s[:s.find('/')]\n ab = s[s.find('/')+1:]\n if is_number(hits):\n hits = float(hits)\n else:\n hits = 0\n if is_number(ab):\n ab = float(ab)\n else:\n ab = 0\n return [hits, ab]\n\ndef nameToPitchPos(d):\n #['Starting Pitcher', 'Relief Pitcher']\n s = d.text.format('ascii')\n name = s[:s.find(',')]\n s = str(s[s.find(',')+2:])\n pID = d.find_all('a')[0]['playerid']\n team = s[:s.find('\\xa0')]\n pos = s[s.find('\\xa0')+1:]\n posOut = getPitchPositions(pos)\n return [pID, name, team] + posOut\n\ndef getPitchPositions(s):\n posOut = [None]*2\n if 'SSPD' in s:\n s = s.replace('SSPD', '')\n if 'SP' in s:\n posOut[0] = 1\n s = s.replace('SP', '')\n if 'RP' in s:\n posOut[1] = 1\n s = s.replace('RP', '')\n 
return posOut\n\ndef tableToBatters(table):\n Hitters = pd.DataFrame()\n rows = table.find_all('tr')\n rows = rows[2:]\n for r in rows:\n data = r.find_all('td')\n data = [data[0]] + data[8:20]\n row_data = []\n for i, d in enumerate(data):\n if i == 0:\n row_data = nameToBatPos(d)\n elif '/' in d.text:\n row_data += splitHAB(d.text)\n else:\n if is_number(d.text):\n row_data.append(float(d.text))\n else:\n row_data.append(0)\n Hitters = Hitters.append(pd.Series(row_data), ignore_index=True)\n return Hitters\n\ndef tableToPitchers(table):\n Pitchers = pd.DataFrame()\n rows = table.find_all('tr')\n rows = rows[2:]\n for r in rows:\n data = r.find_all('td')\n data = [data[0]] + data[8:24]\n row_data = []\n for i, d in enumerate(data):\n if i == 0:\n row_data = nameToPitchPos(d)\n else:\n if is_number(d.text):\n row_data.append(float(d.text))\n else:\n row_data.append(0)\n Pitchers = Pitchers.append(pd.Series(row_data), ignore_index=True)\n return Pitchers\n\n\ndef scrapePlayerProjections(leagueID, year):\n br = loginToESPN(leagueID, year)\n Hitters = pd.DataFrame()\n HitPos = ['Catcher', 'First Base', 'Second Base', 'Third Base', 'Shortstop', 'Left Field', 'Center Field', 'Right Field', 'Designated Hitter']\n Pitchers = pd.DataFrame()\n PitchPos = ['Starting Pitcher', 'Relief Pitcher']\n thead = []\n index = 0\n #get batter values\n br.open('http://games.espn.go.com/flb/freeagency?leagueId='+str(leagueID)+'&teamId=1&seasonId='+str(year)+'&context=freeagency&view=stats&version=projections&startIndex=0&avail=-1&startIndex='+str(index))\n table = br.find_all('table', class_='playerTableTable tableBody')[0]\n rows = table.find_all('tr')\n\n #get the column headers\n header = rows[1]\n data = header.find_all('td')\n data = [data[0]] + data[8:20]\n for d in data:\n txt = d.text.replace('\\xa0', '')\n thead.append(txt.format('ascii'))\n thead[0] = 'PlayerId'\n if 'H/AB' in thead:\n ind = thead.index('H/AB')\n thead[ind] = 'AB' #AB stored in ind+1\n thead.insert(ind, 'H') #H stored in ind\n thead.insert(1, 'Team')\n thead.insert(1, 'Name')\n thead = thead[0:3]+HitPos+thead[3:]\n #get player projections\n while index < 250:\n br.open('http://games.espn.go.com/flb/freeagency?leagueId='+str(leagueID)+'&teamId=1&seasonId='+str(year)+'&context=freeagency&view=stats&version=projections&avail=-1&startIndex='+str(index))\n table = br.find_all('table', class_='playerTableTable tableBody')[0]\n Hitters = Hitters.append(tableToBatters(table))\n index += 50\n Hitters.columns = thead\n index = 0\n\n\n #get Pitchers\n br.open('http://games.espn.go.com/flb/freeagency?leagueId='+str(leagueID)+'&teamId=1&seasonId='+str(year)+'&context=freeagency&view=stats&version=projections&avail=-1&slotCategoryGroup=2&startIndex='+str(index))\n table = br.find_all('table', class_='playerTableTable tableBody')[0]\n rows = table.find_all('tr')\n\n #get the column headers\n thead = []\n header = rows[1]\n data = header.find_all('td')\n data = [data[0]] + data[8:24]\n for d in data:\n txt = d.text.replace('\\xa0', '')\n thead.append(txt.format('ascii'))\n thead[0] = 'PlayerId'\n thead.insert(1, 'Team')\n thead.insert(1, 'Name')\n thead = thead[0:3]+PitchPos+thead[3:]\n #get player projections\n while index < 250:\n br.open('http://games.espn.go.com/flb/freeagency?leagueId='+str(leagueID)+'&teamId=1&seasonId='+str(year)+'&context=freeagency&view=stats&version=projections&avail=-1&slotCategoryGroup=2&startIndex='+str(index))\n table = br.find_all('table', class_='playerTableTable tableBody')[0]\n Pitchers = 
Pitchers.append(tableToPitchers(table))\n index += 50\n Pitchers.columns = thead\n\n return Hitters, Pitchers\n\n\ndef scrapeTeamPlayers(leagueID, year, teams):\n br = loginToESPN(leagueID, year)\n\n teamBatters = pd.DataFrame()\n teamPitchers = pd.DataFrame()\n\n urls = list(teams['Link'])\n for u in urls:\n br.open('http://games.espn.go.com' + u)\n teamId = teams[teams['Link'] == u].iloc[0]['teamId']\n # batters\n Btable = br.find_all('table', class_='playerTableTable tableBody')[0]\n rows = Btable.find_all('tr')\n rows = rows[2:]\n for r in rows:\n d = r.find_all('td')[1]\n if d.find_all('a'):\n pID = getPlayerID(d)\n teamBatters = teamBatters.append(pd.Series([teamId, pID]), ignore_index=True)\n\n\n\n #pitchers\n Ptable = br.find_all('table', class_=\"playerTableTable tableBody playerTableMoreTable\")[0]\n rows = Ptable.find_all('tr')\n rows = rows[2:]\n for r in rows:\n d = r.find_all('td')[1]\n if d.find_all('a'):\n pID = getPlayerID(d)\n teamPitchers = teamPitchers.append(pd.Series([teamId, pID]), ignore_index=True)\n\n teamBatters.columns = ['teamId', 'playerId']\n teamPitchers.columns = ['teamId', 'playerId']\n return teamBatters, teamPitchers\n\n\ndef scrapeMatchups():\n pass\n\n\ndef scrapeLeagueSchedule():\n pass\n\n\ndef scrapeMatchupPlayers():\n pass\n\n\n# returns data frame containing\n# [teamID, teamName, shortName, wins, losses, draws]\ndef scrapeLeagueTeams(leagueID, year):\n br = loginToESPN(leagueID, year)\n\n # dataframe will have the following columns:\n #[teamID, teamName, wins, losses, draws]\n teams = pd.DataFrame()\n\n br.open('http://games.espn.go.com/flb/standings?leagueId=' + str(leagueID) + '&seasonId=' + str(year))\n tables = br.find_all('table', class_='tableBody')\n tables = tables[:-1]\n for t in tables:\n print('\\nTABLE\\n')\n row = t.find_all('tr')[2:]\n for r in row:\n data = r.find_all('td')\n name = data[0]\n name_row = teamNameToRow(name)\n wins = float(data[1].text)\n losses = float(data[2].text)\n draw = float(data[3].text)\n out = name_row + [wins, losses, draw]\n teams = teams.append(pd.Series(out), ignore_index=True)\n teams.columns = ['teamId', 'Name', 'Link', 'W', 'L', 'T']\n return teams\n\n\ndef teamNameToRow(name):\n link = name.find_all('a')[0]['href']\n ID = link.split('&')[1]\n teamID = int(ID[ID.find('=') + 1:])\n teamName = name.text\n\n return [teamID, teamName, link]\n\n\ndef scrapeTeamStats(leagueID, year):\n br = loginToESPN(leagueID, year)\n\n # dataframe will have the following columns:\n #[teamID, teamName, wins, losses, draws]\n teamStats = pd.DataFrame()\n\n br.open('http://games.espn.go.com/flb/standings?leagueId=' + str(leagueID) + '&seasonId=' + str(year))\n tables = br.find_all('table', class_='tableBody')\n table = tables[-1]\n rows = table.find_all('tr')\n head = rows[2].find_all('td')\n header = [h.text for h in head]\n while '' in header:\n header.remove('')\n header.insert(0, 'Name')\n header.insert(0, 'teamId')\n stats = rows[3:]\n\n for r in stats:\n data_row = []\n data = r.find_all('td')\n name = teamNameToRow(data[1])\n data = data[2:-2]\n for d in data:\n if is_number(d.text):\n data_row.append(float(d.text))\n out = name[:2] + data_row\n\n teamStats = teamStats.append(pd.Series(out), ignore_index=True)\n teamStats.columns = header\n return teamStats\n\n\nHitters, Pitchers = scrapePlayerProjections('123478', '2015')\nHitters.to_csv('Data/Hitters_projections.csv')\nPitchers.to_csv('Data/Pitchers_projections.csv')\n\n\"\"\"\nteams = pd.read_csv('NCB_teams.csv', index_col=0)\nteamBatters, teamPitchers = 
scrapeTeamPlayers('123478', '2015', teams)\nteamBatters.to_csv('activeRoster_batter.csv')\nteamPitchers.to_csv('activeRoster_pitcher.csv')\n\"\"\"" }, { "alpha_fraction": 0.682170569896698, "alphanum_fraction": 0.682170569896698, "avg_line_length": 20.66666603088379, "blob_id": "a7275a67ea20c51daedd6aef91457d823c8c9dcd", "content_id": "33b24d5165834e53351627693f3a2a973811d832", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 129, "license_type": "no_license", "max_line_length": 33, "num_lines": 6, "path": "/espn_login.py", "repo_name": "ejones44/NCB_Stats", "src_encoding": "UTF-8", "text": "__author__ = 'Ryan'\nfrom enum import Enum\n\nclass login(Enum):\n username = #put username here\n password = #put password here" } ]
5
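Editor's note on the Elo code in FBB_League.py above: `ELOMath` returns the logistic expected scores E_A = 1 / (1 + 10^((R_B - R_A)/400)), but `calcELO` then stores those expectations (numbers between 0 and 1) directly as the new ratings. A standard Elo step instead moves each old rating by K times (actual result minus expected result). A minimal sketch, independent of the repo's data frames; K = 32 is a common but arbitrary choice:

```python
def elo_step(rating_a, rating_b, score_a, k=32.0):
    """One standard Elo update; score_a is 1.0 win / 0.5 draw / 0.0 loss for A."""
    expected_a = 1.0 / (1.0 + 10.0 ** ((rating_b - rating_a) / 400.0))
    new_a = rating_a + k * (score_a - expected_a)
    new_b = rating_b + k * ((1.0 - score_a) - (1.0 - expected_a))
    return new_a, new_b


# Two 1500-rated teams: the winner moves to 1516.0, the loser to 1484.0.
print(elo_step(1500.0, 1500.0, 1.0))
```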
810978558/projet-vuln-detect-python
https://github.com/810978558/projet-vuln-detect-python
52030b8c52e4eecc9a579dcb371bacd5ca71114c
bfd8dde79ba962da25a4da2b92e1428e20199503
d81e22fb2f9e30189e9ca1e328fab90b0ca449e4
refs/heads/master
2016-07-25T21:58:55.759508
2014-07-17T01:11:36
2014-07-17T01:11:36
43,934,208
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.3734627962112427, "alphanum_fraction": 0.3802588880062103, "avg_line_length": 71.35713958740234, "blob_id": "6a34f1ebc0cd1a4a567e6ac69e8fd65764604f83", "content_id": "e479f1caa14582dea85af3f0132bb8ae63ff68e6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3091, "license_type": "no_license", "max_line_length": 138, "num_lines": 42, "path": "/ScanPortBENJ.py", "repo_name": "810978558/projet-vuln-detect-python", "src_encoding": "UTF-8", "text": " # Creation de la fonction de scan\r\ndef fnScanportsParIP(scanports_ip_debut_liste, scanports_ip_fin_liste, pos, chaine_adrip) :\r\n \"\"\" Fonction permettant de tester les ports pour chaque adresse IP d'une plage donnee en parametre \"\"\"\r\n \r\n\tfor octet in range(int(scanports_ip_debut_liste[pos]), int(scanports_ip_fin_liste[pos])+1) :\r\n \tif pos < 3 :\r\n # On construit l'adresse IP avec les autres octets\r\n \tfnScanportsParIP(scanports_ip_debut_liste, scanports_ip_fin_liste, pos+1, chaine_adrip+str(octet)+\".\")\r\n else :\r\n \tscanports_hote = chaine_adrip + str(octet)\r\n \tscanports_port = 0\r\n \tresultat = 1\r\n # On teste les ports\r\n try :\r\n \tfor scanports_port in range(1, 1025) :\r\n # On tente d'acceder ร  l'adresse IP par un port\r\n \tconnexion_scanports = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n \tresultat = connexion_scanports.connect_ex((scanports_hote, scanports_port))\r\n \r\n if resultat == 0 : # On obtient une reponse\r\n #print(\"IP: {} Port: {}\".format(scanports_hote, scanports_port))\r\n \r\n # Reception du tag client \"pret a recevoir\"\r\n tag_reception = self.connexion.recv(1024)\r\n \tif tag_reception == b\"tagMENUSUPERVISIONSCANPRET\" :\r\n # Envoi du resultat au client\r\n \tresultat_scan = scanports_hote + \" \" + str(scanports_port)\r\n self.connexion.send(resultat_scan.encode())\r\n else :\r\n \tprint(\"Erreur :\", tag_reception.decode())\r\n \r\n connexion_scanports.close()\r\n \r\n if resultat == 10060 : # L'adresse IP n'est pas joignable\r\n print(\"IP: \"+scanports_hote+\" introuvable\")\r\n break\r\n except :\r\n \tprint(\"Erreur lors du scan de ports.\")\r\n \r\n \r\n # On appelle la fonction qui se charge du scan de port\r\n fnScanportsParIP(scanports_ip_debut_liste, scanports_ip_fin_liste, 0, \"\")\r\n\r\n" }, { "alpha_fraction": 0.5681470036506653, "alphanum_fraction": 0.5727412104606628, "avg_line_length": 18.34375, "blob_id": "7ce24031585f8f3a9067363377b99a647acdcb00", "content_id": "464d1498b30c81ca2b9709ec0aa303939006bee9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 653, "license_type": "no_license", "max_line_length": 60, "num_lines": 32, "path": "/ScanPortKIL.py", "repo_name": "810978558/projet-vuln-detect-python", "src_encoding": "UTF-8", "text": "\r\nimport socket, sys, threading, time, os\r\n \r\n\r\n \r\ndef ScanPort(port):\r\n \r\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n s.settimeout(2)\r\n try:\r\n s.connect((ip,port))\r\n print port\r\n s.close()\r\n except socket.error:\r\n \r\n s.close()\r\n \r\n \r\nprint('Entrez adresse IP ou nom de domaine')\r\n#ip=input()\r\nip=sys.argv[1]\r\n\r\nportdebut=int(input('Entrer port de debut: \\n'))\r\n\r\nportfin=int(input('Entrer port de fin\\n'))\r\n\r\n\r\nwhile (portdebut != portfin):\r\n# a = threading.Thread(None, ScanPort, None,(portdebut,))\r\n \r\n portdebut= portdebut + 1\r\n ScanPort(portdebut)\r\n# a.start()\r\n" }, { "alpha_fraction": 0.7350993156433105, 
"alphanum_fraction": 0.7350993156433105, "avg_line_length": 24.16666603088379, "blob_id": "050d32f8bc9c59dbdff63dd3498e6ba1b94afd74", "content_id": "529bd948474aef2e14676685bc057b5edd66dfe4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 151, "license_type": "no_license", "max_line_length": 58, "num_lines": 6, "path": "/test.py", "repo_name": "810978558/projet-vuln-detect-python", "src_encoding": "UTF-8", "text": "#!/bin/bash/python\n\n#Fichier de test que j'agrandit\n\n#Premiere chose a faire scan de port :\nfrom scapy.all import * # j'importe la librairie de scapy.\n" }, { "alpha_fraction": 0.6258925199508667, "alphanum_fraction": 0.6384211182594299, "avg_line_length": 27.113636016845703, "blob_id": "bc67b48ac6693eff3b264c3e1035103e4922314b", "content_id": "2952ba0405cbb9879faa5c0a86f7c017385b8c1a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7423, "license_type": "no_license", "max_line_length": 178, "num_lines": 264, "path": "/start.py", "repo_name": "810978558/projet-vuln-detect-python", "src_encoding": "UTF-8", "text": "###### MISE A JOUR DE LA BASES DE VULNERABILITE fermer socket \n\n\nimport socket, sys, threading, time, os, urllib, string, csv, ftplib, urllib2, tarfile, subprocess\nfrom subprocess import call\nportList = []\n\n#fname=\"Resultat.CSV\"\n#write = open(\"Resultat.csv\",\"w\")\nBDD=\"formater.csv\"\n#ftpfile = open(\"FTPFILE.csv\",\"w\")\n#httpfile= open(\"HTTPFILE.csv\",\"w\")\n#httpsfile= open(\"HTTPSFILE.csv\",\"w\")\nvulnscan = open(\"Scan_Vuln.csv\",\"w\")\n\ndef ScanPort(port):\n\ts = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\ts.settimeout(2)\n\ttry:\n\t\ts.connect((ip_address,port))\n\t\tportList.append(port)\n\t\ts.close()\n\texcept socket.error:\n\t\ts.close()\n\n\ndef Compare(filename,port):\n\tf1 = file(filename,\"r\")\n\tf2 = file('formater.csv','r')\n\tcd1 = csv.reader(f1, delimiter = ' ')\n\tcd2 = csv.reader(f2, delimiter = ' ')\n\t\n\tfor c1_row in cd1:\n\t\tfor c2_row in cd2:\n\t\t\tif c1_row[0] == c2_row[1] and c1_row[1] == c2_row[2]:\n\t\t\t\tprint \"Vulnerabiliter trouvee ecriture dans fichier\"\n\t\t\t\tvulnscan.write(\"Vulnerabilite trouvee sur port \" + str(port) + \" : \" + str(c2_row[1]) + \" \" +str(c2_row[2]) + \" http://www.exploit-db.com/exploits/\" + str(c2_row[0]) + \"\\n\")\n\ndef grab_banner(ip_address,port):\n\ttry:\n\t\tif port == 21:\n\t\t\ts=socket.socket()\n\t\t\ts.connect((ip_address,port))\n\t\t\tbanner = s.recv(1024)\n\t\t#\twrite.write(str(port) + \",\" + str(banner) + \"\\n\")\n\t\t\tftpfile = open(\"FTPFILE.csv\",\"w\")\n\t\t\tftpfile.write(str(banner))\n\t\t\tftpfile.close()\n\t\t\t\n\t\t\tftpresult = open(\"FTPRESULT.csv\",\"w\")\n\t\t\tftpfile = open(\"FTPFILE.csv\",\"r\")\n\t\t\tfor lines in ftpfile:\n\t\t\t\trpl1 = lines.replace(\"220 \", \"\")\n\t\t\t\trpl2 = rpl1.replace(\"(\", \"\")\n\t\t\t\trpl3 = rpl2.replace(\")\", \"\")\n\t\t\t\tftpresult.write(rpl3.upper())\n\t\t\tftpresult.close()\t\t\t\n\t\t\tCompare(\"FTPRESULT.csv\",port)\n\t\t\tftp = ftplib.FTP(ip_address)\n\t\t\tres = ftp.login()\n\t\t\tprint \"Tentative de connexion en anonymous : \" + res\n\t\t\tvulnscan.write(\"Tentative de connexion en anonymous : \" + res + \"\\n\")\n\t\t\tftp.quit()\n\t\t\tos.remove(\"FTPFILE.csv\")\n\t\t\tos.remove(\"FTPRESULT.csv\")\t\n\t\telif port == 22:\n\t\t\ts=socket.socket()\n\t\t\ts.connect((ip_address,port))\n\t\t\tbanner = s.recv(1024)\n\t\t#\tprint (banner)\n\t\t\tsshfile = 
open(\"SSHRESULT.csv\",\"w\")\n\t\t\trpl1 = banner.replace(\"SSH-2.0-\", \"\")\n\t\t\trpl2 = rpl1.replace(\"_\", \" \")\n\t\t\tsshfile.write(rpl2.upper())\n\t\t\tsshfile.close()\n\t\t\tCompare(\"SSHRESULT.csv\",port)\n\t\t#\tos.remove(\"SSHRESULT.csv\")\n\t\telif port == 23:\n\t\t\tvulnscan.write(\"Port Telnet ouvert ... peut etre quelque chose d'interessant ici... \\n\")\n\n\t\telif port == 80:\n\t\t\thttphd=urllib.urlopen(URL)\n\t\t\tinfohttp = httphd.info().headers\n\t\t#\twrite.write(str(port) + \",\" + str(infohttp))\n\t\t\thttpfile = open(\"HTTPFILE.csv\",\"w\")\n\t\t\thttpfile.write(str(port) + \",\" + str(infohttp).upper())\n\t\t\thttpfile.close()\n\n\t\t\thttpfile = open(\"HTTPFILE.csv\",\"r\")\n\t\t\tformathttp = open(\"resultatHTTP.csv\",\"w\")\n\n\t\t\treader = csv.reader(httpfile)\n\t\t\tfor row in reader:\n\t\t\t\tformathttp.write(row[3] + \",\" + row[4])\n\t\t\thttpfile.close()\n\t\t\t\n\t\t\tformathttp.close()\n\n\t\t\trechhttp = open(\"Seekhttp.csv\",\"w\")\n\t\t\tformathttp = open(\"resultatHTTP.csv\",\"r\")\n\t\t\tfor lines in formathttp:\n\t\t\t\trechhttp.write(lines.replace(' ' , ','))\n\t\t\trechhttp.close()\n\t\t\tformathttp.close()\n\t\t\t\n\t\t\thttpresult = open(\"HTTPRESULT.csv\",\"w\")\n\t\t\trechhttp = open(\"Seekhttp.csv\",\"r\")\n\t\t\tphpresult = open(\"PHPRESULT.csv\",\"w\")\n\t\t\treader = csv.reader(rechhttp)\n\t\t\tfor row in reader:\n\t\t\t\tfinalhttp = row[2].replace(\"/\", \" \")\n\t\t\t\thttpresult.write(finalhttp)\n\t\t\t\tphp = row[7].replace(\"/\", \" \")\n\t\t\t\tphp1 = php.replace(\"-\",\" \")\n\t\t\t\tphpresult.write(php1)\n\t\t\tos.remove(\"HTTPFILE.csv\")\n\t\t\tos.remove(\"Seekhttp.csv\")\n\t\t\tos.remove(\"resultatHTTP.csv\")\n\t\t\tphpresult.close()\n\t\t\t\n\t\t\thttpresult.close()\n\n\t\t\tCompare(\"HTTPRESULT.csv\",port)\n\t\t\tCompare(\"PHPRESULT.csv\",port)\n\t\t\tos.remove(\"HTTPRESULT.csv\")\n\t\t\tos.remove(\"PHPRESULT.csv\")\n\n\t\telif port == 53:\n\t\t\tdnsfile = open(\"DNSFILE.csv\",\"w\")\n\t\t\toutput = subprocess.Popen([\"nslookup\",\"-q=txt\",\"-class=CHAOS\",\"version.bind\",ip_address], stdout=subprocess.PIPE).communicate()[0]\n\t\t\t\n\t\t\tdnsfile.write(output)\n\t\t\tdnsfile.close()\n\t\t\t\n\t\t\t\n\t\t\tfile = open(\"DNSFILE.csv\",\"r\")\n\t\t\tformatdns = open(\"DNSFORMAT.csv\",\"w\")\n\t\t\tfor line in file:\n\t\t\t\tif 'version.bind' in line:\n\t\t\t\t\tformatdns.write(line)\n\t\t\tfile.close()\n\t\t\tformatdns.close()\n\t\t\tafile = open(\"DNSFORMAT.csv\",\"r\")\n\t\t\tdnsfile = open(\"DNSAV.csv\",\"w\")\n\t\t\treader = csv.reader(afile, delimiter = '\"')\n\t\t\tfor row in reader:\n\t\t\t\tdnsfile.write(row[1])\n\t\t\t\n\t\t\tdnsfile.close()\n\t\t\tCompare(\"DNSAV.csv\",port)\n\t\t\tos.remove(\"DNSFILE.csv\")\n\t\t\t\n\t\t\tos.remove(\"DNSFORMAT.csv\")\n\t\t\t\t\n\t\telif port == 443:\n\t\t\thttphd=urllib.urlopen(URLS)\n\t\t\tinfohttp = httphd.info().headers\n\t\t#\twrite.write(str(port) + \",\" + str(infohttp))\n\t\t\t\n\t\t\thttpshd = open(\"HTTPSFILE.csv\",\"w\")\n\t\t\thttpshd.write(str(infohttp).upper())\n\t\t\thttpshd.close()\n\t\t\t\n\t\t\thttpshd = open(\"HTTPSFILE.csv\",\"r\")\n\t\t\treader = csv.reader(httpshd)\n\t\t\tformathttps = open(\"resultatHTTPS.csv\",\"w\")\n\t\t\tfor row in reader:\n\t\t\t\tformathttps.write(row[2] + \",\" + row[3])\n\t\t\tformathttps.close()\n\n\t\t\trechhttps = open(\"Seekhttps.csv\",\"w\")\n\t\t\tformathttps = open(\"resultatHTTPS.csv\",\"r\")\n\t\t\tfor lines in formathttps:\n\t\t\t\trechhttps.write(lines.replace(' ', 
','))\n\t\t\trechhttps.close()\n\t\t\tformathttps.close()\n\t\t\trechhttps = open(\"Seekhttps.csv\",\"r\")\n\t\t\thttpsresult = open(\"HTTPSRESULT.csv\",\"w\")\n\t\t\treader = csv.reader(rechhttps)\n\t\t\tfor row in reader:\n\t\t\t\t#print row[2].replace(\"/\", \" \")\n\t\t\t\thttpsbuf = row[2].replace(\"/\", \" \")\n\t\t\t\thttpsresult.write(httpsbuf)\n\t\t\tos.remove(\"HTTPSFILE.csv\")\n\t\t\thttpsresult.close()\n\t\t\trechhttps.close()\n\t\t\tos.remove(\"Seekhttps.csv\")\n\t\t\tCompare(\"HTTPSRESULT.csv\",port)\n\t\t\tos.remove(\"HTTPSRESULT.csv\")\n\n\t\t\t\n\t\t\t\n\t\telse:\n\t\t\ts=socket.socket()\n\t\t\ts.connect((ip_address,port))\n\t\t\tbanner = s.recv(1024)\n\t\t\twrite.write(str(port) + \",\" + str(banner) + \"\\n\")\n\t\t\ts.close()\n\texcept:\n\t\treturn\n\nif len(sys.argv) == 1:\n\tprint \"Argument necessaires\"\n\tprint \"Pour avoir la liste des arguments taper : python start.py -h\"\n\tsys.exit(1)\n\n\nelif sys.argv[1] == \"-u\":\n\tprint \"Upgrade ...\"\n\turllib.urlretrieve(\"http://www.exploit-db.com/archive.tar.bz2\",\"archive.tar.bz2\")\n\tprint \"Telechargement termine\"\n\tprint \"Recuperation et formatage de la base de donnee en cours\"\n\tFichierTAR = 'archive.tar.bz2'\n\tTAR = tarfile.open(FichierTAR)\n\tif tarfile.is_tarfile(FichierTAR):\n\t\tTAR.extract(\"files.csv\")\n\tos.remove(\"archive.tar.bz2\")\n\tcall([\"chmod\",\"777\",\"files.csv\"])\n\tfile = open(\"files.csv\",\"r\")\n\twrite = open(\"formater.csv\",\"w\")\n\treader = csv.reader(file)\n\tfor row in reader:\n\t\twrite.write(row[0] + \" \" + row[2].upper() + \"\\n\")\n#\twrite.close()\n\tfile.close()\n\tos.remove(\"files.csv\")\n\nelif os.path.isfile(BDD) == False:\n\tprint \"Aucun fichier de base de donnee trouvee\"\n\tprint \"Pour telecharger la base de donnee mise a jour taper : python start.py -u \"\n\nelif sys.argv[1] == \"-h\":\n\tprint \"Scanner de vulnerabilite :\"\n\tprint \"Usage : python start.py X.X.X.X\"\n\tprint \"-u : mise a jour de la base de donnee de vulnerabilite\"\n\tprint \"-h : affiche cette aide\"\n\tprint \"Programme cree par Jean-Yves Nazaire, Kilian Lavieille, Benjamin Berthelot\"\n\n\t\n\n\n\n\n\nelse:\n\tprint('Entrez adresse IP ou nom de domaine')\n\tip_address=sys.argv[1]\n\tportdebut=int(input('Entrer port de debut: \\n'))\n\tportfin=int(input('Entrer port de fin\\n'))\n\n\tURL=\"http://\" + ip_address\n\tURLS=\"https://\" + ip_address\n\twhile (portdebut != portfin):\n \t\tportdebut= portdebut + 1\n \t\tScanPort(portdebut)\n\tprint('FIN SCAN')\n\tfor port in portList:\n\t\tgrab_banner(ip_address,port)\n#\twrite.close()\n\n\t#os.remove(\"DNSFORMAT.csv\")\n\t#os.remove(\"resultatHTTP.csv\")\n\t#os.remove(\"FTPFILE.csv\")\n\t#os.remove(\"Seekhttp.csv\")\n\n" }, { "alpha_fraction": 0.5122615694999695, "alphanum_fraction": 0.5885558724403381, "avg_line_length": 25.214284896850586, "blob_id": "1d577bad4645e4edbaac4c4b40e636769bf13f13", "content_id": "2cbf77f04a0b88a01e26f5563011106bba81eec3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 367, "license_type": "no_license", "max_line_length": 72, "num_lines": 14, "path": "/compare.py", "repo_name": "810978558/projet-vuln-detect-python", "src_encoding": "UTF-8", "text": "import csv, sys\n\nf1 = file(sys.argv[1],'r')\n#f1 = file('FTPRESULT.csv','r')\nf2 = file('formater.csv','r')\n\nc1 = csv.reader(f1, delimiter = ' ')\nc2 = csv.reader(f2, delimiter = ' ')\n\nfor c1_row in c1:\n#\tprint c1_row[0]\n\tfor c2_row in c2:\n\t\tif c1_row[0] == c2_row[1] and c1_row[1] == 
c2_row[2] :\n\t\t\tprint c2_row[1], c2_row[2] + \" http://dbexploit/exploits\" + c2_row[0]\n" } ]
5
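Editor's note on the scanners above: all three files probe ports with one blocking connect at a time, and the `threading.Thread` lines in ScanPortKIL.py are commented out. Below is a minimal Python 3 sketch of the concurrent version using a thread pool; names like `check_port` and `scan` are illustrative, not from the repository, and port scanning should only be run against hosts you are authorized to test:

```python
import socket
from concurrent.futures import ThreadPoolExecutor


def check_port(host, port, timeout=2.0):
    # connect_ex returns 0 when the TCP handshake succeeds, i.e. the port is open
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        s.settimeout(timeout)
        return port if s.connect_ex((host, port)) == 0 else None


def scan(host, ports, workers=100):
    # Each probe blocks on the network, so a thread pool overlaps the waits.
    with ThreadPoolExecutor(max_workers=workers) as pool:
        return sorted(p for p in pool.map(lambda p: check_port(host, p), ports)
                      if p is not None)


if __name__ == '__main__':
    print(scan('127.0.0.1', range(1, 1025)))
```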
tw4l/brunnhilde-gui
https://github.com/tw4l/brunnhilde-gui
b3624f9da7aaf7d72b615f833690cf5c27266ce6
e1f9c1dfa45b26c4f092047e99bc4975621511c4
74f1e50fa100f6c1cb8dee916690cf389de52986
refs/heads/main
2023-03-06T00:08:18.836804
2023-02-23T05:35:19
2023-02-23T05:35:19
67,456,482
3
0
null
null
null
null
null
[ { "alpha_fraction": 0.6405684351921082, "alphanum_fraction": 0.6470035910606384, "avg_line_length": 35.3853645324707, "blob_id": "b2e982d521cff521ad0d559aaa0704a3b8ac9dc1", "content_id": "09d8fc0022dda923305bc2b4982cd90a3ba9593f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7459, "license_type": "no_license", "max_line_length": 144, "num_lines": 205, "path": "/main.py", "repo_name": "tw4l/brunnhilde-gui", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom PyQt5.QtGui import *\nfrom PyQt5.QtCore import * \nfrom PyQt5.QtWidgets import *\nimport subprocess\nimport sys\n\nimport design\n\nclass StartScanThread(QThread):\n\n def __init__(self, process_list):\n QThread.__init__(self)\n self.process_list = process_list\n\n def start_scan(self):\n print(self.process_list) # for debugging\n subprocess.check_output(self.process_list)\n\n def run(self):\n self.start_scan()\n\nclass BrunnhildeApp(QMainWindow, design.Ui_Brunnhilde):\n\n def __init__(self, parent=None):\n super(BrunnhildeApp, self).__init__(parent)\n self.setupUi(self)\n\n # build browse functionality buttons\n self.dirSourceBtn.clicked.connect(self.browse_dirsource)\n self.diskImageSourceBtn.clicked.connect(self.browse_diskimage)\n self.dirDestinationBtn.clicked.connect(self.browse_dirdest)\n self.diskImageDestinationBtn.clicked.connect(self.browse_diskimagedest)\n\n # build start functionalities\n self.dirStartScanBtn.clicked.connect(self.start_scan_dir)\n self.diskImageStartScan.clicked.connect(self.start_scan_diskimage)\n\n # about menu functionality\n self.actionAbout.triggered.connect(self.about_dialog)\n\n # Set buttons\n self.dirCancelBtn.setEnabled(False)\n self.dirStartScanBtn.setEnabled(True)\n self.diskImageCancelBtn.setEnabled(False)\n self.diskImageStartScan.setEnabled(True)\n \n def about_dialog(self):\n QMessageBox.information(self, \"About\", \n \"Brunnhilde GUI v.2.1.0\\nTessa Walsh, 2017-2023\\nMIT License\\nhttps://github.com/tw4l/brunnhilde-gui\\nCompatible with Brunnhilde 1.6.1+\")\n\n def browse_dirsource(self):\n self.dirSource.clear() # clear directory source text\n directory = QFileDialog.getExistingDirectory(self, \"Select folder\")\n\n if directory: # if user didn't pick directory don't continue\n self.dirSource.setText(directory)\n\n def browse_diskimage(self):\n self.diskImageSource.clear() # clear existing disk image source text\n diskimage = QFileDialog.getOpenFileName(self, \"Select disk image\")[0]\n\n if diskimage:\n self.diskImageSource.setText(diskimage)\n\n def browse_dirdest(self):\n self.dirDestination.clear() # clear existing report destination text\n directory = QFileDialog.getExistingDirectory(self, \"Select folder\")\n\n if directory: # if user didn't pick directory don't continue\n self.dirDestination.setText(directory)\n\n def browse_diskimagedest(self):\n self.diskImageDestination.clear() # clear existing report destination text\n directory = QFileDialog.getExistingDirectory(self, \"Select folder\")\n\n if directory: # if user didn't pick directory don't continue\n self.diskImageDestination.setText(directory)\n\n def done_dir(self):\n self.dirCancelBtn.setEnabled(False)\n self.dirStartScanBtn.setEnabled(True)\n QMessageBox.information(self, \"Finished\", \"Brunnhilde scan complete.\")\n self.dirStatus.setText('Finished')\n\n def done_diskimage(self):\n self.diskImageCancelBtn.setEnabled(False)\n self.diskImageStartScan.setEnabled(True)\n QMessageBox.information(self, \"Finished\", 
\"Brunnhilde scan complete.\")\n self.diskImageStatus.setText('Finished')\n\n def start_scan_dir(self):\n # clear output window\n self.dirStatus.clear()\n\n # create list for process\n self.process_list = list()\n self.process_list.append(\"brunnhilde.py\")\n \n # give indication process has started\n self.dirStatus.setText('Processing')\n\n # universal option handling\n if not self.virusScan.isChecked():\n self.process_list.append('-n')\n if self.largeFiles.isChecked():\n self.process_list.append('-l')\n if self.bulkExtractor.isChecked():\n self.process_list.append('-b')\n if self.scanArchives.isChecked():\n self.process_list.append('-z')\n if self.throttleSiegfried.isChecked():\n self.process_list.append('-t')\n if self.sfWarnings.isChecked():\n self.process_list.append('-w')\n if self.sha1.isChecked():\n self.process_list.append('--hash')\n self.process_list.append('sha1')\n if self.sha256.isChecked():\n self.process_list.append('--hash')\n self.process_list.append('sha256')\n if self.sha512.isChecked():\n self.process_list.append('--hash')\n self.process_list.append('sha512')\n\n self.process_list.append(self.dirSource.text())\n self.process_list.append(self.dirDestination.text())\n self.process_list.append(self.dirIdentifier.text())\n\n # process\n self.get_thread = StartScanThread(self.process_list)\n self.get_thread.finished.connect(self.done_dir)\n self.get_thread.start()\n self.dirCancelBtn.setEnabled(True)\n self.dirCancelBtn.clicked.connect(self.get_thread.terminate)\n self.dirStartScanBtn.setEnabled(False)\n\n\n def start_scan_diskimage(self):\n # clear output window\n self.diskImageStatus.clear()\n\n # create list for process\n self.process_list = list()\n self.process_list.append(\"brunnhilde.py\")\n\n # give indication process has started\n self.diskImageStatus.setText('Processing')\n\n # add disk image flag \n self.process_list.append('-d')\n \n # universal option handling\n if not self.virusScan.isChecked():\n self.process_list.append('-n')\n if self.largeFiles.isChecked():\n self.process_list.append('-l')\n if self.bulkExtractor.isChecked():\n self.process_list.append('-b')\n if self.scanArchives.isChecked():\n self.process_list.append('-z')\n if self.throttleSiegfried.isChecked():\n self.process_list.append('-t')\n if self.sfWarnings.isChecked():\n self.process_list.append('-w')\n if self.sha1.isChecked():\n self.process_list.append('--hash')\n self.process_list.append('sha1')\n if self.sha256.isChecked():\n self.process_list.append('--hash')\n self.process_list.append('sha256')\n if self.sha512.isChecked():\n self.process_list.append('--hash')\n self.process_list.append('sha512')\n\n # disk image option handling\n if self.removeFiles.isChecked():\n self.process_list.append('-r')\n if self.hfsDisk.isChecked():\n self.process_list.append('--hfs')\n if self.resForks.isChecked():\n self.process_list.append('--resforks')\n\n self.process_list.append(self.diskImageSource.text())\n self.process_list.append(self.diskImageDestination.text())\n self.process_list.append(self.diskImageIdentifier.text())\n\n # process\n self.get_thread = StartScanThread(self.process_list)\n self.get_thread.finished.connect(self.done_diskimage)\n self.get_thread.start()\n self.diskImageCancelBtn.setEnabled(True)\n self.diskImageCancelBtn.clicked.connect(self.get_thread.terminate)\n self.diskImageStartScan.setEnabled(False)\n\ndef main():\n app = QApplication(sys.argv)\n form = BrunnhildeApp()\n form.show()\n app.exec_()\n\nif __name__ == '__main__':\n main()\n" }, { "alpha_fraction": 
0.6929724216461182, "alphanum_fraction": 0.7167121767997742, "avg_line_length": 63.53205108642578, "blob_id": "e0912d8d52aaa703adbe1d90a94480a5a7c9a9de", "content_id": "a2daacc9a3e7c2d0778e98eeaf4690c02494a298", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 20135, "license_type": "no_license", "max_line_length": 167, "num_lines": 312, "path": "/design.py", "repo_name": "tw4l/brunnhilde-gui", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\n# Form implementation generated from reading ui file 'design.ui'\n#\n# Created by: PyQt5 UI code generator 5.9\n#\n# WARNING! All changes made in this file will be lost!\n\nfrom PyQt5 import QtCore, QtGui, QtWidgets\n\nclass Ui_Brunnhilde(object):\n def setupUi(self, Brunnhilde):\n Brunnhilde.setObjectName(\"Brunnhilde\")\n Brunnhilde.resize(658, 465)\n self.centralwidget = QtWidgets.QWidget(Brunnhilde)\n self.centralwidget.setObjectName(\"centralwidget\")\n self.tabWidget = QtWidgets.QTabWidget(self.centralwidget)\n self.tabWidget.setGeometry(QtCore.QRect(10, 10, 631, 401))\n self.tabWidget.setObjectName(\"tabWidget\")\n self.directoryTab = QtWidgets.QWidget()\n self.directoryTab.setObjectName(\"directoryTab\")\n self.layoutWidget = QtWidgets.QWidget(self.directoryTab)\n self.layoutWidget.setGeometry(QtCore.QRect(11, 21, 611, 311))\n self.layoutWidget.setObjectName(\"layoutWidget\")\n self.verticalLayout_9 = QtWidgets.QVBoxLayout(self.layoutWidget)\n self.verticalLayout_9.setContentsMargins(0, 0, 0, 0)\n self.verticalLayout_9.setObjectName(\"verticalLayout_9\")\n self.verticalLayout_5 = QtWidgets.QVBoxLayout()\n self.verticalLayout_5.setObjectName(\"verticalLayout_5\")\n self.label = QtWidgets.QLabel(self.layoutWidget)\n self.label.setObjectName(\"label\")\n self.verticalLayout_5.addWidget(self.label)\n self.horizontalLayout = QtWidgets.QHBoxLayout()\n self.horizontalLayout.setObjectName(\"horizontalLayout\")\n self.dirSource = QtWidgets.QLineEdit(self.layoutWidget)\n self.dirSource.setObjectName(\"dirSource\")\n self.horizontalLayout.addWidget(self.dirSource)\n self.dirSourceBtn = QtWidgets.QPushButton(self.layoutWidget)\n self.dirSourceBtn.setObjectName(\"dirSourceBtn\")\n self.horizontalLayout.addWidget(self.dirSourceBtn)\n self.verticalLayout_5.addLayout(self.horizontalLayout)\n self.verticalLayout_9.addLayout(self.verticalLayout_5)\n self.verticalLayout_6 = QtWidgets.QVBoxLayout()\n self.verticalLayout_6.setObjectName(\"verticalLayout_6\")\n self.label_5 = QtWidgets.QLabel(self.layoutWidget)\n self.label_5.setObjectName(\"label_5\")\n self.verticalLayout_6.addWidget(self.label_5)\n self.horizontalLayout_2 = QtWidgets.QHBoxLayout()\n self.horizontalLayout_2.setObjectName(\"horizontalLayout_2\")\n self.dirDestination = QtWidgets.QLineEdit(self.layoutWidget)\n self.dirDestination.setObjectName(\"dirDestination\")\n self.horizontalLayout_2.addWidget(self.dirDestination)\n self.dirDestinationBtn = QtWidgets.QPushButton(self.layoutWidget)\n self.dirDestinationBtn.setObjectName(\"dirDestinationBtn\")\n self.horizontalLayout_2.addWidget(self.dirDestinationBtn)\n self.verticalLayout_6.addLayout(self.horizontalLayout_2)\n self.verticalLayout_9.addLayout(self.verticalLayout_6)\n self.verticalLayout_7 = QtWidgets.QVBoxLayout()\n self.verticalLayout_7.setObjectName(\"verticalLayout_7\")\n self.label_6 = QtWidgets.QLabel(self.layoutWidget)\n self.label_6.setObjectName(\"label_6\")\n self.verticalLayout_7.addWidget(self.label_6)\n self.dirIdentifier = QtWidgets.QLineEdit(self.layoutWidget)\n 
self.dirIdentifier.setObjectName(\"dirIdentifier\")\n self.verticalLayout_7.addWidget(self.dirIdentifier)\n self.verticalLayout_9.addLayout(self.verticalLayout_7)\n self.verticalLayout_8 = QtWidgets.QVBoxLayout()\n self.verticalLayout_8.setObjectName(\"verticalLayout_8\")\n self.label_7 = QtWidgets.QLabel(self.layoutWidget)\n self.label_7.setObjectName(\"label_7\")\n self.verticalLayout_8.addWidget(self.label_7)\n self.dirStatus = QtWidgets.QLineEdit(self.layoutWidget)\n self.dirStatus.setObjectName(\"dirStatus\")\n self.verticalLayout_8.addWidget(self.dirStatus)\n self.verticalLayout_9.addLayout(self.verticalLayout_8)\n self.horizontalLayout_3 = QtWidgets.QHBoxLayout()\n self.horizontalLayout_3.setSpacing(6)\n self.horizontalLayout_3.setObjectName(\"horizontalLayout_3\")\n self.dirCancelBtn = QtWidgets.QPushButton(self.layoutWidget)\n self.dirCancelBtn.setObjectName(\"dirCancelBtn\")\n self.horizontalLayout_3.addWidget(self.dirCancelBtn)\n self.dirStartScanBtn = QtWidgets.QPushButton(self.layoutWidget)\n self.dirStartScanBtn.setDefault(True)\n self.dirStartScanBtn.setFlat(False)\n self.dirStartScanBtn.setObjectName(\"dirStartScanBtn\")\n self.horizontalLayout_3.addWidget(self.dirStartScanBtn)\n self.verticalLayout_9.addLayout(self.horizontalLayout_3)\n self.tabWidget.addTab(self.directoryTab, \"\")\n self.diskImageTab = QtWidgets.QWidget()\n self.diskImageTab.setObjectName(\"diskImageTab\")\n self.layoutWidget_2 = QtWidgets.QWidget(self.diskImageTab)\n self.layoutWidget_2.setGeometry(QtCore.QRect(11, 21, 611, 341))\n self.layoutWidget_2.setObjectName(\"layoutWidget_2\")\n self.verticalLayout_15 = QtWidgets.QVBoxLayout(self.layoutWidget_2)\n self.verticalLayout_15.setContentsMargins(0, 0, 0, 0)\n self.verticalLayout_15.setObjectName(\"verticalLayout_15\")\n self.verticalLayout_14 = QtWidgets.QVBoxLayout()\n self.verticalLayout_14.setObjectName(\"verticalLayout_14\")\n self.verticalLayout_10 = QtWidgets.QVBoxLayout()\n self.verticalLayout_10.setObjectName(\"verticalLayout_10\")\n self.label_9 = QtWidgets.QLabel(self.layoutWidget_2)\n self.label_9.setObjectName(\"label_9\")\n self.verticalLayout_10.addWidget(self.label_9)\n self.horizontalLayout_4 = QtWidgets.QHBoxLayout()\n self.horizontalLayout_4.setObjectName(\"horizontalLayout_4\")\n self.diskImageSource = QtWidgets.QLineEdit(self.layoutWidget_2)\n self.diskImageSource.setObjectName(\"diskImageSource\")\n self.horizontalLayout_4.addWidget(self.diskImageSource)\n self.diskImageSourceBtn = QtWidgets.QPushButton(self.layoutWidget_2)\n self.diskImageSourceBtn.setObjectName(\"diskImageSourceBtn\")\n self.horizontalLayout_4.addWidget(self.diskImageSourceBtn)\n self.verticalLayout_10.addLayout(self.horizontalLayout_4)\n self.verticalLayout_14.addLayout(self.verticalLayout_10)\n self.hfsDisk = QtWidgets.QCheckBox(self.layoutWidget_2)\n self.hfsDisk.setObjectName(\"hfsDisk\")\n self.verticalLayout_14.addWidget(self.hfsDisk)\n self.verticalLayout_15.addLayout(self.verticalLayout_14)\n self.verticalLayout_11 = QtWidgets.QVBoxLayout()\n self.verticalLayout_11.setObjectName(\"verticalLayout_11\")\n self.label_10 = QtWidgets.QLabel(self.layoutWidget_2)\n self.label_10.setObjectName(\"label_10\")\n self.verticalLayout_11.addWidget(self.label_10)\n self.horizontalLayout_5 = QtWidgets.QHBoxLayout()\n self.horizontalLayout_5.setObjectName(\"horizontalLayout_5\")\n self.diskImageDestination = QtWidgets.QLineEdit(self.layoutWidget_2)\n self.diskImageDestination.setObjectName(\"diskImageDestination\")\n 
self.horizontalLayout_5.addWidget(self.diskImageDestination)\n self.diskImageDestinationBtn = QtWidgets.QPushButton(self.layoutWidget_2)\n self.diskImageDestinationBtn.setObjectName(\"diskImageDestinationBtn\")\n self.horizontalLayout_5.addWidget(self.diskImageDestinationBtn)\n self.verticalLayout_11.addLayout(self.horizontalLayout_5)\n self.verticalLayout_15.addLayout(self.verticalLayout_11)\n self.verticalLayout_12 = QtWidgets.QVBoxLayout()\n self.verticalLayout_12.setObjectName(\"verticalLayout_12\")\n self.label_11 = QtWidgets.QLabel(self.layoutWidget_2)\n self.label_11.setObjectName(\"label_11\")\n self.verticalLayout_12.addWidget(self.label_11)\n self.diskImageIdentifier = QtWidgets.QLineEdit(self.layoutWidget_2)\n self.diskImageIdentifier.setObjectName(\"diskImageIdentifier\")\n self.verticalLayout_12.addWidget(self.diskImageIdentifier)\n self.verticalLayout_15.addLayout(self.verticalLayout_12)\n self.verticalLayout_13 = QtWidgets.QVBoxLayout()\n self.verticalLayout_13.setObjectName(\"verticalLayout_13\")\n self.label_12 = QtWidgets.QLabel(self.layoutWidget_2)\n self.label_12.setObjectName(\"label_12\")\n self.verticalLayout_13.addWidget(self.label_12)\n self.diskImageStatus = QtWidgets.QLineEdit(self.layoutWidget_2)\n self.diskImageStatus.setObjectName(\"diskImageStatus\")\n self.verticalLayout_13.addWidget(self.diskImageStatus)\n self.verticalLayout_15.addLayout(self.verticalLayout_13)\n self.horizontalLayout_6 = QtWidgets.QHBoxLayout()\n self.horizontalLayout_6.setSpacing(6)\n self.horizontalLayout_6.setObjectName(\"horizontalLayout_6\")\n self.diskImageCancelBtn = QtWidgets.QPushButton(self.layoutWidget_2)\n self.diskImageCancelBtn.setObjectName(\"diskImageCancelBtn\")\n self.horizontalLayout_6.addWidget(self.diskImageCancelBtn)\n self.diskImageStartScan = QtWidgets.QPushButton(self.layoutWidget_2)\n self.diskImageStartScan.setDefault(True)\n self.diskImageStartScan.setFlat(False)\n self.diskImageStartScan.setObjectName(\"diskImageStartScan\")\n self.horizontalLayout_6.addWidget(self.diskImageStartScan)\n self.verticalLayout_15.addLayout(self.horizontalLayout_6)\n self.tabWidget.addTab(self.diskImageTab, \"\")\n self.optionsTab = QtWidgets.QWidget()\n self.optionsTab.setObjectName(\"optionsTab\")\n self.layoutWidget1 = QtWidgets.QWidget(self.optionsTab)\n self.layoutWidget1.setGeometry(QtCore.QRect(10, 24, 480, 298))\n self.layoutWidget1.setObjectName(\"layoutWidget1\")\n self.verticalLayout_16 = QtWidgets.QVBoxLayout(self.layoutWidget1)\n self.verticalLayout_16.setContentsMargins(0, 0, 0, 0)\n self.verticalLayout_16.setObjectName(\"verticalLayout_16\")\n self.verticalLayout = QtWidgets.QVBoxLayout()\n self.verticalLayout.setObjectName(\"verticalLayout\")\n self.label_2 = QtWidgets.QLabel(self.layoutWidget1)\n self.label_2.setObjectName(\"label_2\")\n self.verticalLayout.addWidget(self.label_2)\n self.virusScan = QtWidgets.QCheckBox(self.layoutWidget1)\n self.virusScan.setChecked(True)\n self.virusScan.setObjectName(\"virusScan\")\n self.verticalLayout.addWidget(self.virusScan)\n self.largeFiles = QtWidgets.QCheckBox(self.layoutWidget1)\n self.largeFiles.setObjectName(\"largeFiles\")\n self.verticalLayout.addWidget(self.largeFiles)\n self.verticalLayout_16.addLayout(self.verticalLayout)\n self.verticalLayout_2 = QtWidgets.QVBoxLayout()\n self.verticalLayout_2.setObjectName(\"verticalLayout_2\")\n self.label_3 = QtWidgets.QLabel(self.layoutWidget1)\n self.label_3.setObjectName(\"label_3\")\n self.verticalLayout_2.addWidget(self.label_3)\n self.horizontalLayout_8 = 
QtWidgets.QHBoxLayout()\n self.horizontalLayout_8.setObjectName(\"horizontalLayout_8\")\n self.md5 = QtWidgets.QRadioButton(self.layoutWidget1)\n self.md5.setChecked(True)\n self.md5.setObjectName(\"md5\")\n self.horizontalLayout_8.addWidget(self.md5)\n self.sha1 = QtWidgets.QRadioButton(self.layoutWidget1)\n self.sha1.setObjectName(\"sha1\")\n self.horizontalLayout_8.addWidget(self.sha1)\n self.sha256 = QtWidgets.QRadioButton(self.layoutWidget1)\n self.sha256.setObjectName(\"sha256\")\n self.horizontalLayout_8.addWidget(self.sha256)\n self.sha512 = QtWidgets.QRadioButton(self.layoutWidget1)\n self.sha512.setObjectName(\"sha512\")\n self.horizontalLayout_8.addWidget(self.sha512)\n self.verticalLayout_2.addLayout(self.horizontalLayout_8)\n self.verticalLayout_16.addLayout(self.verticalLayout_2)\n self.verticalLayout_3 = QtWidgets.QVBoxLayout()\n self.verticalLayout_3.setObjectName(\"verticalLayout_3\")\n self.label_8 = QtWidgets.QLabel(self.layoutWidget1)\n self.label_8.setObjectName(\"label_8\")\n self.verticalLayout_3.addWidget(self.label_8)\n self.removeFiles = QtWidgets.QCheckBox(self.layoutWidget1)\n self.removeFiles.setObjectName(\"removeFiles\")\n self.verticalLayout_3.addWidget(self.removeFiles)\n self.resForks = QtWidgets.QCheckBox(self.layoutWidget1)\n self.resForks.setObjectName(\"resForks\")\n self.verticalLayout_3.addWidget(self.resForks)\n self.verticalLayout_16.addLayout(self.verticalLayout_3)\n self.verticalLayout_4 = QtWidgets.QVBoxLayout()\n self.verticalLayout_4.setObjectName(\"verticalLayout_4\")\n self.label_4 = QtWidgets.QLabel(self.layoutWidget1)\n self.label_4.setObjectName(\"label_4\")\n self.verticalLayout_4.addWidget(self.label_4)\n self.horizontalLayout_10 = QtWidgets.QHBoxLayout()\n self.horizontalLayout_10.setObjectName(\"horizontalLayout_10\")\n self.scanArchives = QtWidgets.QCheckBox(self.layoutWidget1)\n self.scanArchives.setChecked(True)\n self.scanArchives.setObjectName(\"scanArchives\")\n self.horizontalLayout_10.addWidget(self.scanArchives)\n self.bulkExtractor = QtWidgets.QCheckBox(self.layoutWidget1)\n self.bulkExtractor.setObjectName(\"bulkExtractor\")\n self.horizontalLayout_10.addWidget(self.bulkExtractor)\n self.verticalLayout_4.addLayout(self.horizontalLayout_10)\n self.horizontalLayout_11 = QtWidgets.QHBoxLayout()\n self.horizontalLayout_11.setObjectName(\"horizontalLayout_11\")\n self.sfWarnings = QtWidgets.QCheckBox(self.layoutWidget1)\n self.sfWarnings.setChecked(True)\n self.sfWarnings.setObjectName(\"sfWarnings\")\n self.horizontalLayout_11.addWidget(self.sfWarnings)\n self.throttleSiegfried = QtWidgets.QCheckBox(self.layoutWidget1)\n self.throttleSiegfried.setObjectName(\"throttleSiegfried\")\n self.horizontalLayout_11.addWidget(self.throttleSiegfried)\n self.verticalLayout_4.addLayout(self.horizontalLayout_11)\n self.verticalLayout_16.addLayout(self.verticalLayout_4)\n self.tabWidget.addTab(self.optionsTab, \"\")\n Brunnhilde.setCentralWidget(self.centralwidget)\n self.statusbar = QtWidgets.QStatusBar(Brunnhilde)\n self.statusbar.setObjectName(\"statusbar\")\n Brunnhilde.setStatusBar(self.statusbar)\n self.menubar = QtWidgets.QMenuBar(Brunnhilde)\n self.menubar.setGeometry(QtCore.QRect(0, 0, 658, 25))\n self.menubar.setObjectName(\"menubar\")\n self.menuAbout = QtWidgets.QMenu(self.menubar)\n self.menuAbout.setObjectName(\"menuAbout\")\n Brunnhilde.setMenuBar(self.menubar)\n self.actionAbout = QtWidgets.QAction(Brunnhilde)\n self.actionAbout.setObjectName(\"actionAbout\")\n self.menuAbout.addAction(self.actionAbout)\n 
self.menubar.addAction(self.menuAbout.menuAction())\n\n self.retranslateUi(Brunnhilde)\n self.tabWidget.setCurrentIndex(1)\n QtCore.QMetaObject.connectSlotsByName(Brunnhilde)\n\n def retranslateUi(self, Brunnhilde):\n _translate = QtCore.QCoreApplication.translate\n Brunnhilde.setWindowTitle(_translate(\"Brunnhilde\", \"Brunnhilde\"))\n self.label.setText(_translate(\"Brunnhilde\", \"<html><head/><body><p><span style=\\\" font-weight:600;\\\">Source</span></p></body></html>\"))\n self.dirSource.setPlaceholderText(_translate(\"Brunnhilde\", \"/path/to/source/directory\"))\n self.dirSourceBtn.setText(_translate(\"Brunnhilde\", \"Browse\"))\n self.label_5.setText(_translate(\"Brunnhilde\", \"<html><head/><body><p><span style=\\\" font-weight:600;\\\">Destination</span></p></body></html>\"))\n self.dirDestination.setPlaceholderText(_translate(\"Brunnhilde\", \"/path/to/output/directory\"))\n self.dirDestinationBtn.setText(_translate(\"Brunnhilde\", \"Browse\"))\n self.label_6.setText(_translate(\"Brunnhilde\", \"<html><head/><body><p><span style=\\\" font-weight:600;\\\">Accession number/identifier</span></p></body></html>\"))\n self.dirIdentifier.setPlaceholderText(_translate(\"Brunnhilde\", \"Enter accession number or other identifier (no spaces)\"))\n self.label_7.setText(_translate(\"Brunnhilde\", \"<html><head/><body><p><span style=\\\" font-weight:600;\\\">Status</span></p></body></html>\"))\n self.dirCancelBtn.setText(_translate(\"Brunnhilde\", \"Cancel\"))\n self.dirStartScanBtn.setText(_translate(\"Brunnhilde\", \"Start scan\"))\n self.tabWidget.setTabText(self.tabWidget.indexOf(self.directoryTab), _translate(\"Brunnhilde\", \"Directory\"))\n self.label_9.setText(_translate(\"Brunnhilde\", \"<html><head/><body><p><span style=\\\" font-weight:600;\\\">Source</span></p></body></html>\"))\n self.diskImageSource.setPlaceholderText(_translate(\"Brunnhilde\", \"/path/to/diskimage\"))\n self.diskImageSourceBtn.setText(_translate(\"Brunnhilde\", \"Browse\"))\n self.hfsDisk.setText(_translate(\"Brunnhilde\", \"Hierarchical File System (HFS)-formatted disk\"))\n self.label_10.setText(_translate(\"Brunnhilde\", \"<html><head/><body><p><span style=\\\" font-weight:600;\\\">Destination</span></p></body></html>\"))\n self.diskImageDestination.setPlaceholderText(_translate(\"Brunnhilde\", \"/path/to/output/directory\"))\n self.diskImageDestinationBtn.setText(_translate(\"Brunnhilde\", \"Browse\"))\n self.label_11.setText(_translate(\"Brunnhilde\", \"<html><head/><body><p><span style=\\\" font-weight:600;\\\">Accession number/identifier</span></p></body></html>\"))\n self.diskImageIdentifier.setPlaceholderText(_translate(\"Brunnhilde\", \"Enter accession number or other identifier (no spaces)\"))\n self.label_12.setText(_translate(\"Brunnhilde\", \"<html><head/><body><p><span style=\\\" font-weight:600;\\\">Status</span></p></body></html>\"))\n self.diskImageCancelBtn.setText(_translate(\"Brunnhilde\", \"Cancel\"))\n self.diskImageStartScan.setText(_translate(\"Brunnhilde\", \"Start scan\"))\n self.tabWidget.setTabText(self.tabWidget.indexOf(self.diskImageTab), _translate(\"Brunnhilde\", \"Disk Image\"))\n self.label_2.setText(_translate(\"Brunnhilde\", \"<html><head/><body><p><span style=\\\" font-weight:600;\\\">Virus scanning</span></p></body></html>\"))\n self.virusScan.setText(_translate(\"Brunnhilde\", \"Scan for viruses\"))\n self.largeFiles.setText(_translate(\"Brunnhilde\", \"Scan large files and sources (note: may take much longer)\"))\n self.label_3.setText(_translate(\"Brunnhilde\", 
\"<html><head/><body><p><span style=\\\" font-weight:600;\\\">Checksum algorithm</span></p></body></html>\"))\n self.md5.setText(_translate(\"Brunnhilde\", \"md5\"))\n self.sha1.setText(_translate(\"Brunnhilde\", \"sha1\"))\n self.sha256.setText(_translate(\"Brunnhilde\", \"sha256\"))\n self.sha512.setText(_translate(\"Brunnhilde\", \"sha512\"))\n self.label_8.setText(_translate(\"Brunnhilde\", \"<html><head/><body><p><span style=\\\" font-weight:600;\\\">Disk image options</span></p></body></html>\"))\n self.removeFiles.setText(_translate(\"Brunnhilde\", \"Remove files carved from disk image at end of process\"))\n self.resForks.setText(_translate(\"Brunnhilde\", \"Extract AppleDouble resource forks from HFS disks\"))\n self.label_4.setText(_translate(\"Brunnhilde\", \"<html><head/><body><p><span style=\\\" font-weight:600;\\\">General options</span></p></body></html>\"))\n self.scanArchives.setText(_translate(\"Brunnhilde\", \"Scan archive files (zip, tar, gzip, warc, arc)\"))\n self.bulkExtractor.setText(_translate(\"Brunnhilde\", \"Run bulk_extractor\"))\n self.sfWarnings.setText(_translate(\"Brunnhilde\", \"Include Siegfried warnings in HTML report\"))\n self.throttleSiegfried.setText(_translate(\"Brunnhilde\", \"Throttle Siegfried\"))\n self.tabWidget.setTabText(self.tabWidget.indexOf(self.optionsTab), _translate(\"Brunnhilde\", \"Options\"))\n self.menuAbout.setTitle(_translate(\"Brunnhilde\", \"About\"))\n self.actionAbout.setText(_translate(\"Brunnhilde\", \"About\"))\n self.actionAbout.setToolTip(_translate(\"Brunnhilde\", \"About\"))\n\n" }, { "alpha_fraction": 0.7407757639884949, "alphanum_fraction": 0.7455061674118042, "avg_line_length": 27.567567825317383, "blob_id": "58536373dd93cb89b18fd64865fc160e8972ed21", "content_id": "854ad67864bb36db58c86a5ca358430222b30449", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 1057, "license_type": "no_license", "max_line_length": 80, "num_lines": 37, "path": "/install", "repo_name": "tw4l/brunnhilde-gui", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\npython3 -m pip install pyqt5\n\ninstall_dir='/usr/share/brunnhilde-gui'\n\nif [ -d $install_dir ]; then\n sudo rm -rf $install_dir\nfi\nsudo mkdir $install_dir\n\nsudo mv design.py $install_dir\nsudo mv design.ui $install_dir\nsudo mv icon.png $install_dir\nsudo mv launch $install_dir\nsudo mv main.py $install_dir\n\ndesktop_file='/usr/share/applications/BrunnhildeGUI.desktop'\n\nif [ -f $desktop_file ]; then\n sudo rm -rf $desktop_file\nfi\n\nsudo touch $desktop_file\necho '[Desktop Entry]' | sudo tee --append $desktop_file\necho 'Type=Application' | sudo tee --append $desktop_file\necho 'Name=Brunnhilde GUI' | sudo tee --append $desktop_file\necho 'Exec=/usr/share/brunnhilde-gui/launch' | sudo tee --append $desktop_file\necho 'Icon=/usr/share/brunnhilde-gui/icon.png' | sudo tee --append $desktop_file\necho 'Categories=Forensics and Reporting' | sudo tee -append $desktop_file\n\nsudo chown bcadmin:bcadmin $desktop_file\nsudo chmod 644 $desktop_file\nsudo chown -R bcadmin:bcadmin $install_dir\n\nsudo chmod u+x $desktop_file\nsudo chmod u+x $install_dir/launch\n" }, { "alpha_fraction": 0.740628182888031, "alphanum_fraction": 0.758865237236023, "avg_line_length": 31.899999618530273, "blob_id": "5cb3ebe35d9825381adb3f5703e1423b46630244", "content_id": "1ccb025bc4d1ddc3667e9d611466ed752dcd31d5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1974, "license_type": "no_license", 
"max_line_length": 240, "num_lines": 60, "path": "/README.md", "repo_name": "tw4l/brunnhilde-gui", "src_encoding": "UTF-8", "text": "# Brunnhilde GUI\n\nVersion 2.1.0\n\n## Installation \n\n### Install Siegfried \n\nInstall Siegfried. See [Siegfried Github repo](https://github.com/richardlehane/siegfried/) for instructions.\n\n### Install Brunnhilde CLI utility \n\nInstall the Brunnhilde command-line utility:\n\n`sudo pip install brunnhilde` or `sudo pip3 install brunnhilde`\n\nThis version of the GUI requires Brunnhilde 1.6.1 or higher. To check which version of Brunnhilde you have installed, type: `brunnhilde.py -V`. To upgrade your version of Brunnhilde to the latest, use `sudo pip install --upgrade brunnhilde`\n\n### Install PyQt5 \n\nInstall PyQt5 if not already installed. \n\n`sudo pip install pyqt5` or `sudo pip3 install pyqt5`\n\n### GUI Installation in Bitcurator 4.x.x / Ubuntu 22\n\n```bash\ngit clone https://github.com/tw4l/brunnhilde-gui\ncd brunnhilde-gui\nsudo ./install\n```\n\nYou will now be able to launch the Brunnhilde GUI by double-clicking on the Brunnhilde icon in the upper menu under Applications -> Forensics and Reporting.\n\n### GUI Installation in Bitcurator 1.x.x-3.x.x / Ubuntu 18-20\n\n```bash\ngit clone https://github.com/tw4l/brunnhilde-gui\ncd brunnhilde-gui\nsudo ./install-bc2-ubuntu18\n```\n\nYou will now be able to launch the Brunnhilde GUI by double-clicking on the Brunnhilde icon in the \"Forensics and Reporting\" Desktop folder. \n\n### GUI Installation in MacOS/OS X \n\n* Download zip or tar.gz file from Github and extract files to location of your choice \n* Launch GUI by entering the following in a terminal: \n`python3 /path/to/brunnhilde-gui/main.py` \n\n## Usage \n\nFor detailed information about how Brunnhilde works, see the [Brunnhilde command line utility](https://github.com/tw4l/brunnhilde) repo. \n\n## Creators\n\n* Canadian Centre for Architecture\n* Tessa Walsh\n\nThis project was initially developed in 2016-2017 for the [Canadian Centre for Architecture](https://www.cca.qc.ca) by Tessa Walsh, Digital Archivist, as part of the development of the Archaeology of the Digital project.\n" }, { "alpha_fraction": 0.7407407164573669, "alphanum_fraction": 0.7592592835426331, "avg_line_length": 26, "blob_id": "c5309752041e7d569a9372e09f65241c5a80e362", "content_id": "aafa3f007beb09314a057a0c0e6c4bb12d8e936b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 54, "license_type": "no_license", "max_line_length": 41, "num_lines": 2, "path": "/launch", "repo_name": "tw4l/brunnhilde-gui", "src_encoding": "UTF-8", "text": "#!/bin/bash\npython3 /usr/share/brunnhilde-gui/main.py\n" } ]
5
arabbig/symextract
https://github.com/arabbig/symextract
27d18f4a9350587c28db378060fd3c93b047233e
eadbf3b2fc49475b7970b198b97921cfdffa03ce
1f50ae6779257ed3c30b77a1c0b4e7b6378fe7a7
refs/heads/master
2016-09-19T02:57:59.723854
2016-09-09T19:59:18
2016-09-09T19:59:18
67,204,356
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6303166151046753, "alphanum_fraction": 0.6373521089553833, "avg_line_length": 33.373626708984375, "blob_id": "bf3e9df764ced87881f0da66f5fa043feb446630", "content_id": "649cc8c485c40bcdeb7c4a4c1875bb7be7fbeb95", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3127, "license_type": "no_license", "max_line_length": 93, "num_lines": 91, "path": "/production/mserver.py", "repo_name": "arabbig/symextract", "src_encoding": "UTF-8", "text": "import json, ast\nfrom klein import Klein, route, run\nfrom scrapy import signals\nfrom scrapy.crawler import CrawlerRunner\nfrom twisted.internet import reactor\nfrom scrapy.utils.log import configure_logging\n\nfrom ne import *\nfrom gspider import *\nfrom yspider import *\n\nconfigure_logging({'LOG_FORMAT': '%(levelname)s: %(message)s'})\nrunner = CrawlerRunner()\napp = Klein()\n\nitems_all = []\nsymbol_dict_all = []\n\ndef add_item_all(item):\n item_dict = dict(item)\n item_dict['id'] = len(items_all)\n items_all.append(item_dict)\n\ndef create_symbol_dict_all(spider, reason):\n output_json = tranform_to_json(spider.company_entity_pair, spider.entity_id_pairs)\n symbol_dict_all.extend(output_json)\n\ndef build_occurrence_matrix(occurrences):\n n = len(symbol_dict_all)\n nodes = []\n links = []\n for i in range(n):\n target = symbol_dict_all[i]\n target_name = target['occurrence']['search'][0] # e.g., HSBC\n target_page_ids = target['occurrence']['page_id'] # e.g., [0, 1, 2, 7]\n nodes.append({\"name\": target_name, \"group\": 1})\n for j in range(n):\n source = symbol_dict_all[j]\n source_name = source['occurrence']['search'][0] # e.g., HSBC\n source_page_ids = source['occurrence']['page_id'] # e.g., [0, 1, 2, 7]\n value = (len(set(target_page_ids)\n .intersection(set(source_page_ids))))\n #links.append({\"source\": source_name, \"target\": target_name, \"value\": value})\n links.append({\"source\": j, \"target\": i, \"value\": value})\n occurrences['nodes'] = nodes\n occurrences['links'] = links\n\n\n@app.route('/comatrix/items-all', branch=True)\ndef getPageListAll(request):\n del items_all[:]\n del symbol_dict_all[:] \n request.setHeader('Access-Control-Allow-Origin', '*')\n GOOGCralwer = runner.create_crawler(GOOGSpider)\n GOOGCralwer.signals.connect(add_item_all, signals.item_passed)\n #logging\n #collectEntity.append(content);\n search = request.args.get('query')[0]\n d = runner.crawl(GOOGCralwer, search=search,\n start_page=0,\n end_page=5);\n d.addCallback(lambda res: json.dumps(items_all));\n return d\n\n\n@app.route('/comatrix/symdict-all', branch=True)\ndef getSymbolList(request):\n request.setHeader('Access-Control-Allow-Origin', '*')\n \n if symbol_dict_all == [] : \n runner = CrawlerRunner()\n YahooAllCralwer = runner.create_crawler(YahooSpider)\n YahooAllCralwer.signals.connect(create_symbol_dict_all, signal=signals.spider_closed)\n\n for item in items_all :\n item['entities'] = ne_extract(item['content'])\n\n d = runner.crawl(YahooAllCralwer, items_all)\n d.addCallback(lambda res: json.dumps(symbol_dict_all))\n return d\n else :\n return json.dumps(symbol_dict_all)\n\n@app.route('/comatrix/matrix', branch=True)\ndef getCoOccurrences(request) :\n request.setHeader('Access-Control-Allow-Origin', '*')\n occurrences = {}\n build_occurrence_matrix(occurrences)\n return json.dumps(occurrences)\n \napp.run(host=\"0.0.0.0\", port=8080)" }, { "alpha_fraction": 0.5354892015457153, "alphanum_fraction": 0.5426144003868103, "avg_line_length": 41.94117736816406, "blob_id": 
"6efc3403121b0ee2747d24570b2ac8aba2362ad7", "content_id": "0ef0c39cd1c271afd4f66cae4a5409ea79e8ea9b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3649, "license_type": "no_license", "max_line_length": 126, "num_lines": 85, "path": "/production/yspider.py", "repo_name": "arabbig/symextract", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*- \n\ndef prepareData(items) :\n \n def cleanEnt(e):\n return re.sub(u'[\\u201d\\u201c\\u2019]','', e)\n \n for i in range(len(items)) :\n items[i]['entities'] = filter(lambda e: len(e) > 0, map(cleanEnt,items[i]['entities']))\n\n entity_id_pairs = (reduce(lambda x ,y: x + y,\n map(lambda item: [ (e, item['id']) for e in item['entities']], \n items)))\n\n entity_list = list(set(reduce(lambda x, y: x + y,\n map(lambda item: item['entities'], items))))\n \n url_list = map(lambda e: 'http://d.yimg.com/aq/autoc?lang=en-GB&region=UK&query={}'\n .format(urllib.quote_plus(e.encode('utf8'))), \n entity_list)\n \n return (entity_id_pairs, entity_list, url_list)\n\n\nimport scrapy, json, re, urllib\nfrom bs4 import BeautifulSoup\nfrom klein import Klein, route, run\nfrom scrapy import signals\nfrom scrapy.crawler import CrawlerRunner\nfrom twisted.internet import reactor\nfrom scrapy.utils.log import configure_logging\n\nclass YahooSpider(scrapy.Spider):\n name = \"cache\"\n allowed_domains = [\"d.yimg.com\"]\n\n def __init__(self, items=[], *args, **kwargs):\n super(YahooSpider, self).__init__(*args, **kwargs)\n self.entity_id_pairs, self.entity_list, self.start_urls = prepareData(items)\n self.url_entity_dict = dict(zip(self.start_urls, self.entity_list))\n self.company_entity_pair = []\n \n \n def collect(self, json_response, url):\n entity = self.url_entity_dict[url]\n yahoo_entity_pair = ((lambda a: a[0] if len(a) > 0 else None)(\n map(lambda d: (d[u'symbol'],d[u'name']), \n filter( lambda e: e[u'exchDisp'] in set(['London','NASDAQ','NYSE']) and e['typeDisp']=='Equity', \n json_response['ResultSet']['Result'])[:1])),\n entity)\n \n self.company_entity_pair.append(yahoo_entity_pair)\n \n def parse(self, response):\n json_response = json.loads(response.body_as_unicode())\n self.collect(json_response, response.request.url)\n\n\ndef tranform_to_json(company_entity_pair, entity_id_pairs):\n def nameMatched(symbol, entity) :\n if not symbol:\n return False\n else :\n symbol = re.sub(ur\"[\\.,]+\", \"\", symbol) # Remove punctuation \n diff = set(symbol.split()).difference(set(entity.split()))\n diff_lower = set(map(lambda s:s.lower(), diff))\n return diff_lower.issubset(set(['the','plc','holdings', 'group','limited','company',\n 'entertainments', 'inc', 'corporation', 'corp', 'international']))\n \n company_entity_pair=filter(lambda (c,e): c and nameMatched(c[1],e), company_entity_pair)\n company_entity_dict={p[0]:[] for p in company_entity_pair} \n for p in company_entity_pair :\n company_entity_dict[p[0]].append(p[1])\n\n entity_id_dict={p[0]:[] for p in entity_id_pairs}\n for p in entity_id_pairs:\n entity_id_dict[p[0]].append(p[1])\n\n company_entity_id_dict = { c : {'search': es, \n 'page_id': reduce( lambda x, y: x + y,\n map(lambda e:entity_id_dict[e], es)\n )} for c, es in company_entity_dict.iteritems()}\n return [{u'company': {u'ticker': k[0], u'name': k[1] },\n u'occurrence': v} for k, v in company_entity_id_dict.iteritems()]" }, { "alpha_fraction": 0.674925684928894, "alphanum_fraction": 0.6798810958862305, "avg_line_length": 30.546875, "blob_id": 
"ad56707957b3c20facd94b42f434a0687cba8c3f", "content_id": "b1368a5e19c666554eb99b2b064d6e3c07f69b16", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2018, "license_type": "no_license", "max_line_length": 87, "num_lines": 64, "path": "/production/aserver.py", "repo_name": "arabbig/symextract", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*- \n\nimport json, ast\nfrom klein import Klein, route, run\nfrom scrapy import signals\nfrom scrapy.crawler import CrawlerRunner\nfrom twisted.internet import reactor\nfrom scrapy.utils.log import configure_logging\n\nfrom ne import *\nfrom gspider import *\nfrom yspider import *\n\nconfigure_logging({'LOG_FORMAT': '%(levelname)s: %(message)s'})\nrunner = CrawlerRunner()\napp = Klein()\n\nitems = []\nsymbol_dict_one = []\n\ndef add_item(item):\n item_dict = dict(item)\n item_dict['id'] = len(items)\n items.append(item_dict)\n\ndef create_symbol_dict_one(spider, reason):\n output_json = tranform_to_json(spider.company_entity_pair, spider.entity_id_pairs)\n symbol_dict_one.extend(output_json)\n \n@app.route('/articles/headlines', methods=['POST'])\ndef getPageList(request):\n del items[:] \n request.setHeader('Access-Control-Allow-Origin', '*')\n \n GOOGCralwer = runner.create_crawler(GOOGSpider)\n GOOGCralwer.signals.connect(add_item, signals.item_passed)\n content = ast.literal_eval(request.content.read());\n #logging\n #collectEntity.append(content);\n d = runner.crawl(GOOGCralwer, search=content['query'],\n start_page=content['start_page'],\n end_page=content['end_page']);\n d.addCallback(lambda res: json.dumps(items));\n return d\n\n@app.route('/articles/full', methods=['POST'])\ndef getFullArticle(request):\n del symbol_dict_one[:];\n request.setHeader('Access-Control-Allow-Origin', '*');\n \n runner = CrawlerRunner()\n YahooCralwer = runner.create_crawler(YahooSpider)\n YahooCralwer.signals.connect(create_symbol_dict_one, signal=signals.spider_closed)\n \n content = ast.literal_eval(request.content.read())\n item = filter(lambda it: it['id']==content['id'], items)[0]\n item['entities'] = ne_extract(item['content'])\n \n d = runner.crawl(YahooCralwer, [item])\n d.addCallback(lambda res: json.dumps(symbol_dict_one))\n return d\n\napp.run(host=\"0.0.0.0\", port=8000)" }, { "alpha_fraction": 0.3928571343421936, "alphanum_fraction": 0.7142857313156128, "avg_line_length": 26, "blob_id": "e3b402ed453e054623e527984a52d7d00036a8f9", "content_id": "93cfd575f242408d80c8a795e7134ded78f39e1e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 56, "license_type": "no_license", "max_line_length": 26, "num_lines": 2, "path": "/setting.sh", "repo_name": "arabbig/symextract", "src_encoding": "UTF-8", "text": "export ARTI_A=54.213.123.21\nexport STAT_A=54.200.102.113\n\n\n" }, { "alpha_fraction": 0.6022727489471436, "alphanum_fraction": 0.6477272510528564, "avg_line_length": 28, "blob_id": "70575d13c92ff15db16577a773fd7aa8d72b0209", "content_id": "c6bd144133fb1f22bb36a3f2b62300e1b84ab520", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 88, "license_type": "no_license", "max_line_length": 38, "num_lines": 3, "path": "/production/start.sh", "repo_name": "arabbig/symextract", "src_encoding": "UTF-8", "text": "rm log.*\nnohup python aserver.py > log.a 2>&1 &\nnohup python mserver.py > log.m 2>&1 &\n\n" }, { "alpha_fraction": 0.6617491841316223, "alphanum_fraction": 
0.6701791286468506, "avg_line_length": 29.645160675048828, "blob_id": "7dcf27255cff778d93f49bebd457a96f25d64fd1", "content_id": "6aff559859d7ce9df79804d4f7d4417d79627644", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 949, "license_type": "no_license", "max_line_length": 96, "num_lines": 31, "path": "/proxy/router.js", "repo_name": "arabbig/symextract", "src_encoding": "UTF-8", "text": "var express = require('express');\nvar app = express();\nvar httpProxy = require('http-proxy');\nvar proxy = httpProxy.createProxyServer();\n\nvar fs = require('fs');\nvar servers = JSON.parse(fs.readFileSync('server_list.txt', 'utf8'));\n\nvar max = 5;\nvar counter = max-1;\n\napp.all(\"/articles/*\", function(req, res) {\n proxy.web(req, res, {target: servers[counter].aserver}); \t\n});\n\napp.all(\"/comatrix/*\", function(req, res) {\n proxy.web(req, res, {target: servers[counter].mserver});\n});\n\napp.all(\"/inspect\", function(req, res) {\n\tres.send(\"aserver=\"+servers[counter].aserver+\" mserver=\"+servers[counter].mserver);\n});\n\napp.all(\"/rotate\", function(req, res) {\n\tcounter = (counter + 1) % max;\n\tres.header(\"Access-Control-Allow-Origin\", \"*\");\n \tres.header(\"Access-Control-Allow-Headers\", \"Origin, X-Requested-With, Content-Type, Accept\");\n\tres.send(\"aserver=\"+servers[counter].aserver+\" mserver=\"+servers[counter].mserver);\n});\n\napp.listen(3000);" }, { "alpha_fraction": 0.536003828048706, "alphanum_fraction": 0.5402956604957581, "avg_line_length": 34.965518951416016, "blob_id": "6ce51a5e61f26555a6a8f6b1214179d0b79cc208", "content_id": "b6856652cd88d22453870f98f93f3a64994b0cb8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2097, "license_type": "no_license", "max_line_length": 141, "num_lines": 58, "path": "/production/gspider.py", "repo_name": "arabbig/symextract", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*- \n\nimport scrapy, urllib, re\nfrom scrapy.http import Request\nfrom bs4 import BeautifulSoup\n\nclass PageItem(scrapy.Item):\n title = scrapy.Field()\n content = scrapy.Field()\n entities = scrapy.Field()\n\ndef siteFT_query(search):\n query = '{}+{}'.format('site%3Awww.ft.com','+'.join(search.split()))\n return 'https://www.google.co.th/search?q=' + query\n\nclass GOOGSpider(scrapy.Spider):\n name = \"Web-List\"\n allowed_domains = [\"www.google.co.th\"]\n\n\n def __init__(self, search='', start_page=0, end_page=1, *args, **kwargs):\n \n super(GOOGSpider, self).__init__(*args, **kwargs)\n \n if not search:\n raise ValueError('No Search Term')\n \n self.start_urls = ['{}&start={}'.format(siteFT_query(search), i*10) for i in range(start_page, end_page)]\n \n \n def parse_url_contents(self, response):\n paragraphs = []\n for sel in response.css('#storyContent > p'):\n paragraphs.append(BeautifulSoup(sel.extract(), \"lxml\").get_text()) \n \n article = u'<p>{}</p>'.format(u'</p><p>'.join(paragraphs))\n item = response.meta['item']\n item['content'] = article\n #item['entities'] = self.ne_extract(article)\n \n return item\n\n def parse(self, response):\n for sel in response.css('#search .g'):\n try:\n item = PageItem()\n item['title'] = re.sub('- (?:FT.com|Financial Times)', '', BeautifulSoup(sel.css('.r a').extract_first(), \"lxml\").get_text())\n encodedURL = sel.css('.s li a::attr(href)').extract_first()[7:]\n if encodedURL.find('ft.com/cms') != -1 :\n decodedURL = urllib.unquote(encodedURL).decode()\n 
request = Request(decodedURL, callback = lambda r: self.parse_url_contents(r), dont_filter=True)\n                    request.meta['item'] = item\n                    yield request\n                else :\n                    pass\n            except TypeError:\n                pass\n        " }, { "alpha_fraction": 0.5366083979606628, "alphanum_fraction": 0.5418044328689575, "avg_line_length": 31.07575798034668, "blob_id": "fdac526408728cd0d2714f519576800602061db8", "content_id": "86ee7f48c8215a00208f04b66aed1d134c92f28a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2121, "license_type": "no_license", "max_line_length": 106, "num_lines": 66, "path": "/test/nermethod.py", "repo_name": "arabbig/symextract", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*- \n\nimport re\nimport nltk, pickle\nimport nltk.data\nfrom nltk.tokenize import word_tokenize\nfrom nltk.tokenize import sent_tokenize\nfrom nltk import pos_tag\n\nne_chunk_sents = pickle.load(open('treebank_chunk_ub.pickle'))\n\ndef preprocess(text):\n    '''for i in range(len(tokenList)):\n        if tokenList[i] == u'–' :\n            tokenList[i] = u'-'\n        elif tokenList[i] == u'&':\n            tokenList[i] = u'Amps''' \n    text = text.replace(u'–', u'-').replace(u'&', u'Amps')\n    return text\n\ndef postprocess(entity_name):\n    return entity_name.replace('Amps ', '& ')\n\ndef exclude(wordList):\n    \n    float_match = re.compile(r'[-+]?\\d*\\.?\\d+(?:[eE][-+]?\\d+)?$').match \n    def is_number_re(val):\n        return bool(float_match(val))\n\n    if len(wordList) == 1 and is_number_re(wordList[0]) : \n        return True \n    elif wordList[0] in set(['January', 'February', 'March', 'April', 'May', 'June', \\\n                             'July', 'August', 'September', 'October', 'November', 'December',\\\n                             'Sunday', 'Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday'\\\n                            ]) : \n        return True \n    \n    return False\n    \ndef rules(wordList, labelList):\n    \n    if exclude(wordList) :\n        return False\n\n    pairs = zip(wordList, labelList)\n    cleanedPairs = filter(lambda (w,l) : l != 'DT' and l!= 'IN', pairs)\n    if len(cleanedPairs) == 0 :\n        return False\n    \n    Ws, Ls = zip(*cleanedPairs)\n    \n    return all(map(lambda w: w[0].upper() == w[0], Ws)) and Ls[0] == 'NNP'\n    \ndef extract_entity_names(t):\n    entity_names = []\n    if hasattr(t, 'label') and t.label:\n        if t.label() == 'NP':\n            wordList = [child[0] for child in t]\n            labelList = [child[1] for child in t]\n            if rules(wordList, labelList):\n                entity_names.append(postprocess(' '.join([child[0] for child in t])))\n    else:\n        for child in t:\n            entity_names.extend(extract_entity_names(child))\n    return entity_names\n" }, { "alpha_fraction": 0.6682808995246887, "alphanum_fraction": 0.6731234788894653, "avg_line_length": 40.099998474121094, "blob_id": "507cba22b36ecf1e56bd5cdd9a8d56d97d5ae65b", "content_id": "6d13a208d3e99f24c38af220b76356f12e4383b5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "HTML", "length_bytes": 413, "license_type": "no_license", "max_line_length": 95, "num_lines": 10, "path": "/docs/headlines.html", "repo_name": "arabbig/symextract", "src_encoding": "UTF-8", "text": " \n<div data-ng-repeat=\"article in articles | orderBy:'count':true\" >\n\t<div class=\"bs-callout bs-callout-info\" id=\"callout-navbar-breakpoint\" > \n\t\t<h4>{{article.title}}</h4> \n\t\t{{article.paragraphOne}}\n\t\t<div ng-if=\"article.ready\">\n\t\t\t<button type=\"button\" ng-click=\"article.open()\" class=\"btn btn-primary btn-sm\">Read</button>\n\t\t\t<span>{{article.matches.length}} companies detected</span>\n\t\t</div>\n\t</div>\n</div>\n" }, { "alpha_fraction": 
0.5929027199745178, "alphanum_fraction": 0.6000797152519226, "avg_line_length": 29.975309371948242, "blob_id": "ca172e9a3ff100d166f10cd512c18fd9e240f627", "content_id": "cd4b4b0f29a59f2b5581c48e2bc0e66cf1cf55ac", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2512, "license_type": "no_license", "max_line_length": 87, "num_lines": 81, "path": "/production/ne.py", "repo_name": "arabbig/symextract", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*- \n''' \n Author: Chan Pruksapha\n Modified from : https://gist.github.com/onyxfish/322906\n Changes :\n 1) Use Treebank Chunk (WSJ corpus based) in place of default chunk (ACE corpus)\n 2) Node maching from label 'NE' change to more complex rules\n'''\n\nimport nltk, re, pickle\nimport nltk.data\nfrom nltk.tokenize import word_tokenize\nfrom nltk.tokenize import sent_tokenize\nfrom nltk import pos_tag\n\nne_chunk_sents = pickle.load(open('treebank_chunk_ub.pickle'))\n\ndef preprocess(text):\n '''for i in range(len(tokenList)):\n if tokenList[i] == u'โ€“' :\n tokenList[i] = u'-'\n elif tokenList[i] == u'&':\n tokenList[i] = u'Amps''' \n text = text.replace(u'โ€“', u'-').replace(u'&', u'Amps')\n return text\n\ndef postprocess(entity_name):\n return entity_name.replace('Amps ', '& ')\n\ndef exclude(wordList):\n \n float_match = re.compile(r'[-+]?\\d*\\.?\\d+(?:[eE][-+]?\\d+)?$').match \n def is_number_re(val):\n return bool(float_match(val))\n\n if len(wordList) == 1 and is_number_re(wordList[0]) : \n return True \n \n return False\n \ndef rules(wordList, labelList):\n \n if exclude(wordList) :\n return False\n\n pairs = zip(wordList, labelList)\n cleanedPairs = filter(lambda (w,l) : l != 'DT' and l!= 'IN', pairs)\n if len(cleanedPairs) == 0 :\n return False\n \n Ws, Ls = zip(*cleanedPairs)\n \n return all(map(lambda w: w[0].upper() == w[0], Ws)) and Ls[0] == 'NNP'\n \ndef extract_entity_names(t):\n entity_names = []\n if hasattr(t, 'label') and t.label:\n if t.label() == 'NP':\n wordList = [child[0] for child in t]\n labelList = [child[1] for child in t]\n if rules(wordList, labelList):\n entity_names.append(postprocess(' '.join([child[0] for child in t])))\n else:\n for child in t:\n entity_names.extend(extract_entity_names(child))\n return entity_names\n\ndef ne_extract(article): \n sentences = sent_tokenize(preprocess(article))\n tokenized_sentences = [word_tokenize(sentence) for sentence in sentences]\n tagged_sentences = [pos_tag(sentence) for sentence in tokenized_sentences]\n chunked_sentences = ne_chunk_sents.parse_sents(tagged_sentences)\n\n entity_names = []\n for tree in chunked_sentences:\n entity_names.extend(extract_entity_names(tree))\n\n entity_set = list(set(entity_names))\n\n return entity_set" } ]
10
jmanuel1/todo.txt-calendar
https://github.com/jmanuel1/todo.txt-calendar
db1ed4370d1f74b31bac7f1786296b3f09182ba0
ff2b8bab101f8b910db6ad320bd22016c520655d
3834cd29ce05dcb9161167997adfc3ac0e54054a
refs/heads/master
2020-11-27T14:24:26.051421
2019-12-21T21:38:50
2019-12-21T21:38:50
229,486,226
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5864039063453674, "alphanum_fraction": 0.5932590961456299, "avg_line_length": 35.46875, "blob_id": "5126f035cc537a448661fb4cfd7ec3ef462cd55e", "content_id": "d5df9dbd25745dab5ce1f3ef202a5b9f95baf04a", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3501, "license_type": "permissive", "max_line_length": 143, "num_lines": 96, "path": "/todo_txt_calendar.py", "repo_name": "jmanuel1/todo.txt-calendar", "src_encoding": "UTF-8", "text": "import sys\nimport re\nimport datetime\nimport calendar\nimport operator\nimport cmd\nfrom typing import Iterable, Optional, Callable\n\n\nclass Todo:\n\n __date_pattern = r'[0-9]{4}-[0-9]{2}-[0-9]{2}'\n __due_pattern = r'(?:\\s+|^)due:(' + __date_pattern + r')\\s*'\n\n def __init__(self, string: str):\n priority_or_done_pattern = r'^((\\([A-Z]\\))|x)'\n start_pattern = (priority_or_done_pattern\n + r'\\s(' + self.__date_pattern + r'\\s){1,2}')\n self.__text = re.sub(start_pattern, '', string)\n # don't remove + and @ from text since we can use them as part of\n # sentences\n self.__text = re.sub(self.__due_pattern, ' ', self.__text).strip()\n due_date_match = re.search(self.__due_pattern, string)\n self.__due_date = None\n if due_date_match:\n due_date_string = due_date_match.group(1)\n self.__due_date = datetime.date.fromisoformat(due_date_string)\n\n def __str__(self) -> str:\n due_text = ' due ' + str(self.__due_date) if self.__due_date else ''\n return f'{self.__text}{due_text}'\n\n @property\n def due_date(self) -> Optional[datetime.date]:\n return self.__due_date\n\n\nclass TodoCalendar(calendar.TextCalendar):\n\n def __init__(self, todos: Iterable[Todo], firstweekday: int = 0):\n super().__init__(firstweekday)\n self.__todos = todos\n\n def __count_todos_for_date(self, date: datetime.date) -> int:\n def predicate(todo: Todo) -> bool:\n return bool(todo.due_date) and todo.due_date == date\n return len(tuple(filter(predicate, self.__todos)))\n\n def formatmonth(self, year: int, month: int, width: int = 0, height=0) -> str:\n def get_abbr(n: int) -> str:\n return calendar.day_abbr[n][:-1]\n\n def make_week_string(week: Iterable[int]) -> str:\n return (''.join(map(lambda d: d and str(f'{str(d).ljust(2)} ({self.__count_todos_for_date(datetime.date(year, month, d))})').ljust(\n width) or ' ' * width, week)))\n\n month_str = calendar.month_name[month]\n min_width = 6\n width = max(width, min_width)\n weekday_abbrs = map(get_abbr, self.iterweekdays())\n week_header = (' ' * (width - 2)).join(weekday_abbrs)\n days = '\\n'.join(make_week_string(week) for week in self.monthdayscalendar(year, month))\n return f'{month_str} {year}\\n{week_header}\\n{days}\\n'\n\n\nclass CLI(cmd.Cmd):\n intro = 'Welcome to the todo.txt calendar. Type help or ? 
for help.\\n'\n prompt = 'top level> '\n\n def do_m(self, arg: str) -> None:\n # view month (like in Google Calendar)\n TodoCalendar(todos, 6).prmonth(\n datetime.date.today().year, datetime.date.today().month, 8)\n\n def do_due(self, arg: str) -> None:\n def month_equal(todo: Todo) -> bool:\n return bool(todo.due_date and todo.due_date.month == 12)\n # list tasks due in the selected month in ascending due date order\n print('Due this month:')\n\n months_todos = filter(month_equal, todos)\n get_due_date: Callable[[Todo], datetime.date] = operator.attrgetter('due_date')\n months_todos_sorted = sorted(months_todos, key=get_due_date)\n print(*months_todos_sorted, sep='\\n')\n\n\ndef parse_todos(string: str) -> Iterable[Todo]:\n for line in string.splitlines():\n yield Todo(line)\n\n\nwith open(sys.argv[1]) as file:\n todo_file_content = file.read()\ntodos = tuple(parse_todos(todo_file_content))\n\nCLI().cmdloop()\n" }, { "alpha_fraction": 0.7567567825317383, "alphanum_fraction": 0.7567567825317383, "avg_line_length": 29.83333396911621, "blob_id": "25501b58fec65522706f56e2d732476ee2175dbe", "content_id": "849e48ddd8e53ada3911ec53b3283348685c65b6", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 185, "license_type": "permissive", "max_line_length": 68, "num_lines": 6, "path": "/README.md", "repo_name": "jmanuel1/todo.txt-calendar", "src_encoding": "UTF-8", "text": "# todo.txt-calendar\n\nA calendar view of your todo.txt.\n\nThis is in an experimental stage. Maybe I'll build upon it to make a\nproductivity tool I'd want to use every day, but who knows?\n" } ]
2
asurendran-keck/OCAM2k
https://github.com/asurendran-keck/OCAM2k
7b3b175ba0cf0d16884ab0ea474934361a0c4635
e579b85d885c62cf58fb205548bb8b5723c828e9
1a4b868deb3d4a2cf30ca4d414eeb02de91544c1
refs/heads/master
2022-09-05T05:38:20.736274
2022-08-05T01:15:36
2022-08-05T01:15:36
247,398,756
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5888490080833435, "alphanum_fraction": 0.6199237108230591, "avg_line_length": 65.52238464355469, "blob_id": "1bc9983e79f0e013d6126b25fd9432917a3b2070", "content_id": "e5d73910c04bdffc6906f2842f41f8c1f6a580ca", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8914, "license_type": "no_license", "max_line_length": 210, "num_lines": 134, "path": "/ocam2k_descramble_batch_binned.py", "repo_name": "asurendran-keck/OCAM2k", "src_encoding": "UTF-8", "text": "#----------Program to unscramble the OCAM2K pixels taken with the EDT VisionLink F4 FG and compute the read noise/dark current---------\nfrom PIL import Image\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport multiprocessing\nfrom joblib import Parallel, delayed\nfrom astropy.io import fits\nimport os\n\n# Import data from bmp images taken using EDT program simple_take or take by giving the base filename as 'frame'\n# The program first imports the bmp image, converts the 8 bit bmp to 16 bit numpy array and applies the descrambling operation to get the vector of 57600 16-bit pixels\n# The ADU matrix are divided by the gain given in the test report to get the actual electron count\ndef unscrambleImage(k, folder, filename_base, gain_total, descrambler, pixels_total, ocam2_binning2x2_offset):\n img_unscrambled_vector = np.zeros(pixels_total)\n filename_raw = folder + '/' + filename_base + '_' + \"{:0>3d}\".format(k) + '.bmp' # Import bmp file\n im = Image.open(filename_raw)\n img = np.array(im).astype('uint16') # Convert image to numpy array\n img16 = np.zeros((np.shape(img)[0], int(np.shape(img)[1] / 2)))\n for i in range(0, np.shape(img)[1], 2):\n img16[:, int (i / 2)] = (img[:, i + 1]<<8) + (img[:, i]) # Convert pixels from 8 bit to 16 bit\n img16_vector = img16.flatten()\n for i in range(0, int(pixels_total / 2)):\n img_unscrambled_vector[i] = img16_vector[descrambler[i * 2]];\n img_unscrambled_vector[int(pixels_total / 2) + i] = img16_vector[descrambler[(i * 2) + ocam2_binning2x2_offset]];\n # img_unscrambled_vector[:] = img16_vector[descrambler[:]] # Descramble pixels to the 57600 pixel format\n return img_unscrambled_vector / gain_total\n\n# Set the number of total frames, fps, gain (EMCCD * amplifier) and total valid pixels\nframes = 5000\n# Array of FPS values for which the read noise and dark current is computed. Accuracy will be better for more FPS sampling points.\n# The folder names of the frames should be the same as that of the FPS value\n# fps = ['3622', '2067', '1000', '500', '333', '200', '100', '50', '33', '20', '10']\nfps = ['3622', '2067', '1000', '500', '333', '200', '100']\n# fps = ['3622']\ngain_total = 27.665 # Total gain derived from OCAM2K test report. 
Product of EMCCD gain and amplifier gain\npixels_total = 14400\nfits_write = 1 # Set to one if fits datacube of images (for every fps setting) has to be generated\nocam2_binning2x2_offset = int(57600 - pixels_total) # Offset into the 57600-entry descrambler table used for the second half of the binned frame\nfont = {'family': 'serif',\n        'color': 'darkred',\n        'weight': 'normal',\n        'size': 16,\n        }\ndescrambler = np.loadtxt('ocam2_descrambling.txt', delimiter = ',').astype(int) # Source file from FLI for descrambling pixels\nnum_cores = multiprocessing.cpu_count() # Number of CPU cores used for processing the descrambling of the frames\nvar_pix = np.zeros((pixels_total, np.size(fps))) # Variance of pixel count for each pixel at different exposure times\ndark_current = np.zeros(pixels_total) # Dark current for each pixel\nread_noise = np.zeros(pixels_total) # Read noise for each pixel\nread_noise_sq = np.zeros(pixels_total) # Read noise squared for each pixel\nread_noise_ch = np.zeros((2, 4)) # Read noise for each of the 2x4 (8 in total) output channels\ndark_current_ch = np.zeros((2, 4)) # Dark current for each of the 2x4 (8 in total) output channels\nexpTime = np.zeros(np.size(fps)) # Exposure time array (one for each FPS value)\n\n# Loops over the different FPS folders, writes to FITS file (optional) and computes variance for each pixel at an FPS setting\nfor j in range(np.size(fps)):\n    # Parsing bmp files acquired with EDT FG\n    folder = '/home/aodev/asurendran/OCAM2k/2020-03-17/binned/' + fps[j]\n    filename_base = 'frame' # This should be the same name as the base filename given during frame grab through EDT FG\n    # Calls the descrambling function in parallel mode\n    pixelCount_unscrambled = Parallel(n_jobs=num_cores)(delayed(unscrambleImage)(k, folder, filename_base, gain_total, descrambler, pixels_total, ocam2_binning2x2_offset) for k in range(0, frames))\n    pixelCount_unscramblednp = np.asarray(pixelCount_unscrambled)\n    print('Unscrambled ' + fps[j] + ' fps')\n\n    # Convert bmp files into FITS datacube for each FPS setting\n    if fits_write == 1:\n        img = np.reshape(pixelCount_unscramblednp, (frames, 120, 120)).astype(np.int16)\n        hdul = fits.HDUList(fits.PrimaryHDU(data = img))\n        filename_out = 'img_darkf' + str(frames) + '_binned_' + str(fps[j]) + 'fps.fits'\n        if os.path.exists(filename_out):\n            os.remove(filename_out)\n        hdul.writeto(filename_out)\n        hdul.close()\n        print('Wrote ' + fps[j] + ' fps FITS file')\n    # Variance computation\n    var_pix[:, j] = np.var(pixelCount_unscrambled, axis = 0)\n    expTime[j] = 1 / int(fps[j])\n\n# Linear fit of pixel count variance vs exposure time to compute dark current and read noise\nexpTime_2d = np.transpose(np.reshape(np.hstack((expTime, np.ones(np.size(fps)))), (2, np.size(fps)))) # Horizontal stacking of exposure time with column of ones to be used in linear fitting\nfor i in range(0, pixels_total):\n    dark_current[i], read_noise_sq[i] = np.linalg.lstsq(expTime_2d, var_pix[i, :], rcond=None)[0] # For each pixel\nmean_dark_current, mean_read_noise_sq = np.linalg.lstsq(expTime_2d, np.mean(var_pix, axis = 0), rcond=None)[0] # Mean read noise and dark current over all pixels and frames\nread_noise = np.sqrt(read_noise_sq) # Read noise as the square root of the intercept of the linear fitting line\nmean_read_noise = np.sqrt(mean_read_noise_sq)\n# Dark current and read noise for each channel\nfor i in range(0, 2):\n    for j in range(0, 4):\n        read_noise_ch[i, j] = np.mean(np.reshape(read_noise, (120, 120))[i * 60:(i + 1) * 60, j * 30:(j + 1) * 30])\n        dark_current_ch[i, j] = np.mean(np.reshape(dark_current, (120, 120))[i * 60:(i + 1) * 60, j * 30:(j + 1) * 
30])\n\n# Imshow read noise for all pixels\nfig1 = plt.figure()\nplt.imshow(np.reshape(read_noise, (120, 120)))\nplt.title('Read noise over ' + str(frames) + ' frames, Mean = ' + \"{:.3f}\".format(mean_read_noise) + 'e-')\nplt.colorbar()\nfor i in range(0, 2):\n for j in range(0, 4):\n plt.text((j * 30) + 15, (i * 60) + 30, \"{:.3f}\".format(read_noise_ch[i, j], fontdict=font))\nmanager = plt.get_current_fig_manager()\nmanager.window.showMaximized()\n# plt.show()\nfilename_out = 'img_readnoisef' + str(frames) + '_binned.png'\nplt.savefig(filename_out, bbox_inches = 'tight')\nnp.savetxt('read_noisef' + str(frames) + '_binned.txt', read_noise, delimiter = ',')\n\n# Imshow dark current for all pixels\nfig2 = plt.figure()\nplt.imshow(np.reshape(dark_current, (120, 120)))\nplt.title('Dark current over ' + str(frames) + ' frames, Mean = ' + \"{:.3f}\".format(mean_dark_current) + 'e-')\nplt.colorbar()\nfor i in range(0, 2):\n for j in range(0, 4):\n plt.text((j * 30) + 15, (i * 60) + 30, \"{:.3f}\".format(dark_current_ch[i, j], fontdict=font))\nmanager = plt.get_current_fig_manager()\nmanager.window.showMaximized()\n# plt.show()\nfilename_out = 'img_darkcurrentf' + str(frames) + '_binned.png'\nplt.savefig(filename_out, bbox_inches = 'tight')\nnp.savetxt('dark_currentf' + str(frames) + '_binned.txt', dark_current, delimiter = ',')\n\n# Plot data curve for variance vs exposure time and show the read noise^2 (intercept) and dark current (slope)\nfig3 = plt.figure()\nplt.plot(expTime, np.mean(var_pix, axis = 0), 'o')\nplt.plot(np.insert(expTime, 0, 0), np.insert((mean_dark_current * expTime) + mean_read_noise_sq, 0, mean_read_noise_sq))\nplt.legend(['OCAM2K data points', 'Fitted linear equation'])\nplt.title('Average variance of pixel count vs exposure time, \\nIntercept (${RON}^2$) = ' + \"{:.3f}\".format(mean_read_noise_sq) + '${e-}^2$ \\nSlope (Dark current) = ' + \"{:.3f}\".format(mean_dark_current) + 'e-')\nplt.xlabel('Exposure time (seconds)')\nplt.ylabel('Variance of pixel count (e-)')\nplt.gca().set_xlim(left = 0)\n# manager = plt.get_current_fig_manager()\n# manager.window.showMaximized()\n# plt.show()\nfilename_out = 'img_variancef' + str(frames) + '_binned.png'\nplt.savefig(filename_out, bbox_inches = 'tight')\n" }, { "alpha_fraction": 0.5707547068595886, "alphanum_fraction": 0.6194968819618225, "avg_line_length": 20.931034088134766, "blob_id": "fc4ab46ef9ea6a42330cf36b41e84f3a2bbb8358", "content_id": "19470310926ee2a029999307e89693743ce7de81", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 636, "license_type": "no_license", "max_line_length": 79, "num_lines": 29, "path": "/OCAM2K_Disp.py", "repo_name": "asurendran-keck/OCAM2k", "src_encoding": "UTF-8", "text": "#!/usr/bin/env kpython3\nimport getopt\nimport sys\nimport time\nimport pysao\nimport OCAM2K_Images\nimport numpy as np\n## import library used to manipulate fits files\nfrom astropy.io import fits\n\ntry:\n bkgd = fits.open('/home/aodev/Data/220201/OCAM2/bkg_g300_med.fits')[0].data\n print('bkgd loaded')\nexcept:\n#if True:\n bkgd = np.zeros([240,240])\n\n\nif __name__ == '__main__':\n ds9=pysao.ds9()\n if len(sys.argv) == 1:\n inter = 0\n else:\n inter = int(sys.argv[1])\n while True:\n if inter:\n input()\n ds9.view(OCAM2K_Images.get_image()[0].astype(np.float32)-bkgd)\n time.sleep(0.1)\n" }, { "alpha_fraction": 0.5052903890609741, "alphanum_fraction": 0.5375956892967224, "avg_line_length": 48.35555648803711, "blob_id": "bb02d2b93c30ccc44c80ee11cd8f4ba9aba4a591", 
"content_id": "2f6d3f2c21cc20bb4a37ed07ebd9737f979f8fea", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8884, "license_type": "no_license", "max_line_length": 147, "num_lines": 180, "path": "/OCAM2K_flat_analysis_multidate.py", "repo_name": "asurendran-keck/OCAM2k", "src_encoding": "UTF-8", "text": "#!/usr/bin/env kpython3\nfrom PIL import Image\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.mlab as mlab\nimport os\nfrom astropy.io import fits\nfrom astropy.utils.data import get_pkg_data_filename\nimport time\nimport sys\n\nif __name__ == '__main__':\n dirrd = '/usr/local/aodev/Data/220127/OCAM2K/' # Location of the flat field raw and median files\n dirflat = '/usr/local/aodev/Data/220127/OCAM2K/'\n suffix = ''\n gain = np.array([1, 10, 100, 300, 600]) # Array of gains at which the FF was taken\n fitswrite = 0 # 1 for writing new FITS flatmaps and corrected FF, 0 to skip this step.\n channelnoise = 1 # Flag for displaying channel noise in FFC image\n rawindex = 400\n # light_med = np.zeros([np.size(gain), 240, 240])\n # light_snr_raw = np.zeros((500, np.size(gain)))\n # light_snr_postff = np.zeros((500, np.size(gain)))\n ph_noise = np.zeros(np.shape(gain)[0])\n for i in range(np.shape(gain)[0]):\n print('Reading raw fits files at gain ' + str(gain[i]))\n filename_medflat = dirflat + 'light_g' + str(gain[i]) + '_med.fits'\n if suffix == '':\n filename_med = dirrd + 'light_g' + str(gain[i]) + '_med.fits'\n filename_raw = dirrd + 'light_g' + str(gain[i]) + '_raw.fits'\n else:\n filename_med = dirrd + 'light_g' + str(gain[i]) + '_' + suffix + '_med.fits'\n filename_raw = dirrd + 'light_g' + str(gain[i]) + '_' + suffix + '_raw.fits'\n filename_flatmap = dirflat + 'light_g' + str(gain[i]) + '_fmap.fits'\n filename_ffc = dirrd + 'light_g' + str(gain[i]) + '_ffc.fits'\n # Import median flat file\n hdu_medflat = fits.open(filename_medflat)\n light_medflat = hdu_medflat[0].data\n hdu_medflat.close()\n # Import raw file\n hdu_raw = fits.open(filename_raw)\n light_raw = hdu_raw[0].data\n hdu_raw.close()\n # Import median file\n hdu_med = fits.open(filename_med)\n light_med = hdu_med[0].data\n hdu_med.close()\n # Compute normalized flats for each quadrant\n\n print('Computing FF and FFC images at gain ' + str(gain[i]))\n light_med_max = np.max(np.max(light_medflat))\n light_flat_map = light_medflat / np.max(np.max(light_medflat))\n light_flat_field = np.zeros(np.shape(light_raw))\n light_noise_postff = np.zeros(np.shape(light_raw)[0])\n # Compute flat field and total noise in the FFC image\n for k in range(np.shape(light_raw)[0]):\n light_flat_field[k,:,:] = light_raw[k,:,:] / light_flat_map\n light_noise_postff[k] = np.std(light_flat_field[k,:,:])\n # Compute noise in each channel of the FFC image\n aduffc_chnoise_raw = np.zeros((np.shape(light_raw)[0], 2, 4))\n aduffc_chnoise = np.zeros((2, 4))\n for k in range(0, 2):\n for j in range(0, 4):\n for p in range(0, np.shape(light_raw)[0]):\n aduffc_chnoise_raw[p, k, j] = np.std(light_flat_field[p, k * 120:(k + 1) * 120, j * 60:(j + 1) * 60])\n aduffc_chnoise[k, j] = np.mean(aduffc_chnoise_raw[:, k, j])\n\n # Write to fits flatmap and FF corrected files\n if fitswrite == 1:\n print('Writing FF and FFC fits files for gain ' + str(gain[i]))\n hdu_fm = fits.PrimaryHDU(light_flat_map)\n hdu_fm.writeto(filename_flatmap, overwrite=True)\n hdu_ffc = fits.PrimaryHDU(light_flat_field)\n hdu_ffc.writeto(filename_ffc, overwrite=True)\n\n # Relative channel gain computation\n # 1 - 
Relative gain of the flats\n adu_flatch = np.zeros((2, 4))\n for k in range(0, 2):\n for j in range(0, 4):\n adu_flatch[k, j] = np.mean(light_medflat[k * 120:(k + 1) * 120, j * 60:(j + 1) * 60])\n relflatgain_ch = adu_flatch / np.max(np.max(adu_flatch))\n # 2 - Relative gain of the median image\n adu_ch = np.zeros((2, 4))\n for k in range(0, 2):\n for j in range(0, 4):\n adu_ch[k, j] = np.mean(light_med[k * 120:(k + 1) * 120, j * 60:(j + 1) * 60])\n relgain_ch = adu_ch / np.max(np.max(adu_ch))\n # 3 - Relative gain of the FFC image\n aduffc_ch = np.zeros((2, 4))\n for k in range(0, 2):\n for j in range(0, 4):\n aduffc_ch[k, j] = np.mean(light_flat_field[rawindex, k * 120:(k + 1) * 120, j * 60:(j + 1) * 60])\n relffcgain_ch = aduffc_ch / np.max(np.max(aduffc_ch))\n\n # Compute expected photon noise in each channel normalized by the gain (to be compared with measured noise computed as aduffc_chnoise)\n ph_mean_ch = np.zeros((2, 4))\n ph_noise_ch = np.zeros((2, 4))\n for k in range(0, 2):\n for j in range(0, 4):\n ph_mean_ch[k, j] = np.mean(light_med[k * 120:(k + 1) * 120, j * 60:(j + 1) * 60])\n if gain[i] != 1:\n ph_noise_ch[k, j] = np.sqrt(2 * ph_mean_ch[k, j] * gain[i] * relgain_ch[k, j] / 27.665)\n # print('Ph Noise: ' + str('{:.2f}'.format(ph_noise[i])))\n else:\n ph_noise_ch[k, j] = np.sqrt(ph_mean_ch[k, j] * gain[i] * relgain_ch[k, j] / 27.665)\n # print('Mean of photons (before FF) at gain of ' + str(gain[i]) + ' is ' + str('{:.2f}'.format(ph_mean)))\n # print('Mean of photons (after FF) at gain of ' + str(gain[i]) + ' is ' + str('{:.2f}'.format(np.mean(np.mean(light_flat_field[k,:,:])))))\n ph_noise[i] = np.mean(ph_noise_ch)\n\n # Subplot 1 - Flat map\n plt.subplot(3, np.shape(gain)[0], i + 1)\n plt.imshow(light_medflat)\n plt.title('Flat map at gain = ' + str(gain[i]), fontsize=8)\n for k in range(0, 2):\n for j in range(0, 4):\n plt.text((j * 60) + 15, (k * 120) + 60, \"{:.2f}\".format(relflatgain_ch[k, j], fontsize=4))\n cbar = plt.colorbar()\n cbar.ax.tick_params(labelsize = 8)\n plt.xticks(fontsize=8)\n plt.yticks(fontsize=8)\n\n # Subplot 2 - Median image\n plt.subplot(3, np.shape(gain)[0], np.shape(gain)[0] + i + 1)\n plt.imshow(light_med)\n plt.title('Median image at gain = ' + str(gain[i]), fontsize=8)\n for k in range(0, 2):\n for j in range(0, 4):\n plt.text((j * 60) + 15, (k * 120) + 60, \"{:.2f}\".format(relgain_ch[k, j], fontsize=4))\n cbar = plt.colorbar()\n cbar.ax.tick_params(labelsize = 8)\n plt.xticks(fontsize=8)\n plt.yticks(fontsize=8)\n\n # Subplot 3 - FFC image\n plt.subplot(3, np.shape(gain)[0], (2 * np.shape(gain)[0]) + i + 1)\n plt.imshow(light_flat_field[rawindex,:,:])\n plt.title(\"FFC image sample at gain = \" + str(gain[i]) + \"\\n Mean of \"\n \"Total Noise post FF = \" + str('{:.2f}'.format(np.mean(light_noise_postff))) + \" ADUs\\n\"\n \"Expected photon noise = \" + str('{:.2f}'.format(ph_noise[i])) + \" ADUs\", fontsize=8)\n for k in range(0, 2):\n for j in range(0, 4):\n if channelnoise == 0:\n plt.text((j * 60), (k * 120) + 60, \"{:.2f}\".format(relffcgain_ch[k, j], fontsize=4))\n elif channelnoise == 1:\n plt.text((j * 60), (k * 120) + 60, \"{:.2f}\".format(aduffc_chnoise[k, j], fontsize=4))\n plt.xticks(fontsize=8)\n plt.yticks(fontsize=8)\n\n # for i in range(np.shape(gain)[0]):\n # mean_snr_raw = np.mean(light_snr_raw, axis=0)[i]\n # mean_snr_postff = np.mean(light_snr_postff, axis=0)[i]\n # print('SNR pre-FF at ' + str(gain[i]) + ' is ' + str(\"{:.2f}\".format(mean_snr_raw)))\n # print('SNR post-FF at ' + str(gain[i]) + ' is ' + 
str(\"{:.2f}\".format(mean_snr_postff)))\n\n # plt.figure()\n # plt.plot(light_med[i, 100,:])\n\n # Subplot 3 - Noise histogram\n # num_bins = 20\n # ax = plt.subplot(3, np.shape(gain)[0], (2 * np.shape(gain)[0]) + i + 1)\n # n, bins, patches = plt.hist(light_flat_field[rawindex,:,:].flatten(), num_bins, facecolor='blue', alpha=0.5)\n # plt.plot(bins)\n # ax.set_xlim([np.min(bins), np.max(bins)])\n # plt.xlabel('ADUs', fontsize=8)\n # plt.ylabel('Number of pixels', fontsize=8)\n # plt.title('ADU distribution for FFC image at gain = ' + str(gain[i]), fontsize=8)\n # plt.xticks(fontsize=8)\n # plt.yticks(fontsize=8)\n\n # Test division of image by flat field without normalization\n # test_ffc = light_raw[rawindex,:,:] / light_med[i, :, :]\n # print(\"Mean of non-normalized FF correction is \" + str('{:.2f}'.format(np.mean(np.mean(test_ffc)))))\n\n plt.subplots_adjust(left=0.1,\n bottom=0.1,\n right=0.9,\n top=0.9,\n wspace=0.4,\n hspace=0.4)\n plt.show()\n" }, { "alpha_fraction": 0.49689117074012756, "alphanum_fraction": 0.5323144197463989, "avg_line_length": 37.01224136352539, "blob_id": "07b071c05b349e757d513889bf03e0f1fead0017", "content_id": "de7e8c5455fa1b3c3c7a50c31a32ff8e80999630", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 40369, "license_type": "no_license", "max_line_length": 158, "num_lines": 1062, "path": "/OCAM2K_Images.py", "repo_name": "asurendran-keck/OCAM2k", "src_encoding": "UTF-8", "text": "############################## Import Libraries ###############################\n\n## Math Library\nimport numpy as np\n## Subprocess library\nimport subprocess\n## System library\nimport sys\n## Operating system library\nimport os\n## PIL library used to read bitmap files\nfrom PIL import Image as read_bmp\n## Library used to plot graphics and show images\nimport matplotlib.pyplot as plt\n## Time library \nimport time\n## Datatime Library\nimport datetime as dt\n## import library used to manipulate fits files\nfrom astropy.io import fits\n## Library used to display np.array into ds9 \nimport pysao\n## To be able to imshow in logscale\nfrom matplotlib.colors import LogNorm\n## Library used to over plotpolygone on imshow \nimport matplotlib.patches as patches\n\n############################## Local Definitions ##############################\n\n# =============================================================================\ndef get_path(subdirname = ''):\n ''' -----------------------------------------------------------------------\n This function return the standard path associated to the subdirectory\n provided where FIU data should be saved.\t\n Arguments:\n - subdirname[optional]: subdirectory of the main path.\n -> must be a string.\n -> / at the beginning and the end not required.\n Returns:\n Nominal case:\n - Path -- Path associated to the sub-directory name provided\n - Name -- Name of for the data based on time.\n In case of error:\n - False\n ----------------------------------------------------------------------- '''\n # Verify if a sub directory name have been provided by the user.\n if not subdirname == '': \n # if yes verify if sub dir name provided is a string.\n if not isinstance(subdirname,str):\n print('\\n\\nERROR::: Provided sub directory name must be a string')\n return\n # If it is a string.\n # Check if provided sub directory name is valid. 
Correct if possible.\n else:\n # Remove blank subdirectories if present.\n while '//' in subdirname: subdirname = subdirname.replace('//','/')\n # if sub dir name provided has '/' at the beginning, strip it.\n if subdirname[0] == '/': subdirname = subdirname[1:]\n # if sub directory name provided has not '/' at the end, add it.\n if not subdirname[-1] == '/': subdirname += '/'\n \n # Format for time-based filename\n FNMTFORMAT = '%H_%M_%S.%f'\n # Format for UTC-named directories\n DIRTFORMAT = '%y%m%d'\n # Default path for saving images\n dfltPath = '/home/aodev/Data/'\n # --- Check/create UTC date directory for Xenics data\n tmpdate = dt.datetime.utcnow()\n # UTC date for dir\n timestamp = tmpdate.strftime(DIRTFORMAT)\n # Main path for save\n Path = dfltPath + timestamp + '/' + subdirname\n # Get path in system \n ospath = os.path.dirname(Path)\n if not os.path.exists(ospath):\n # Path did not exist before; create it now\n os.makedirs(ospath)\n \n # Define a name based on time\n Name = tmpdate.strftime(FNMTFORMAT)\n \n return Path, Name\n\n# =============================================================================\ndef moments(data, width = False):\n ''' -----------------------------------------------------------------------\n Compute the momentum of an np.array.\n Inputs:\n - data: a np.array\n - width: \n Returns:\n - x \n - y \n if width == True:\n - width_x\n - width_y)\n ----------------------------------------------------------------------- '''\n total = np.sum(data)\n X, Y = np.indices(np.shape(data))\n cx = np.sum(Y*data)/total\n cy = np.sum(X*data)/total\n if width:\n row = data[int(round(cx)), :]\n tmp = np.sum(abs((np.arange(row.size)-cx)**2*row))/np.sum(row)\n width_x = 0.0 if tmp < 0. else np.sqrt(tmp)\n col = data[:, int(round(cy))]\n tmp = np.sum(abs((np.arange(col.size)-cy)**2*col))/np.sum(col)\n width_y = 0.0 if tmp < 0. else np.sqrt(tmp)\n else:\n width_x = 0\n width_y = 0\n # return\n return cx, cy, width_x, width_y\n\n# =============================================================================\ndef get_image():\n ''' -----------------------------------------------------------------------\n Function used to get images from the EDT framegrabber. 
Images are reshaped\n before to be return as a numpy array of 16 bits un-signed integer \"u16int\".\n Inputs: \n - None\n Returns:\n - frame : OCAM2K image return as a numpy array \"u16int\".\n - time_st : time stamp associated to the frame.\n ----------------------------------------------------------------------- '''\n frames = 0\n # Location of the bitmap frames\n filename = '/home/aodev/Data/tmp/OCAM2K_frame.bmp'\n # Prepare command to pull an image\n command = 'cd /opt/EDTpdv/; ./simple_take -c 0 -N 100 -b ' + filename\n # Pull an image\n trash = subprocess.call(command, shell = True, stdout=subprocess.DEVNULL, stderr=subprocess.STDOUT) \n # save a timestamp\n time_st = time.time() \n # Open the bitmap file and convert it into a numpy array\n # Each cell of the array can accomodate a 16 bits integer.\n array_08 = np.array(read_bmp.open(filename)).astype('uint16')\n # Remove prescan pixels (1 raw and 48 = 6*8 colums * 2x8 bits values = 96)\n # Reshape as a vector (2 x 8 bits values per pixel)\n #vect_08 = np.reshape(array_08[1:,96:],[240*240,2])\n # Combines 8 bits values into 16 bits values\n #array_16 = np.reshape(vect_08[:,1]*256 + vect_08[:,0],[120,60,8])\n # Reorganize pixel per amplifier\n #amp_1 = array_16[:,:,3]\n #amp_2 = np.fliplr(array_16[:,:,2] )\n #amp_3 = array_16[:,:,1]\n #amp_4 = np.fliplr(array_16[:,:,0] )\n #amp_5 = np.flipud( array_16[:,:,4])\n #amp_6 = np.flipud(np.fliplr(array_16[:,:,5]))\n #amp_7 = np.flipud( array_16[:,:,6])\n #amp_8 = np.flipud(np.fliplr(array_16[:,:,7]))\n # Reorganize pixel by side of the detector (upper lower)\n #frame_u = np.concatenate([amp_4,amp_3,amp_2,amp_1],1)\n #frame_l = np.concatenate([amp_8,amp_7,amp_6,amp_5],1)\n # Reconstruct the frame\n #frame = np.concatenate([frame_u,frame_l],0)\n # Return frame\n #Alternate descrambling with txt file\n img16 = np.zeros((np.shape(array_08)[0], int(np.shape(array_08)[1] / 2)))\n for i in range(0, np.shape(array_08)[1], 2):\n img16[:, int (i / 2)] = (array_08[:, i + 1]<<8) + (array_08[:, i]) # Convert pixels from 8 bit to 16 bit\n \n descrambler = np.loadtxt('ocam2_descrambling.txt', delimiter = ',').astype(int)\n img16_vector = img16.flatten()\n img_unscrambled_vector = np.zeros(np.shape(descrambler)[0])\n for i in range(0, np.shape(descrambler)[0]):\n img_unscrambled_vector[i] = img16_vector[descrambler[i]]\n frame = np.reshape(img_unscrambled_vector, (240, 240))\n return frame, time_st\n\n############################ Image Acquisition Loop ###########################\n\n# =============================================================================\ndef images_acquisition_loop(max_fps = 25):\n ''' -----------------------------------------------------------------------\n This function read the detector, reformat the images and store them into a\n shared memory.\n Inputs: \n - max_fps : maximum number of frame read per second (default value = 25)\n Returns:\n - None\n ----------------------------------------------------------------------- '''\n # Create the shm for the images.\n data = np.zeros([240,240])\n shm_im = shmlib.shm('/tmp/OCAM2K_raw_frames.im.shm',data)\n try:\n # Get an image and associated time stamp.\n im, ts = get_image()\n # Update shm.\n shm_raw_im.set_data(im)\n # Limit the fps to value provided by user\n time.sleep(1./max_fps)\n \n except KeyboardInterrupt:\n # Print message\n print('\\n Sctipt interupted by user')\n\n # Print message\n print('\\n OCAM2K image acquisition has been interupted.')\n\n # This function does not return anything\n return\n\n# 
=============================================================================\ndef save_images(nb_im = 100, filename = 'tmp.fits', bkg_file = 'bkgd.fits'):\n ''' -----------------------------------------------------------------------\n Temporary function used to save images.\n ----------------------------------------------------------------------- '''\n time.sleep(2)\n # Get path where data must be saved\n path, default_name = get_path('OCAM2K') \n # Prepare the filename for the images\n if filename == '':\n filename = default_name + '.fits'\n elif filename[-5:] != '.fits':\n filename += '.fits'\n\n # Try loading the background file (if any)\n bkg_filename = path + bkg_file\n try:\n bkgd = fits.open(bkg_filename)[0].data\n print('bkgd loaded')\n except:\n bkgd = np.zeros([240,240])\n print('bkgd NOT loaded')\n\n # Prepare a cube to store the images\n cube = np.zeros([nb_im,240,240])\n # Start image acquisition and display\n for i in np.arange(nb_im):\n # Get an image and store it into the cube of images.\n cube[i,:,:] = get_image()[0] - bkgd\n # Print usefull information for the user\n sys.stdout.write('\\r Frame = %04.0d/%04.0d' %(i+1,nb_im))\n sys.stdout.flush()\n \n # Save the cube of images\n fullname = path + filename[:-5] + '_raw.fits'\n hdu = fits.PrimaryHDU(cube)\n hdu.writeto(fullname, overwrite=True)\n print('\\nRaw images saved: ' + fullname )\n # Save the median of the cube of image\n fullname = path + filename[:-5] + '_med.fits'\n hdu = fits.PrimaryHDU(np.median(cube,0))\n hdu.writeto(fullname, overwrite=True)\n print('Cube median saved: ' + fullname + '\\n')\n # This function return the path where images has been saved\n return path\n\n# =============================================================================\ndef SHM(data, bkgd = '',path = '',disp = False, mask = ''):\n ''' -----------------------------------------------------------------------\n Temporary function use to determine PSF positions.\n ----------------------------------------------------------------------- '''\n # Get path if not provided bu user\n if path == '': path, _ = get_path('OCAM2K') \n\n ## Open the data fits file \n # Determine data fullname\n fullname = path + data\n # Check if file type provided\n if fullname[-5:] != '.fits': fullname += '.fits'\n # Open the data \n data = fits.open(fullname)[0].data\n \n ## Open the bkgd fits file \n # Check if user provide a file.\n # Case 1: no filename has been provided by user. 
\n if bkgd == '': \n # The background is an np array of zeros.\n bkgd = np.copy(data)*0.\n # Case 2: a filename has been provided by user.\n else:\n # Determine bkgd fullname\n fullname = path + bkgd\n # Check if file type provided\n if fullname[-5:] != '.fits': fullname += '.fits'\n # Open the data \n bkgd = fits.open(fullname)[0].data \n \n X,Y = np.meshgrid(np.arange(240),np.arange(240))\n if mask == 'center': \n X,Y = np.meshgrid(np.arange(240),np.arange(240))\n X -= 120\n Y -= 120\n elif mask == 'BL': \n X -= 64\n Y -= 64\n elif mask == 'UL': \n X -= 64\n Y -= 176\n elif mask == 'BR': \n X -= 180\n Y -= 60\n elif mask == 'UR': \n X -= 180\n Y -= 180\n mask = np.ceil(np.sqrt(X**2+Y**2)/40)\n mask[np.where(mask != 1)] = 0\n\n\n # Compute the difference data - bkgd\n redu = (data - bkgd)*mask\n\n limit = 15\n redu_cp = np.zeros([np.size(redu,0)+4,np.size(redu,1)+4]) \n redu_cp[2:-2,2:-2] = np.copy(redu)\n #redu_cp = np.copy(redu) \n ite = 0 \n \n while np.max(redu_cp) > limit:\n # Find the position of the maximum\n tmp_x,tmp_y = np.where(redu_cp == np.max(redu_cp))\n sub_im = redu_cp[tmp_x[0]-2:tmp_x[0]+3,tmp_y[0]-2:tmp_y[0]+3]\n opt_x,opt_y,wx,wy = moments(sub_im,False)\n \n redu_cp[tmp_x[0]-2:tmp_x[0]+3,tmp_y[0]-2:tmp_y[0]+3] = 0\n \n tmp_x = tmp_x[0] - (np.size(sub_im,0) - 1)/2. + opt_x - 2\n tmp_y = tmp_y[0] - (np.size(sub_im,1) - 1)/2. + opt_y - 2\n \n if not 'pts_list' in locals():\n pts_list = np.array([[tmp_x,tmp_y]])\n else:\n pts_list = np.concatenate((pts_list,[[tmp_x,tmp_y]]),0)\n \n ite += 1\n # Print usefull information for the user\n sys.stdout.write('\\r nb_pts_found = %04.0d' %(ite))\n sys.stdout.flush()\n # plot images if requested\n if disp:\n # Create a figure\n fig_1 = plt.figure(num = 1)\n # Title of the fig\n plt.title('PSFs position', fontsize = 10)\n plt.subplot(111) \n # Display redu image\n plt.imshow(redu, origin = 'lower')\n # Modify the axis: one ticks every 120 pixels\n X_ticks = np.arange(0, np.size(redu,1)+1, 120)\n plt.gca().set_xticks(X_ticks)\n plt.xticks(fontsize = 8)\n Y_ticks = np.arange(0, np.size(redu,0)+1, 120)\n plt.gca().set_yticks(Y_ticks)\n plt.yticks(fontsize = 8)\n # Label of the axis\n plt.xlabel('Pixel', fontsize = 8)\n plt.ylabel('Pixel', fontsize = 8)\n \n ax = fig_1.add_subplot(111)\n for k in np.arange(int(np.size(pts_list)/2.)):\n # PSF position in x direction (Standard orientation)\n sx = pts_list[k,1]\n # PSF position y direction (Standard orientation)\n sy = pts_list[k,0]\n # Prepare\n circ_0 = patches.Circle((sx,sy),radius=2. 
,linestyle= '-',color='w',fill=False)\n # Draw a circle around the PSF\n ax.add_patch(circ_0)\n \n plt.subplots_adjust(bottom=0.1, right=0.8, top=0.85)\n # cax = plt.axes([left, bottom, Width, Hight])\n cax = plt.axes([0.83, 0.115, 0.03, 0.72])\n cbar = plt.colorbar(cax=cax)\n cbar.set_label('Flux (ADU)', rotation=90, fontsize = 8)\n cbar.ax.tick_params(labelsize=8, width=1)\n \n #plt.savefig(path + 'im_bottom_left.png', bbox_inches='tight', pad_inches=0.25, dpi=600)\n plt.show()\n \n return redu, redu_cp, pts_list\n\n# =============================================================================\ndef dist(pts_list):\n ''' -----------------------------------------------------------------------\n Function use to compute distances between points of a list.\n ----------------------------------------------------------------------- '''\n # Compute the number of points\n nb_pts = np.size(pts_list)/2.\n #\n x = pts_list[:,int(0)]\n y = pts_list[:,int(1)]\n #\n\n nb_cpl = 0\n for i in np.arange(nb_pts):\n for j in (np.arange(nb_pts-(i+1))+(i+1)):\n nb_cpl += 1\n\n # Prepare value to return\n distances = np.zeros([int(nb_cpl),6])\n #\n index = 0\n for i in np.arange(nb_pts):\n for j in (np.arange(nb_pts-(i+1))+(i+1)):\n # \n #print('i = %03d -- j = %03d' %(i,j))\n # Compute distance between the two selected points.\n dist_x = np.round(x[int(j)]-x[int(i)],2)\n dist_y = np.round(y[int(j)]-y[int(i)],2)\n dist_t = np.round(np.sqrt(dist_x**2 + dist_y**2),2)\n #\n distances[index,0] = i\n distances[index,1] = j\n distances[index,2] = dist_x\n distances[index,3] = dist_y\n distances[index,4] = dist_t\n distances[index,5] = dist_t/(j-i)\n #\n index += 1\n\n return distances\n'''\n# =============================================================================\ndef gap_size(bkgd = ''):\n # take images\n #_ = save_images()\n path = '/home/aodev/Data/210401/OCAM2K/'\n # Try to found all psf\n im, cp_im, pts_list = SHM('im_med.fits', 'bkgd_med.fits',path)\n # Compute distance betweel all PSF found and the center of the array\n dist2cent = np.sqrt((pts_list[:,0]-120)**2 + (pts_list[:,1]-120)**2)\n # Get the index of central circle of pts\n cent_pts = np.where(dist2cent <= 40)\n # Extract the XY coordinate of the points less than 40 pixels away from \n # the center\n list_a = pts_list[cent_pts,:][0]\n\n # Compute distance betweel all PSF found and the center of the array\n dist2cent = np.sqrt((pts_list[:,0]-40)**2 + (pts_list[:,1]-40)**2)\n # Get the index of central circle of pts\n cent_pts = np.where(dist2cent <= 40)\n # Extract the XY coordinate of the points less than 40 pixels away from \n # the center\n list_b = pts_list[cent_pts,:][0]\n\n # Compute distance betweel all PSF found and the center of the array\n dist2cent = np.sqrt((pts_list[:,0]-200)**2 + (pts_list[:,1]-40)**2)\n # Get the index of central circle of pts\n cent_pts = np.where(dist2cent <= 40)\n # Extract the XY coordinate of the points less than 40 pixels away from \n # the center\n list_c = pts_list[cent_pts,:][0]\n\n # Compute distance betweel all PSF found and the center of the array\n dist2cent = np.sqrt((pts_list[:,0]-200)**2 + (pts_list[:,1]-200)**2)\n # Get the index of central circle of pts\n cent_pts = np.where(dist2cent <= 40)\n # Extract the XY coordinate of the points less than 40 pixels away from \n # the center\n list_d = pts_list[cent_pts,:][0]\n\n # Compute distance betweel all PSF found and the center of the array\n dist2cent = np.sqrt((pts_list[:,0]-40)**2 + (pts_list[:,1]-200)**2)\n # Get the index of central circle of pts\n 
cent_pts = np.where(dist2cent <= 40)\n # Extract the XY coordinate of the points less than 40 pixels away from \n # the center\n list_e = pts_list[cent_pts,:][0]\n\n plt.plot(list_a[:,0],list_a[:,1],'.r',list_b[:,0],list_b[:,1],'.g',list_c[:,0],list_c[:,1],'.b',list_d[:,0],list_d[:,1],'.k',list_e[:,0],list_e[:,1],'.m')\n plt.xlim([0,240])\n plt.ylim([0,240])\n plt.show()\n\n\n\n\n x = np.sort(tmp_list[:,0])\n y = np.sort(tmp_list[:,1])\n\n nb_pts = int(np.size(x))\n\n index = np.ones([1,1])\n nb_raw = 0\n raw_nb = 0\n pts_raw = np.zeros([1,1])\n y_c = np.copy(y)*0\n\n for i in np.arange(nb_pts):\n if np.abs(y[int(i-1)]-y[int(i)]) < 2:\n index[0,0] += 1\n if index > nb_raw:\n nb_raw = np.copy(index)\n else:\n pts_raw = np.concatenate([pts_raw,index],0)\n index = np.ones([1,1])\n raw_nb += 1\n y_c[i] = raw_nb -1\n\n pts_raw = np.concatenate([pts_raw[2:],index],0)\n \n\n index = np.ones([1,1])\n nb_col = 0\n col_nb = 0\n pts_col = np.zeros([1,1])\n x_c = np.copy(x)*0\n\n for i in np.arange(nb_pts):\n if np.abs(x[int(i-1)]-x[int(i)]) < 2:\n index[0,0] += 1\n if index > nb_col:\n nb_col = np.copy(index)\n else:\n pts_col = np.concatenate([pts_col,index],0)\n index = np.ones([1,1])\n col_nb +=1\n\n x_c[i] = col_nb - 1\n\n\n pts_col = np.concatenate([pts_col[2:],index],0)\n\n y_1 = np.sort(tmp_list[:,1])\n x_1 = np.zeros(np.size(y_1))\n n_1 = np.zeros(np.size(y_1))\n\n for i in np.arange(nb_pts):\n for j in np.arange(nb_pts):\n if tmp_list[j,1] == y_1[i]:\n x_1[i] = tmp_list[j,0]\n n_1[i] = j\n\n x_2 = np.sort(tmp_list[:,0])\n y_2 = np.zeros(np.size(x_2))\n n_2 = np.zeros(np.size(x_2))\n\n for i in np.arange(nb_pts):\n for j in np.arange(nb_pts):\n if tmp_list[j,0] == x_2[i]:\n y_2[i] = tmp_list[j,1]\n n_2[i] = j\n\n table = np.zeros([int(nb_raw),int(nb_col),2])\n for i in np.arange(nb_pts):\n tmp_y = y_2[int(np.where(n_2 == i)[0])]\n tmp_raw = x_c[int(np.where(n_2 == i)[0])]\n tmp_x = x_1[int(np.where(n_1 == i)[0])]\n tmp_col = y_c[int(np.where(n_1 == i)[0])]\n table[int(tmp_raw),int(tmp_col)] = [tmp_x,tmp_y] \n\n nb_dist = int((nb_pts**2-nb_pts)/2.)*2\n gap_size = np.zeros(nb_dist)\n index = 0\n for i in np.arange(int(nb_raw)):\n for j in np.arange(int(nb_col)):\n for k in np.arange(int(nb_raw)):\n for l in np.arange(int(nb_col)):\n cdt_0 = (table[i,j,0] != 0)\n cdt_1 = (table[i,j,1] != 0)\n cdt_2 = (table[int(k),int(l),0] != 0)\n cdt_3 = (table[int(k),int(l),1] != 0)\n cdt_4 = (i != k) and (j != l)\n if cdt_0 and cdt_1 and cdt_2 and cdt_3 and cdt_4:\n dx = table[i,j,0] - table[int(k),int(l),0]\n dy = table[i,j,1] - table[int(k),int(l),1]\n pix = np.sqrt(dx**2 + dy**2)\n dik = i-k\n djl = j-l \n gap = np.sqrt(dik**2 + djl**2)\n \n gap_size[index] = pix/gap\n index += 1\n\n gap_size = gap_size[np.where(gap_size != 0)[0]]\n gap_size = gap_size[:int(np.size(gap_size)/2.)]\n gsm = np.round(np.median(gap_size),3)\n\n # Create a figure\n fig_1 = plt.figure(num = 1)\n # Title of the fig\n plt.title('OCAM2K image (reduced)', fontsize = 10)\n # Display histogram\n plt.hist(gap_size,60,[gsm-0.30,gsm+0.30])\n # Modify the axis: one ticks every 128 pixels\n X_ticks = (np.arange(0, 61, 15)-30)*0.01\n plt.gca().set_xticks(X_ticks)\n plt.xticks(fontsize = 8)\n Y_ticks = np.arange(0, np.size(disp_im,0)+1, 30)\n plt.gca().set_yticks(Y_ticks)\n plt.yticks(fontsize = 8)\n # Label of the axis\n plt.xlabel('Pixel', fontsize = 8)\n plt.ylabel('Pixel', fontsize = 8)\n plt.show()\n print('size of the gaps = %5.3f' %(gsm))\n\n list_a_h = np.copy(list_a) \n xh = np.sort(list_a[:,0])\n yh = np.zeros(np.size(y))\n for i 
in np.arange(np.size(x)):\n for j in np.arange(np.size(x)):\n if list_a[j,0] == x[i]:\n y[i] = list_a[j,1]\n\n list_a_r[:,0] = x\n list_a_r[:,1] = y\n\n index = 0\n nb_raw = 0\n for i in np.arange(np.size(y)/2.):\n if np.abs(y[int(i)]-y[int(i-1)]) < 3:\n index += 1\n if index > nb_raw:\n nb_raw = np.copy(index)\n else:\n index = 0\n\n\n # Compute distance betweel all PSF found and the center of the array\n dist2cent = np.sqrt((pts_list[:,0]-124)**2 + (pts_list[:,1]-124)**2)\n # Get the index of central circle of pts\n cent_pts = np.where(dist2cent <= 40)\n\n # Isolate the PSF on the central vertical axis\n central_line = np.where(np.abs(pts_list[:,0]-pts_list[cent_pts,0]) <= 2)\n\n #redu_list = pts_list[central_line,:][0]\n redu_list = pts_list[cent_pts,:][0]\n # Sort the list of PSF by vertical position\n y = np.sort(redu_list[:,1])\n x = np.zeros(np.size(y))\n for i in np.arange(np.size(y)):\n for j in np.arange(np.size(y)):\n if redu_list[j,1] == y[i]:\n x[i] = redu_list[j,0]\n redu_list[:,0] = x\n redu_list[:,1] = y\n # Compute the distance between all the PSFs of the reduce list divided by \n # the number of gaps between those PSFs \n results = dist(redu_list)[5]\n \n sep_dir_V = np.round(np.median(results),2)\n \n plt.hist(results,50,[sep_dir_V-0.25,sep_dir_V+0.25]);plt.show()\n \n return sep_dir_V\n\n#########################################################################################\npath = '/home/aodev/Data/210330/OCAM2K/'\ndata = 'Set_03_med.fits'\nbkgd = 'Bkgd_03_med.fits'\nim,tmp,pts_list = SHM(data, bkgd, path)\n\n\ncentral_line = np.where(np.abs(pts_list[:,0]-124) <= 2)\nredu_list = pts_list[central_line,:][0]\n\ny = np.sort(redu_list[:,1])\nx = np.zeros(np.size(y))\nfor i in np.arange(np.size(y)):\n for j in np.arange(np.size(y)):\n if redu_list[j,1] == y[i]:\n x[i] = redu_list[j,0]\n\nredu_list[:,0] = x\nredu_list[:,1] = y\n\nA = dist(redu_list)\n#sep_dir_1 = np.max(A[:,4])/np.max(A[:,1]) \nsep_dir_1 = np.round(np.median(A[:,5]),2)\nplt.hist(A[:,5],50,[sep_dir_1-0.25,sep_dir_1+0.25]);plt.show()\n\n\ncentral_line = np.round(np.where(np.abs(pts_list[:,1]-124) <= 2),3)\nredu_list = pts_list[central_line,:][0]\n\nx = np.sort(redu_list[:,0])\ny = np.zeros(np.size(x))\nfor i in np.arange(np.size(x)):\n for j in np.arange(np.size(x)):\n if redu_list[j,0] == x[i]:\n y[i] = redu_list[j,1]\n\nredu_list[:,0] = x\nredu_list[:,1] = y\n\n\nB = dist(redu_list)\nsep_dir_2 = np.round(np.max(B[:,4])/np.max(B[:,1]),3) \nnp.median(B[:,5])\nplt.hist(B[:,5],50,[sep_dir_1-0.25,sep_dir_1+0.25]);plt.show()\n\ntmp = ((A[:,4]-(A[:,4]%4))/4)+np.round((A[:,4]%4)/4.) 
\n\n\n\ntmp = np.sqrt((pts_list[:,0]-10)**2 + (pts_list[:,1]-10)**2)\nindex = int(np.where(tmp == np.min(tmp))[0][0])\nAx,Ay = pts_list[index,0],pts_list[index,1]\n\ntmp = np.sqrt((pts_list[:,0]-10)**2 + (pts_list[:,1]-120)**2)\nindex = int(np.where(tmp == np.min(tmp))[0][0])\nBx,By = pts_list[index,0],pts_list[index,1]\n\ntmp = np.sqrt((pts_list[:,0]-10)**2 + (pts_list[:,1]-230)**2)\nindex = int(np.where(tmp == np.min(tmp))[0][0])\nCx,Cy = pts_list[index,0],pts_list[index,1]\n\ntmp = np.sqrt((pts_list[:,0]-120)**2 + (pts_list[:,1]-10)**2)\nindex = int(np.where(tmp == np.min(tmp))[0][0])\nDx,Dy = pts_list[index,0],pts_list[index,1]\n\ntmp = np.sqrt((pts_list[:,0]-120)**2 + (pts_list[:,1]-120)**2)\nindex = int(np.where(tmp == np.min(tmp))[0][0])\nEx,Ey = pts_list[index,0],pts_list[index,1]\n\ntmp = np.sqrt((pts_list[:,0]-120)**2 + (pts_list[:,1]-230)**2)\nindex = int(np.where(tmp == np.min(tmp))[0][0])\nFx,Fy = pts_list[index,0],pts_list[index,1]\n\ntmp = np.sqrt((pts_list[:,0]-230)**2 + (pts_list[:,1]-10)**2)\nindex = int(np.where(tmp == np.min(tmp))[0][0])\nGx,Gy = pts_list[index,0],pts_list[index,1]\n\ntmp = np.sqrt((pts_list[:,0]-230)**2 + (pts_list[:,1]-120)**2)\nindex = int(np.where(tmp == np.min(tmp))[0][0])\nHx,Hy = pts_list[index,0],pts_list[index,1]\n\ntmp = np.sqrt((pts_list[:,0]-230)**2 + (pts_list[:,1]-230)**2)\nindex = int(np.where(tmp == np.min(tmp))[0][0])\nIx,Iy = pts_list[index,0],pts_list[index,1]\n\n # Create a figure\n fig_1 = plt.figure(num = 1)\n # Title of the fig\n plt.title('OCAM2K image (reduced)', fontsize = 10)\n plt.subplot(111) \n # Prepares the image for display\n disp_im = (np.abs(im)+1)\n disp_im /= np.max(disp_im)\n # The image (PSF + Calibration fibers) is show in log scale\n plt.imshow(disp_im,norm=LogNorm(vmin=1e-2, vmax=1), origin = 'lower')\n # Modify the axis: one ticks every 128 pixels\n X_ticks = np.arange(0, np.size(disp_im,1)+1, 30)\n plt.gca().set_xticks(X_ticks)\n plt.xticks(fontsize = 8)\n Y_ticks = np.arange(0, np.size(disp_im,0)+1, 30)\n plt.gca().set_yticks(Y_ticks)\n plt.yticks(fontsize = 8)\n # Label of the axis\n plt.xlabel('Pixel', fontsize = 8)\n plt.ylabel('Pixel', fontsize = 8)\n\n ax = fig_1.add_subplot(111)\n # Draw a circle around each pupils\n\n pup = 'PSF_A'\n bbox = {'facecolor':'Black','alpha':1,'edgecolor':'none','pad':1}\n circ_0 = patches.Circle((10,10),radius=4.,color='w',fill=False,linewidth = 2,linestyle= '-',)\n plt.text(10,10+10,pup,color='w',fontsize=9,bbox=bbox, ha='center', va='center')\n ax.add_patch(circ_0)\n circ_0 = patches.Circle((Ax,Ay),radius=1.,color='r',fill=True,linewidth = 2,linestyle= '-',)\n ax.add_patch(circ_0)\n\n pup = 'PSF_B'\n bbox = {'facecolor':'Black','alpha':1,'edgecolor':'none','pad':1}\n circ_0 = patches.Circle((10,120),radius=4.,color='w',fill=False,linewidth = 2,linestyle= '-',)\n plt.text(10,120+10,pup,color='w',fontsize=9,bbox=bbox, ha='center', va='center')\n ax.add_patch(circ_0)\n circ_0 = patches.Circle((Bx,By),radius=1.,color='r',fill=True,linewidth = 2,linestyle= '-',)\n ax.add_patch(circ_0)\n\n pup = 'PSF_C'\n bbox = {'facecolor':'Black','alpha':1,'edgecolor':'none','pad':1}\n circ_0 = patches.Circle((10,230),radius=4.,color='w',fill=False,linewidth = 2,linestyle= '-',)\n plt.text(10,230+10,pup,color='w',fontsize=9,bbox=bbox, ha='center', va='center')\n ax.add_patch(circ_0)\n circ_0 = patches.Circle((Cx,Cy),radius=1.,color='r',fill=True,linewidth = 2,linestyle= '-',)\n ax.add_patch(circ_0)\n\n pup = 'PSF_D'\n bbox = {'facecolor':'Black','alpha':1,'edgecolor':'none','pad':1}\n circ_0 
= patches.Circle((120,10),radius=4.,color='w',fill=False,linewidth = 2,linestyle= '-',)\n plt.text(120,10+10,pup,color='w',fontsize=9,bbox=bbox, ha='center', va='center')\n ax.add_patch(circ_0)\n circ_0 = patches.Circle((Dx,Dy),radius=1.,color='r',fill=True,linewidth = 2,linestyle= '-',)\n ax.add_patch(circ_0)\n\n pup = 'PSF_E'\n bbox = {'facecolor':'Black','alpha':1,'edgecolor':'none','pad':1}\n circ_0 = patches.Circle((120,120),radius=4.,color='w',fill=False,linewidth = 2,linestyle= '-',)\n plt.text(120,120+10,pup,color='w',fontsize=9,bbox=bbox, ha='center', va='center')\n ax.add_patch(circ_0)\n circ_0 = patches.Circle((Ex,Ey),radius=1.,color='r',fill=True,linewidth = 2,linestyle= '-',)\n ax.add_patch(circ_0)\n\n pup = 'PSF_F'\n bbox = {'facecolor':'Black','alpha':1,'edgecolor':'none','pad':1}\n circ_0 = patches.Circle((120,230),radius=4.,color='w',fill=False,linewidth = 2,linestyle= '-',)\n plt.text(120,230+10,pup,color='w',fontsize=9,bbox=bbox, ha='center', va='center')\n ax.add_patch(circ_0)\n circ_0 = patches.Circle((Fx,Fy),radius=1.,color='r',fill=True,linewidth = 2,linestyle= '-',)\n ax.add_patch(circ_0)\n\n pup = 'PSF_G'\n bbox = {'facecolor':'Black','alpha':1,'edgecolor':'none','pad':1}\n circ_0 = patches.Circle((230,10),radius=4.,color='w',fill=False,linewidth = 2,linestyle= '-',)\n plt.text(230,10+10,pup,color='w',fontsize=9,bbox=bbox, ha='center', va='center')\n ax.add_patch(circ_0)\n circ_0 = patches.Circle((Gx,Gy),radius=1.,color='r',fill=True,linewidth = 2,linestyle= '-',)\n ax.add_patch(circ_0)\n\n pup = 'PSF_H'\n bbox = {'facecolor':'Black','alpha':1,'edgecolor':'none','pad':1}\n circ_0 = patches.Circle((230,120),radius=4.,color='w',fill=False,linewidth = 2,linestyle= '-',)\n plt.text(230,120+10,pup,color='w',fontsize=9,bbox=bbox, ha='center', va='center')\n ax.add_patch(circ_0)\n circ_0 = patches.Circle((Hx,Hy),radius=1.,color='r',fill=True,linewidth = 2,linestyle= '-',)\n ax.add_patch(circ_0)\n\n pup = 'PSF_I'\n bbox = {'facecolor':'Black','alpha':1,'edgecolor':'none','pad':1}\n circ_0 = patches.Circle((230,230),radius=4.,color='w',fill=False,linewidth = 2,linestyle= '-',)\n plt.text(230,230+10,pup,color='w',fontsize=9,bbox=bbox, ha='center', va='center')\n ax.add_patch(circ_0)\n circ_0 = patches.Circle((Ix,Iy),radius=1.,color='r',fill=True,linewidth = 2,linestyle= '-',)\n ax.add_patch(circ_0)\n\n # Saves the image in PDF format file\n #plt.savefig(path + 'OCAM2K_Pupils.pdf', bbox_inches='tight', pad_inches=0.25, dpi=600)\n #plt.savefig(path + 'OCAM2K_Pupils.png', bbox_inches='tight', pad_inches=0.25, dpi=600)\n plt.show()\n\n\ndef plot_im():\n # Create a figure\n fig_1 = plt.figure(num = 1)\n # Title of the fig\n plt.title('OCAM2K image (reduced)', fontsize = 10)\n plt.subplot(111) \n # Prepares the image for display\n disp_im = (np.abs(im)+1)\n disp_im /= np.max(disp_im)\n # The image (PSF + Calibration fibers) is show in log scale\n plt.imshow(disp_im,norm=LogNorm(vmin=1e-2, vmax=1), origin = 'lower')\n # Modify the axis: one ticks every 128 pixels\n X_ticks = np.arange(0, np.size(disp_im,1)+1, 30)\n plt.gca().set_xticks(X_ticks)\n plt.xticks(fontsize = 8)\n Y_ticks = np.arange(0, np.size(disp_im,0)+1, 30)\n plt.gca().set_yticks(Y_ticks)\n plt.yticks(fontsize = 8)\n # Label of the axis\n plt.xlabel('Pixel', fontsize = 8)\n plt.ylabel('Pixel', fontsize = 8)\n\n plt.show()\n\n ax = fig_1.add_subplot(111)\n # Draw a circle around each pupils\n\n cx,cy = 40,40\n pup = 'Bottom Left Pupil'\n bbox = {'facecolor':'Black','alpha':1,'edgecolor':'none','pad':1}\n circ_0 = 
patches.Circle((cx,cy),radius=40.,color='w',fill=False,linewidth = 2,linestyle= '-',)\n plt.text(cx,cy+50,pup,color='w',fontsize=9,bbox=bbox, ha='center', va='center')\n ax.add_patch(circ_0)\n\n cx,cy = 200,40\n pup = 'Bottom Right Pupil'\n bbox = {'facecolor':'Black','alpha':1,'edgecolor':'none','pad':1}\n circ_0 = patches.Circle((cx,cy),radius=40.,color='w',fill=False,linewidth = 2,linestyle= '-',)\n plt.text(cx,cy+50,pup,color='w',fontsize=9,bbox=bbox, ha='center', va='center')\n ax.add_patch(circ_0)\n\n cx,cy = 40,200\n pup = 'Top Left Pupil'\n bbox = {'facecolor':'Black','alpha':1,'edgecolor':'none','pad':1}\n circ_0 = patches.Circle((cx,cy),radius=40.,color='w',fill=False,linewidth = 2,linestyle= '-',)\n plt.text(cx,cy-50,pup,color='w',fontsize=9,bbox=bbox, ha='center', va='center')\n ax.add_patch(circ_0)\n\n cx,cy = 200,200\n pup = 'Top Right Pupil'\n bbox = {'facecolor':'Black','alpha':1,'edgecolor':'none','pad':1}\n circ_0 = patches.Circle((cx,cy),radius=40.,color='w',fill=False,linewidth = 2,linestyle= '-',)\n plt.text(cx,cy-50,pup,color='w',fontsize=9,bbox=bbox, ha='center', va='center')\n ax.add_patch(circ_0)\n\n cx,cy = 120,120\n pup = 'Central Pupil'\n bbox = {'facecolor':'Black','alpha':1,'edgecolor':'none','pad':1}\n circ_0 = patches.Circle((cx,cy),radius=40.,color='w',fill=False,linewidth = 2,linestyle= '-',)\n plt.text(cx,cy+50,pup,color='w',fontsize=9,bbox=bbox, ha='center', va='center')\n ax.add_patch(circ_0)\n\n plt.subplots_adjust(bottom=0.1, right=0.8, top=0.85)\n # cax = plt.axes([left, bottom, Width, Hight])\n cax = plt.axes([0.83, 0.115, 0.03, 0.72])\n cbar = plt.colorbar(cax=cax)\n cbar.set_label('Normalized Flux (Log scale)', rotation=90, fontsize = 8)\n cbar.ax.tick_params(labelsize=8, width=1)\n\n # Saves the image in PDF format file\n plt.savefig(path + 'OCAM2K_Pupils.pdf', bbox_inches='tight', pad_inches=0.25, dpi=600)\n plt.savefig(path + 'OCAM2K_Pupils.png', bbox_inches='tight', pad_inches=0.25, dpi=600)\n plt.show()\n'''\n\n\n\n\n# =============================================================================\ndef gap_size(images,background,path,mask):\n # Get path if not provided bu user\n if path == '': path, _ = get_path('OCAM2K') \n # Try to found all psf\n im, cp_im, pts_list = SHM(images, background,path,mask = mask)\n # Sort the points by coordinates \n x = np.sort(pts_list[:,0])\n y = np.sort(pts_list[:,1])\n # Compute the number of points\n nb_pts = int(np.size(x))\n\n # Prepares values for index\n index = np.ones([1,1])\n # Prepares a parameter for the number of rows in the image\n nb_row = 0\n # Prepares a parameter for the row number\n row_nb = 0\n # Prepares a list of pts per row\n pts_row = np.zeros([1,1])\n # Parameter contains row nb associate to each point of the list\n y_c = np.copy(y)*0\n # for each pts of the list of points\n for i in np.arange(nb_pts):\n # Case 1: Current point and previous point on the same row\n if np.abs(y[int(i-1)]-y[int(i)]) < 2:\n # Increment the index\n index[0,0] += 1\n # Update the nb of row if needed\n if index > nb_row:\n nb_row = np.copy(index)\n # Case 2: Current point and previous point not on the same row\n else:\n # Adds the current index (nb_pts in this row) to the list of pts \n # per row\n pts_row = np.concatenate([pts_row,index],0)\n # Reset the index\n index = np.ones([1,1])\n # increment the number of row\n row_nb += 1\n # Associate a row number to the current point of the list\n y_c[i] = row_nb -1\n # Adds the last index to the number of points per row and remove the two \n # first one (not valid 
due to loop construction)\n pts_row = np.concatenate([pts_row[2:],index],0)\n \n # Prepares values for index\n index = np.ones([1,1])\n # Prepares a parameter for the number of columns in the image\n nb_col = 0\n # Prepares a parameter for the column number\n col_nb = 0\n # Prepares a list of pts per column\n pts_col = np.zeros([1,1])\n # Parameter contains row nb associate to each point of the list \n x_c = np.copy(x)*0\n\n # for each pts of the list of points\n for i in np.arange(nb_pts):\n # Case 1: Current point and previous point on the same column\n if np.abs(x[int(i-1)]-x[int(i)]) < 2:\n # Increment the index\n index[0,0] += 1\n # Update the nb of column if needed\n if index > nb_col:\n nb_col = np.copy(index)\n # Case 2: Current point and previous point not on the same column\n else:\n # Adds the current index (nb_pts in this column) to the list of pts\n # per column \n pts_col = np.concatenate([pts_col,index],0)\n # Reset the index\n index = np.ones([1,1])\n # Increment the number of column\n col_nb +=1\n # Associate a column number to the current point of the list\n x_c[i] = col_nb - 1\n\n # Adds the last index to the number of points per column and remove the two \n # first one (not valid due to loop construction)\n pts_col = np.concatenate([pts_col[2:],index],0)\n\n y_1 = np.sort(pts_list[:,1])\n x_1 = np.zeros(np.size(y_1))\n n_1 = np.zeros(np.size(y_1))\n\n for i in np.arange(nb_pts):\n for j in np.arange(nb_pts):\n if pts_list[j,1] == y_1[i]:\n x_1[i] = pts_list[j,0]\n n_1[i] = j\n\n x_2 = np.sort(pts_list[:,0])\n y_2 = np.zeros(np.size(x_2))\n n_2 = np.zeros(np.size(x_2))\n\n for i in np.arange(nb_pts):\n for j in np.arange(nb_pts):\n if pts_list[j,0] == x_2[i]:\n y_2[i] = pts_list[j,1]\n n_2[i] = j\n\n table = np.zeros([int(nb_row),int(nb_col),2])\n for i in np.arange(nb_pts):\n try:\n tmp_y = y_2[int(np.where(n_2 == i)[0])]\n tmp_row = x_c[int(np.where(n_2 == i)[0])]\n tmp_x = x_1[int(np.where(n_1 == i)[0])]\n tmp_col = y_c[int(np.where(n_1 == i)[0])]\n table[int(tmp_row),int(tmp_col)] = [tmp_x,tmp_y]\n except:\n print('Missing pts') \n\n nb_dist = int((nb_pts**2-nb_pts)/2.)*2\n gap_size = np.zeros(nb_dist)\n index = 0\n for i in np.arange(int(nb_row)):\n for j in np.arange(int(nb_col)):\n for k in np.arange(int(nb_row)):\n for l in np.arange(int(nb_col)):\n cdt_0 = (table[i,j,0] != 0)\n cdt_1 = (table[i,j,1] != 0)\n cdt_2 = (table[int(k),int(l),0] != 0)\n cdt_3 = (table[int(k),int(l),1] != 0)\n cdt_4 = (i != k) and (j != l)\n if cdt_0 and cdt_1 and cdt_2 and cdt_3 and cdt_4:\n dx = table[i,j,0] - table[int(k),int(l),0]\n dy = table[i,j,1] - table[int(k),int(l),1]\n pix = np.sqrt(dx**2 + dy**2)\n dik = i-k\n djl = j-l \n gap = np.sqrt(dik**2 + djl**2)\n \n gap_size[index] = pix/gap\n index += 1\n\n gap_size = gap_size[np.where(gap_size != 0)[0]]\n gap_size = gap_size[:int(np.size(gap_size)/2.)]\n gsm = np.round(np.median(gap_size),3)\n\n # Create a figure\n fig_1 = plt.figure(num = 1)\n # Title of the fig\n #plt.title('OCAM2K image (reduced)', fontsize = 10)\n # Prepare histogram\n plt.hist(gap_size,60,[gsm-0.30,gsm+0.30])\n # Label of the axis\n plt.xlabel('Average gap size in pixel', fontsize = 8)\n plt.ylabel('Number of spots set', fontsize = 8)\n plt.savefig(path + 'hist_upper_left.png', bbox_inches='tight', pad_inches=0.25, dpi=600)\n # Display histogram\n plt.show()\n # print number of pixel per gap\n print('size of the gaps = %5.3f' %(gsm))\n\n return gap_size\n" } ]
4
PPSantos/acme
https://github.com/PPSantos/acme
b64ecac48c51301995888af20a11b8e54e1ce88a
0a2d480d92971ac4979b3f2e8b9a677c1e5e3b1b
28ab3d5d0136860e0f4263fcc70d3a88b0c4ea9e
refs/heads/master
2022-12-26T23:52:46.994221
2020-09-25T17:14:06
2020-09-25T17:14:26
273,995,590
0
0
Apache-2.0
2020-06-21T22:52:15
2020-06-21T21:16:36
2020-06-19T15:27:49
null
[ { "alpha_fraction": 0.7088055610656738, "alphanum_fraction": 0.7151990532875061, "avg_line_length": 35.22105407714844, "blob_id": "d85d4b50bcca36588ddeb7c9edff32b8fde89cd1", "content_id": "e770c53ecff9bf6d798253dd2c59560d7cf08532", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3441, "license_type": "permissive", "max_line_length": 80, "num_lines": 95, "path": "/acme/agents/agent.py", "repo_name": "PPSantos/acme", "src_encoding": "UTF-8", "text": "# python3\n# Copyright 2018 DeepMind Technologies Limited. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"The base agent interface.\"\"\"\n\nfrom typing import List\n\nfrom acme import core\nfrom acme import types\n# Internal imports.\n\nimport dm_env\nimport numpy as np\n\n\nclass Agent(core.Actor, core.VariableSource):\n \"\"\"Agent class which combines acting and learning.\n\n This provides an implementation of the `Actor` interface which acts and\n learns. It takes as input instances of both `acme.Actor` and `acme.Learner`\n classes, and implements the policy, observation, and update methods which\n defer to the underlying actor and learner.\n\n The only real logic implemented by this class is that it controls the number\n of observations to make before running a learner step. 
This is done by\n  passing the number of `min_observations` to use and a ratio of\n  `observations_per_step`.\n\n  Note that `observations_per_step` can also be in the range [0, 1] in order\n  to allow more learner steps per update.\n  \"\"\"\n\n  def __init__(self, actor: core.Actor, learner: core.Learner,\n               min_observations: int, observations_per_step: float):\n    self._actor = actor\n    self._learner = learner\n\n    # We'll ignore the first min_observations when determining whether to take\n    # a step and we'll do so by making sure num_observations >= 0.\n    self._num_observations = -min_observations\n\n    # Rather than work directly with the observations_per_step ratio we can\n    # figure out how many observations or steps to run per update, one of which\n    # should be one.\n    if observations_per_step >= 1.0:\n      self._observations_per_update = int(observations_per_step)\n      self._steps_per_update = 1\n    else:\n      self._observations_per_update = 1\n      self._steps_per_update = int(1.0 / observations_per_step)\n\n  def select_action(self, observation: types.NestedArray) -> types.NestedArray:\n    return self._actor.select_action(observation)\n\n  def observe_first(self, timestep: dm_env.TimeStep):\n    self._actor.observe_first(timestep)\n\n  def observe(\n      self,\n      action: types.NestedArray,\n      next_timestep: dm_env.TimeStep,\n  ):\n    self._num_observations += 1\n    self._actor.observe(action, next_timestep)\n\n  def update(self):\n    # Only allow updates after some minimum number of observations have been\n    # made, and then only at some period given by observations_per_update.\n    if (self._num_observations >= 0 and\n        self._num_observations % self._observations_per_update == 0):\n      self._num_observations = 0\n\n      # Run a number of learner steps (usually gradient steps).\n      for _ in range(self._steps_per_update):\n        self._learner.step()\n      # Update actor weights after learner; note in TF this may be a no-op.\n      self._actor.update()\n\n  def get_variables(self, names: List[str]) -> List[List[np.ndarray]]:\n    return self._learner.get_variables(names)\n\n\n# Internal class.\n" }, { "alpha_fraction": 0.7043300271034241, "alphanum_fraction": 0.7081732153892517, "avg_line_length": 37.6435661315918, "blob_id": "9c79fbe132a0921210a512f9f94a4d7477a7c78e", "content_id": "547f72ff252166d9bc132b89551d327d80ecddde", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3903, "license_type": "permissive", "max_line_length": 80, "num_lines": 101, "path": "/acme/tf/variable_utils.py", "repo_name": "PPSantos/acme", "src_encoding": "UTF-8", "text": "# python3\n# Copyright 2018 DeepMind Technologies Limited. 
All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Variable handling utilities for TensorFlow 2.\"\"\"\n\nfrom concurrent import futures\nfrom typing import Mapping, Optional, Sequence\n\nfrom acme import core\n\nimport tensorflow as tf\nimport tree\n\n\nclass VariableClient:\n \"\"\"A variable client for updating variables from a remote source.\"\"\"\n\n def __init__(self,\n client: core.VariableSource,\n variables: Mapping[str, Sequence[tf.Variable]],\n update_period: int = 1):\n self._keys = list(variables.keys())\n self._variables = tree.flatten(list(variables.values()))\n self._call_counter = 0\n self._update_period = update_period\n self._client = client\n self._request = lambda: client.get_variables(self._keys)\n\n # Create a single background thread to fetch variables without necessarily\n # blocking the actor.\n self._executor = futures.ThreadPoolExecutor(max_workers=1)\n self._async_request = lambda: self._executor.submit(self._request)\n\n # Initialize this client's future to None to indicate to the `update()`\n # method that there is no pending/running request.\n self._future: Optional[futures.Future] = None\n\n def update(self):\n \"\"\"Periodically updates the variables with the latest copy from the source.\n\n Unlike `update_and_wait()`, this method makes an asynchronous request for\n variables and returns. Unless the request is immediately fulfilled, the\n variables are only copied _within a subsequent call to_ `update()`, whenever\n the request is fulfilled by the `VariableSource`.\n\n This stateful update method keeps track of the number of calls to it and,\n every `update_period` call, sends an asynchronous request to its server to\n retrieve the latest variables. 
It does so as long as there are no existing\n requests.\n\n If there is an existing fulfilled request when this method is called,\n the resulting variables are immediately copied.\n \"\"\"\n\n # Track the number of calls (we only update periodically).\n if self._call_counter < self._update_period:\n self._call_counter += 1\n\n period_reached: bool = self._call_counter >= self._update_period\n has_active_request: bool = self._future is not None\n\n if period_reached and not has_active_request:\n # The update period has been reached and no request has been sent yet, so\n # making an asynchronous request now.\n self._future = self._async_request()\n self._call_counter = 0\n\n if has_active_request and self._future.done():\n # The active request is done so copy the result and remove the future.\n self._copy(self._future.result())\n self._future: Optional[futures.Future] = None\n else:\n # There is either a pending/running request or we're between update\n # periods, so just carry on.\n return\n\n def update_and_wait(self):\n \"\"\"Immediately update and block until we get the result.\"\"\"\n self._copy(self._request())\n\n def _copy(self, new_variables: Sequence[Sequence[tf.Variable]]):\n \"\"\"Copies the new variables to the old ones.\"\"\"\n\n new_variables = tree.flatten(new_variables)\n if len(self._variables) != len(new_variables):\n raise ValueError('Length mismatch between old variables and new.')\n\n for new, old in zip(new_variables, self._variables):\n old.assign(new)\n" }, { "alpha_fraction": 0.6813697814941406, "alphanum_fraction": 0.6841882467269897, "avg_line_length": 30.963964462280273, "blob_id": "600f5698d1551da463bc5ac5fc220e2ab4dd2ef7", "content_id": "a1218d2a9599e8dfd6eb22b7452e38b83c12c21f", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7096, "license_type": "permissive", "max_line_length": 80, "num_lines": 222, "path": "/acme/jax/savers.py", "repo_name": "PPSantos/acme", "src_encoding": "UTF-8", "text": "# Lint as: python3\n# Copyright 2018 DeepMind Technologies Limited. 
All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Utility classes for saving model checkpoints.\"\"\"\n\nimport datetime\nimport os\nimport pickle\nimport signal\nimport threading\nimport time\nfrom typing import Any, TypeVar, Union\n\nfrom absl import logging\nfrom acme import core\nfrom acme.utils import paths\nimport jax.numpy as jnp\nimport numpy as np\nimport tree\n\n# Internal imports.\n\nNumber = Union[int, float]\nCheckpointState = Any\nT = TypeVar('T')\n\n_DEFAULT_CHECKPOINT_TTL = int(datetime.timedelta(days=5).total_seconds())\n_ARRAY_NAME = 'array_nest'\n_EXEMPLAR_NAME = 'nest_exemplar'\n\n\ndef restore_from_path(ckpt_dir: str) -> CheckpointState:\n \"\"\"Restore the state stored in ckpt_dir.\"\"\"\n array_path = os.path.join(ckpt_dir, _ARRAY_NAME)\n exemplar_path = os.path.join(ckpt_dir, _EXEMPLAR_NAME)\n\n with open(exemplar_path, 'rb') as f:\n exemplar = pickle.load(f)\n\n with open(array_path, 'rb') as f:\n files = np.load(f, allow_pickle=True)\n flat_state = [files[key] for key in files.files]\n unflattened_tree = tree.unflatten_as(exemplar, flat_state)\n\n def maybe_convert_to_python(value, numpy):\n return value if numpy else np.asscalar(value)\n\n return tree.map_structure(maybe_convert_to_python, unflattened_tree, exemplar)\n\n\ndef save_to_path(ckpt_dir: str, state: CheckpointState):\n \"\"\"Save the state in ckpt_dir.\"\"\"\n\n if not os.path.exists(ckpt_dir):\n os.makedirs(ckpt_dir)\n\n is_numpy = lambda x: isinstance(x, (np.ndarray, jnp.DeviceArray))\n flat_state = tree.flatten(state)\n nest_exemplar = tree.map_structure(is_numpy, state)\n\n array_path = os.path.join(ckpt_dir, _ARRAY_NAME)\n logging.info('Saving flattened array nest to %s', array_path)\n def _disabled_seek(*_):\n raise AttributeError('seek() is disabled on this object.')\n with open(array_path, 'wb') as f:\n setattr(f, 'seek', _disabled_seek)\n np.savez(f, *flat_state)\n\n exemplar_path = os.path.join(ckpt_dir, _EXEMPLAR_NAME)\n logging.info('Saving nest exemplar to %s', exemplar_path)\n with open(exemplar_path, 'wb') as f:\n pickle.dump(nest_exemplar, f)\n\n\nclass Checkpointer:\n \"\"\"Convenience class for periodically checkpointing.\n\n This can be used to checkpoint any numpy arrays or any object which is\n picklable.\n \"\"\"\n\n def __init__(\n self,\n object_to_save: core.Saveable,\n directory: str = '~/acme/',\n subdirectory: str = 'default',\n time_delta_minutes: float = 10.,\n add_uid: bool = True,\n checkpoint_ttl_seconds: int = _DEFAULT_CHECKPOINT_TTL,\n ):\n \"\"\"Builds the saver object.\n\n Args:\n object_to_save: The object to save in this checkpoint, this must have a\n save and restore method.\n directory: Which directory to put the checkpoint in.\n subdirectory: Sub-directory to use (e.g. 
if multiple checkpoints are being\n saved).\n time_delta_minutes: How often to save the checkpoint, in minutes.\n add_uid: If True adds a UID to the checkpoint path, see\n `paths.get_unique_id()` for how this UID is generated.\n checkpoint_ttl_seconds: TTL (time to live) in seconds for checkpoints.\n \"\"\"\n # TODO(tamaranorman) accept a Union[Saveable, Mapping[str, Saveable]] here\n self._object_to_save = object_to_save\n self._time_delta_minutes = time_delta_minutes\n\n self._last_saved = 0.\n self._lock = threading.Lock()\n\n self._checkpoint_dir = paths.process_path(\n directory,\n 'checkpoints',\n subdirectory,\n ttl_seconds=checkpoint_ttl_seconds,\n backups=False,\n add_uid=add_uid)\n\n # Restore from the most recent checkpoint (if it exists).\n self.restore()\n\n def restore(self):\n \"\"\"Restores from the saved checkpoint if it exists.\"\"\"\n if os.path.exists(os.path.join(self._checkpoint_dir, _EXEMPLAR_NAME)):\n logging.info('Restoring checkpoint: %s', self._checkpoint_dir)\n with self._lock:\n state = restore_from_path(self._checkpoint_dir)\n self._object_to_save.restore(state)\n\n def save(self, force: bool = False) -> bool:\n \"\"\"Save the checkpoint if it's the appropriate time, otherwise no-ops.\n\n Args:\n force: Whether to force a save regardless of time elapsed since last save.\n\n Returns:\n A boolean indicating if a save event happened.\n \"\"\"\n\n if (not force and\n time.time() - self._last_saved < 60 * self._time_delta_minutes):\n return False\n\n logging.info('Saving checkpoint: %s', self._checkpoint_dir)\n with self._lock:\n state = self._object_to_save.save()\n save_to_path(self._checkpoint_dir, state)\n\n self._last_saved = time.time()\n return True\n\n\nclass CheckpointingRunner(core.Worker):\n \"\"\"Wrap an object and checkpoint it periodically.\n\n If the wrapped object is a Worker, checkpointing happens in a background\n thread while the worker's own run method executes; otherwise this runner's\n run method performs the checkpointing loop itself.\n\n This internally creates a Checkpointer around the `wrapped` object and\n exposes all of the methods of `wrapped`. Additionally, any `**kwargs` passed\n to the runner are forwarded to the internal Checkpointer.\n \"\"\"\n\n def __init__(\n self,\n wrapped: Union[core.Saveable, core.Worker],\n *,\n time_delta_minutes: float = 10.,\n **kwargs,\n ):\n self._wrapped = wrapped\n self._time_delta_minutes = time_delta_minutes\n self._checkpointer = Checkpointer(\n object_to_save=wrapped, time_delta_minutes=1, **kwargs)\n\n def run(self):\n \"\"\"Periodically checkpoints the given object.\"\"\"\n\n # Handle preemption signal. Note that this must happen in the main thread.\n def _signal_handler(signum: signal.Signals, frame):\n del signum, frame\n logging.info('Caught SIGTERM: forcing a checkpoint save.')\n self._checkpointer.save(force=True)\n\n try:\n signal.signal(signal.SIGTERM, _signal_handler)\n except ValueError:\n logging.warning(\n 'Caught ValueError when registering signal handler. '\n 'This probably means we are not running in the main thread. '\n 'Proceeding without checkpointing-on-preemption.')\n\n if isinstance(self._wrapped, core.Worker):\n # Do checkpointing in a separate thread and defer to worker's run().\n threading.Thread(target=self.checkpoint).start()\n self._wrapped.run()\n else:\n # Wrapped object doesn't have a run method; set our run method to ckpt.\n self.checkpoint()\n\n def __dir__(self):\n return dir(self._wrapped)\n\n def __getattr__(self, name):\n return getattr(self._wrapped, name)\n\n def checkpoint(self):\n while True:\n self._checkpointer.save()\n time.sleep(self._time_delta_minutes * 60)\n" } ]
3
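The `VariableClient` in the record above hides fetch latency behind a single-worker executor: a request is submitted in the background, and the result is only copied once a later call finds the future fulfilled. A minimal, self-contained sketch of that polling pattern follows; the `FakeSource` class and its `get_variables` method are illustrative stand-ins, not Acme APIs.

```python
from concurrent import futures
import time


class FakeSource:
    """Stand-in for a remote variable source; sleeps to mimic latency."""

    def get_variables(self, keys):
        time.sleep(0.05)  # simulate a slow remote fetch
        return {key: 0.0 for key in keys}


# One background worker, mirroring max_workers=1 in the code above.
executor = futures.ThreadPoolExecutor(max_workers=1)
future = executor.submit(FakeSource().get_variables, ["policy"])

steps = 0
while not future.done():
    steps += 1  # an actor would keep acting here instead of spinning
print(f"acted for {steps} steps, then received {future.result()}")
```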
ApenJulius/Moraliity-System
https://github.com/ApenJulius/Moraliity-System
1b0198f1f6cfb40e91e6f43fac4830e9cf3298fb
5a9aee8b846e240698c8543a6ce38df9d99d1c70
bf3206e7757113f38b6c3f537123a4465066b869
refs/heads/main
2022-12-29T04:34:47.549030
2020-10-17T22:19:04
2020-10-17T22:19:04
304,967,234
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.8045976758003235, "alphanum_fraction": 0.8045976758003235, "avg_line_length": 42.5, "blob_id": "d1a4d8c334fd3f25f99f892bcabcc1b5c280e4a8", "content_id": "af5fee7bc0854de700d01b3fbab90120d4a48132", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 87, "license_type": "no_license", "max_line_length": 67, "num_lines": 2, "path": "/README.md", "repo_name": "ApenJulius/Moraliity-System", "src_encoding": "UTF-8", "text": "# Moraliity-System\nThis is a pretty simple first draft of a morality system type thing\n" }, { "alpha_fraction": 0.5967742204666138, "alphanum_fraction": 0.6209677457809448, "avg_line_length": 31.384614944458008, "blob_id": "ba96d96790e57ad6f5bcb23525756040170eb6cf", "content_id": "33b0d6be38c3bbe109e74e81daf93c6941d3db21", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 868, "license_type": "no_license", "max_line_length": 98, "num_lines": 26, "path": "/Morality System.py", "repo_name": "ApenJulius/Moraliity-System", "src_encoding": "UTF-8", "text": "def moral(test):\r\n if test == \"1\":\r\n return 1\r\n elif test == \"2\":\r\n return -1\r\n\r\ntests = [\r\n \"You see someone by a cliff, do you:\\n1.Warn them about getting too close\\n2.Push them off\\n\",\r\n \"A child drops its icecream, do you:\\n1.Console the child\\n2.Laugh and mock the child\\n\",\r\n \"You are given immunity and a gun, do you:\\n1.Kill someone\\n2.Not kill someone\\n\",\r\n \"You are given the cure to aids, do you:\\n1.Cure aids\\n2.Destroy the cure\\n\"\r\n]\r\n\r\nmorality = 0\r\nfor test in tests:\r\n answer = input(test)\r\n morality += moral(answer)\r\n\r\nif morality == -4:\r\n print(\"You absolute evil man\")\r\nelif morality == -1 or morality == -2 or morality == -3:\r\n print(\"you kinda evil man\")\r\nelif morality == 1 or morality == 2 or morality == 3:\r\n print(\"You kinda nice\")\r\nelif morality == 4:\r\n print(\"pretty nice person aint ya\")\r\n" } ]
2
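The quiz above hard-codes its scoring and verdict branches; the same behavior can be expressed data-first. A small sketch under that assumption (the score table, thresholds, and canned answers are made up for illustration, not taken from the repository):

```python
# Dictionary-driven variant of the quiz scoring pattern above.
SCORES = {"1": 1, "2": -1}  # answer -> morality delta; anything else is 0
VERDICTS = [(-4, "You absolute evil man"), (-2, "you kinda evil man"),
            (2, "You kinda nice"), (4, "pretty nice person aint ya")]


def verdict(total):
    # Pick the verdict whose threshold lies closest to the total score.
    return min(VERDICTS, key=lambda pair: abs(pair[0] - total))[1]


answers = ["1", "2", "1", "1"]  # canned answers instead of input()
total = sum(SCORES.get(answer, 0) for answer in answers)
print(verdict(total))  # -> "You kinda nice"
```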
luke-welton/ai-homework3
https://github.com/luke-welton/ai-homework3
5cc4c1921399d7e386de303c01aa2fdd848fc32e
51f52710370dfe1f9b455e17261d7707d5cd28bc
7baddc1b275f912d1fe7a66575ffca9d5b38f2f2
refs/heads/master
2020-04-05T22:37:47.480467
2018-12-04T17:37:45
2018-12-04T17:37:45
157,262,656
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.4379175901412964, "alphanum_fraction": 0.447243869304657, "avg_line_length": 33.87378692626953, "blob_id": "9d2d91fb8310a9dfbdf200ceb48d58e0c6a77933", "content_id": "3756ebe1d6c2be8661113a02e3f79751d3b4bc58", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7184, "license_type": "no_license", "max_line_length": 119, "num_lines": 206, "path": "/question3.py", "repo_name": "luke-welton/ai-homework3", "src_encoding": "UTF-8", "text": "import gym\nimport sys\nimport numpy as np\nfrom random import random\nimport matplotlib.pyplot as plt\n\ntry:\n ALPHA = float(sys.argv[1])\n GAMMA = float(sys.argv[2])\n EPSILON = float(sys.argv[3])\n COOLING = float(sys.argv[4])\nexcept IndexError:\n print(\"Not enough arguments passed. Shutting down.\")\n exit(0)\n\nALGS = [\"Q\", \"SARSA\", \"EXPECTED_SARSA\"]\n\nPOSITIONS = np.arange(-12, 6) / 10\nVELOCITIES = np.arange(-7, 8) / 100\nACTIONS = np.arange(3)\n\nenv = gym.make(\"MountainCar-v0\")\n\nq_values = dict()\nfor pair in np.array(np.meshgrid(POSITIONS, VELOCITIES)).T.reshape(-1, 2):\n q_values[tuple(pair)] = dict()\n\n\ndef q(current_state, next_state, reward, vals):\n next_action = max(q_values[next_state].keys(), key=(lambda key: q_values[next_state][key]))\n\n old_value = q_values[current_state][action]\n new_value = (1 - vals[\"alpha\"]) * old_value + \\\n vals[\"alpha\"] * (reward + vals[\"gamma\"] * q_values[next_state][next_action])\n q_values[current_state][action] = new_value\n\n\ndef sarsa(current_state, next_state, reward, vals):\n if random() < vals[\"epsilon\"]:\n next_action = env.action_space.sample()\n else:\n next_action = max(q_values[next_state].keys(), key=(lambda key: q_values[next_state][key]))\n\n old_value = q_values[current_state][action]\n new_value = (1 - vals[\"alpha\"]) * old_value + \\\n vals[\"alpha\"] * (reward + vals[\"gamma\"] * q_values[next_state][next_action])\n q_values[current_state][action] = new_value\n\n\ndef expected_sarsa(current_state, next_state, reward, vals):\n expectation = vals[\"epsilon\"] * env.action_space.sample() + \\\n (1 - vals[\"epsilon\"]) * max(q_values[next_state].keys(), key=(lambda key: q_values[next_state][key]))\n next_action = round(expectation)\n\n old_value = q_values[current_state][action]\n new_value = (1 - vals[\"alpha\"]) * old_value + \\\n vals[\"alpha\"] * (reward + vals[\"gamma\"] * q_values[next_state][next_action])\n q_values[current_state][action] = new_value\n\n\ndef initialize_values():\n for entry in q_values:\n q_values[entry].clear()\n for _action in ACTIONS:\n q_values[entry][_action] = 0\n\n\nfuncs = locals()\n\nfor cool_alpha in [True, False]:\n for cool_gamma in [True, False]:\n for cool_epsilon in [True, False]:\n cool_info = \"(Cool $\\\\alpha$: {}, Cool $\\gamma$: {}, Cool $\\epsilon$: {})\".format(\n \"Y\" if cool_alpha else \"N\", \"Y\" if cool_gamma else \"N\", \"Y\" if cool_epsilon else \"N\"\n )\n\n means = dict()\n variances = dict()\n\n for v in [means, variances]:\n for alg in ALGS:\n v[alg] = []\n\n print(cool_info)\n for r in range(20):\n print(\"\\nRun {}\".format(r))\n plot_means = dict()\n plot_vars = dict()\n\n for alg in ALGS:\n episode_counts = []\n\n initialize_values()\n current_vals = {\n \"alpha\": ALPHA,\n \"gamma\": GAMMA,\n \"epsilon\": EPSILON\n }\n\n for i in range(1000):\n state = env.reset()\n t = 0\n\n done = False\n while not done:\n t += 1\n\n state = tuple(state)\n d = round(state[0], 1)\n v = round(state[1], 2)\n state = (d, v)\n\n if random() < 
current_vals[\"epsilon\"]:\n action = env.action_space.sample()\n else:\n action = max(q_values[state].keys(), key=(lambda key: q_values[state][key]))\n\n next_state, reward, done, info = env.step(action)\n\n next_state_t = tuple(next_state)\n next_d = round(next_state_t[0], 1)\n next_v = round(next_state_t[1], 2)\n next_state_t = (next_d, next_v)\n\n funcs[alg.lower()](state, next_state_t, reward, current_vals)\n\n state = next_state\n\n if cool_alpha:\n current_vals[\"alpha\"] *= (1 - COOLING / 50)\n if cool_gamma:\n current_vals[\"gamma\"] *= (1 - COOLING / 200)\n if cool_epsilon:\n current_vals[\"epsilon\"] *= (1 - COOLING)\n\n # print(\"Run {}: Finished on Episode {}\".format(i, t))\n\n episode_counts.append(t)\n\n current_means = []\n current_variances = []\n total_sum = 0\n for i, count in enumerate(episode_counts):\n total_sum += count\n current_means.append(total_sum / (i + 1))\n\n variance = 0\n for j in range(i + 1):\n variance += (episode_counts[j] - current_means[i]) ** 2\n variance /= (i + 1)\n current_variances.append(variance)\n\n plot_means[alg] = current_means\n plot_vars[alg] = current_variances\n\n mean = current_means[len(current_means) - 1]\n variance = current_variances[len(current_variances) - 1]\n\n means[alg].append(mean)\n variances[alg].append(variance)\n\n print(\"Mean for {}: {}\".format(alg, mean))\n print(\"Variance for {}: {}\".format(alg, variance))\n\n env.close()\n\n if r == 0:\n for vals in [plot_means, plot_vars]:\n fig, ax = plt.subplots()\n\n title = \"\"\n if vals == plot_means:\n title = \"Mean over Iterations\"\n else:\n title = \"Variance over Iterations\"\n\n title += \"\\n\" + cool_info\n\n plt.title(title)\n plt.xticks(np.arange(0, len(vals[\"Q\"]), 100))\n\n for key in vals:\n plt.plot(np.arange(len(vals[key])), vals[key], label=key)\n\n plt.legend(loc=\"best\")\n plt.show()\n\n for v in [means, variances]:\n fig, ax = plt.subplots()\n\n title = \"\"\n if v == means:\n title = \"Mean over Runs\"\n else:\n title = \"Variance over Runs\"\n\n title += \"\\n\" + cool_info\n\n plt.title(title)\n plt.xticks(np.arange(0, 20, 4))\n\n for key in v:\n plt.plot(np.arange(20), v[key], label=key)\n\n plt.legend(loc=\"best\")\n plt.show()\n" } ]
1
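All three learners in `question3.py` above share one tabular temporal-difference update; they differ only in how the bootstrap action is chosen (greedy for Q-learning, epsilon-greedy for SARSA, an expectation for Expected SARSA). A standalone sketch of that shared update rule, with a toy table and numbers invented for illustration:

```python
def td_update(old_value, reward, next_value, alpha=0.1, gamma=0.99):
    """Move Q(s, a) toward the bootstrapped target r + gamma * Q(s', a')."""
    return (1 - alpha) * old_value + alpha * (reward + gamma * next_value)


q = {("s0", 0): 0.0, ("s0", 1): 0.5}
# One Q-learning-style step: bootstrap from the greedy next-state value.
greedy_next = max(q[("s0", 0)], q[("s0", 1)])
q[("s0", 0)] = td_update(q[("s0", 0)], reward=-1.0, next_value=greedy_next)
print(round(q[("s0", 0)], 4))  # -> -0.0505
```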
abatilo/multiple-choice-ai
https://github.com/abatilo/multiple-choice-ai
a1139014ce70aac9fcb02f62b59ea1edea858eeb
8f314651365ee10b3c35fa79c8c9548cea81a70a
1b0fea7c82c2ec2ae9d40adfe758655feb0252a1
refs/heads/master
2021-09-01T07:25:57.360611
2017-12-25T17:11:16
2017-12-25T17:11:16
115,207,574
2
0
null
null
null
null
null
[ { "alpha_fraction": 0.738170325756073, "alphanum_fraction": 0.738170325756073, "avg_line_length": 25.41666603088379, "blob_id": "d3159468f0f389c3ea34680c5ad3451342e93721", "content_id": "7c6188f7c7fa28ab73584c876884978339767044", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 317, "license_type": "permissive", "max_line_length": 53, "num_lines": 12, "path": "/service/src/main/java/MultipleChoiceRequest.java", "repo_name": "abatilo/multiple-choice-ai", "src_encoding": "UTF-8", "text": "import com.fasterxml.jackson.annotation.JsonProperty;\n\nimport lombok.Data;\n\n@Data\npublic class MultipleChoiceRequest {\n @JsonProperty(\"#Q\") private String q;\n @JsonProperty(\"A\") private String a;\n @JsonProperty(\"B\") private String b;\n @JsonProperty(\"C\") private String c;\n @JsonProperty(\"D\") private String d;\n}\n" }, { "alpha_fraction": 0.4345991611480713, "alphanum_fraction": 0.4571026861667633, "avg_line_length": 29.913043975830078, "blob_id": "4a2a194d16f21826c8d2f90e6e3fa1587c1c66f3", "content_id": "1696cb3c248e4330e7aec8914b0171c4b025d964", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 711, "license_type": "permissive", "max_line_length": 110, "num_lines": 23, "path": "/service/src/main/resources/evaluate.py", "repo_name": "abatilo/multiple-choice-ai", "src_encoding": "UTF-8", "text": "import json\nimport requests\nimport time\n\nwith open('./question_bank.json') as jsonin:\n i = 0\n correct = 0\n for _ in jsonin:\n i += 1\n j = json.loads(_.strip())\n ans = j['^']\n headers = {'Content-Type': 'application/json'}\n while True:\n try:\n r = requests.post('http://localhost:8080', headers=headers, data=_.strip())\n if r.status_code == 200:\n if ans == r.text:\n correct += 1\n print \"\\rGot %d correct out of %d which is %.2f%%\" % (correct, i, (float(correct) / i) * 100),\n break\n except:\n time.sleep(1)\n continue\n" }, { "alpha_fraction": 0.6324626803398132, "alphanum_fraction": 0.6526119112968445, "avg_line_length": 33.35897445678711, "blob_id": "fd224aa8e7e9881bd1e6495bb2588f59083fd392", "content_id": "edd1cb24e45400a554626fcb3df9802cb553ca5a", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 2680, "license_type": "permissive", "max_line_length": 151, "num_lines": 78, "path": "/service/src/main/java/Word2Vec.java", "repo_name": "abatilo/multiple-choice-ai", "src_encoding": "UTF-8", "text": "import com.google.common.base.Preconditions;\nimport com.google.common.collect.ImmutableMap;\n\nimport java.io.File;\nimport java.io.FileInputStream;\nimport java.io.IOException;\nimport java.nio.ByteOrder;\nimport java.nio.FloatBuffer;\nimport java.nio.MappedByteBuffer;\nimport java.nio.channels.FileChannel;\nimport java.util.Map;\n\npublic class Word2Vec {\n\n private final static long ONE_GB = 1024 * 1024 * 1024;\n\n // Heavily inspired by:\n // https://github.com/medallia/Word2VecJava/blob/85e8ce5715275a2c4b5440f1d62346aa6dcea52e/src/main/java/com/medallia/word2vec/Word2VecModel.java#L120\n public static Map<String, double[]> fromBin(File file) throws IOException {\n if (file.length() > (2 * ONE_GB)) {\n throw new IllegalArgumentException(\"Model cannot be larger than 2GB\");\n }\n final ImmutableMap.Builder<String, double[]> space = ImmutableMap.builder();\n final FileInputStream fis = new FileInputStream(file);\n final FileChannel channel = fis.getChannel();\n MappedByteBuffer buffer =\n 
channel.map(FileChannel.MapMode.READ_ONLY, 0, Math.min(channel.size(),\n file.length()));\n buffer.order(ByteOrder.LITTLE_ENDIAN);\n\n StringBuilder sb = new StringBuilder();\n char c = (char) buffer.get();\n while (c != '\\n') {\n sb.append(c);\n c = (char) buffer.get();\n }\n String firstLine = sb.toString();\n int index = firstLine.indexOf(' ');\n Preconditions.checkState(index != -1,\n \"Expected a space in the first line of file '%s'\", firstLine);\n\n final int vocabSize = Integer.parseInt(firstLine.substring(0, index));\n final int layerSize = Integer.parseInt(firstLine.substring(index + 1));\n\n for (int i = 0; i < vocabSize; ++i) {\n // Read vocab\n sb.setLength(0);\n c = (char) buffer.get();\n while (c != ' ') {\n // Ignore newlines in front of words (some binary files have newline,\n // some don't)\n if (c != '\\n') {\n sb.append(c);\n }\n c = (char) buffer.get();\n }\n\n // Read vector\n final FloatBuffer floatBuffer = buffer.asFloatBuffer();\n final float[] floats = new float[layerSize];\n floatBuffer.get(floats);\n // We need to convert to doubles because the floats don't have enough\n // precision when we multiply some of them as part of calculating cosine\n // similarity\n final double[] doubles = new double[layerSize];\n for (int j = 0; j < floats.length; ++j) {\n doubles[j] = floats[j];\n }\n\n space.put(sb.toString(), doubles);\n\n // Advance the pointer to go past all the floats\n buffer.position(buffer.position() + 4 * layerSize);\n }\n return space.build();\n }\n\n}\n" }, { "alpha_fraction": 0.669224202632904, "alphanum_fraction": 0.695652186870575, "avg_line_length": 18.88135528564453, "blob_id": "6786ea9dc69b6f3139b2b3145479260563d58a76", "content_id": "3c2adc3e2b1369908d1389f227727f480fcbad05", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Gradle", "length_bytes": 1173, "license_type": "permissive", "max_line_length": 72, "num_lines": 59, "path": "/build.gradle", "repo_name": "abatilo/multiple-choice-ai", "src_encoding": "UTF-8", "text": "ext {\n opennlpVersion = \"1.8.2\"\n guavaVersion = \"23.0\"\n lombokVersion = \"1.16.18\"\n dropWizardVersion = \"1.1.4\"\n java8ModuleVersion = \"0.9.0-1\"\n}\n\nbuildscript {\n repositories {\n mavenCentral()\n jcenter()\n }\n dependencies {\n classpath 'com.github.jengelman.gradle.plugins:shadow:2.0.1'\n }\n}\n\nallprojects {\n apply plugin: \"java\"\n apply plugin: \"idea\"\n\n repositories {\n mavenCentral()\n }\n\n sourceCompatibility = \"1.8\"\n targetCompatibility = \"1.8\"\n}\n\nsubprojects {\n group = \"io.aaronbatilo.multiple-choice-ai\"\n}\n\nproject(\":service\") {\n apply plugin: 'com.github.johnrengelman.shadow'\n apply plugin: 'application'\n\n mainClassName = \"MultipleChoiceApplication\"\n\n shadowJar {\n archiveName = \"multiple-choice-ai.jar\"\n destinationDir projectDir\n }\n\n dependencies {\n compile \"org.apache.opennlp:opennlp-tools:$opennlpVersion\"\n\n compile \"com.google.guava:guava:$guavaVersion\"\n\n compile \"org.projectlombok:lombok:$lombokVersion\"\n\n compile \"io.dropwizard:dropwizard-core:$dropWizardVersion\"\n\n compile \"io.dropwizard.modules:dropwizard-java8:$java8ModuleVersion\"\n\n testCompile group: 'junit', name: 'junit', version: '4.12'\n }\n}\n" }, { "alpha_fraction": 0.6872568130493164, "alphanum_fraction": 0.7208171486854553, "avg_line_length": 33.266666412353516, "blob_id": "eab325277e9108071d9c4e6a363acd4876ec4667", "content_id": "65e51c477a54338d56dc9f4c9817c383cdbf5027", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": 
false, "language": "Markdown", "length_bytes": 4120, "license_type": "permissive", "max_line_length": 184, "num_lines": 120, "path": "/README.md", "repo_name": "abatilo/multiple-choice-ai", "src_encoding": "UTF-8", "text": "# multiple-choice-ai\n\nUses a trivial approach to using word2vec vectors for picking an answer to a\nmultiple choice question.\n\nThe implementation itself is all in\n[QuestionGuesser](./service/src/main/java/QuestionGuesser.java) and the\nintuition behind the approach can be found there as well.\n\nInclude with this project is an export of\n[uberspot/OpenTriviaQA](https://github.com/uberspot/OpenTriviaQA) that has been\nconverted to JSON. These questions are what the requests were modeled after.\nThe JSON form of this repo has been put at\n[question_bank.json](./service/src/main/resources/question_bank.json)\n\nThe included [vectors.bin](./service/vectors.bin) was trained using the vanilla\nword2vec [demo\nscript](https://github.com/abatilo/word2vec/blob/master/demo-word.sh) on the\ntext8 corpus.\n\n## Getting Started\n\n### Prerequisites\n\nRequires Java 8 and Gradle 3.5 to be installed.\n\n### Build Instructions\n```\ngit clone https://github.com/abatilo/multiple-choice-ai.git\ncd multiple-choice-ai\n./gradlew :service:clean :service:shadowJar\njava -jar service/multiple-choice-ai.jar server local.yaml\n```\n\nmultiple-choice-ai is a Dropwizard service which will use the specified yaml\nfile to load in configurations.\n\n### Usage\nOnce the server is running, a request can be made like so (assumes you have\n[httpie](https://github.com/jakubroztocil/httpie) installed):\n\n```\nโ‡’ echo '{\"#Q\":\"Bears are carnivores\",\"A\":\"True\",\"B\": \"False\"}' | http POST :8080\nHTTP/1.1 200 OK\nContent-Length: 4\nContent-Type: application/json\nDate: Sun, 24 Dec 2017 08:31:23 GMT\n\nTrue\n```\n\nEach request can have up to 4 possible answers to choose from:\n```\nโ‡’ echo '{\"#Q\":\"All of these animals are omnivorous except one.\",\"A\":\"Fox\",\"B\": \"Mouse\",\"C\":\"Opossum\",\"D\":\"Snail\"}' | http POST :8080\nHTTP/1.1 200 OK\nContent-Length: 5\nContent-Type: application/json\nDate: Sun, 24 Dec 2017 08:35:47 GMT\n\nSnail\n```\n\nThe response is nothing more than the text of the answer that the service\nbelieves is correct.\n\nIf the request is malformed, you will receive an HTTP status of 400. 
If the\nrequest was properly formed, but the service was unable to answer due to out of\nvocabulary words, the service will return HTTP 422.\n\n## Results\n\nUnfortunately, the approach used only barely does better than random chance\nwhen it comes to accuracy of the answers.\n```\n⇒ python evaluate.py\nGot 14280 correct out of 48700 which is 29.32%\n```\n\nOn the bright side, testing the service locally with\n[siege](https://www.joedog.org/siege-home/) showed the approach to be pretty\nfast.\n\n```\n⇒\nsiege -c40 -t30s --content-type \"application/json\" 'http://localhost:8080 POST {\"#Q\":\"All of these animals are omnivorous except one.\",\"A\":\"Fox\",\"B\":\"Mouse\",\"C\":\"Opossum\",\"D\":\"Snail\"}'\nTransactions: 162299 hits\nAvailability: 100.00 %\nElapsed time: 29.99 secs\nData transferred: 0.77 MB\nResponse time: 0.01 secs\nTransaction rate: 5411.77 trans/sec\nThroughput: 0.03 MB/sec\nConcurrency: 38.52\nSuccessful transactions: 162301\nFailed transactions: 0\nLongest transaction: 0.08\nShortest transaction: 0.00\n```\n\n## Built With\n\n* [Dropwizard](http://www.dropwizard.io/1.1.4/docs/) - The web framework used\n* [Guava](https://github.com/google/guava/wiki/Release23) - Utility functions\n* [OpenNLP](https://opennlp.apache.org/docs/1.8.2/manual/opennlp.html) - NLP library for doing text processing\n* [Lombok](https://projectlombok.org/) - Annotations for less boilerplate code\n* [word2vec](https://code.google.com/archive/p/word2vec/) - Included model was trained with the original C implementation of word2vec\n\n## Contributing\n\nFork the project and submit a PR and one of the maintainers will be in touch.\n\n## Authors\n\n* Aaron Batilo - Developer / maintainer - [abatilo](https://github.com/abatilo)\n\nSee also the list of [contributors](https://github.com/abatilo/multiple-choice-ai/contributors) who participated in this project.\n\n## License\n\nThis project is licensed under the MIT License - see the [LICENSE](LICENSE) file for details\n" } ]
5
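The README above describes the guessing approach only informally: embed the question and each candidate answer with word2vec and keep the most similar candidate. A hedged Python sketch of that idea follows; the two-dimensional `vectors` table is invented for illustration, while the real service loads `vectors.bin` and is written in Java.

```python
import math


def cosine(a, b):
    """Cosine similarity between two equal-length vectors."""
    dot = sum(x * y for x, y in zip(a, b))
    norm_a = math.sqrt(sum(x * x for x in a))
    norm_b = math.sqrt(sum(x * x for x in b))
    return dot / (norm_a * norm_b)


def embed(text, vectors):
    """Average the vectors of in-vocabulary words; None if none are known."""
    known = [vectors[w] for w in text.lower().split() if w in vectors]
    if not known:
        return None  # the service answers HTTP 422 in this case
    return [sum(column) / len(known) for column in zip(*known)]


vectors = {"bears": [0.9, 0.1], "carnivores": [0.8, 0.2],
           "true": [0.7, 0.3], "false": [0.1, 0.9]}
question = embed("Bears are carnivores", vectors)
candidates = {"True": embed("True", vectors), "False": embed("False", vectors)}
print(max(candidates, key=lambda k: cosine(question, candidates[k])))  # -> True
```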
johnatasr/CoolFreelancers
https://github.com/johnatasr/CoolFreelancers
5ae0691f74395113f994cc33f6564ea6c3a288c4
5757b72f52e4fb0ce535c47d07ec2d45b19674c5
97a8beb9deb38822999d9e0ca4bbbb09ca738634
refs/heads/master
2023-03-28T05:15:30.096346
2021-03-26T18:19:35
2021-03-26T18:19:35
346,542,481
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5076923370361328, "alphanum_fraction": 0.7076923251152039, "avg_line_length": 16.727272033691406, "blob_id": "61c482f1477c61fb657c25428c06a155b03416b9", "content_id": "4cc264129015da5595271c9027376dbc0c4e7587", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 195, "license_type": "no_license", "max_line_length": 27, "num_lines": 11, "path": "/requirements.txt", "repo_name": "johnatasr/CoolFreelancers", "src_encoding": "UTF-8", "text": "asgiref==3.3.1\nDjango==3.1.7\ndjango-cors-headers==3.2.1\ndjangorestframework==3.12.2\ngunicorn==20.0.4\nnumpy==1.20.1\npydantic==1.7.3\npython-dateutil==2.8.1\npytz==2021.1\nsix==1.15.0\nsqlparse==0.4.1\n" }, { "alpha_fraction": 0.5347825884819031, "alphanum_fraction": 0.5661169290542603, "avg_line_length": 38.69643020629883, "blob_id": "40cc096deaf3bfd8c12aa97fff7437a77bcef55d", "content_id": "64c3ae846ad8da9d198de5b5cd682a6277c36d34", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6675, "license_type": "no_license", "max_line_length": 120, "num_lines": 168, "path": "/freelancer/infra/tests_infra.py", "repo_name": "johnatasr/CoolFreelancers", "src_encoding": "UTF-8", "text": "from django.test import TestCase\nfrom freelancer.infra.repositories import FreelancerRepo\nfrom freelancer.infra.serializers import FreelancerSearchedSerializer\nfrom freelancer.domain.entities import Freelancer, Skill, ProcessedFreelancer\nfrom datetime import datetime\n\n\nclass RepositoryTestCase(TestCase):\n \"\"\"\n Tests da camada de repositรณrio\n \"\"\"\n\n def setUp(self):\n \"\"\"\n Montando dependencias para os testes\n \"\"\"\n\n self.freelancer_one = Freelancer(\n id=1,\n user={\"firstName\": \"teste1\", \"lastName\": \"1last\", \"jobTitle\": \"Teste JS Developer\"},\n status=\"new\",\n retribution=100,\n availability_date=datetime.strptime(\"2020-06-10T00:00:00+01:00\".split('T')[0], '%Y-%m-%d'),\n professional_experiences=self.return_payload_skills()\n )\n\n self.skills_list = [{\"id\": i, \"name\": f\"{i}_teste\", \"total_months\": i + 10} for i, sk in enumerate(range(1, 5))]\n self.repo = FreelancerRepo()\n\n def return_payload_skills(self):\n \"\"\"\n Payload de teste\n \"\"\"\n\n return [\n {\n \"id\": 4,\n \"companyName\": \"Okuneva, Kerluke and Strosin\",\n \"startDate\": \"2016-01-01T00:00:00+01:00\",\n \"endDate\": \"2018-05-01T00:00:00+01:00\",\n \"skills\": [\n {\n \"id\": 241,\n \"name\": \"React\"\n },\n {\n \"id\": 270,\n \"name\": \"Node.js\"\n },\n {\n \"id\": 370,\n \"name\": \"Javascript\"\n }\n ]\n },\n {\n \"id\": 54,\n \"companyName\": \"Hayes - Veum\",\n \"startDate\": \"2014-01-01T00:00:00+01:00\",\n \"endDate\": \"2016-09-01T00:00:00+01:00\",\n \"skills\": [\n {\n \"id\": 470,\n \"name\": \"MySQL\"\n },\n {\n \"id\": 400,\n \"name\": \"Java\"\n },\n {\n \"id\": 370,\n \"name\": \"Javascript\"\n }\n ]\n }\n ]\n\n \"\"\"\n Testes de mรฉtodos de freelancer.infra.repositories\n \"\"\"\n\n def test_create_skill_entity_by_list(self):\n list_skills_entity = self.repo.create_skill_entity_by_list(self.skills_list)\n self.assertIsInstance(list_skills_entity, list)\n self.assertEquals(len(list_skills_entity), 4)\n\n def test_create_searched_freelancer_entity(self):\n searched_freelancer_entity = self.repo.create_searched_freelancer_entity(freelancer_id=1, skills=[])\n self.assertIsInstance(searched_freelancer_entity, object)\n self.assertEquals(searched_freelancer_entity.id, 1)\n 
self.assertEquals(len(searched_freelancer_entity.computed_skills), 0)\n\n def test_create_freelancer_entity(self):\n freelancer_entity = self.repo.create_freelancer_entity({\n \"id\": 42,\n \"user\": {\n \"firstName\": \"Hunter\",\n \"lastName\": \"Moore\",\n \"jobTitle\": \"Fullstack JS Developer\"\n },\n \"status\": \"new\",\n \"retribution\": 650,\n \"availabilityDate\": \"2018-06-13T00:00:00+01:00\",\n \"professionalExperiences\": []\n })\n self.assertIsInstance(freelancer_entity, object)\n self.assertEquals(freelancer_entity.id, 42)\n self.assertEquals(freelancer_entity.user[0]['firstName'], \"Hunter\")\n self.assertEquals(freelancer_entity.status[0], \"new\")\n self.assertEquals(freelancer_entity.retribution[0], 650)\n self.assertEquals(freelancer_entity.availability_date[0], \"2018-06-13T00:00:00+01:00\")\n self.assertEquals(freelancer_entity.professional_experiences, [])\n\n def test_process_freelancer_experiences(self):\n processed_freelancer_entity = self.repo.process_freelancer_experiences(self.freelancer_one)\n self.assertIsInstance(processed_freelancer_entity, object)\n self.assertEquals(processed_freelancer_entity.id, 1)\n self.assertEquals(processed_freelancer_entity.computed_skills[0].name, \"MySQL\")\n self.assertEquals(processed_freelancer_entity.computed_skills[1].name, \"Java\")\n self.assertEquals(processed_freelancer_entity.computed_skills[2].name, \"Javascript\")\n self.assertEquals(processed_freelancer_entity.computed_skills[3].name, \"React\")\n self.assertEquals(processed_freelancer_entity.computed_skills[4].name, \"Node.js\")\n\n\nclass SerializersTestCase(TestCase):\n \"\"\"\n Tests da camada de serializaรงรฃo\n \"\"\"\n\n def setUp(self):\n \"\"\"\n Montando dependencias para os testes\n \"\"\"\n\n self.skils = [Skill(id=i, name=f\"{i}_skill\", duration_months=i + 10) for i in range(1, 5)]\n self.freelancer_one = ProcessedFreelancer(\n id=1,\n computed_skills=self.skils\n )\n self.serializer = FreelancerSearchedSerializer(self.freelancer_one)\n\n \"\"\"\n Testes de mรฉtodos de freelancer.infra.serializers\n \"\"\"\n\n def test_serialize_object(self):\n payload = self.serializer.serialize_object()\n self.assertIsInstance(payload, dict)\n self.assertEquals(payload['freelance']['id'], 1)\n self.assertEquals(len(payload['freelance']['computedSkills']), 4)\n self.assertEquals(payload['freelance']['computedSkills'][0]['id'], 1)\n self.assertEquals(payload['freelance']['computedSkills'][0]['name'], '1_skill')\n self.assertEquals(payload['freelance']['computedSkills'][0]['durationInMonths'], 11)\n self.assertEquals(payload['freelance']['computedSkills'][2]['id'], 3)\n self.assertEquals(payload['freelance']['computedSkills'][2]['name'], '3_skill')\n self.assertEquals(payload['freelance']['computedSkills'][2]['durationInMonths'], 13)\n\n def test_set_nested_to_dict(self):\n payload = self.serializer.set_nested_to_dict(self.skils)\n self.assertIsInstance(payload, list)\n self.assertEquals(payload[0]['id'], 1)\n self.assertEquals(len(payload), 4)\n\n def test_mount_payload(self):\n payload = self.serializer.mount_payload(self.freelancer_one)\n self.assertIsInstance(payload['freelance'], dict)\n self.assertEquals(len(payload['freelance']['computedSkills']), 4)\n self.assertEquals(payload['freelance']['computedSkills'][0]['id'], 1)\n\n" }, { "alpha_fraction": 0.3658202886581421, "alphanum_fraction": 0.43043941259384155, "avg_line_length": 32.38071060180664, "blob_id": "a19732d574e8d12afff94e5682dea9e6a4467415", "content_id": "b7f78d01d9b438ca321684aea3a3a449d270c253", 
"detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6578, "license_type": "no_license", "max_line_length": 108, "num_lines": 197, "path": "/freelancer/presenters/tests_presenters.py", "repo_name": "johnatasr/CoolFreelancers", "src_encoding": "UTF-8", "text": "from freelancer.presenters.helpers import FreelancerHelpers\nfrom rest_framework import status\nfrom rest_framework.test import APITestCase\nfrom django.test import TestCase\nfrom datetime import datetime\n\n\nclass HelpersTests(TestCase):\n \"\"\"\n Testes das helpers\n \"\"\"\n\n\n def setUp(self) -> None:\n self.helper = FreelancerHelpers()\n self.exps = [\n {\n \"id\": 4,\n \"companyName\": \"Okuneva, Kerluke and Strosin\",\n \"startDate\": datetime.strptime(\"2016-01-01T00:00:00+01:00\".split('T')[0], '%Y-%m-%d'),\n \"endDate\": datetime.strptime(\"2018-05-01T00:00:00+01:00\".split('T')[0], '%Y-%m-%d'),\n \"skills\": [\n {\n \"id\": 241,\n \"name\": \"React\"\n },\n {\n \"id\": 270,\n \"name\": \"Node.js\"\n },\n {\n \"id\": 370,\n \"name\": \"Javascript\"\n }\n ]\n },\n {\n \"id\": 54,\n \"companyName\": \"Hayes - Veum\",\n \"startDate\": datetime.strptime(\"2014-01-01T00:00:00+01:00\".split('T')[0], '%Y-%m-%d'),\n \"endDate\": datetime.strptime(\"2017-01-01T00:00:00+01:00\".split('T')[0], '%Y-%m-%d'),\n \"skills\": [\n {\n \"id\": 470,\n \"name\": \"MySQL\"\n },\n {\n \"id\": 400,\n \"name\": \"Java\"\n },\n {\n \"id\": 370,\n \"name\": \"Javascript\"\n }\n ]\n }\n ]\n\n self.sk = {\n 'id': 1,\n 'name': 'Golang',\n 'total_months': 10,\n 'last_start_date': datetime.strptime(\"2014-01-01T00:00:00+01:00\".split('T')[0], '%Y-%m-%d'),\n 'last_end_date': datetime.strptime(\"2016-09-01T00:00:00+01:00\".split('T')[0], '%Y-%m-%d')\n }\n\n \"\"\"\n Testes de mรฉtodos de freelancer.presenters.helpers\n \"\"\"\n\n def test_get_experiences_by_startdate(self):\n sorted_dates = self.helper.get_experiences_by_startdate(self.exps)\n self.assertEquals(sorted_dates[0]['startDate'], datetime(2014, 1, 1))\n self.assertEquals(sorted_dates[0]['id'], 54)\n\n def test_diff_beetween_dates(self):\n start_date = datetime.strptime(\"2014-01-01T00:00:00+01:00\".split('T')[0], '%Y-%m-%d')\n end_date = datetime.strptime(\"2016-09-01T00:00:00+01:00\".split('T')[0], '%Y-%m-%d')\n result = self.helper.diff_beetween_dates(start_date, end_date)\n self.assertEquals(result, 32)\n\n def test_update_date_experiences(self):\n exp = [{\n \"startDate\": \"2014-01-01T00:00:00+01:00\",\n \"endDate\": \"2016-09-01T00:00:00+01:00\"\n }]\n result = self.helper.update_date_experiences(exp)\n self.assertEquals(result[0]['startDate'], datetime(2014, 1, 1))\n self.assertEquals(result[0]['endDate'], datetime(2016, 9, 1))\n\n def test_update_skill_process(self):\n skill = self.helper.update_skill_process(self.exps[0], self.sk, 14)\n self.assertEquals(skill['total_months'], 16)\n\n def test_set_last_skill_date(self):\n skill = self.helper.set_last_skill_date(self.exps[0], self.sk)\n self.assertEquals(skill['last_start_date'], datetime(2016, 1, 1))\n self.assertEquals(skill['last_end_date'], datetime(2018, 5, 1))\n\n\nclass FreelancerViewSetTestCase(APITestCase):\n \"\"\"\n Testes para API Viewset\n \"\"\"\n\n def setUp(self):\n self.data = {\n \"freelance\": {\n \"id\": 42,\n \"user\": {\n \"firstName\": \"Hunter\",\n \"lastName\": \"Moore\",\n \"jobTitle\": \"Fullstack JS Developer\"\n },\n \"status\": \"new\",\n \"retribution\": 650,\n \"availabilityDate\": \"2018-06-13T00:00:00+01:00\",\n \"professionalExperiences\": [\n {\n 
\"id\": 4,\n \"companyName\": \"Okuneva, Kerluke and Strosin\",\n \"startDate\": \"2016-01-01T00:00:00+01:00\",\n \"endDate\": \"2018-05-01T00:00:00+01:00\",\n \"skills\": [\n {\n \"id\": 241,\n \"name\": \"React\"\n },\n {\n \"id\": 270,\n \"name\": \"Node.js\"\n },\n {\n \"id\": 370,\n \"name\": \"Javascript\"\n }\n ]\n },\n {\n \"id\": 54,\n \"companyName\": \"Hayes - Veum\",\n \"startDate\": \"2014-01-01T00:00:00+01:00\",\n \"endDate\": \"2016-09-01T00:00:00+01:00\",\n \"skills\": [\n {\n \"id\": 470,\n \"name\": \"MySQL\"\n },\n {\n \"id\": 400,\n \"name\": \"Java\"\n },\n {\n \"id\": 370,\n \"name\": \"Javascript\"\n }\n ]\n },\n {\n \"id\": 80,\n \"companyName\": \"Harber, Kirlin and Thompson\",\n \"startDate\": \"2013-05-01T00:00:00+01:00\",\n \"endDate\": \"2014-07-01T00:00:00+01:00\",\n \"skills\": [\n {\n \"id\": 370,\n \"name\": \"Javascript\"\n },\n {\n \"id\": 400,\n \"name\": \"Java\"\n }\n ]\n }\n ]\n }\n }\n\n\n def test_get_freelance_empty_data(self):\n \"\"\"\n Teste de requisicao sem dados\n \"\"\"\n\n response = self.client.post('/freelancers/send-freelance')\n self.assertEqual(response.status_code, status.HTTP_422_UNPROCESSABLE_ENTITY)\n\n def test_get_freelance_whith_data(self):\n \"\"\"\n Teste de requisicao com dados\n \"\"\"\n\n response = self.client.post('/freelancers/send-freelance', data=self.data, format='json')\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(response.data['freelance']['id'], 42)\n processed_skills_list = response.data['freelance']['computedSkills']\n self.assertEquals(len(processed_skills_list), 5)\n\n" }, { "alpha_fraction": 0.6230108141899109, "alphanum_fraction": 0.644228994846344, "avg_line_length": 35.92567443847656, "blob_id": "10ae3a7e7924c90219c273b292905addd47cd64a", "content_id": "ee36ececa0fcea041d035870aaf43dfea328f762", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5467, "license_type": "no_license", "max_line_length": 103, "num_lines": 148, "path": "/freelancer/domain/tests_domain.py", "repo_name": "johnatasr/CoolFreelancers", "src_encoding": "UTF-8", "text": "from freelancer.domain.entities import Freelancer, Skill, ProcessedFreelancer\nfrom django.test import TestCase\nfrom typing import List\nfrom datetime import datetime\n\n\nclass FreelancerEnttityTestCase(TestCase):\n \"\"\"\n Testes da Entidade Freelancer\n \"\"\"\n\n def setUp(self):\n self.freelancer_one = Freelancer(\n id=1,\n user= { \"firstName\": \"teste1\", \"lastName\": \"1last\", \"jobTitle\": \"Teste JS Developer\" },\n status=\"new\",\n retribution=100,\n availability_date=datetime.strptime(\"2020-06-10T00:00:00+01:00\".split('T')[0], '%Y-%m-%d'),\n professional_experiences=[]\n )\n\n self.freelancer_two = Freelancer(\n id=2,\n user={\"firstName\": \"teste2\", \"lastName\": \"2last\", \"jobTitle\": \"Teste Python Developer\"},\n status=\"new\",\n retribution=200,\n availability_date=datetime.strptime(\"2021-06-10T00:00:00+01:00\".split('T')[0], '%Y-%m-%d'),\n professional_experiences=[]\n )\n\n def test_isistance_freelancer(self):\n self.assertIsInstance(self.freelancer_one, object)\n self.assertIsInstance(self.freelancer_two, object)\n\n def test_atributes_values_freelancer(self):\n user1 = { \"firstName\": \"teste1\", \"lastName\": \"1last\", \"jobTitle\": \"Teste JS Developer\" }\n user2 = {\"firstName\": \"teste2\", \"lastName\": \"2last\", \"jobTitle\": \"Teste Python Developer\"}\n\n self.assertEquals(self.freelancer_one.id, 1)\n 
self.assertEquals(self.freelancer_one.user[0]['firstName'], user1['firstName'])\n self.assertEquals(self.freelancer_one.retribution[0], 100)\n self.assertEquals(self.freelancer_one.availability_date[0], datetime(2020, 6, 10))\n self.assertEquals(self.freelancer_one.professional_experiences, [])\n\n self.assertEquals(self.freelancer_two.id, 2)\n self.assertEquals(self.freelancer_two.user[0]['firstName'], user2['firstName'])\n self.assertEquals(self.freelancer_two.retribution[0], 200)\n self.assertEquals(self.freelancer_two.availability_date[0], datetime(2021, 6, 10))\n self.assertEquals(self.freelancer_two.professional_experiences, [])\n\n def test_atributes_type_freelancer(self):\n self.assertIsInstance(self.freelancer_one.id, int)\n self.assertEquals(type(self.freelancer_one.availability_date[0]), datetime)\n self.assertIsInstance(self.freelancer_one.professional_experiences, list)\n\n self.assertIsInstance(self.freelancer_two.id, int)\n self.assertEquals(type(self.freelancer_one.availability_date[0]), datetime)\n self.assertIsInstance(self.freelancer_one.professional_experiences, list)\n\n\nclass SkillEnttityTestCase(TestCase):\n \"\"\"\n Testes da entidade Skill\n \"\"\"\n def setUp(self):\n self.skill_one = Skill(\n id=1,\n name='React',\n duration_months=300,\n )\n\n self.skill_two = Skill(\n id=2,\n name='Django',\n duration_months=400,\n )\n\n def test_isistance_skill(self):\n self.assertIsInstance(self.skill_one, object)\n self.assertIsInstance(self.skill_two, object)\n\n\n def test_atributes_values_skill(self):\n self.assertEquals(self.skill_one.id, 1)\n self.assertEquals(self.skill_one.name, 'React')\n self.assertEquals(self.skill_one.duration_months, 300)\n\n self.assertEquals(self.skill_two.id, 2)\n self.assertEquals(self.skill_two.name, 'Django')\n self.assertEquals(self.skill_two.duration_months, 400)\n\n def test_atributes_type_skill(self):\n self.assertIsInstance(self.skill_one.id, int)\n self.assertIsInstance(self.skill_one.name, str)\n self.assertIsInstance(self.skill_one.duration_months, int)\n\n self.assertIsInstance(self.skill_two.id, int)\n self.assertIsInstance(self.skill_two.name, str)\n self.assertIsInstance(self.skill_two.duration_months, int)\n\n\nclass ProcessedFreelancerEnttityTestCase(TestCase):\n \"\"\"\n Testes da entidade ProcessedFreelancer\n \"\"\"\n\n def setUp(self):\n self.skill_one = Skill(\n id=1,\n name='React',\n duration_months=300,\n )\n\n self.skill_two = Skill(\n id=2,\n name='Django',\n duration_months=400,\n )\n\n self.proc_freelancer_one = ProcessedFreelancer(\n id=1,\n computed_skills=[self.skill_one]\n )\n\n self.proc_freelancer_two = ProcessedFreelancer(\n id=2,\n computed_skills=[self.skill_two]\n )\n\n def test_isistance_skill(self):\n self.assertIsInstance(self.proc_freelancer_one, object)\n self.assertIsInstance(self.proc_freelancer_two, object)\n\n def test_atributes_values_skill(self):\n self.assertEquals(self.proc_freelancer_one.id, 1)\n for skill in self.proc_freelancer_one.computed_skills:\n self.assertEquals(skill.name, 'React')\n\n self.assertEquals(self.proc_freelancer_two.id, 2)\n for skill in self.proc_freelancer_two.computed_skills:\n self.assertEquals(skill.name, 'Django')\n\n def test_atributes_type_skill(self):\n self.assertIsInstance(self.proc_freelancer_one.id, int)\n self.assertIsInstance(self.proc_freelancer_one.computed_skills, list)\n\n self.assertIsInstance(self.proc_freelancer_two.id, int)\n self.assertIsInstance(self.proc_freelancer_two.computed_skills, list)\n\n\n" }, { "alpha_fraction": 0.5106382966041565, 
"alphanum_fraction": 0.5574468374252319, "avg_line_length": 15.785714149475098, "blob_id": "0c4921d42f448f7e027a14967656dbba9175d0fb", "content_id": "1d47c4dad9d4225c8359fbf9fc4e7f8133d0673d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "YAML", "length_bytes": 235, "license_type": "no_license", "max_line_length": 30, "num_lines": 14, "path": "/docker-compose.yml", "repo_name": "johnatasr/CoolFreelancers", "src_encoding": "UTF-8", "text": "version: '3.1'\n\nservices:\n backend:\n build:\n dockerfile: Dockerfile\n context: .\n working_dir: /var/www/app\n ports:\n - \"8000:8000\"\n volumes:\n - .:/var/www/app/backend\n environment:\n IN_DOCKER: 1\n" }, { "alpha_fraction": 0.6947227716445923, "alphanum_fraction": 0.6947227716445923, "avg_line_length": 30.87234115600586, "blob_id": "e902e4565dc8064f6f5c94f0990c67c43c027824", "content_id": "69efab6e999b302f146509dc004af7034fe49f74", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1505, "license_type": "no_license", "max_line_length": 83, "num_lines": 47, "path": "/freelancer/presenters/interfaces.py", "repo_name": "johnatasr/CoolFreelancers", "src_encoding": "UTF-8", "text": "from typing import Type, List, Any\nfrom abc import ABC, abstractmethod\nfrom freelancer.domain.entities import Skill, Freelancer\n\n\nclass IValidator(ABC):\n \"\"\" Interface para o Validator\"\"\"\n\n @abstractmethod\n def valid(self, value: bool) -> bool:\n raise Exception(\"Validator deve implementar o mรฉtodo: valid\")\n\n @abstractmethod\n def is_empty_payload(self) -> bool:\n raise Exception(\"Validator deve implementar o mรฉtodo: is_empty_payload\")\n\n @abstractmethod\n def validate_payload(self) -> (bool, dict):\n raise Exception(\"Validator deve implementar o mรฉtodo: validate_payload\")\n\n\nclass IIterator(ABC):\n \"\"\" Interface para o Interator \"\"\"\n\n @abstractmethod\n def set_params(self, *args, **kwargs):\n raise Exception(\"Interator deve implementar o mรฉtodo: set_params\")\n\n @abstractmethod\n def execute(self, *args, **kwargs) -> Any:\n raise Exception(\"Interator deve implementar o mรฉtodo: execute\")\n\n\nclass ISerializer(ABC):\n \"\"\" Interface para o Serializer \"\"\"\n\n @abstractmethod\n def serialize_object(self) -> dict:\n raise Exception(\"Serializer deve implementar o mรฉtodo: serialize_object\")\n\n @abstractmethod\n def set_nested_to_dict(self, skills: List[Skill]) -> list:\n raise Exception(\"Serializer deve implementar o mรฉtodo: set_nested_to_dict\")\n\n @abstractmethod\n def mount_payload(self, skills: Type[Freelancer]) -> dict:\n raise Exception(\"Serializer deve implementar o mรฉtodo: mount_payload\")" }, { "alpha_fraction": 0.6780538558959961, "alphanum_fraction": 0.6904761791229248, "avg_line_length": 32.17241287231445, "blob_id": "1d4363454365248a84893be81388775c0c66888e", "content_id": "90293f9a05ccdd6ee38a428e59c277e3d8b42920", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 966, "license_type": "no_license", "max_line_length": 97, "num_lines": 29, "path": "/freelancer/presenters/views.py", "repo_name": "johnatasr/CoolFreelancers", "src_encoding": "UTF-8", "text": "# Create your views here.\nfrom rest_framework import viewsets\nfrom rest_framework.decorators import action\nfrom freelancer.presenters.interators import FreelancerInterator\nfrom rest_framework.status import (\n HTTP_200_OK,\n HTTP_422_UNPROCESSABLE_ENTITY\n)\nfrom rest_framework.response import 
Response\n\n\n# Register your viewsets here.\nclass FreelancerViewSet(viewsets.ModelViewSet):\n \"\"\"\n API usando Django RestFramework\n \"\"\"\n interator = FreelancerInterator()\n http_method_names = ['get', 'post']\n\n @action(methods=['POST'], detail=False, url_path='send-freelance', url_name='send-freelance')\n def send_freelancer(self, request):\n \"\"\"\n Enpoint principal\n \"\"\"\n try:\n freelancer = self.interator.set_params(request.data).execute()\n return Response(freelancer, status=HTTP_200_OK)\n except Exception as error:\n return Response(status=HTTP_422_UNPROCESSABLE_ENTITY)\n\n\n\n\n" }, { "alpha_fraction": 0.6752674579620361, "alphanum_fraction": 0.6765261292457581, "avg_line_length": 38.650001525878906, "blob_id": "bd5bf6740e50f185bc755bab091031a4c4b361f7", "content_id": "b8af92856a4e905d786e88425cf90f5f11ad3988", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1591, "license_type": "no_license", "max_line_length": 123, "num_lines": 40, "path": "/freelancer/presenters/interators.py", "repo_name": "johnatasr/CoolFreelancers", "src_encoding": "UTF-8", "text": "from freelancer.presenters.interfaces import IIterator\nfrom freelancer.infra.serializers import FreelancerSearchedSerializer\nfrom freelancer.presenters.validators import FreelancerValidator\nfrom freelancer.infra.repositories import FreelancerRepo\nfrom freelancer.presenters.exceptions import InteratorException\n\n\nclass FreelancerInterator(IIterator):\n \"\"\"\n No interator ocorre a interaรงรฃo com grande parte dos modulos e libs\n \"\"\"\n def __init__(self):\n \"\"\"\n Inicializa a injecao de dependencia\n \"\"\"\n self.validator: object = FreelancerValidator\n self.repo: object = FreelancerRepo\n self.serializer: object = FreelancerSearchedSerializer\n\n def set_params(self, frelancer_payload: (dict, list)):\n \"\"\"\n Configura os paramentros\n \"\"\"\n self.payload = frelancer_payload\n return self\n\n def execute(self):\n \"\"\"\n Executa o fluxo a qual o interator foi designado\n \"\"\"\n try:\n valided_payload = self.validator(self.payload).validate_payload()\n\n if valided_payload[0]:\n created_freelancer_entity: object = self.repo().create_freelancer_entity(valided_payload[1])\n precessed_freelancer_entity: object = self.repo().process_freelancer_experiences(created_freelancer_entity)\n serialized_freelancer = self.serializer(precessed_freelancer_entity).serialize_object()\n return serialized_freelancer\n except InteratorException as error:\n raise InteratorException(error)\n\n\n\n" }, { "alpha_fraction": 0.6187929511070251, "alphanum_fraction": 0.6187929511070251, "avg_line_length": 22.81818199157715, "blob_id": "0b94051f2aa42e399fde35c4e3c224ede398a376", "content_id": "747d6be6964eeed627e8a7abbefbc840e1137040", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1310, "license_type": "no_license", "max_line_length": 105, "num_lines": 55, "path": "/freelancer/presenters/exceptions.py", "repo_name": "johnatasr/CoolFreelancers", "src_encoding": "UTF-8", "text": "class CoolFreelaException(Exception):\n \"\"\"\n Exception base da aplicacao\n \"\"\"\n def __init__(self, source, code, message):\n super().__init__(message)\n self._source = source\n self._code = code\n\n @property\n def source(self):\n return self._source\n\n @property\n def code(self):\n return self._code\n\n\nclass InvalidPayloadException(CoolFreelaException):\n \"\"\"\n Exception para o modula de validacao\n \"\"\"\n 
pass\n\n\nclass ConflictException(CoolFreelaException):\n \"\"\"\n Exception para conflitos gerais\n \"\"\"\n pass\n\n\nclass InteratorException(CoolFreelaException):\n \"\"\"\n Exception para o modula de interacao\n \"\"\"\n def __init__(self, process):\n super().__init__(source='interator', code='error', message=f\"Erro em : {process}\")\n\n\nclass EntityDoesNotExistException(CoolFreelaException):\n \"\"\"\n Exception para o modula de entidades\n \"\"\"\n\n def __init__(self, entity):\n super().__init__(source='entity', code='not_found', message=f'Entidade: {entity} nรฃo encotrada ')\n\n\nclass NoPermissionException(CoolFreelaException):\n \"\"\"\n Exception para o modula de seguranca\n \"\"\"\n def __init__(self):\n super().__init__(source='permission', code='denied', message='Permission denied')" }, { "alpha_fraction": 0.36977842450141907, "alphanum_fraction": 0.4442426562309265, "avg_line_length": 18.798561096191406, "blob_id": "c0a7c1da0c214a1dcd2409f541c71a0974c9004b", "content_id": "2e77f5f71e5ab0ccf6fa6e9ebfe590d71f74904f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2759, "license_type": "no_license", "max_line_length": 107, "num_lines": 139, "path": "/readme.md", "repo_name": "johnatasr/CoolFreelancers", "src_encoding": "UTF-8", "text": "# Coolfrela\n\nPlataforma de busca de freelancers via Rest.\n\n## Requisitos\n\n* Docker\n* Python 3.7 >\n\n## Iniciando\n\nPassos para configurar o projeto com docker:\n\n1. `cd` na pasta do projeto\n2. `docker-compose up --build`\n\nCaso nรฃo deseje o uso de docker:\n1. `Inicie um virtual env`\n2. `python manage.py runserver`\n\nO projeto por padrรฃo estarรก em localhost:8000 pelo enpoint http://localhost:8000/freelancers/send-freelance\n\n## Como usar\n\n```\ncurl --request POST \\\n --url http://localhost:8000/freelancers/send-freelance \\\n --header 'Content-Type: application/json' \\\n --data '{\n \"freelance\": {\n \"id\": 42,\n \"user\": {\n \"firstName\": \"Hunter\",\n \"lastName\": \"Moore\",\n \"jobTitle\": \"Fullstack JS Developer\"\n },\n \"status\": \"new\",\n \"retribution\": 650,\n \"availabilityDate\": \"2018-06-13T00:00:00+01:00\",\n \"professionalExperiences\": [\n {\n \"id\": 4,\n \"companyName\": \"Okuneva, Kerluke and Strosin\",\n \"startDate\": \"2016-01-01T00:00:00+01:00\",\n \"endDate\": \"2018-05-01T00:00:00+01:00\",\n \"skills\": [\n {\n \"id\": 241,\n \"name\": \"React\"\n },\n {\n \"id\": 270,\n \"name\": \"Node.js\"\n },\n {\n \"id\": 370,\n \"name\": \"Javascript\"\n }\n ]\n },\n {\n \"id\": 54,\n \"companyName\": \"Hayes - Veum\",\n \"startDate\": \"2014-01-01T00:00:00+01:00\",\n \"endDate\": \"2016-09-01T00:00:00+01:00\",\n \"skills\": [\n {\n \"id\": 470,\n \"name\": \"MySQL\"\n },\n {\n \"id\": 400,\n \"name\": \"Java\"\n },\n {\n \"id\": 370,\n \"name\": \"Javascript\"\n }\n ]\n },\n {\n \"id\": 80,\n \"companyName\": \"Harber, Kirlin and Thompson\",\n \"startDate\": \"2013-05-01T00:00:00+01:00\",\n \"endDate\": \"2014-07-01T00:00:00+01:00\",\n \"skills\": [\n {\n \"id\": 370,\n \"name\": \"Javascript\"\n },\n {\n \"id\": 400,\n \"name\": \"Java\"\n }\n ]\n }\n ]\n }\n}'\n```\n\n- Envie o cURL acima por uma requisiรงรฃo POST \n- O retorno virรก da seguinte forma: \n\n```\n{\n \"freelance\": {\n \"id\": 42,\n \"computedSkills\": [\n {\n \"id\": 241,\n \"name\": \"React\",\n \"durationInMonths\": 28\n },\n {\n \"id\": 270,\n \"name\": \"Node.js\",\n \"durationInMonths\": 28\n },\n {\n \"id\": 370,\n \"name\": \"Javascript\",\n \"durationInMonths\": 
60\n },\n {\n \"id\": 400,\n \"name\": \"Java\",\n \"durationInMonths\": 40\n },\n {\n \"id\": 470,\n \"name\": \"MySQL\",\n \"durationInMonths\": 32\n }\n ]\n }\n}\n\n```\n\n" }, { "alpha_fraction": 0.761904776096344, "alphanum_fraction": 0.761904776096344, "avg_line_length": 21, "blob_id": "66abd35e994b5a714d42e33865f91f923d765d26", "content_id": "1b6b154cae77accc752a9411691fe0ef482fbd47", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 21, "license_type": "no_license", "max_line_length": 21, "num_lines": 1, "path": "/coolfreela/__init__.py", "repo_name": "johnatasr/CoolFreelancers", "src_encoding": "UTF-8", "text": "#from .api import api" }, { "alpha_fraction": 0.5444333553314209, "alphanum_fraction": 0.5454318523406982, "avg_line_length": 46.67856979370117, "blob_id": "fe0de040e8bd52de4d1329c8f07aa0513e36a721", "content_id": "31a658998de4be5cb0e950e1d64c5a5d276dcd3c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4020, "license_type": "no_license", "max_line_length": 120, "num_lines": 84, "path": "/freelancer/presenters/validators.py", "repo_name": "johnatasr/CoolFreelancers", "src_encoding": "UTF-8", "text": "from .interfaces import IValidator\nfrom .exceptions import InvalidPayloadException\nimport json\n\n\nclass FreelancerValidator(IValidator):\n \"\"\"\n Classe responsavel pela validacao do payload enviado na requisicao\n \"\"\"\n def __init__(self, payload):\n self.payload_raw = payload\n\n @staticmethod\n def valid(value: bool) -> bool:\n return value\n\n def check_experiences(self, professional_exps: list):\n \"\"\"\n Verifica as lista de experiencias\n \"\"\"\n for experiance in professional_exps:\n if 'startDate' not in experiance:\n raise InvalidPayloadException(source='validator', code='field_not_exists',\n message='Campo obrigatรณrio: startDate')\n if 'endDate' not in experiance:\n raise InvalidPayloadException(source='validator', code='field_not_exists',\n message='Campo obrigatรณrio: endDate')\n if 'skills' not in experiance:\n raise InvalidPayloadException(source='validator', code='field_not_exists',\n message='Campo obrigatรณrio: skills')\n if len(experiance['skills']) <= 0:\n raise InvalidPayloadException(source='validator', code='empty_field',\n message='Campo obrigatรณrio nรฃo pode estar vazio: skills')\n for skill in experiance['skills']:\n if 'name' not in skill:\n raise InvalidPayloadException(source='validator', code='field_not_exists',\n message='Campo obrigatรณrio: skills')\n return True\n\n def is_empty_payload(self) -> bool:\n \"\"\"\n Verifica o tipo do payload\n \"\"\"\n if isinstance(self.payload_raw, (dict, object, list, bytes)):\n return True\n else:\n raise InvalidPayloadException(source='validator', code='empty_payload',\n message='Payload de requisiรงรฃo nรฃo pode ser vazio')\n\n def validate_payload(self) -> list:\n \"\"\"\n Validacao inicial do payload\n \"\"\"\n try:\n self.is_empty_payload()\n\n if 'freelance' not in self.payload_raw:\n raise InvalidPayloadException(source='validator', code='field_not_exists',\n message='Campo obrigatรณrio: freelance')\n payload = self.payload_raw['freelance']\n\n if 'id' not in payload:\n raise InvalidPayloadException(source='validator', code='field_not_exists',\n message='Campo obrigatรณrio: user')\n if 'user' not in payload:\n raise InvalidPayloadException(source='validator', code='field_not_exists',\n message='Campo obrigatรณrio: user')\n if 'professionalExperiences' not in payload:\n raise 
\n" }, { "alpha_fraction": 0.5809061527252197, "alphanum_fraction": 0.5809061527252197, "avg_line_length": 20.55813980102539, "blob_id": "7dcae74223fc8a9bbc96b3ee10e79ec76f75e346", "content_id": "60655937a09b85d106c60bbb8a1a792ee31f6b5a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1854, "license_type": "no_license", "max_line_length": 65, "num_lines": 86, "path": "/freelancer/domain/entities.py", "repo_name": "johnatasr/CoolFreelancers", "src_encoding": "UTF-8", "text": "from typing import List\nimport datetime\n\n\nclass Freelancer(object):\n    \"\"\" Freelancer Entity \"\"\"\n\n    def __init__(self,\n                 id: int,\n                 user: dict,\n                 status: str,\n                 retribution: int,\n                 availability_date: datetime,\n                 professional_experiences: list):\n        self._id = id\n        self._user = user\n        self._status = status\n        self._retribution = retribution\n        self._availability_date = availability_date\n        self._professional_experiences = professional_experiences\n\n    @property\n    def id(self):\n        return self._id\n\n    @property\n    def user(self):\n        return self._user\n\n    @property\n    def status(self):\n        return self._status\n\n    @property\n    def retribution(self):\n        return self._retribution\n\n    @property\n    def availability_date(self):\n        return self._availability_date\n\n    @property\n    def professional_experiences(self):\n        return self._professional_experiences\n\n\nclass Skill(object):\n    \"\"\" Skill Entity \"\"\"\n\n    def __init__(self, id: int, name: str, duration_months: int):\n        self._id = id\n        self._name = name\n        self._duration_months = duration_months\n\n    @property\n    def id(self):\n        return self._id\n\n    @property\n    def name(self):\n        return self._name\n\n    @property\n    def duration_months(self):\n        return self._duration_months\n\n\nclass ProcessedFreelancer(object):\n    \"\"\" Processed Freelancer Entity \"\"\"\n\n    def __init__(self, id: int, computed_skills: List[Skill]):\n        self._id = id\n        self._computed_skills = computed_skills\n\n    @property\n    def id(self):\n        return self._id\n\n    @property\n    def computed_skills(self):\n        return self._computed_skills\n" }, 
{ "alpha_fraction": 0.523681640625, "alphanum_fraction": 0.523681640625, "avg_line_length": 40.775508880615234, "blob_id": "fce73bc07d5f9bb644fab66b005729bbc25d9efe", "content_id": "68e9eef27efb57c438d2e50e98b5ecd68e4d1438", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4102, "license_type": "no_license", "max_line_length": 110, "num_lines": 98, "path": "/freelancer/infra/repositories.py", "repo_name": "johnatasr/CoolFreelancers", "src_encoding": "UTF-8", "text": "from freelancer.presenters.exceptions import ConflictException\nfrom freelancer.presenters.helpers import FreelancerHelpers\nfrom freelancer.domain.entities import (\n    Freelancer,\n    Skill,\n    ProcessedFreelancer\n)\nfrom typing import List\n\n\nclass FreelancerRepo:\n    \"\"\"\n    Layer where the interactor works with the entities and models;\n    most of the business rules also live here\n    \"\"\"\n\n    helper = FreelancerHelpers()\n\n    def create_skill_entity_by_list(self, skills: list):\n        \"\"\"\n        Builds the final list of Skill entities\n        \"\"\"\n        try:\n            list_of_skills: List[Skill] = []\n            for skill in skills:\n                list_of_skills.append(\n                    Skill(\n                        id=skill['id'],\n                        name=skill['name'],\n                        duration_months=skill['total_months'])\n                )\n            return list_of_skills\n        except Exception as err:\n            raise ConflictException(source='repository', code='conflict_in_create',\n                                    message=f'Não foi possível criar a skill, erro: {err}')\n\n    def create_searched_freelancer_entity(self, freelancer_id: int, skills: list):\n        \"\"\"\n        Builds the final entity for serialization\n        \"\"\"\n        return ProcessedFreelancer(\n            id=freelancer_id,\n            computed_skills=skills\n        )\n\n    def create_freelancer_entity(self, payload: dict):\n        \"\"\"\n        Builds the base Freelancer entity\n        \"\"\"\n        return Freelancer(\n            id=payload['id'],\n            user=payload['user'],\n            status=payload['status'],\n            retribution=payload['retribution'],\n            availability_date=payload['availabilityDate'],\n            professional_experiences=payload['professionalExperiences']\n        )\n\n    def process_freelancer_experiences(self, freelancer: object):\n        \"\"\"\n        Computes the search result with the total months for each skill\n        \"\"\"\n        try:\n            skills: list = []\n            empty_skills_list: bool = True\n            sorted_experiences = self.helper.get_experiences_by_startdate(freelancer.professional_experiences)\n            experiences = self.helper.update_date_experiences(sorted_experiences)\n            for experience in experiences:\n                months: int = self.helper.diff_beetween_dates(\n                    experience['startDate'], experience['endDate'])\n                for skill in experience['skills']:\n                    if empty_skills_list:\n                        skills.append(self.helper.create_skill_dict(skill, months, experience))\n                        empty_skills_list = False\n                    else:\n                        skill_update: bool = False\n                        for sk in skills:\n                            if sk['id'] == skill['id']:\n                                if sk['last_end_date'] < experience['startDate']:\n                                    sk['total_months'] += months\n                                else:\n                                    sk = self.helper.update_skill_process(\n                                        experience=experience,\n                                        sk=sk,\n                                        months=months\n                                    )\n                                sk = self.helper.set_last_skill_date(experience=experience, sk=sk)\n                                skill_update = True\n\n                        if not skill_update:\n                            skills.append(self.helper.create_skill_dict(skill, months, experience))\n\n            skills: list = self.create_skill_entity_by_list(skills)\n            return self.create_searched_freelancer_entity(freelancer_id=freelancer.id, skills=skills)\n\n        except Exception as err:\n            raise ConflictException(source='repository', code='conflict_in_create',\n                                    message=f'Não foi possível criar a busca, erro: {err}')\n
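\n\n# Usage sketch (illustrative only): the interactor layer is expected to wire the\n# pieces together roughly like this:\n#\n#     repo = FreelancerRepo()\n#     freelancer = repo.create_freelancer_entity(payload)\n#     processed = repo.process_freelancer_experiences(freelancer)\n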
\n" }, { "alpha_fraction": 0.6719367504119873, "alphanum_fraction": 0.6719367504119873, "avg_line_length": 20.08333396911621, "blob_id": "abd601013e2252f4bfdea2f42c2bf93546e414c9", "content_id": "caa45e46c44e04eaf5620a3dc162e6bd7faea650", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 257, "license_type": "no_license", "max_line_length": 69, "num_lines": 12, "path": "/freelancer/domain/models.py", "repo_name": "johnatasr/CoolFreelancers", "src_encoding": "UTF-8", "text": "from django.db import models\n\n# Create your models here.\n\nclass Freelancer(models.Model):\n    \"\"\"\n    The application's model layer would be implemented here:\n    all relationships and database representations.\n    \"\"\"\n\n    ...\n" }, { "alpha_fraction": 0.6510500907897949, "alphanum_fraction": 0.6704362034797668, "avg_line_length": 25.70833396911621, "blob_id": "027e2200eba543fa24783ee1dd29b48932780579", "content_id": "1f69124a8e7039b12e9f3775009f9f906a65eb99", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Dockerfile", "length_bytes": 619, "license_type": "no_license", "max_line_length": 77, "num_lines": 24, "path": "/Dockerfile", "repo_name": "johnatasr/CoolFreelancers", "src_encoding": "UTF-8", "text": "FROM python:3.8-slim\n\nENV PYTHONUNBUFFERED 1\n\nRUN groupadd user && useradd --create-home --home-dir /home/user -g user user\nWORKDIR /var/www/app\n\nRUN apt-get update && apt-get install gcc build-essential libpq-dev -y && \\\n    python3 -m pip install --no-cache-dir pip-tools\n\nCOPY ./requirements.txt /var/www/app\n\nRUN pip install -r requirements.txt\n\nRUN apt-get purge libpq-dev -y && apt-get autoremove -y && \\\n    rm -rf /var/lib/apt/lists/* /var/cache/apt/*\n\nCOPY . /var/www/app\n\nUSER user\n\nCMD [\"sh\",\"-c\", \\\n    \" python manage.py test && \\\n    gunicorn coolfreela.wsgi --log-file - -b 0.0.0.0:8000 --reload\"]\n\n\n" }, { "alpha_fraction": 0.5551436543464661, "alphanum_fraction": 0.5569972395896912, "avg_line_length": 32.734375, "blob_id": "1bac6437dd128bbdeb6e7eb3df7fbada6e7f2178", "content_id": "7986b256b1f7f8830448a4080b206db7ca788012", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2159, "license_type": "no_license", "max_line_length": 99, "num_lines": 64, "path": "/freelancer/presenters/helpers.py", "repo_name": "johnatasr/CoolFreelancers", "src_encoding": "UTF-8", "text": "from datetime import datetime\nfrom dateutil import relativedelta\n\n\nclass FreelancerHelpers:\n    @staticmethod\n    def get_experiences_by_startdate(experiences: list):\n        \"\"\"\n        Sorts the experiences by start date\n        \"\"\"\n        try:\n            return sorted(experiences, key=lambda experience: experience[\"startDate\"])\n        except Exception:\n            raise Exception(\"Error when sorting the list\")\n\n    @staticmethod\n    def diff_beetween_dates(start, end):\n        \"\"\"\n        Gets the number of whole months in a date range\n        \"\"\"\n        result = relativedelta.relativedelta(end, start)\n        return (result.years * 12) + result.months\n\n    @staticmethod\n    def update_date_experiences(experiences):\n        \"\"\"\n        Parses the ISO date strings into datetime objects\n        \"\"\"\n        for exp in experiences:\n            exp['startDate'] = datetime.strptime(exp['startDate'].split('T')[0], '%Y-%m-%d')\n            exp[\"endDate\"] = datetime.strptime(exp[\"endDate\"].split('T')[0], '%Y-%m-%d')\n        return experiences\n\n    @staticmethod\n    def create_skill_dict(skill: dict, months: int, experience: object):\n        \"\"\"\n        Builds a temporary dict for the Skill entity\n        \"\"\"\n        return {\n            'id': skill['id'],\n            'name': skill['name'],\n            'total_months': months,\n            'last_start_date': experience['startDate'],\n            'last_end_date': experience['endDate']\n        }\n\n    def update_skill_process(self, experience: object, sk: dict, months: int):\n        \"\"\"\n        Updates a skill's total months when experiences overlap\n        \"\"\"\n        diff: int = self.diff_beetween_dates(experience['startDate'],\n                                             sk['last_end_date'])\n        sk['total_months'] += (months - diff)\n        return sk\n\n    @staticmethod\n    def set_last_skill_date(experience: object, sk: dict):\n        \"\"\"\n        Stores the latest experience dates on the skill\n        \"\"\"\n        sk['last_start_date'] = experience[\"startDate\"]\n        sk['last_end_date'] = experience[\"endDate\"]\n
return sk" }, { "alpha_fraction": 0.7890173196792603, "alphanum_fraction": 0.7890173196792603, "avg_line_length": 25.615385055541992, "blob_id": "f4db89f4f9b2c0ced2ebe55f7422b49b38a28c02", "content_id": "13ca9190a8aa02a36025eba743972182b5315f29", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 346, "license_type": "no_license", "max_line_length": 73, "num_lines": 13, "path": "/freelancer/urls.py", "repo_name": "johnatasr/CoolFreelancers", "src_encoding": "UTF-8", "text": "from django.urls import path, include\nfrom rest_framework.routers import DefaultRouter\nfrom freelancer.presenters.views import FreelancerViewSet\n\nrouter = DefaultRouter(trailing_slash=False)\n\nrouter.register('freelancers', FreelancerViewSet, basename='freelancers')\n\napp_name = 'freelancer'\n\nurlpatterns = [\n path('', include(router.urls)),\n]\n" }, { "alpha_fraction": 0.8985507488250732, "alphanum_fraction": 0.8985507488250732, "avg_line_length": 69, "blob_id": "f2936621c5cae9ade157d32b6115ecf1846b2ae5", "content_id": "289cb2f9c9a3fd78917aea11ebfca88f1d317d36", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 69, "license_type": "no_license", "max_line_length": 69, "num_lines": 1, "path": "/freelancer/presenters/__init__.py", "repo_name": "johnatasr/CoolFreelancers", "src_encoding": "UTF-8", "text": "from .tests_presenters import HelpersTests, FreelancerViewSetTestCase" }, { "alpha_fraction": 0.890625, "alphanum_fraction": 0.890625, "avg_line_length": 64, "blob_id": "1f4334b3d5200cd23fb29a677e3a858cf1ce24bf", "content_id": "ecaa037190d22eec19b9815c30af817850a23891", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 64, "license_type": "no_license", "max_line_length": 64, "num_lines": 1, "path": "/freelancer/infra/__init__.py", "repo_name": "johnatasr/CoolFreelancers", "src_encoding": "UTF-8", "text": "from .tests_infra import RepositoryTestCase, SerializersTestCase" }, { "alpha_fraction": 0.5667034387588501, "alphanum_fraction": 0.5667034387588501, "avg_line_length": 29.233333587646484, "blob_id": "3a7990c26876c8faf937cd81521d15513a13e646", "content_id": "623629f95f5afaed6c1ebda987bcd90c3b385d77", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 907, "license_type": "no_license", "max_line_length": 84, "num_lines": 30, "path": "/freelancer/infra/serializers.py", "repo_name": "johnatasr/CoolFreelancers", "src_encoding": "UTF-8", "text": "from freelancer.presenters.interfaces import ISerializer\n\n\nclass FreelancerSearchedSerializer(ISerializer):\n def __init__(self, freelancer: object):\n self.freelancer = freelancer\n\n def serialize_object(self):\n return self.mount_payload(self.freelancer)\n\n def set_nested_to_dict(self, skills: list):\n list_skills = []\n for skill in skills:\n sk = {\n \"id\": skill.id,\n \"name\": skill.name,\n \"durationInMonths\": skill.duration_months\n }\n list_skills.append(sk)\n list_skills = sorted(list_skills, key=lambda sk: sk[\"id\"])\n\n return list_skills\n\n def mount_payload(self, freelance: object):\n return {\n 'freelance': {\n 'id': freelance.id,\n 'computedSkills': self.set_nested_to_dict(freelance.computed_skills)\n }\n }\n" } ]
21
antorsae/didi-competition
https://github.com/antorsae/didi-competition
44137428fc295e21481c4102bca079029ddcc74e
917aa7e01d51311ef8f9fc1f8d45aa80ddf7f19d
70ed952c19a478a28f239fd9f47021bd4c43af77
refs/heads/master
2021-06-17T20:21:56.423494
2017-07-02T20:44:13
2017-07-02T20:44:13
89,953,295
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.5185185074806213, "alphanum_fraction": 0.5272042155265808, "avg_line_length": 47.816001892089844, "blob_id": "39761fd2e17e1dde0dc1d6346facf121eec31d8e", "content_id": "783e396063c0a9d7a10a19649db6ae35cb8b9ae1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6102, "license_type": "no_license", "max_line_length": 148, "num_lines": 125, "path": "/tracklets/python/tracklet_view.py", "repo_name": "antorsae/didi-competition", "src_encoding": "UTF-8", "text": "import argparse\nimport numpy as np\n\nfrom diditracklet import *\nimport pyqtgraph as pg\nfrom pyqtgraph.Qt import QtGui, QtCore\n\nif __name__ == '__main__':\n\n app = QtGui.QApplication([])\n\n ## Define a top-level widget to hold everything\n w = QtGui.QWidget()\n\n ## Create some widgets to be placed inside\n\n tv_button = QtGui.QPushButton('Toggle View')\n im = pg.image(title=\"Loading\")\n\n ## Create a grid layout to manage the widgets size and position\n layout = QtGui.QGridLayout()\n w.setLayout(layout)\n\n ## Add widgets to the layout in their proper positions\n layout.addWidget(tv_button, 0, 0) # button goes in upper-left\n layout.addWidget(im, 1, 0) # plot goes on right side, spanning 3 rows\n\n def toggle_view():\n Thread.side_view = True if Thread.side_view == False else False\n\n def update(data):\n (tv, title) = data\n im.setImage(tv)\n im.win.setWindowTitle(title)\n\n class Thread(pg.QtCore.QThread):\n new_image = pg.QtCore.Signal(object)\n side_view = True\n\n def run(self):\n parser = argparse.ArgumentParser(description='View tracklets.')\n parser.add_argument('-i', '--indir', type=str, default='../../../../didi-data/release2/Data-points-processed',\n help='Input folder where processed tracklet subdirectories are located')\n parser.add_argument('-f', '--filter', type=str, nargs='+', default=None,\n help='Only include date/drive tracklet subdirectories, e.g. -f 1/21_f 2/24')\n parser.add_argument('-y', '--yaw', type=float, default=0.,\n help='Force initial yaw correction (e.g. -y 0.88)')\n parser.add_argument('-xi', '--xml-filename', type=str, default='tracklet_labels.xml',\n help='tracklet xml filename (defaults to tracklet_labels.xml, TIP: use tracklet_labels_trainable.xml if available)')\n parser.add_argument('-z', '--zoom-to-box', action='store_true',\n help='zoom view to bounding box')\n parser.add_argument('-ra', '--randomize', action='store_true',\n help='random perturbation (augmentation)')\n parser.add_argument('-1', '--first', type=int, action='store',\n help='View one frame only, e.g. -1 87 (views frame 87)')\n parser.add_argument('-m', '--many', type=int, action='store',\n help='How many frames to view, e.g. -m 100 (views up to 100 frames)')\n parser.add_argument('-n', '--num-points', type=int, action='store',\n help='Resample to number of points, e.g. -n 27000')\n parser.add_argument('-d', '--distance', default=50., type=float, action='store',\n help='Distance ')\n parser.add_argument('-p', '--points-per-ring', default=None, type=int, action='store',\n help='If specified, points per ring for linear interpolation')\n parser.add_argument('-r', '--rings', nargs='+', type=int, action='store', help='Only include ring range, e.g. 
-r 12 28')\n parser.add_argument('-sw', '--scale-w', default=1., type=float, action='store', help='Scale bounding box width ')\n parser.add_argument('-sl', '--scale-l', default=1., type=float, action='store', help='Scale bounding box width ')\n parser.add_argument('-sh', '--scale-h', default=1., type=float, action='store', help='Scale bounding box width ')\n parser.add_argument('-di', '--deinterpolate', action='store_true', help='Deinterpolate interpolated lidar (needs -p)')\n\n\n args = parser.parse_args()\n\n diditracklets = find_tracklets(args.indir, args.filter, args.yaw, args.xml_filename, False, (args.scale_h, args.scale_w, args.scale_l))\n\n for tracklet in diditracklets:\n tvv = None\n\n _frames = tracklet.frames()\n _first = _frames[0] if args.first is None else args.first\n _many = None\n if args.first and (args.many is None):\n _many = 1\n elif args.many is not None:\n _many = args.many\n if _many is not None:\n frames = [f for f in _frames if f in range(_first, _first+_many)]\n else:\n frames = _frames\n\n print(\"Loading: \" + str(len(frames)) + \" / \" + str(len(_frames)) + \" frames\")\n\n for frame in frames:\n tv = tracklet.top_view(frame,\n with_boxes=True,\n zoom_to_box=args.zoom_to_box,\n SX=400,\n randomize=args.randomize,\n distance = args.distance,\n rings = range(args.rings[0], args.rings[1]) if args.rings else None,\n num_points = args.num_points,\n points_per_ring = args.points_per_ring,\n deinterpolate = args.deinterpolate)\n\n #obs_points = tracklet.get_points_in_box(frame, ignore_z=False)\n #print('frame ' + str(frame), obs_points)\n\n if tvv is None:\n tvv = np.expand_dims(tv, axis=0)\n else:\n tvv = np.concatenate((tvv, np.expand_dims(tv, axis=0)), axis=0)\n self.new_image.emit((tvv, tracklet.date + \"/\" + tracklet.drive + \".bag\"))\n print(\"Finished!\")\n\n\n w.show()\n tv_button.clicked.connect(toggle_view)\n\n thread = Thread()\n thread.new_image.connect(update)\n thread.start()\n\n import sys\n\n if sys.flags.interactive != 1 or not hasattr(QtCore, 'PYQT_VERSION'):\n pg.QtGui.QApplication.exec_()\n" }, { "alpha_fraction": 0.7027565836906433, "alphanum_fraction": 0.747924268245697, "avg_line_length": 53.727272033691406, "blob_id": "2227f9657de6263fc39b4bb7cb307b833f757e51", "content_id": "8ca4a47c8cf626f5c485ae9943d5d1f33e9fc74d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 3011, "license_type": "no_license", "max_line_length": 102, "num_lines": 55, "path": "/tracklets/python/refine-1.sh", "repo_name": "antorsae/didi-competition", "src_encoding": "UTF-8", "text": "SECONDS=0\nT=0\nfunction t()\n{\n\techo Took $(($SECONDS - $T)) seconds\n\tT=$SECONDS\n}\npython refine.py -xi tracklet_labels.xml -xo tracklet_labels_refined.xml -a -f 1/8_f ;t\npython refine.py -xi tracklet_labels_refined.xml -xo tracklet_labels_trainable.xml -r -f 1/8_f\n\npython refine.py -xi tracklet_labels.xml -xo tracklet_labels_refined.xml -a -f 1/14_f -y 0.89491426 ;t\npython refine.py -xi tracklet_labels_refined.xml -xo tracklet_labels_trainable.xml -r -f 1/14_f\n\npython refine.py -xi tracklet_labels.xml -xo tracklet_labels_refined.xml -a -f 1/13 -y 0.88381232 ;t\npython refine.py -xi tracklet_labels_refined.xml -xo tracklet_labels_trainable.xml -r -f 1/13\n\npython refine.py -xi tracklet_labels.xml -xo tracklet_labels_refined.xml -a -f 1/11 -y 0.93886924 ;t\npython refine.py -xi tracklet_labels_refined.xml -xo tracklet_labels_trainable.xml -r -f 1/11\n\npython refine.py -xi tracklet_labels.xml -xo 
tracklet_labels_refined.xml -a -f 1/10 -y 0.922499 ;t\npython refine.py -xi tracklet_labels_refined.xml -xo tracklet_labels_trainable.xml -r -f 1/10\n\npython refine.py -xi tracklet_labels.xml -xo tracklet_labels_refined.xml -a -f 1/2 -y 0.88522899 ;t\n# NEED to generate _refined2 and refined3\npython refine.py -xi tracklet_labels_refined3.xml -xo tracklet_labels_trainable.xml -r -f 1/2\n\npython refine.py -xi tracklet_labels.xml -xo tracklet_labels_refined.xml -a -f 1/3 -y 0.9045834 ;t\npython refine.py -xi tracklet_labels_refined.xml -xo tracklet_labels_trainable.xml -r -f 1/3\n\npython refine.py -xi tracklet_labels.xml -xo tracklet_labels_refined.xml -a -f 1/18 ;t\npython refine.py -xi tracklet_labels_refined.xml -xo tracklet_labels_trainable.xml -r -f 1/18\n\npython refine.py -xi tracklet_labels.xml -xo tracklet_labels_refined.xml -a -f 1/19 ;t\npython refine.py -xi tracklet_labels_refined.xml -xo tracklet_labels_trainable.xml -r -f 1/19\n\npython refine.py -xi tracklet_labels.xml -xo tracklet_labels_refined.xml -a -f 1/20 ;t\npython refine.py -xi tracklet_labels_refined.xml -xo tracklet_labels_trainable.xml -r -f 1/20\n\npython refine.py -xi tracklet_labels.xml -xo tracklet_labels_refined.xml -a -f 1/23 ;t\npython refine.py -xi tracklet_labels_refined.xml -xo tracklet_labels_trainable.xml -r -f 1/23\n\npython refine.py -xi tracklet_labels.xml -xo tracklet_labels_refined.xml -a -f 1/4_f ;t\npython refine.py -xi tracklet_labels_refined.xml -xo tracklet_labels_trainable.xml -r -f 1/4_f\n\npython refine.py -xi tracklet_labels.xml -xo tracklet_labels_refined.xml -a -f 1/6_f ;t\npython refine.py -xi tracklet_labels_refined.xml -xo tracklet_labels_trainable.xml -r -f 1/6_f\n\npython refine.py -xi tracklet_labels.xml -xo tracklet_labels_refined.xml -a -f 1/8_f ;t\npython refine.py -xi tracklet_labels_refined.xml -xo tracklet_labels_trainable.xml -r -f 1/8_f\n\npython refine.py -xi tracklet_labels.xml -xo tracklet_labels_refined.xml -a -f 1/26 ;t\n# NEED to generate _refined2\npython refine.py -xi tracklet_labels_refined2.xml -xo tracklet_labels_trainable.xml -r -f 1/26\n\necho Total time $SECONDS\n\n" }, { "alpha_fraction": 0.7601010203361511, "alphanum_fraction": 0.8181818127632141, "avg_line_length": 55.57143020629883, "blob_id": "b6d9d54ddeafe270733afb53758d03caa5d7f769", "content_id": "7630d2846ffde53bb8d60efa0c464c8eac57b915", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 396, "license_type": "no_license", "max_line_length": 158, "num_lines": 7, "path": "/calibration/README.md", "repo_name": "antorsae/didi-competition", "src_encoding": "UTF-8", "text": "## Camera/Lidar Calibration\n\nYou can find the camera calibration files here. 
Download the torrent to get the bag, which you can use, for example, to generate the calibration between the camera and the lidar.\n\n[Torrent](http://academictorrents.com/details/ffb0db5195d5e53041da6a7e168ce930987bc2ea)\n\nSensor transforms are available [here](https://github.com/udacity/didi-competition/tree/master/mkz-description).\n" }, { "alpha_fraction": 0.7167901396751404, "alphanum_fraction": 0.7479012608528137, "avg_line_length": 32.471073150634766, "blob_id": "fd33b75b8c1d1b972b35eb36fdfd942a90770f7e", "content_id": "6c95044421fc20059ee61647345234f2e998bec1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 4074, "license_type": "no_license", "max_line_length": 356, "num_lines": 121, "path": "/docs/GettingStarted.md", "repo_name": "antorsae/didi-competition", "src_encoding": "UTF-8", "text": "## Getting Started\nWhether you're laser focused on winning $100,000 or just trying to learn the basics of ROS and obstacle detection, there are some basic steps that you'll need to get started with Udacity's Didi Challenge.\n\n## Data\nThe [training dataset](http://academictorrents.com/details/76352487923a31d47a6029ddebf40d9265e770b5) is available as a ROS bag file. A bag contains the synchronized output of several ROS nodes. For this challenge, each vehicle bag includes:\n\n* Camera video\n* Lidar point clouds\n* GPS/IMU measurements\n\nThere are also obstacle bags which include:\n\n* Front RTK GPS\n* Back RTK GPS\n\n## How to Win\nIn order to win the competition, you must have the most accurate real-time system for detecting and locating obstacles in 3D space. The competition defines accuracy as the ratio of correct bounding box volume to the combined volume of any incorrect predictions or missed predictions.\n\nThis formula from the challenge's [GitHub README](https://github.com/udacity/didi-competition/tree/master/tracklets#metrics-and-scoring) is useful:\n\nTP/(TP + FP + FN), where:\n\nTrue Positives = correctly predicted volume that overlaps ground truth\n\nFalse Positives = incorrectly predicted volume that does not overlap ground truth\n\nFalse Negatives = ground truth volume not overlapped by any predictions\n
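\nExpressed as code the ratio is simple; the sketch below is only illustrative (the evaluation scripts under tracklets/ compute the volumes from tracklet overlaps):\n\n``` python\ndef challenge_score(tp_volume, fp_volume, fn_volume):\n    # TP / (TP + FP + FN), each term measured as 3D bounding box volume\n    total = tp_volume + fp_volume + fn_volume\n    return tp_volume / total if total else 0.0\n```\n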
\n## Installing ROS\nIn order to read the dataset, you'll need to install ROS. ROS (Robot Operating System) is an open source set of libraries and tools for working with robots.\n\nTo install ROS, you'll need a computer running Ubuntu. There has been some work on getting ROS running in a Docker container, but there are still a lot of issues with running the visualizations in RVIZ with that setup. If you don't have a Linux machine already, consider setting up a [dual boot machine](https://help.ubuntu.com/community/WindowsDualBoot).\n\nThis tutorial covers [installing ROS Indigo](http://www.ros.org/install/) on Ubuntu 14.04. These versions work well with [Autoware](https://github.com/CPFL/Autoware), an open-source self-driving car project built on top of ROS. Later on, Autoware will be useful for camera/LIDAR calibration.\n\n1. Set up your sources.list\n``` bash\nsudo sh -c 'echo \"deb http://packages.ros.org/ros/ubuntu $(lsb_release -sc) main\" > /etc/apt/sources.list.d/ros-latest.list'\n```\n\n2. Set up your keys\n``` bash\nsudo apt-key adv --keyserver hkp://ha.pool.sks-keyservers.net:80 --recv-key 421C365BD9FF1F717815A3895523BAEEB01FA116\n```\n\n3. Install ROS (this will take a few minutes)\n``` bash\nsudo apt-get install ros-indigo-desktop-full\n```\n\n4. Update your rosdep (dependency management tool)\n``` bash\nsudo rosdep init\nrosdep update\n```\n\n5. Update `env` variables\n``` bash\necho \"source /opt/ros/indigo/setup.bash\" >> ~/.bashrc\n```\n\n## Display Data in RVIZ\nOnce you've installed ROS and downloaded the dataset, you can display the bag files in RVIZ.\n\nFirst, start the ROS master node by running roscore\n``` bash\nroscore\n```\n\nNext, play the bagfile\n``` bash\nrosbag play /{path-to-file}/bagfile_name.bag\n```\n\nCheck that the correct ROS nodes are publishing messages\n``` bash\nrostopic list\nrostopic echo /vehicle/gps/fix\n```\n\nThis should output something like this:\n\n```\nheader:\n  seq: 7318\n  stamp:\n    secs: 1492883544\n    nsecs: 965464774\n  frame_id: ''\nstatus:\n  status: 0\n  service: 1\nlatitude: 37.4267093333\nlongitude: -122.07584\naltitude: -42.5\nposition_covariance: [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]\nposition_covariance_type: 0\n```\n
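\nIf you'd rather check the same topic programmatically, here is a short illustrative snippet using the rosbag Python API (the bag path is a placeholder; point it at the bag you downloaded):\n\n``` python\nimport rosbag\n\n# Placeholder path; adjust to wherever you saved the dataset bag\nwith rosbag.Bag('/data/dataset.bag') as bag:\n    for topic, msg, t in bag.read_messages(topics=['/vehicle/gps/fix']):\n        print(t.to_sec(), msg.latitude, msg.longitude, msg.altitude)\n        break  # just the first fix\n```\n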
\nStart RVIZ to visualize the data being published\n``` bash\nrviz\n```\n\nSwitch the Fixed Frame to 'velodyne' so that we can show point clouds\n\n<img src=\"imgs/fixed_frame.png\" alt=\"\" width=\"800px\"></img>\n\nAdd the ROS nodes that you want to visualize.\n\nClick the Add button at the bottom of the Displays panel:\n\n<img src=\"imgs/add_node.png\" alt=\"\" width=\"800px\"></img>\n\nSelect the 'By Topic' tab and choose the topic that you want to visualize:\n\n<img src=\"imgs/select_topic.png\" alt=\"\" width=\"800px\"></img>\n\nNow you can see the point cloud displayed:\n\n<img src=\"imgs/point_cloud.png\" alt=\"\" width=\"800px\"></img>\n" }, { "alpha_fraction": 0.7204091548919678, "alphanum_fraction": 0.7506088614463806, "avg_line_length": 49.07316970825195, "blob_id": "2b894bfd2d884668c91721afbd06de3fa9614ebc", "content_id": "1cb39bda219d408bc5e3d17c8091a65e8ce08759", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 2053, "license_type": "no_license", "max_line_length": 96, "num_lines": 41, "path": "/tracklets/python/refine-2.sh", "repo_name": "antorsae/didi-competition", "src_encoding": "UTF-8", "text": "SECONDS=0\nT=0\nfunction t()\n{\n\techo Took $(($SECONDS - $T)) seconds\n\tT=$SECONDS\n}\npython refine.py -xi tracklet_labels.xml -xo tracklet_labels_refined.xml -a -f 1/15 ;t\npython refine.py -xi tracklet_labels_refined.xml -xo tracklet_labels_trainable.xml -r -f 1/15\n\npython refine.py -xi tracklet_labels.xml -xo tracklet_labels_refined.xml -a -f 1/17 ;t\npython refine.py -xi tracklet_labels_refined.xml -xo tracklet_labels_trainable.xml -r -f 1/17\n\npython refine.py -xi tracklet_labels.xml -xo tracklet_labels_refined.xml -a -f 1/21_f ;t\npython refine.py -xi tracklet_labels_refined.xml -xo tracklet_labels_trainable.xml -r -f 1/21_f\n\npython refine.py -xi tracklet_labels.xml -xo tracklet_labels_refined.xml -a -f 2/13 ;t\npython refine.py -xi tracklet_labels_refined.xml -xo tracklet_labels_trainable.xml -r -f 2/13\n\npython refine.py -xi tracklet_labels.xml -xo tracklet_labels_refined.xml -a -f 2/11_f ;t\n# NEED to generate _refined2\npython refine.py -xi tracklet_labels_refined2.xml -xo tracklet_labels_trainable.xml -r -f 2/11_f\n\npython refine.py -xi tracklet_labels.xml -xo tracklet_labels_refined.xml -a -f 2/14_f ;t\npython refine.py -xi tracklet_labels_refined.xml -xo tracklet_labels_trainable.xml -r -f 2/14_f\n\npython refine.py -xi tracklet_labels.xml -xo tracklet_labels_refined.xml -a -f 2/17 ;t\npython refine.py -xi tracklet_labels_refined.xml -xo tracklet_labels_trainable.xml -r -f 2/17\n\npython refine.py -xi tracklet_labels.xml -xo tracklet_labels_refined.xml -a -f 2/3_f ;t\n# TODO: correct yaw for a few frames at the beginning\npython refine.py -xi tracklet_labels_refined.xml -xo tracklet_labels_trainable.xml -r -f 2/3_f\n\npython refine.py -xi tracklet_labels.xml -xo tracklet_labels_refined.xml -a -f 2/6_f ;t\npython refine.py -xi tracklet_labels_refined.xml -xo tracklet_labels_trainable.xml -r -f 2/6_f\n\npython refine.py -xi tracklet_labels.xml -xo tracklet_labels_refined.xml -a -f 2/12_f ;t\n# NEED to generate _refined2\npython refine.py -xi tracklet_labels_refined2.xml -xo tracklet_labels_trainable.xml -r -f 2/12_f\n\necho Total time $SECONDS\n" }, { "alpha_fraction": 0.7847411632537842, "alphanum_fraction": 0.7983651161193848, "avg_line_length": 366, "blob_id": "725469b6b4d198b7d5f70bb53ab8a2e699159500", "content_id": "408644716d1ee8daa8e43b531d6a87f0c9382ae9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 367, "license_type": "no_license", "max_line_length": 366, "num_lines": 1, "path": "/mkz-description/README.md", "repo_name": "antorsae/didi-competition", "src_encoding": "UTF-8", "text": "These files represent the sensor placement on the Udacity self-driving car. The ```mkz.urdf.xacro``` file was retrieved from the [Dataspeed Bitbucket repo](https://bitbucket.org/DataspeedInc/dbw_mkz_ros/src/0ee2d85ecbe1/dbw_mkz_description/?at=default) and modified for our sensor suite. You can find the meshes for the wheels and the vehicle itself on that website.\n" }, { "alpha_fraction": 0.5923295617103577, "alphanum_fraction": 0.5980113744735718, "avg_line_length": 47.55172348022461, "blob_id": "878be994f2e4945ba40addad14a1a792ee31f6b5a", "content_id": "aebd1f2e8578d0d2b8e0fb3cd3d9440195785e58", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1408, "license_type": "no_license", "max_line_length": 123, "num_lines": 29, "path": "/tracklets/python/retime.py", "repo_name": "antorsae/didi-competition", "src_encoding": "UTF-8", "text": "import rosbag\nimport rospy\nimport argparse\n\nparser = argparse.ArgumentParser(description='Convert rosbag to images and csv.')\nparser.add_argument('-i', '--ibag', type=str, nargs='?', required=True, help='input bag to process')\nparser.add_argument('-o', '--obag', type=str, default='/dev/null', nargs='?', help='output bag')\nparser.add_argument('-t', '--topics', type=str, nargs='+', default=None, help='topics to filter')\nparser.add_argument('-s', '--seconds', type=float, default=0.1, nargs='?', help='time threshold in seconds, default: 0.1')\nargs = parser.parse_args()\nfilter_topics = args.topics\nseconds = args.seconds\nobag = args.obag\nibag = args.ibag\n\nduration = rospy.rostime.Duration(secs=int(seconds // 1), nsecs=int((seconds % 1) * 1.e9))\n\nwith rosbag.Bag(obag, 'w') as outbag:\n    for topic, msg, t in rosbag.Bag(ibag).read_messages():\n        newstamp = msg.header.stamp if msg._has_header else t\n        if filter_topics is None or topic in filter_topics:\n            if msg._has_header:\n                diff = msg.header.stamp - t\n                if abs(diff) >= duration:\n                    print(topic + \" @\" + str(msg.header.seq) + \" is \" + str(abs(diff.to_sec())) + \" seconds off\")\n                    newstamp = t\n                # only stamped messages can be retimed\n                msg.header.stamp = newstamp\n        if obag != '/dev/null':\n            outbag.write(topic, msg, t)\n" }, { "alpha_fraction": 0.5904991030693054, "alphanum_fraction": 0.5967933535575867, "avg_line_length": 40.991676330566406, "blob_id": 
"f9c5e3e6a089ec1ec9ff5f19aa723257fbf2d924", "content_id": "0bfc9a08d98579edb33610b2944cb212228a1668", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 40354, "license_type": "no_license", "max_line_length": 138, "num_lines": 961, "path": "/tracklets/python/bag_to_kitti.py", "repo_name": "antorsae/didi-competition", "src_encoding": "UTF-8", "text": "#! /usr/bin/python\n\"\"\" Udacity Self-Driving Car Challenge Bag Processing\n\"\"\"\n\nfrom __future__ import print_function\nfrom cv_bridge import CvBridge, CvBridgeError\nfrom collections import defaultdict\nimport os\nimport sys\nimport cv2\nimport math\nimport imghdr\nimport argparse\nimport functools\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport PyKDL as kd\nimport sensor_msgs.point_cloud2 as pc2\n\nfrom bag_topic_def import *\nfrom bag_utils import *\nfrom generate_tracklet import *\n\n\n# Bag message timestamp source\nTS_SRC_PUB = 0\nTS_SRC_REC = 1\nTS_SRC_OBS_REC = 2\n\n\n# Correction method\nCORRECT_NONE = 0\nCORRECT_PLANE = 1\n\nCAP_RTK_FRONT_Z = .3323 + 1.2192\nCAP_RTK_REAR_Z = .3323 + .8636\n\n# From capture vehicle 'GPS FRONT' - 'LIDAR' in\n# https://github.com/udacity/didi-competition/blob/master/mkz-description/mkz.urdf.xacro\nFRONT_TO_LIDAR = [-1.0922, 0, -0.0508]\n\n# For pedestrian capture, a different TF from mkz.urdf was used in capture. This must match\n# so using that value here.\nBASE_LINK_TO_LIDAR_PED = [1.9, 0., 1.6]\n\nCAMERA_COLS = [\"timestamp\", \"width\", \"height\", \"frame_id\", \"filename\"]\nLIDAR_COLS = [\"timestamp\", \"points\", \"frame_id\", \"filename\"]\n\nGPS_COLS = [\"timestamp\", \"lat\", \"long\", \"alt\"]\nPOS_COLS = [\"timestamp\", \"tx\", \"ty\", \"tz\", \"rx\", \"ry\", \"rz\"]\n\n\ndef obs_name_from_topic(topic):\n return topic.split('/')[2]\n\n\ndef obs_prefix_from_topic(topic):\n words = topic.split('/')\n prefix = '_'.join(words[1:4])\n name = words[2]\n return prefix, name\n\ndef get_outdir(base_dir, name=''):\n outdir = os.path.join(base_dir, name)\n if not os.path.exists(outdir):\n os.makedirs(outdir)\n return outdir\n\ndef check_format(data):\n img_fmt = imghdr.what(None, h=data)\n return 'jpg' if img_fmt == 'jpeg' else img_fmt\n\n\ndef write_image(bridge, outdir, msg, fmt='png'):\n results = {}\n image_filename = os.path.join(outdir, str(msg.header.stamp.to_nsec()) + '.' 
+ fmt)\n try:\n if hasattr(msg, 'format') and 'compressed' in msg.format:\n buf = np.ndarray(shape=(1, len(msg.data)), dtype=np.uint8, buffer=msg.data)\n cv_image = cv2.imdecode(buf, cv2.IMREAD_ANYCOLOR)\n if cv_image.shape[2] != 3:\n print(\"Invalid image %s\" % image_filename)\n return results\n results['height'] = cv_image.shape[0]\n results['width'] = cv_image.shape[1]\n # Avoid re-encoding if we don't have to\n if check_format(msg.data) == fmt:\n buf.tofile(image_filename)\n else:\n cv2.imwrite(image_filename, cv_image)\n else:\n cv_image = bridge.imgmsg_to_cv2(msg, \"bgr8\")\n cv2.imwrite(image_filename, cv_image)\n except CvBridgeError as e:\n print(e)\n results['filename'] = image_filename\n return results\n\n\ndef camera2dict(timestamp, msg, write_results, camera_dict):\n camera_dict[\"timestamp\"].append(timestamp)\n if write_results:\n camera_dict[\"width\"].append(write_results['width'] if 'width' in write_results else msg.width)\n camera_dict['height'].append(write_results['height'] if 'height' in write_results else msg.height)\n camera_dict[\"frame_id\"].append(msg.header.frame_id)\n camera_dict[\"filename\"].append(write_results['filename'])\n\ndef write_lidar(outdir, msg):\n results = {}\n lidar_filename = os.path.join(outdir, str(msg.header.stamp.to_nsec()))\n cloud_gen = pc2.read_points(msg)\n cloud = []\n point_count = 0\n for x, y, z, intensity, ring in cloud_gen:\n cloud.append([x, y, z, intensity, ring])\n point_count += 1\n results['points'] = point_count\n np.save(lidar_filename, cloud)\n results['filename'] = lidar_filename\n return results\n\n\ndef lidar2dict(timestamp, msg, write_results, lidar_dict):\n lidar_dict[\"timestamp\"].append(timestamp)\n if write_results:\n lidar_dict[\"points\"].append(write_results['points'] if 'points' in write_results else msg.width)\n lidar_dict[\"frame_id\"].append(msg.header.frame_id)\n lidar_dict[\"filename\"].append(write_results['filename'])\n\ndef gps2dict(timestamp, msg, gps_dict):\n gps_dict[\"timestamp\"].append(timestamp)\n gps_dict[\"lat\"].append(msg.latitude)\n gps_dict[\"long\"].append(msg.longitude)\n gps_dict[\"alt\"].append(msg.altitude)\n\n\ndef pose2dict(timestamp, msg, pose_dict):\n pose_dict[\"timestamp\"].append(timestamp)\n pose_dict[\"tx\"].append(msg.pose.position.x)\n pose_dict[\"ty\"].append(msg.pose.position.y)\n pose_dict[\"tz\"].append(msg.pose.position.z)\n rotq = kd.Rotation.Quaternion(\n msg.pose.orientation.x,\n msg.pose.orientation.y,\n msg.pose.orientation.z,\n msg.pose.orientation.w)\n rot_xyz = rotq.GetRPY()\n pose_dict[\"rx\"].append(rot_xyz[0])\n pose_dict[\"ry\"].append(rot_xyz[1])\n pose_dict[\"rz\"].append(rot_xyz[2])\n\n\ndef tf2dict(timestamp, tf, tf_dict):\n tf_dict[\"timestamp\"].append(timestamp)\n tf_dict[\"tx\"].append(tf.translation.x)\n tf_dict[\"ty\"].append(tf.translation.y)\n tf_dict[\"tz\"].append(tf.translation.z)\n rotq = kd.Rotation.Quaternion(\n tf.rotation.x,\n tf.rotation.y,\n tf.rotation.z,\n tf.rotation.w)\n rot_xyz = rotq.GetRPY()\n tf_dict[\"rx\"].append(rot_xyz[0])\n tf_dict[\"ry\"].append(rot_xyz[1])\n tf_dict[\"rz\"].append(rot_xyz[2])\n\n\ndef imu2dict(timestamp, msg, imu_dict):\n imu_dict[\"timestamp\"].append(timestamp)\n imu_dict[\"ax\"].append(msg.linear_acceleration.x)\n imu_dict[\"ay\"].append(msg.linear_acceleration.y)\n imu_dict[\"az\"].append(msg.linear_acceleration.z)\n\n\ndef get_yaw(p1, p2):\n return math.atan2(p1[1] - p2[1], p1[0] - p2[0])\n\n\ndef dict_to_vect(di):\n return kd.Vector(di['tx'], di['ty'], di['tz'])\n\n\ndef list_to_vect(li):\n return 
kd.Vector(li[0], li[1], li[2])\n\n\ndef vect_to_dict3(v):\n return dict(tx=v[0], ty=v[1], tz=v[2])\n\n\ndef vect_to_dict6(v):\n if len(v) == 6:\n return dict(tx=v[0], ty=v[1], tz=v[2], rx=v[3], ry=v[4], rz=v[5])\n else:\n return dict(tx=v[0], ty=v[1], tz=v[2], rx=0, ry=0, rz=0)\n\n\ndef frame_to_dict(frame, yaw_only=False):\n r, p, y = frame.M.GetRPY()\n if yaw_only:\n return dict(tx=frame.p[0], ty=frame.p[1], tz=frame.p[2], rx=0., ry=0., rz=y)\n return dict(tx=frame.p[0], ty=frame.p[1], tz=frame.p[2], rx=r, ry=p, rz=y)\n\n\ndef dict_to_frame(di):\n return kd.Frame(\n kd.Rotation.RPY(di['rx'], di['ry'], di['rz']),\n kd.Vector(di['tx'], di['ty'], di['tz']))\n\n\ndef init_df(data_dict, cols, filename, outdir=''):\n df = pd.DataFrame(data=data_dict, columns=cols)\n if len(df.index) and filename:\n df.to_csv(os.path.join(outdir, filename), index=False)\n return df\n\ndef interpolate_df(input_dfs, index_df, filter_cols=[], filename='', outdir=''):\n if not isinstance(input_dfs, list):\n input_dfs = [input_dfs]\n if not isinstance(index_df.index, pd.DatetimeIndex):\n print('Error: Camera/lidar dataframe needs to be indexed by timestamp for interpolation')\n return pd.DataFrame()\n\n for i in input_dfs:\n if len(i.index) == 0:\n print('Warning: Empty dataframe passed to interpolate, skipping.')\n return pd.DataFrame()\n i['timestamp'] = pd.to_datetime(i['timestamp'])\n i.set_index(['timestamp'], inplace=True)\n i.index.rename('index', inplace=True)\n\n merged = functools.reduce(lambda left, right: pd.merge(\n left, right, how='outer', left_index=True, right_index=True), [index_df] + input_dfs)\n merged.interpolate(method='time', inplace=True, limit=100, limit_direction='both')\n\n filtered = merged.loc[index_df.index] # back to only index' rows\n filtered.fillna(0.0, inplace=True)\n filtered['timestamp'] = filtered.index.astype('int') # add back original timestamp integer col\n if filter_cols:\n if not 'timestamp' in filter_cols:\n filter_cols += ['timestamp']\n filtered = filtered[filter_cols]\n\n if len(filtered.index) and filename:\n filtered.to_csv(os.path.join(outdir, filename), header=True)\n return filtered\n\ndef obstacle_rtk_to_pose(\n cap_front,\n cap_rear,\n obs_front,\n obs_rear,\n obs_gps_to_centroid,\n front_to_velodyne,\n cap_yaw_err=0.,\n cap_pitch_err=0.):\n\n # calculate capture yaw in ENU frame and setup correction rotation\n cap_front_v = dict_to_vect(cap_front)\n cap_rear_v = dict_to_vect(cap_rear)\n cap_yaw = get_yaw(cap_front_v, cap_rear_v)\n cap_yaw += cap_yaw_err\n rot_cap = kd.Rotation.EulerZYX(-cap_yaw, -cap_pitch_err, 0)\n\n obs_rear_v = dict_to_vect(obs_rear)\n if obs_front:\n obs_front_v = dict_to_vect(obs_front)\n obs_yaw = get_yaw(obs_front_v, obs_rear_v)\n # use the front gps as the obstacle reference point if it exists as it's closer\n # to the centroid and mounting metadata seems more reliable\n cap_to_obs = obs_front_v - cap_front_v\n else:\n cap_to_obs = obs_rear_v - cap_front_v\n\n # transform capture car to obstacle vector into capture car velodyne lidar frame\n res = rot_cap * cap_to_obs\n res += list_to_vect(front_to_velodyne)\n\n # obs_gps_to_centroid is offset for front gps if it exists, otherwise rear\n obs_gps_to_centroid_v = list_to_vect(obs_gps_to_centroid)\n if obs_front:\n # if we have both front + rear RTK calculate an obstacle yaw and use it for centroid offset\n obs_rot_z = kd.Rotation.RotZ(obs_yaw - cap_yaw)\n centroid_offset = obs_rot_z * obs_gps_to_centroid_v\n else:\n # if no obstacle yaw calculation possible, treat rear RTK as centroid and 
offset in Z only\n obs_rot_z = kd.Rotation()\n centroid_offset = kd.Vector(0, 0, obs_gps_to_centroid_v[2])\n res += centroid_offset\n return frame_to_dict(kd.Frame(obs_rot_z, res), yaw_only=True)\n\ndef old_get_obstacle_pos(\n front,\n rear,\n obstacle,\n obstacle_yaw,\n velodyne_to_front,\n gps_to_centroid):\n front_v = dict_to_vect(front)\n rear_v = dict_to_vect(rear)\n obs_v = dict_to_vect(obstacle)\n\n yaw = get_yaw(front_v, rear_v)\n rot_z = kd.Rotation.RotZ(-yaw)\n\n diff = obs_v - front_v\n res = rot_z * diff\n res += list_to_vect(velodyne_to_front)\n\n # FIXME the gps_to_centroid offset of the obstacle should be rotated by\n # the obstacle's yaw. Unfortunately the obstacle's pose is unknown at this\n # point so we will assume obstacle is axis aligned with capture vehicle\n # for now.\n\n centroid = kd.Rotation.RotZ( obstacle_yaw - yaw ) * list_to_vect(gps_to_centroid)\n\n res += centroid # list_to_vect(gps_to_centroid)\n\n return frame_to_dict(kd.Frame(kd.Rotation.RotZ(obstacle_yaw - yaw), res))\n\n\ndef interpolate_to_target(target_df, other_dfs, filter_cols=[]):\n if not isinstance(other_dfs, list):\n other_dfs = [other_dfs]\n if not isinstance(target_df.index, pd.DatetimeIndex):\n print('Error: Camera dataframe needs to be indexed by timestamp for interpolation')\n return pd.DataFrame()\n\n for o in other_dfs:\n o['timestamp'] = pd.to_datetime(o['timestamp'])\n o.set_index(['timestamp'], inplace=True)\n o.index.rename('index', inplace=True)\n\n merged = functools.reduce(lambda left, right: pd.merge(\n left, right, how='outer', left_index=True, right_index=True), [target_df] + other_dfs)\n merged.interpolate(method='time', inplace=True, limit=100, limit_direction='both')\n\n filtered = merged.loc[target_df.index].copy() # back to only camera rows\n filtered.fillna(0.0, inplace=True)\n filtered.loc[:,'timestamp'] = filtered.index.astype('int') # add back original timestamp integer col\n if filter_cols:\n if not 'timestamp' in filter_cols:\n filter_cols += ['timestamp']\n filtered = filtered[filter_cols]\n\n return filtered\n\n\ndef estimate_obstacle_poses(\n cap_front_rtk,\n #cap_front_gps_offset,\n cap_rear_rtk,\n #cap_rear_gps_offset,\n obs_rear_rtk,\n obs_rear_gps_offset, # offset along [l, w, h] dim of car, in obstacle relative coords\n obs_yaw\n):\n # offsets are all [l, w, h] lists (or tuples)\n assert(len(obs_rear_gps_offset) == 3)\n # all coordinate records should be interpolated to same sample base at this point\n assert len(cap_front_rtk) == len(cap_rear_rtk) == len(obs_rear_rtk)\n\n velo_to_front = [-1.0922, 0, -0.0508]\n rtk_coords = zip(cap_front_rtk, cap_rear_rtk, obs_rear_rtk, obs_yaw)\n output_poses = [\n get_obstacle_pos(c[0], c[1], c[2], c[3], velo_to_front, obs_rear_gps_offset) for c in rtk_coords]\n\n return output_poses\n\n\ndef check_oneof_topics_present(topic_map, name, topics):\n if not isinstance(topics, list):\n topics = [topics]\n if not any(t in topic_map for t in topics):\n print('Error: One of %s must exist in bag, skipping bag %s.' 
% (topics, name))\n return False\n return True\n\ndef extract_metadata(md, obs_name):\n md = next(x for x in md if x['obstacle_name'] == obs_name)\n if 'gps_l' in md:\n # make old rear RTK only obstacle metadata compatible with new\n md['rear_gps_l'] = md['gps_l']\n md['rear_gps_w'] = md['gps_w']\n md['rear_gps_h'] = md['gps_h']\n return md\n\ndef process_rtk_data(\n bagset,\n cap_data,\n obs_data,\n index_df,\n outdir,\n correct=CORRECT_NONE,\n yaw_err=0.,\n pitch_err=0.\n):\n tracklets = []\n cap_rear_gps_df = init_df(cap_data['rear_gps'], GPS_COLS, 'cap_rear_gps.csv', outdir)\n cap_front_gps_df = init_df(cap_data['front_gps'], GPS_COLS, 'cap_front_gps.csv', outdir)\n cap_rear_rtk_df = init_df(cap_data['rear_rtk'], POS_COLS, 'cap_rear_rtk.csv', outdir)\n cap_front_rtk_df = init_df(cap_data['front_rtk'], POS_COLS, 'cap_front_rtk.csv', outdir)\n if not len(cap_rear_rtk_df.index):\n print('Error: No capture vehicle rear RTK entries exist.'\n ' Skipping bag %s.' % bagset.name)\n return tracklets\n if not len(cap_rear_rtk_df.index):\n print('Error: No capture vehicle front RTK entries exist.'\n ' Skipping bag %s.' % bagset.name)\n return tracklets\n\n rtk_z_offsets = [np.array([0., 0., CAP_RTK_FRONT_Z]), np.array([0., 0., CAP_RTK_REAR_Z])]\n if correct > 0:\n # Correction algorithm attempts to fit plane to rtk measurements across both capture rtk\n # units and all obstacles. We will subtract known RTK unit mounting heights first.\n cap_front_points = cap_front_rtk_df.as_matrix(columns=['tx', 'ty', 'tz']) - rtk_z_offsets[0]\n cap_rear_points = cap_rear_rtk_df.as_matrix(columns=['tx', 'ty', 'tz']) - rtk_z_offsets[1]\n point_arrays = [cap_front_points, cap_rear_points]\n filtered_point_arrays = [filter_outlier_points(cap_front_points), filter_outlier_points(cap_rear_points)]\n\n obs_rtk_dfs = {}\n for obs_name, obs_rtk_dict in obs_data.items():\n obs_front_rtk_df = init_df(obs_rtk_dict['front_rtk'], POS_COLS, '%s_front_rtk.csv' % obs_name, outdir)\n obs_rear_rtk_df = init_df(obs_rtk_dict['rear_rtk'], POS_COLS, '%s_rear_rtk.csv' % obs_name, outdir)\n if not len(obs_rear_rtk_df.index):\n print('Warning: No entries for obstacle %s in %s. Skipping.' % (obs_name, bagset.name))\n continue\n obs_rtk_dfs[obs_name] = {'rear': obs_rear_rtk_df}\n if len(obs_front_rtk_df.index):\n obs_rtk_dfs[obs_name]['front'] = obs_front_rtk_df\n if correct > 0:\n # Use obstacle metadata to determine rtk mounting height and subtract that height\n # from obstacle readings\n md = extract_metadata(bagset.metadata, obs_name)\n if not md:\n print('Error: No metadata found for %s, skipping obstacle.' 
% obs_name)\n continue\n if len(obs_front_rtk_df.index):\n obs_z_offset = np.array([0., 0., md['front_gps_h']])\n rtk_z_offsets.append(obs_z_offset)\n obs_front_points = obs_front_rtk_df.as_matrix(columns=['tx', 'ty', 'tz']) - obs_z_offset\n point_arrays.append(obs_front_points)\n filtered_point_arrays.append(filter_outlier_points(obs_front_points))\n obs_z_offset = np.array([0., 0., md['rear_gps_h']])\n rtk_z_offsets.append(obs_z_offset)\n obs_rear_points = obs_rear_rtk_df.as_matrix(columns=['tx', 'ty', 'tz']) - obs_z_offset\n point_arrays.append(obs_rear_points)\n filtered_point_arrays.append(filter_outlier_points(obs_rear_points))\n\n if correct == CORRECT_PLANE:\n points = np.array(np.concatenate(filtered_point_arrays))\n centroid, normal, rotation = fit_plane(\n points, do_plot=True, dataset_outdir=outdir, name=bagset.name)\n\n def apply_correction(p, z):\n p -= centroid\n p = np.dot(rotation, p.T).T\n c = np.concatenate([centroid[0:2], z[2:]])\n p += c\n return p\n\n corrected_points = [apply_correction(pa, z) for pa, z in zip(point_arrays, rtk_z_offsets)]\n cap_front_rtk_df.loc[:, ['tx', 'ty', 'tz']] = corrected_points[0]\n cap_rear_rtk_df.loc[:, ['tx', 'ty', 'tz']] = corrected_points[1]\n pts_idx = 2\n for obs_name in obs_rtk_dfs.keys():\n if 'front' in obs_rtk_dfs[obs_name]:\n obs_rtk_dfs[obs_name]['front'].loc[:, ['tx', 'ty', 'tz']] = corrected_points[pts_idx]\n pts_idx += 1\n obs_rtk_dfs[obs_name]['rear'].loc[:, ['tx', 'ty', 'tz']] = corrected_points[pts_idx]\n pts_idx += 1\n\n interpolate_df(\n cap_front_gps_df, index_df, GPS_COLS, 'cap_front_gps_interp.csv', outdir)\n interpolate_df(\n cap_rear_gps_df, index_df, GPS_COLS, 'cap_rear_gps_interp.csv', outdir)\n cap_front_rtk_interp = interpolate_df(\n cap_front_rtk_df, index_df, POS_COLS, 'cap_front_rtk_interp.csv', outdir)\n cap_rear_rtk_interp = interpolate_df(\n cap_rear_rtk_df, index_df, POS_COLS, 'cap_rear_rtk_interp.csv', outdir)\n\n if not obs_rtk_dfs:\n print('Warning: No obstacles or obstacle RTK data present. '\n 'Skipping Tracklet generation for %s.' 
% bagset.name)\n return tracklets\n if not bagset.metadata:\n print('Error: No metadata found, metadata.csv file should be with .bag files.'\n 'Skipping tracklet generation.')\n return tracklets\n\n cap_front_rtk_rec = cap_front_rtk_interp.to_dict(orient='records')\n cap_rear_rtk_rec = cap_rear_rtk_interp.to_dict(orient='records')\n for obs_name in obs_rtk_dfs.keys():\n obs_front_rec = {}\n if 'front' in obs_rtk_dfs[obs_name]:\n obs_front_interp = interpolate_df(\n obs_rtk_dfs[obs_name]['front'], index_df, POS_COLS, '%s_front_rtk_interpolated.csv' % obs_name, outdir)\n obs_front_rec = obs_front_interp.to_dict(orient='records')\n obs_rear_interp = interpolate_df(\n obs_rtk_dfs[obs_name]['rear'], index_df, POS_COLS, '%s_rear_rtk_interpolated.csv' % obs_name, outdir)\n obs_rear_rec = obs_rear_interp.to_dict(orient='records')\n\n # Plot obstacle and front/rear rtk paths in absolute RTK ENU coords\n fig = plt.figure()\n plt.plot(\n cap_front_rtk_interp['tx'].tolist(),\n cap_front_rtk_interp['ty'].tolist(),\n cap_rear_rtk_interp['tx'].tolist(),\n cap_rear_rtk_interp['ty'].tolist(),\n obs_rear_interp['tx'].tolist(),\n obs_rear_interp['ty'].tolist())\n if 'front' in obs_rtk_dfs[obs_name]:\n plt.plot(\n obs_front_interp['tx'].tolist(),\n obs_front_interp['ty'].tolist())\n fig.savefig(os.path.join(outdir, '%s-%s-plot.png' % (bagset.name, obs_name)))\n plt.close(fig)\n\n # Extract lwh and object type from CSV metadata mapping file\n md = extract_metadata(bagset.metadata, obs_name)\n\n obs_tracklet = Tracklet(\n object_type=md['object_type'], l=md['l'], w=md['w'], h=md['h'], first_frame=0)\n\n # NOTE these calculations are done in obstacle oriented coordinates. The LWH offsets from\n # metadata specify offsets from lower left, rear, ground corner of the vehicle. Where +ve is\n # along the respective length, width, height axis away from that point. 
They are converted to\n # velodyne/ROS compatible X,Y,Z where X +ve is forward, Y +ve is left, and Z +ve is up.\n lrg_to_centroid = [md['l'] / 2., -md['w'] / 2., md['h'] / 2.]\n if 'front' in obs_rtk_dfs[obs_name]:\n lrg_to_front_gps = [md['front_gps_l'], -md['front_gps_w'], md['front_gps_h']]\n gps_to_centroid = np.subtract(lrg_to_centroid, lrg_to_front_gps)\n else:\n lrg_to_rear_gps = [md['rear_gps_l'], -md['rear_gps_w'], md['rear_gps_h']]\n gps_to_centroid = np.subtract(lrg_to_centroid, lrg_to_rear_gps)\n\n # Convert ENU RTK coords of obstacle to capture vehicle body frame relative coordinates\n if obs_front_rec:\n rtk_coords = zip(cap_front_rtk_rec, cap_rear_rtk_rec, obs_front_rec, obs_rear_rec)\n obs_tracklet.poses = [obstacle_rtk_to_pose(\n c[0], c[1], c[2], c[3],\n gps_to_centroid, FRONT_TO_LIDAR, yaw_err, pitch_err) for c in rtk_coords]\n else:\n rtk_coords = zip(cap_front_rtk_rec, cap_rear_rtk_rec, obs_rear_rec)\n obs_tracklet.poses = [obstacle_rtk_to_pose(\n c[0], c[1], {}, c[2],\n gps_to_centroid, FRONT_TO_LIDAR, yaw_err, pitch_err) for c in rtk_coords]\n\n tracklets.append(obs_tracklet)\n return tracklets\n\n\ndef process_pose_data(\n bagset,\n cap_data,\n obs_data,\n index_df,\n outdir,\n):\n tracklets = []\n cap_pose_df = init_df(cap_data['base_link_pose'], POS_COLS, 'cap_pose.csv', outdir)\n cap_pose_interp = interpolate_df(\n cap_pose_df, index_df, POS_COLS, 'cap_pose_interp.csv', outdir)\n cap_pose_rec = cap_pose_interp.to_dict(orient='records')\n\n for obs_name, obs_pose_dict in obs_data.items():\n obs_pose_df = init_df(obs_pose_dict['pose'], POS_COLS, 'obs_pose.csv', outdir)\n obs_pose_interp = interpolate_df(\n obs_pose_df, index_df, POS_COLS, 'obs_pose_interp.csv', outdir)\n obs_pose_rec = obs_pose_interp.to_dict(orient='records')\n\n # Plot obstacle and front/rear rtk paths in absolute RTK ENU coords\n fig = plt.figure()\n plt.plot(\n obs_pose_interp['tx'].tolist(),\n obs_pose_interp['ty'].tolist(),\n cap_pose_interp['tx'].tolist(),\n cap_pose_interp['ty'].tolist())\n fig.savefig(os.path.join(outdir, '%s-%s-plot.png' % (bagset.name, obs_name)))\n plt.close(fig)\n\n # FIXME hard coded metadata, only Pedestrians currently using pose capture and there is only one person\n md = {'object_type': 'Pedestrian', 'l': 0.8, 'w': 0.8, 'h': 1.708}\n base_link_to_lidar = BASE_LINK_TO_LIDAR_PED\n\n obs_tracklet = Tracklet(\n object_type=md['object_type'], l=md['l'], w=md['w'], h=md['h'], first_frame=0)\n\n def _calc_cap_to_obs(cap, obs):\n cap_frame = dict_to_frame(cap)\n obs_frame = dict_to_frame(obs)\n cap_to_obs = cap_frame.Inverse() * obs_frame\n cap_to_obs.p -= list_to_vect(base_link_to_lidar)\n cap_to_obs.p -= kd.Vector(0, 0, md['h'] / 2)\n return frame_to_dict(cap_to_obs, yaw_only=True)\n\n obs_tracklet.poses = [_calc_cap_to_obs(c[0], c[1]) for c in zip(cap_pose_rec, obs_pose_rec)]\n tracklets.append(obs_tracklet)\n return tracklets\n\n\n\ndef main():\n parser = argparse.ArgumentParser(description='Convert rosbag to images and csv.')\n parser.add_argument('-o', '--outdir', type=str, nargs='?', default='/output',\n help='Output folder')\n parser.add_argument('-i', '--indir', type=str, nargs='?', default='/data',\n help='Input folder where bagfiles are located')\n parser.add_argument('-f', '--img_format', type=str, nargs='?', default='jpg',\n help='Image encode format, png or jpg')\n parser.add_argument('-t', '--ts_src', type=str, nargs='?', default='pub',\n help=\"\"\"Timestamp source. 
'pub'=capture node publish time, 'rec'=receiver bag record time,\n 'obs_rec'=record time for obstacles topics only, pub for others. Default='pub'\"\"\")\n parser.add_argument('-m', dest='msg_only', action='store_true', help='Messages only, no images')\n parser.add_argument('-l', dest='include_lidar', action='store_true', help='Include lidar')\n parser.add_argument('-L', dest='index_by_lidar', action='store_true', help='Index tracklets by lidar frames instead of camera frames')\n parser.add_argument('-d', dest='debug', action='store_true', help='Debug print enable')\n parser.add_argument('-u', dest='unique_paths', action='store_true', help='Unique bag output paths')\n parser.set_defaults(msg_only=False)\n parser.set_defaults(unique_paths=False)\n parser.set_defaults(debug=False)\n args = parser.parse_args()\n\n img_format = args.img_format\n base_outdir = args.outdir\n indir = args.indir\n ts_src = TS_SRC_PUB\n if args.ts_src == 'rec':\n ts_src = TS_SRC_REC\n elif args.ts_src == 'obs_rec':\n ts_src = TS_SRC_OBS_REC\n msg_only = args.msg_only\n debug_print = args.debug\n unique_paths = args.unique_paths\n \n bridge = CvBridge()\n\n include_images = False if msg_only else True\n include_lidar = args.include_lidar\n index_by_lidar = args.index_by_lidar\n\n filter_topics = CAMERA_TOPICS + CAP_FRONT_RTK_TOPICS + CAP_REAR_RTK_TOPICS \\\n + CAP_FRONT_GPS_TOPICS + CAP_REAR_GPS_TOPICS + LIDAR_TOPICS\n\n # FIXME hard coded obstacles\n # The original intent was to scan bag info for obstacles and populate dynamically in combination\n # with metadata.csv. Since obstacle names were very static, and the obstacle topic root was not consistent\n # between data releases, that didn't happen.\n obstacle_topics = []\n\n # For obstacles tracked via RTK messages\n OBS_RTK_NAMES = ['obs1']\n OBS_FRONT_RTK_TOPICS = [OBJECTS_TOPIC_ROOT + '/' + x + '/front/gps/rtkfix' for x in OBS_RTK_NAMES]\n OBS_REAR_RTK_TOPICS = [OBJECTS_TOPIC_ROOT + '/' + x + '/rear/gps/rtkfix' for x in OBS_RTK_NAMES]\n obstacle_topics += OBS_FRONT_RTK_TOPICS\n obstacle_topics += OBS_REAR_RTK_TOPICS\n\n # For obstacles tracked via TF + pose messages\n OBS_POSE_TOPICS = ['/obstacle/ped/pose'] # not under same root as other obstacles for some reason\n obstacle_topics += OBS_POSE_TOPICS\n filter_topics += [TF_TOPIC] # pose based obstacles rely on TF\n\n filter_topics += obstacle_topics\n\n bagsets = find_bagsets(indir, filter_topics=filter_topics, set_per_file=True, metadata_filename='metadata.csv')\n if not bagsets:\n print(\"No bags found in %s\" % indir)\n exit(-1)\n\n for bs in bagsets:\n print(\"Processing set %s\" % bs.name)\n sys.stdout.flush()\n\n cap_data = defaultdict(lambda: defaultdict(list))\n obs_data = defaultdict(lambda: defaultdict(lambda: defaultdict(list)))\n\n outdir = os.path.join(base_outdir, bs.get_name(unique_paths))\n print( bs.get_name(unique_paths))\n get_outdir(outdir)\n\n dataset_outdir = os.path.join(base_outdir, \"%s\" % bs.name)\n print(dataset_outdir)\n\n get_outdir(dataset_outdir)\n if include_images:\n camera_outdir = get_outdir(dataset_outdir, \"camera\")\n if include_lidar:\n lidar_outdir = get_outdir(dataset_outdir, \"lidar\")\n\n bs.write_infos(dataset_outdir)\n readers = bs.get_readers()\n stats_acc = defaultdict(int)\n\n def _process_msg(topic, msg, ts_recorded, stats):\n if topic == '/tf':\n timestamp = msg.transforms[0].header.stamp.to_nsec()\n else:\n timestamp = msg.header.stamp.to_nsec() # default to publish timestamp in message header\n if ts_src == TS_SRC_REC:\n timestamp = ts_recorded.to_nsec()\n elif 
ts_src == TS_SRC_OBS_REC and topic in obstacle_topics:\n timestamp = ts_recorded.to_nsec()\n\n if topic in CAMERA_TOPICS:\n if debug_print:\n print(\"%s_camera %d\" % (topic[1], timestamp))\n\n write_results = {}\n if include_images:\n write_results = write_image(bridge, camera_outdir, msg, fmt=img_format)\n write_results['filename'] = os.path.relpath(write_results['filename'], dataset_outdir)\n camera2dict(timestamp, msg, write_results, cap_data['camera'])\n stats['img_count'] += 1\n stats['msg_count'] += 1\n\n elif topic in LIDAR_TOPICS:\n if debug_print:\n print(\"%s_lidar %d\" % (topic[1], timestamp))\n\n write_results = {}\n if include_lidar:\n write_results = write_lidar(lidar_outdir, msg)\n write_results['filename'] = os.path.relpath(write_results['filename'], dataset_outdir)\n lidar2dict(timestamp, msg, write_results, cap_data['lidar'])\n stats['lidar_count'] += 1\n stats['msg_count'] += 1\n\n elif topic in CAP_REAR_RTK_TOPICS:\n pose2dict(timestamp, msg.pose, cap_data['rear_rtk'])\n stats['msg_count'] += 1\n\n elif topic in CAP_FRONT_RTK_TOPICS:\n pose2dict(timestamp, msg.pose, cap_data['front_rtk'])\n stats['msg_count'] += 1\n\n elif topic in CAP_REAR_GPS_TOPICS:\n gps2dict(timestamp, msg, cap_data['rear_gps'])\n stats['msg_count'] += 1\n\n elif topic in CAP_FRONT_GPS_TOPICS:\n gps2dict(timestamp, msg, cap_data['front_gps'])\n stats['msg_count'] += 1\n\n elif topic in OBS_REAR_RTK_TOPICS:\n name = obs_name_from_topic(topic)\n pose2dict(timestamp, msg.pose, obs_data[name]['rear_rtk'])\n stats['msg_count'] += 1\n\n elif topic in OBS_FRONT_RTK_TOPICS:\n name = obs_name_from_topic(topic)\n pose2dict(timestamp, msg.pose, obs_data[name]['front_rtk'])\n stats['msg_count'] += 1\n\n elif topic == TF_TOPIC:\n for t in msg.transforms:\n if t.child_frame_id == '/base_link':\n tf2dict(timestamp, t.transform, cap_data['base_link_pose'])\n\n elif topic in OBS_POSE_TOPICS:\n name = obs_name_from_topic(topic)\n pose2dict(timestamp, msg, obs_data[name]['pose'])\n stats['msg_count'] += 1\n\n else:\n pass\n\n for reader in readers:\n last_img_log = 0\n last_msg_log = 0\n for result in reader.read_messages():\n _process_msg(*result, stats=stats_acc)\n if last_img_log != stats_acc['img_count'] and stats_acc['img_count'] % 1000 == 0:\n print(\"%d images processed...\" % stats_acc['img_count'])\n last_img_log = stats_acc['img_count']\n sys.stdout.flush()\n if last_msg_log != stats_acc['msg_count'] and stats_acc['msg_count'] % 10000 == 0:\n print(\"%d messages processed...\" % stats_acc['msg_count'])\n last_msg_log = stats_acc['msg_count']\n sys.stdout.flush()\n\n print(\"Writing done. 
%d images, %d lidar frames, %d messages processed.\" %\n (stats_acc['img_count'], stats_acc['lidar_count'], stats_acc['msg_count']))\n sys.stdout.flush()\n camera_df = pd.DataFrame(data=cap_data['camera'], columns=CAMERA_COLS)\n lidar_df = pd.DataFrame(data=cap_data['lidar'], columns=LIDAR_COLS)\n\n if include_images:\n camera_df.to_csv(os.path.join(dataset_outdir, 'capture_vehicle_camera.csv'), index=False)\n\n if include_lidar:\n lidar_df.to_csv(os.path.join(dataset_outdir, 'capture_vehicle_lidar.csv'), index=False)\n\n if index_by_lidar:\n target_df = lidar_df\n else:\n target_df = camera_df\n\n if len(target_df['timestamp']):\n # Interpolate samples from all used sensors to camera/lidar frame timestamps\n target_df['timestamp'] = pd.to_datetime(target_df['timestamp'])\n target_df.set_index(['timestamp'], inplace=True)\n target_df.index.rename('index', inplace=True)\n target_index_df = pd.DataFrame(index=target_df.index)\n\n collection = TrackletCollection()\n\n if 'front_rtk' in cap_data and 'rear_rtk' in cap_data:\n tracklets = process_rtk_data(\n bs, cap_data, obs_data, target_index_df, outdir)\n collection.tracklets += tracklets\n\n if 'base_link_pose' in cap_data:\n tracklets = process_pose_data(\n bs, cap_data, obs_data, target_index_df, outdir)\n collection.tracklets += tracklets\n\n if collection.tracklets:\n tracklet_path = os.path.join(outdir, 'tracklet_labels.xml')\n collection.write_xml(tracklet_path)\n else:\n print('Warning: No camera image times were found. '\n 'Skipping sensor interpolation and Tracklet generation.')\n\n '''\n\n cap_rear_gps_df.to_csv(os.path.join(dataset_outdir, 'capture_vehicle_rear_gps.csv'), index=False)\n cap_front_gps_df.to_csv(os.path.join(dataset_outdir, 'capture_vehicle_front_gps.csv'), index=False)\n cap_rear_rtk_df.to_csv(os.path.join(dataset_outdir, 'capture_vehicle_rear_rtk.csv'), index=False)\n cap_front_rtk_df.to_csv(os.path.join(dataset_outdir, 'capture_vehicle_front_rtk.csv'), index=False)\n\n obs_rtk_df_dict = {}\n for obs_topic, obs_rtk_dict in obstacle_rtk_dicts.items():\n obs_prefix, obs_name = obs_prefix_from_topic(obs_topic)\n obs_rtk_df = pd.DataFrame(data=obs_rtk_dict, columns=rtk_cols)\n if not len(obs_rtk_df.index):\n print('Warning: No entries for obstacle %s in %s. Skipping.' 
% (obs_name, bs.name))\n continue\n obs_rtk_df.to_csv(os.path.join(dataset_outdir, '%s_rtk.csv' % obs_prefix), index=False)\n obs_rtk_df_dict[obs_topic] = obs_rtk_df\n\n if index_by_lidar:\n target_dict = lidar_dict\n target_df = lidar_df\n else:\n target_dict = camera_dict\n target_df = camera_df\n\n if len(target_dict['timestamp']):\n # Interpolate samples from all used sensors to index frame timestamps\n target_df['timestamp'] = pd.to_datetime(target_df['timestamp'])\n target_df.set_index(['timestamp'], inplace=True)\n target_df.index.rename('index', inplace=True)\n\n target_index_df = pd.DataFrame(index=target_df.index)\n\n cap_rear_gps_interp = interpolate_to_target(target_index_df, cap_rear_gps_df, filter_cols=gps_cols)\n cap_rear_gps_interp.to_csv(\n os.path.join(dataset_outdir, 'capture_vehicle_rear_gps_interp.csv'), header=True)\n\n cap_front_gps_interp = interpolate_to_target(target_index_df, cap_front_gps_df, filter_cols=gps_cols)\n cap_front_gps_interp.to_csv(\n os.path.join(dataset_outdir, 'capture_vehicle_front_gps_interp.csv'), header=True)\n\n cap_rear_rtk_interp = interpolate_to_target(target_index_df, cap_rear_rtk_df, filter_cols=rtk_cols)\n cap_rear_rtk_interp.to_csv(\n os.path.join(dataset_outdir, 'capture_vehicle_rear_rtk_interp.csv'), header=True)\n cap_rear_rtk_interp_rec = cap_rear_rtk_interp.to_dict(orient='records')\n\n cap_front_rtk_interp = interpolate_to_target(target_index_df, cap_front_rtk_df, filter_cols=rtk_cols)\n cap_front_rtk_interp.to_csv(\n os.path.join(dataset_outdir, 'capture_vehicle_front_rtk_interp.csv'), header=True)\n cap_front_rtk_interp_rec = cap_front_rtk_interp.to_dict(orient='records')\n\n if not obs_rtk_df_dict:\n print('Warning: No obstacles or obstacle RTK data present. '\n 'Skipping Tracklet generation for %s.' % bs.name)\n continue\n\n collection = TrackletCollection()\n for obs_topic in obstacle_rtk_dicts.keys():\n obs_rtk_df = obs_rtk_df_dict[obs_topic]\n obs_interp = interpolate_to_target(target_index_df, obs_rtk_df, filter_cols=rtk_cols)\n obs_prefix, obs_name = obs_prefix_from_topic(obs_topic)\n obs_interp.to_csv(\n os.path.join(dataset_outdir, '%s_rtk_interpolated.csv' % obs_prefix), header=True)\n\n # Plot obstacle and front/rear rtk paths in absolute RTK ENU coords\n fig = plt.figure()\n plt.plot(\n obs_interp['tx'].tolist(),\n obs_interp['ty'].tolist(),\n cap_front_rtk_interp['tx'].tolist(),\n cap_front_rtk_interp['ty'].tolist(),\n cap_rear_rtk_interp['tx'].tolist(),\n cap_rear_rtk_interp['ty'].tolist())\n fig.savefig(os.path.join(dataset_outdir, '%s-%s-plot.png' % (bs.name, obs_name)))\n plt.close(fig)\n\n # Extract lwh and object type from CSV metadata mapping file\n md = bs.metadata if bs.metadata else default_metadata\n if not bs.metadata:\n print('Warning: Default metadata used, metadata.csv file should be with .bag files.')\n for x in md:\n if x['obstacle_name'] == obs_name:\n mdr = x\n\n obs_tracklet = Tracklet(\n object_type=mdr['object_type'], l=mdr['l'], w=mdr['w'], h=mdr['h'], first_frame=0)\n\n # NOTE these calculations are done in obstacle oriented coordinates. The LWH offsets from\n # metadata specify offsets from lower left, rear, ground corner of the vehicle. Where +ve is\n # along the respective length, width, height axis away from that point. 
They are converted to\n # velodyne/ROS compatible X,Y,Z where X +ve is forward, Y +ve is left, and Z +ve is up.\n lrg_to_gps = [mdr['gps_l'], -mdr['gps_w'], mdr['gps_h']]\n lrg_to_centroid = [mdr['l'] / 2., -mdr['w'] / 2., mdr['h'] / 2.]\n gps_to_centroid = np.subtract(lrg_to_centroid, lrg_to_gps)\n\n # compute obstacle yaw based on movement, and fill out missing gaps where obstacle moves too little\n obs_rear_rtk_diff = obs_interp.diff()\n obs_moving = (obs_rear_rtk_diff.tx ** 2 + obs_rear_rtk_diff.ty ** 2) >= (0.1 ** 2)\n obs_rear_rtk_diff.loc[~obs_moving, 'tx':'ty'] = None\n obs_yaw_computed = np.arctan2(obs_rear_rtk_diff['ty'], obs_rear_rtk_diff['tx'])\n obs_yaw_computed = obs_yaw_computed.fillna(method='bfill').fillna(method='ffill').fillna(value=0.)\n\n # Convert NED RTK coords of obstacle to capture vehicle body frame relative coordinates\n obs_tracklet.poses = estimate_obstacle_poses(\n cap_front_rtk=cap_front_rtk_interp_rec,\n #cap_front_gps_offset=[0.0, 0.0, 0.0],\n cap_rear_rtk=cap_rear_rtk_interp_rec,\n #cap_rear_gps_offset=[0.0, 0.0, 0.0],\n obs_rear_rtk=obs_interp.to_dict(orient='records'),\n obs_rear_gps_offset=gps_to_centroid,\n obs_yaw = obs_yaw_computed\n )\n\n collection.tracklets.append(obs_tracklet)\n # end for obs_topic loop\n\n tracklet_path = os.path.join(dataset_outdir, 'tracklet_labels.xml')\n collection.write_xml(tracklet_path)\n else:\n print('Warning: No camera/lidar times were found. '\n 'Skipping sensor interpolation and Tracklet generation.')\n '''\n\n\n\nif __name__ == '__main__':\n main()\n" }, { "alpha_fraction": 0.6890916228294373, "alphanum_fraction": 0.7449638843536377, "avg_line_length": 44.36206817626953, "blob_id": "d2162392b27af6cba90cf14657288f803778f070", "content_id": "f679a8615dbb1658cbe10abd3f0ade2c20001383", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 2631, "license_type": "no_license", "max_line_length": 106, "num_lines": 58, "path": "/tracklets/python/refine-3.sh", "repo_name": "antorsae/didi-competition", "src_encoding": "UTF-8", "text": "SECONDS=0\nT=0\nfunction t()\n{\n\techo Took $(($SECONDS - $T)) seconds\n\tT=$SECONDS\n}\n\n# ok\npython refine.py -xi tracklet_labels.xml -xo tracklet_labels_refined.xml -a -f 3/2_f ;t\npython refine.py -xi tracklet_labels_refined.xml -xo tracklet_labels_trainable.xml -r -f 3/2_f\n\n\n# ok needs ransac\npython refine.py -xi tracklet_labels.xml -xo tracklet_labels_refined.xml -a -f 3/12_f ;t\npython refine.py -xi tracklet_labels_refined.xml -xo tracklet_labels_trainable.xml -r -f 3/12_f\n\n# ok needs ransac\npython refine.py -xi tracklet_labels.xml -xo tracklet_labels_refined.xml -a -f 3/13_f ;t\npython refine.py -xi tracklet_labels_refined.xml -xo tracklet_labels_trainable.xml -r -f 3/13_f\n\n# ok needs ransac\npython refine.py -xi tracklet_labels.xml -xo tracklet_labels_refined.xml -a -f 3/14 -ap 0.85;t\npython refine.py -xi tracklet_labels_refined.xml -xo tracklet_labels_trainable.xml -r -f 3/14\n\n# ok needs ransac (bad quality?)\npython refine.py -xi tracklet_labels.xml -xo tracklet_labels_refined.xml -a -f 3/15_f -ap 0.85;t\npython refine.py -xi tracklet_labels_refined.xml -xo tracklet_labels_trainable.xml -r -f 3/15_f\n\n# ok needs ransac\npython refine.py -xi tracklet_labels.xml -xo tracklet_labels_refined.xml -a -f 3/4 -y 0.801401903504; t\npython refine.py -xi tracklet_labels_refined.xml -xo tracklet_labels_trainable.xml -r -f 3/4\n\n#ok\npython refine.py -xi tracklet_labels.xml -xo tracklet_labels_refined.xml -a -f 3/1 -y 0.95; 
t\npython refine.py -xi tracklet_labels_refined.xml -xo tracklet_labels_trainable.xml -r -f 3/1\n\n# ok\npython refine.py -xi tracklet_labels.xml -xo tracklet_labels_refined.xml -a -f 3/6 -y 0.843107080924; t\npython refine.py -xi tracklet_labels_refined.xml -xo tracklet_labels_trainable.xml -r -f 3/6\n\n# ok needs ransac\npython refine.py -xi tracklet_labels.xml -xo tracklet_labels_refined.xml -a -f 3/7 -y 0.819867282753; t\npython refine.py -xi tracklet_labels_refined.xml -xo tracklet_labels_trainable.xml -r -f 3/7\n\n# ok needs ransac\npython refine.py -xi tracklet_labels.xml -xo tracklet_labels_refined.xml -a -f 3/8 -y 0.819051722035; t\npython refine.py -xi tracklet_labels_refined.xml -xo tracklet_labels_trainable.xml -r -f 3/8\n\n# ok needs ransac\npython refine.py -xi tracklet_labels.xml -xo tracklet_labels_refined.xml -a -f 3/9 -y 0.799476375819; t\npython refine.py -xi tracklet_labels_refined.xml -xo tracklet_labels_trainable.xml -r -f 3/9\n\n# very few frames are good.\npython refine.py -xi tracklet_labels.xml -xo tracklet_labels_refined.xml -a -f 3/11_f -y 0.769471813278; t\npython refine.py -xi tracklet_labels_refined.xml -xo tracklet_labels_trainable.xml -r -f 3/11_f\n\necho Total time $SECONDS\n" }, { "alpha_fraction": 0.5578254461288452, "alphanum_fraction": 0.5706567764282227, "avg_line_length": 44.56153869628906, "blob_id": "cc16f3e99d22ee29cdb4f55b5a1cb993b8368c26", "content_id": "1ad44672145e9db5a18565e07c7596b3f820cb94", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 11846, "license_type": "no_license", "max_line_length": 167, "num_lines": 260, "path": "/tracklets/python/refine.py", "repo_name": "antorsae/didi-competition", "src_encoding": "UTF-8", "text": "import numpy as np\nimport os\nfrom scipy.spatial.distance import cdist\nimport argparse\nfrom diditracklet import *\nfrom generate_tracklet import *\nfrom sklearn import linear_model\nfrom sklearn.svm import SVR\n\nparser = argparse.ArgumentParser(description='Refine tracklets by finding pose of reference object or smoothing trajectory')\nparser.add_argument('-1', '--first', type=int, action='store', help='Do one frame only, e.g. -1 87 (does frame 87)')\nparser.add_argument('-s', '--start-refining-from', type=int, action='store', default=0, help='Start from frame (defaults to 0)')\nparser.add_argument('-l', '--only-do-look-backs', action='store_true', help='Only search based on previous frame position (needs -s)')\n\nparser.add_argument('-m', '--flip', action='store_true', help='Flip reference object for alignment')\nparser.add_argument('-i', '--indir', type=str, default='../../../../release2/Data-points-processed',\n help='Input folder where processed tracklet subdirectories are located')\nparser.add_argument('-f', '--filter', type=str, nargs='+', default=None,\n help='Only include date/drive tracklet subdirectories, e.g. -f 1/21_f 2/24')\nparser.add_argument('-y', '--yaw', type=float, default=0.,\n help='Force initial yaw correction (e.g. 
-y 0.88)')\nparser.add_argument('-xi', '--input-xml-filename', type=str, default='tracklet_labels.xml',\n help='input tracklet xml filename (defaults to tracklet_labels.xml)')\nparser.add_argument('-xo', '--output-xml-filename', type=str,\n help='output tracklet xml filename')\nparser.add_argument('-d', '--dump', action='store_true', help='Print csv of x,y,z translations')\n\ngroup = parser.add_mutually_exclusive_group(required=True)\ngroup.add_argument('-n', '--no-refine', action='store_true', help='Do not attempt to fit reference vehicle')\ngroup.add_argument('-r', '--ransac', action='store_true', help='Use ransac for trajectory interpolation')\ngroup.add_argument('-a', '--align', action='store_true', help='Use 3d pose alignment')\n\nparser.add_argument('-ap', '--align-percentage', type=float, action='store', default=0.6, help='Min percentage of lidar points for alignment')\nparser.add_argument('-ad', '--align-distance', type=float, action='store', default=0.3, help='Threshold distance for a point to be considered inlier during alignment')\nparser.add_argument('-as', '--align-yaw', action='store_true', help='Search for yaw during alignment')\n\nparser.add_argument('-v', '--view', action='store_true', help='View in 3d')\n\nargs = parser.parse_args()\n\ndiditracklets = find_tracklets(args.indir,\n filter=args.filter,\n yaw_correction=args.yaw,\n xml_filename=args.input_xml_filename,\n flip=args.flip)\n\nif args.output_xml_filename is None and args.no_refine is False:\n print(\"----------------------------------------------------------------------------------------\")\n print(\"WARNING: no -xo or --output-xml-filename filename provided, tracklets will NOT be saved!\")\n print(\"----------------------------------------------------------------------------------------\")\n print(\"\")\n\nfor tracklet in diditracklets:\n\n print(\"Refining \" + tracklet.xml_path)\n print(\"\")\n frames = tracklet.frames() if args.first is None else [args.first]\n\n t_boxes = []\n t_states = np.ones(len(frames), dtype=np.int32)\n\n t_box = np.zeros(3)\n if args.ransac:\n\n import matplotlib as mpl\n mpl.use('Agg')\n import matplotlib.pyplot as plt\n\n # Fit a smooth curve for each axis using all box centroids\n y = []\n for frame in frames:\n y.append(list(tracklet.get_box_centroid(frame)[:3]))\n\n X = np.arange(0,len(y))\n x_axis = np.array(y)[:,0]\n y_axis = np.array(y)[:,1]\n z_axis = np.array(y)[:,2]\n x_d1 = np.diff(x_axis)\n x_d2 = np.diff(x_axis, n=2)\n\n X = np.expand_dims(X, axis=1)\n # one SVR (RBF kernel) per axis, fitted on frame index vs. centroid coordinate\n model_x = SVR(kernel='rbf', C=1e4, gamma=0.01)\n model_x.fit(X, x_axis)\n\n model_y = SVR(kernel='rbf', C=1e4, gamma=0.01)\n model_y.fit(X, y_axis)\n\n model_z = SVR(kernel='rbf', C=1e4, gamma=0.01)\n model_z.fit(X, z_axis)\n\n # Robustly fit linear model with RANSAC algorithm\n #model_ransac = linear_model.RANSACRegressor(SVR(kernel='rbf', C=1e3, gamma=0.1))\n #model_ransac.fit(X, x_axis)\n #inlier_mask = model_ransac.inlier_mask_\n #outlier_mask = np.logical_not(inlier_mask)\n\n # Predict data of estimated models\n line_X = np.arange(0, len(y))\n x_pred = model_x.predict(line_X[:, np.newaxis])\n y_pred = model_y.predict(line_X[:, np.newaxis])\n z_pred = model_z.predict(line_X[:, np.newaxis])\n
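\n # (note) The per-axis SVR above acts as a trajectory smoother: for frame i,\n # (x_pred[i], y_pred[i], z_pred[i]) is the smoothed centroid. Frames whose\n # recorded centroid deviates from the smoothed track by 0.5m or more (checked\n # below) are flagged as outliers and re-interpolated from their neighbours.\n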
\n import scipy.spatial\n print(np.array(y)[:,:2].shape, np.squeeze(np.dstack((x_pred, y_pred)), axis=0).shape)\n _xy_cosdis = scipy.spatial.distance.cdist(np.array(y)[:,:2], np.squeeze(np.dstack((x_pred, y_pred)), axis=0), 'euclidean')\n xy_cosdis = np.empty(_xy_cosdis.shape[0])\n for i in range(_xy_cosdis.shape[0]):\n xy_cosdis[i] = _xy_cosdis[i,i]\n print(xy_cosdis)\n\n #line_y_ransac = model_ransac.predict(line_X[:, np.newaxis])\n\n # Compare cloud with estimated points\n print(\"Listing cloud points differing from estimated points by more than 50cm\")\n #x_axis = np.expand_dims(x_axis, axis=1)\n\n diff_points = np.where(xy_cosdis >= 0.5)\n x_axis_diff_points = np.where(abs(x_pred - x_axis) >= 0.5)\n y_axis_diff_points = np.where(abs(y_pred - y_axis) >= 0.5)\n z_axis_diff_points = np.where(abs(z_pred - z_axis) >= 0.5)\n print(diff_points)\n print(x_axis_diff_points)\n print(y_axis_diff_points)\n print(z_axis_diff_points)\n\n lw = 2\n plt.scatter(X,x_axis, color='black', marker='x', label='centroids x')\n plt.scatter(line_X[x_axis_diff_points],x_axis[x_axis_diff_points], color='red', marker='x')\n\n plt.plot(line_X, x_pred, color='green', linestyle='-', linewidth=lw, label='SVM Regressor x. Outliers: ' + str(x_axis_diff_points))\n\n plt.scatter(X,y_axis, color='black', marker='.', label='centroids y')\n plt.scatter(line_X[y_axis_diff_points],y_axis[y_axis_diff_points], color='red', marker='.')\n\n plt.plot(line_X, y_pred, color='navy', linestyle='-', linewidth=lw, label='SVM Regressor y. Outliers: ' + str(y_axis_diff_points))\n\n plt.scatter(X,z_axis, color='black', marker='*', label='centroids z')\n plt.scatter(line_X[z_axis_diff_points],z_axis[z_axis_diff_points], color='red', marker='*')\n\n plt.plot(line_X, z_pred, color='pink', linestyle='-', linewidth=lw, label='SVM Regressor z. Outliers: ' + str(z_axis_diff_points))\n\n plt.legend(loc=0, fontsize='xx-small')\n\n for xc in diff_points[0]:\n plt.axvline(x=line_X[xc], color='k', linestyle='--')\n\n plt.savefig(os.path.join(tracklet.xml_path , \"plot.png\"))\n plt.clf()\n\n plt.scatter(X[1:],x_d1, color='black', marker='x', label='d x')\n plt.scatter(X[2:],x_d2, color='red', marker='*', label='d2 x')\n\n plt.savefig(os.path.join(tracklet.xml_path , \"plotdiff.png\"))\n plt.clf()\n\n #modify poses using predicted values --> not accurate\n #x_axis[x_axis_diff_points] = x_pred[x_axis_diff_points]\n #y_axis[y_axis_diff_points] = y_pred[y_axis_diff_points]\n #z_axis[z_axis_diff_points] = z_pred[z_axis_diff_points]\n\n #modify poses for outliers using the neighbours mean value in all axis\n x_axis[diff_points] = (x_axis[np.array(diff_points) -1 ] + x_axis[np.array(diff_points) +1 ]) / 2\n y_axis[diff_points] = (y_axis[np.array(diff_points) -1 ] + y_axis[np.array(diff_points) +1 ]) / 2\n z_axis[diff_points] = (z_axis[np.array(diff_points) -1 ] + z_axis[np.array(diff_points) +1 ]) / 2\n\n t_boxes = zip(x_axis - np.array(y)[:,0],y_axis - np.array(y)[:,1], z_axis - np.array(y)[:,2])\n\n t_states[np.array(diff_points)] = 0\n\n elif args.align:\n\n for frame in frames:\n print(\"Frame: \" + str(frame) + \" / \" + str(len(frames)))\n if args.no_refine or frame < args.start_refining_from:\n t_box = np.zeros(3)\n else:\n if (args.start_refining_from > 0) and args.only_do_look_backs:\n look_back_last_refined_centroid = T + t_box\n else:\n look_back_last_refined_centroid = None\n\n t_box, yaw_box, reference, first = tracklet.refine_box(frame,\n look_back_last_refined_centroid = look_back_last_refined_centroid,\n return_aligned_clouds=True,\n min_percent_first = args.align_percentage,\n threshold_distance = args.align_distance,\n search_yaw = args.align_yaw )\n\n yaw = tracklet.get_yaw(frame)\n\n t_boxes.append(t_box)\n
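 # (note) t_box holds this frame's x,y,z correction as returned by refine_box();\n # the corrections collected in t_boxes are added to the original tracklet poses\n # when the refined XML is written below, and T (set next) lets the following\n # frame look back to this frame's refined centroid when -l is used.\n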
print(\"\")\n T, _ = tracklet.get_box_TR(frame)\n\n # WRITING TRACKLET\n if args.output_xml_filename is not None:\n\n collection = TrackletCollection()\n h, w, l = tracklet.get_box_size()\n obs_tracklet = Tracklet(object_type='Car', l=l,w=w,h=h, first_frame=frames[0])\n\n for frame, t_box, t_state in zip(frames, t_boxes, t_states):\n pose = tracklet.get_box_pose(frame)\n pose['tx'] += t_box[0]\n pose['ty'] += t_box[1]\n pose['tz'] += t_box[2]\n pose['status'] = t_state\n obs_tracklet.poses.append(pose)\n if args.dump:\n print(str(pose['tx']) + \",\" + str(pose['ty']) + \",\"+ str(pose['tz']))\n\n collection.tracklets.append(obs_tracklet)\n # end for obs_topic loop\n\n tracklet_path = os.path.join(tracklet.xml_path , args.output_xml_filename)\n collection.write_xml(tracklet_path)\n\nif args.view:\n\n first_aligned = point_utils.rotZ(first, yaw_box) - point_utils.rotZ(np.array([t_box[0], t_box[1], 0.]), -yaw)\n\n from pyqtgraph.Qt import QtCore, QtGui\n import pyqtgraph.opengl as gl\n\n app = QtGui.QApplication([])\n w = gl.GLViewWidget()\n w.opts['distance'] = 20\n w.show()\n w.setWindowTitle('Reference vehicle (blue) vs. original (red) vs. aligned (white) obstacle')\n\n size=np.concatenate((\n 0.01 * np.ones(reference.shape[0]),\n 0.05 * np.ones(first_aligned.shape[0]),\n 0.05 * np.ones(first.shape[0])), axis = 0)\n\n print(size.shape)\n\n sp1 = gl.GLScatterPlotItem(pos=np.concatenate((reference[:,0:3], first_aligned, first), axis=0),\n size=size,\n color=np.concatenate((\n np.tile(np.array([0,0,1.,0.5]), (reference.shape[0],1)),\n np.tile(np.array([1.,1.,1.,0.8]), (first_aligned.shape[0],1)),\n np.tile(np.array([1., 0., 0., 0.8]), (first.shape[0], 1))\n ), axis = 0),\n pxMode=False)\n sp1.translate(5,5,0)\n w.addItem(sp1)\n\n ## Start Qt event loop unless running in interactive mode.\n if __name__ == '__main__':\n import sys\n if (sys.flags.interactive != 1) or not hasattr(QtCore, 'PYQT_VERSION'):\n QtGui.QApplication.instance().exec_()\n" }, { "alpha_fraction": 0.7606074213981628, "alphanum_fraction": 0.7923179864883423, "avg_line_length": 110.94999694824219, "blob_id": "30fc912e262af4a08b7cbb65014c1a912dbdfbad", "content_id": "22c48fd2597cdd11e08f5c9cd54b547f7558287c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2251, "license_type": "no_license", "max_line_length": 386, "num_lines": 20, "path": "/README.md", "repo_name": "antorsae/didi-competition", "src_encoding": "UTF-8", "text": "<img src=\"images/urdf.png\" alt=\"MKZ Model\" width=\"800px\"></img>\n\nThe repository holds the data required for getting started with the Udacity/Didi self-driving car challenge. To generate tracklets (annotation data) from the released datasets, check out the Docker code in the ```/tracklet``` folder. For sensor transform information, check out ```/mkz-description```.\n\nPlease note that tracklets cannot be generated for Dataset 1 without modifying this code, as we added an additional RTK GPS receiver onto the capture vehicle in order to determine orientation. The orientation determination to enable world to capture vehicle transformations on Dataset 2 is currently being written, with a release target for 4/4/2017 EOD.\n\n## Datasets\nHere are links to the datasets we've released specifically for this challenge:\n* [**Dataset 2**](http://academictorrents.com/details/18d7f6be647eb6d581f5ff61819a11b9c21769c7) โ€“ Three different vehicles with a variety of maneuvers, and the Round 1 test seuence. 
Larger image sizes and two GPS RTK units on the capture vehicle for orientation determination. Velodyne points have been removed to reduce size, so a Velodyne LIDAR driver must be run during bag playback.\n* [**Dataset 1**](http://academictorrents.com/details/76352487923a31d47a6029ddebf40d9265e770b5) – NOT SUITABLE FOR TRACKLET GENERATION. Dataset intended for participants to become familiar with the sensor data format and ROS in general. Tracklet code must be modified to work on this dataset, and no capture vehicle orientation is available unless using Course-Over-Ground techniques.\n\n\n## Resources\nStarting Guides:\n* [**Udacity Intro**](docs/GettingStarted.md) – Basic ROS install and displaying data in RVIZ\n\nHere's a list of the projects we've open sourced already that may be helpful:\n* [**ROS Examples**](https://github.com/mjshiggins/ros-examples) – Example ROS nodes for consuming/processing the released datasets (work in progress)\n* [**Annotated Driving Datasets**](https://github.com/udacity/self-driving-car/tree/master/annotations) – Many hours of labelled driving data\n* [**Driving Datasets**](https://github.com/udacity/self-driving-car/tree/master/datasets) – Over 10 hours of driving data (LIDAR, camera frames and more)\n" }, { "alpha_fraction": 0.49376365542411804, "alphanum_fraction": 0.5150754451751709, "avg_line_length": 44.06623840332031, "blob_id": "521420ffba151996458d5850474edae70a0b2bf7", "content_id": "01880afd481165a6c8a1f300c8458e1f32323c70", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 48997, "license_type": "no_license", "max_line_length": 179, "num_lines": 1087, "path": "/tracklets/python/diditracklet.py", "repo_name": "antorsae/didi-competition", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n# -*- coding: iso-8859-15 -*-\nimport numpy as np\nimport os\nimport sys\nimport parse_tracklet as tracklets\nimport didipykitti as pykitti\nfrom collections import defaultdict\nimport cv2\nimport point_utils\nfrom scipy.linalg import expm3, norm\nimport re\nimport time\nimport scipy.interpolate\n\nM = 10\nMIN_HEIGHT = -2. # from camera (i.e. -2+1.65 = -0.35m, 0.35m below floor)\nMAX_HEIGHT = 2. # from camera (i.e. +2+1.65 = 3.65m above floor)\nM_HEIGHT = (MAX_HEIGHT - MIN_HEIGHT) / M\nMIN_X = -40.\nMAX_X = 40.\nMIN_Z = 5.\nMAX_Z = 70.\nHEIGHT_F_RES = 0.1 # 0.1m for x,z slicing\n\nC_W = 512\nC_H = 64\n\nCAMERA_FEATURES = (375, 1242, 3)\nHEIGHT_FEATURES = (int((MAX_Z - MIN_Z) / HEIGHT_F_RES), int((MAX_X - MIN_X) / HEIGHT_F_RES), M + 2)\nF_VIEW_FEATURES = (C_H, C_W, 3)\n
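\n# (note) With the constants above the top view spans [MIN_Z, MAX_Z] x [MIN_X, MAX_X]\n# at HEIGHT_F_RES resolution, i.e. a 650 x 800 cell grid, and the [MIN_HEIGHT,\n# MAX_HEIGHT] range is sliced into M = 10 height bins of M_HEIGHT = 0.4m each\n# (HEIGHT_FEATURES carries M + 2 = 12 channels per cell).\n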
\ndef find_tracklets(\n directory,\n filter=None,\n yaw_correction=0.,\n xml_filename=\"tracklet_labels_refined.xml\",\n flip=False,\n box_scaling=(1.,1.,1.)):\n diditracklets = []\n combined_filter = \"(\" + \")|(\".join(filter) + \"$)\" if filter is not None else None\n if combined_filter is not None:\n combined_filter = combined_filter.replace(\"*\", \".*\")\n\n for root, dirs, files in os.walk(directory):\n for date in dirs: # 1 2 3\n for _root, drives, files in os.walk(os.path.join(root, date)): # ./1/ ./18/ ...\n for drive in drives:\n if os.path.isfile(os.path.join(_root, drive, xml_filename)):\n if filter is None or re.match(combined_filter, date + '/' + drive):\n diditracklet = DidiTracklet(root, date, drive,\n yaw_correction=yaw_correction,\n xml_filename=xml_filename,\n flip=flip,\n box_scaling=box_scaling)\n diditracklets.append(diditracklet)\n\n return diditracklets\n\n\nclass DidiTracklet(object):\n kitti_cat_names = ['Car', 'Van', 'Truck', 'Pedestrian', 'Sitter', 'Cyclist', 'Tram', 'Misc', 'Person (sitting)']\n kitti_cat_idxs = range(1, 1 + len(kitti_cat_names))\n\n LIDAR_ANGLE = np.pi / 6.\n\n def __init__(self, basedir, date, drive, yaw_correction=0., xml_filename=\"tracklet_labels_refined.xml\", flip=False, box_scaling=(1.,1.,1.)):\n self.basedir = basedir\n self.date = date\n self.drive = drive\n\n self.kitti_data = pykitti.raw(basedir, date, drive,\n range(0, 1)) # , range(start_frame, start_frame + total_frames))\n self.xml_path = os.path.join(basedir, date, drive)\n self.tracklet_data = tracklets.parse_xml(os.path.join(self.xml_path, xml_filename))\n\n # correct yaw in all frames if yaw_correction provided\n if yaw_correction != 0.:\n assert len(self.tracklet_data) == 1 # only one tracklet supported for now!\n for t in self.tracklet_data:\n for frame_offset in range(t.first_frame, t.first_frame + t.num_frames):\n idx = frame_offset - t.first_frame\n t.rots[idx][2] += yaw_correction\n\n self.kitti_data.load_calib() # Calibration data are accessible as named tuples\n\n # lidars is a dict indexed by frame: e.g. lidars[10] = np(N,4)\n self.lidars = {}\n\n # images is a dict indexed by frame: e.g. images[10] = np(SY,SX,3)\n self.images = {}\n self.im_dim = (1242, 375) # by default\n\n # boxes is a dict indexed by frame: e.g. 
boxes[10] = [box, box, ...]\n self._boxes = None # defaultdict(list)\n self._last_refined_box = None\n self._box_scaling = box_scaling\n\n reference_file = os.path.join(basedir, date, 'obs.txt')\n\n if os.path.isfile(reference_file):\n if flip:\n print(\"Flipping\")\n else:\n print(\"Not flipping\")\n self.reference = self.__load_reference(reference_file, flip)\n else:\n self.reference = None\n\n self._init_boxes(only_with=None)\n\n\n def __load_reference(self, reference_file, flip=False):\n reference = np.genfromtxt(reference_file, dtype=np.float32, comments='/')\n\n # our reference model is in inches: convert to meters\n reference = np.multiply(reference[:, 0:3], np.array([0.0254, 0.0254, 0.0254]))\n\n reference_min = np.amin(reference[:, 0:3], axis=0)\n reference_lwh = np.amax(reference[:, 0:3], axis=0) - reference_min\n\n reference[:, 0:3] -= (reference_min[0:3] + reference_lwh[0:3] / 2.)\n\n # our reference model is rotated: align it correctly\n reference[:, 0:3] = point_utils.rotate(reference[:,0:3], np.array([1., 0., 0.]), np.pi / 2)\n\n # by default our reference model points to the opposite direction, so flip it accordingly\n if not flip:\n reference = point_utils.rotate(reference, np.array([0.,0.,1.]), np.pi)\n\n # flip it\n reference[:, 2] = -reference[:, 2]\n reference[:, 2] -= (np.amin(reference[:, 2]))\n\n # at this point our reference model is on lidar frame, centered around x=0,y=0 and sitting at z = 0\n return reference\n\n def _align(self, first, min_percent_first = 0.6, threshold_distance = 0.3, search_yaw=False):\n if self.reference is not None:\n\n model = point_utils.ICP(search_yaw=search_yaw)\n #first = point_utils.rotate(first, np.array([0., 0., 1.]), np.pi)\n\n t, _ = point_utils.ransac(first, self.reference[:, 0:3], model,\n min_percent_fist=min_percent_first,\n threshold= threshold_distance)\n\n if t is None:\n t = np.zeros((3))\n\n else:\n print(\"No reference object, not aligning\")\n t = np.zeros((3))\n return t\n\n # for DIDI -> don't filter anything\n # for KITTI ->\n # include tracklet IFF in image and not occluded\n # WARNING: There's a lot of tracklets with occs=-1 (255) which we need to fix\n def __include_tracklet(self, t, idx):\n return True # (t.truncs[idx] == tracklets.Truncation.IN_IMAGE) and (t.occs[idx, 0] == 0)\n\n def get_yaw(self, frame):\n assert len(self.tracklet_data) == 1 # only one tracklet supported for now!\n for t in self.tracklet_data:\n assert frame in range(t.first_frame, t.first_frame + t.num_frames)\n idx = frame - t.first_frame\n yaw = t.rots[idx][2]\n return yaw\n\n def get_state(self, frame):\n assert len(self.tracklet_data) == 1 # only one tracklet supported for now!\n t = self.tracklet_data[0]\n return t.states[frame - t.first_frame]\n\n def get_box_first_frame(self, box=0):\n assert len(self.tracklet_data) == 1 # only one tracklet supported for now!\n return self.tracklet_data[box].first_frame\n\n def get_box_size(self, box=0):\n assert len(self.tracklet_data) == 1 # only one tracklet supported for now!\n return self.tracklet_data[box].size # h w l\n\n def get_box_TR(self, frame, box=0):\n assert len(self.tracklet_data) == 1 # only one tracklet supported for now!\n t = self.tracklet_data[box]\n assert frame in range(t.first_frame, t.first_frame + t.num_frames)\n idx = frame - t.first_frame\n T,R = t.trans[idx], t.rots[idx]\n return T,R\n\n def get_box_pose(self, frame, box=0):\n T,R = self.get_box_TR(frame, box=box)\n pose = {'tx': T[0], 'ty': T[1], 'tz': T[2], 'rx': R[0], 'ry': R[1], 'rz': R[2] }\n return pose\n\n # return 
list of frames with tracked objects of type only_with\n def frames(self, only_with=None):\n frames = []\n for t in self.tracklet_data:\n if (only_with is None) or (t.object_type in only_with):\n for frame_offset in range(t.first_frame, t.first_frame + t.num_frames):\n if self.__include_tracklet(t, frame_offset - t.first_frame):\n frames.append(frame_offset)\n else:\n print(\"UNTRACKED\", t.object_type)\n self._init_boxes(only_with)\n return list(set(frames)) # remove duplicates\n\n def _read_lidar(self, frame):\n if frame not in self.kitti_data.frame_range:\n self.kitti_data = pykitti.raw(self.basedir, self.date, self.drive, [frame]) # , range(start_frame, start_frame + total_frames))\n self.kitti_data.load_calib()\n assert frame in self.kitti_data.frame_range\n self.kitti_data.load_velo()\n if len(self.kitti_data.velo) != 1:\n print(frame, self, self.xml_path, self.kitti_data.velo)\n print(len(self.lidars))\n assert len(self.kitti_data.velo) == 1\n lidar = self.kitti_data.velo[0]\n self.lidars[frame] = lidar\n return\n\n def _read_image(self, frame):\n if frame not in self.kitti_data.frame_range:\n self.kitti_data = pykitti.raw(self.basedir, self.date, self.drive,\n range(frame, frame + 1)) # , range(start_frame, start_frame + total_frames))\n self.kitti_data.load_calib()\n assert frame in self.kitti_data.frame_range\n self.kitti_data.load_rgb()\n self.images[frame] = self.kitti_data.rgb[0].left\n\n (sx, sy) = self.images[frame].shape[::-1][1:]\n\n if self.im_dim != (sx, sy):\n print(\"WARNING changing default dimensions to\", (sx, sy))\n self.im_dim = (sx, sy)\n\n return\n\n # initialize self.boxes with a dict containing frame -> [box, box, ...]\n def _init_boxes(self, only_with):\n #assert self._boxes is None\n self._boxes = defaultdict(list)\n for t in self.tracklet_data:\n if (only_with is None) or (t.object_type in only_with):\n for frame_offset in range(t.first_frame, t.first_frame + t.num_frames):\n idx = frame_offset - t.first_frame\n if self.__include_tracklet(t, idx):\n h, w, l = np.multiply(t.size, self._box_scaling)\n\n assert (h > 0.) and (w > 0.) 
and (l > 0.)\n # in velo:\n # A D\n #\n # B C\n trackletBox = np.array(\n [ # in velodyne coordinates around zero point and without orientation yet\\\n [-l / 2, -l / 2, l / 2, l / 2, -l / 2, -l / 2, l / 2, l / 2], \\\n [w / 2, -w / 2, -w / 2, w / 2, w / 2, -w / 2, -w / 2, w / 2], \\\n [-h/2., -h/2., -h/2., -h/2., h/2., h/2., h/2., h/2.]])\n # CAREFUL: DIDI/UDACITY changed the semantics of a TZ!\n #[0.0, 0.0, 0.0, 0.0, h, h, h, h]]) #\n yaw = t.rots[idx][2] # other rotations are 0 in all xml files I checked\n\n assert np.abs(t.rots[idx][:2]).sum() == 0, 'object rotations other than yaw given!'\n rotMat = np.array([\n [np.cos(yaw), -np.sin(yaw), 0.0],\n [np.sin(yaw), np.cos(yaw), 0.0],\n [0.0, 0.0, 1.0]])\n cornerPosInVelo = np.dot(rotMat, trackletBox) + np.tile(t.trans[idx], (8, 1)).T\n self._boxes[frame_offset].append(cornerPosInVelo)\n return\n\n # given lidar points, subsample POINTS by removing points from voxels with highest density\n @staticmethod\n def _lidar_subsample(lidar, POINTS):\n # X_RANGE = ( 0., 70.)\n # Y_RANGE = (-40., 40.)\n # Z_RANGE = ( -2., 2.)\n # RES = 0.2 (not needed)\n\n NX = 10\n NY = 10\n NZ = 4\n\n bins, edges = np.histogramdd(lidar[:, 0:3], bins=(NX, NY, NZ))\n\n bin_target = np.array(bins, dtype=np.int32)\n subsample_time_start = time.time()\n\n bin_target_flat = bin_target.flatten()\n remaining = np.sum(bin_target) - POINTS\n\n bin_target_idx_sorted = np.argsort(bin_target_flat)\n maxi = bin_target_idx_sorted.shape[0] - 1\n i = maxi\n while (remaining > 0) and (i >= 0):\n maxt = bin_target_flat[bin_target_idx_sorted[maxi]]\n while bin_target_flat[bin_target_idx_sorted[i]] >= maxt:\n i -= 1\n available_to_substract = bin_target_flat[bin_target_idx_sorted[i+1:maxi+1]] - bin_target_flat[bin_target_idx_sorted[i]]\n total_available_to_substract = np.sum(available_to_substract)\n ii = i\n zz = 0\n if (total_available_to_substract < remaining):\n bin_target_flat[bin_target_idx_sorted[ii + 1:]] -= available_to_substract\n remaining -= total_available_to_substract\n else:\n while (total_available_to_substract > 0) and (remaining > 0):\n to_substract = min(remaining, available_to_substract[zz])\n bin_target_flat[bin_target_idx_sorted[ii+1]] -= to_substract\n total_available_to_substract -= to_substract\n remaining -= to_substract\n ii += 1\n zz += 1\n #print(bin_target_flat)\n #print(remaining)\n\n bin_target = bin_target_flat.reshape(bin_target.shape)\n #print(\"bin_target\", bin_target)\n #print(\"_bin_target\", _bin_target)\n\n subsample_time_end = time.time()\n #print 'Total subsample inside time: %0.3f ms' % ((subsample_time_end - subsample_time_start) * 1000.0)\n\n target_n = np.sum(bin_target)\n assert target_n == POINTS\n\n subsampled = np.empty((POINTS, lidar.shape[1]))\n\n i = 0\n j = maxi\n nx, ny, nz = bin_target.shape\n# for (x, y, z), v in np.ndenumerate(bin_target):\n # if v > 0:\n while (bin_target_flat[bin_target_idx_sorted[j]] > 0):\n x,y,z = np.unravel_index(bin_target_idx_sorted[j], bin_target.shape)\n v = bin_target_flat[bin_target_idx_sorted[j]]\n XX = edges[0][x:x + 2]\n YY = edges[1][y:y + 2]\n ZZ = edges[2][z:z + 2]\n # edge cases needed b/c histogramdd includes right-most edge in bin\n #if (x < (nx - 1)) & (y < (ny - 1)) & (z < (nz - 1)):\n # sublidar = lidar[(lidar[:, 0] >= XX[0]) & (lidar[:, 0] < XX[1]) & (lidar[:, 1] >= YY[0]) & (lidar[:, 1] < YY[1]) & (lidar[:, 2] >= ZZ[0]) & (lidar[:, 2] < ZZ[1])]\n if x < (nx - 1):\n sublidar = lidar[(lidar[:, 0] >= XX[0]) & (lidar[:, 0] < XX[1])]\n else:\n sublidar = lidar[(lidar[:, 0] >= XX[0]) & 
(lidar[:, 0] <= XX[1])]\n if y < (ny - 1):\n sublidar = sublidar[(sublidar[:, 1] >= YY[0]) & (sublidar[:, 1] < YY[1])]\n else:\n sublidar = sublidar[(sublidar[:, 1] >= YY[0]) & (sublidar[:, 1] <= YY[1])]\n if z < (nz - 1):\n sublidar = sublidar[(sublidar[:, 2] >= ZZ[0]) & (sublidar[:, 2] < ZZ[1])]\n else:\n sublidar = sublidar[(sublidar[:, 2] >= ZZ[0]) & (sublidar[:, 2] <= ZZ[1])]\n assert sublidar.shape[0] == bins[x, y, z]\n assert sublidar.shape[0] >= v\n subsampled[i:(i + v)] = sublidar[np.random.choice(range(sublidar.shape[0]), v, replace=False)]\n #subsampled[i:(i + v)] = sublidar[:v]\n\n i += v\n j -= 1\n return subsampled\n\n @staticmethod\n def _remove_capture_vehicle(lidar):\n return lidar[~ ((np.abs(lidar[:, 0]) < 2.6) & (np.abs(lidar[:, 1]) < 1.))]\n\n @staticmethod\n def resample_lidar(lidar, num_points):\n lidar_size = lidar.shape[0]\n if (num_points > lidar_size) and (lidar_size > 0):\n upsample_time_start = time.time()\n\n #lidar = np.concatenate((lidar, lidar[np.random.choice(lidar.shape[0], size=num_points - lidar_size, replace=True)]), axis = 0)\n if True:\n # tile existing array and pad it with missing slice\n reps = num_points // lidar_size - 1\n if reps > 0:\n lidar = np.tile(lidar, (reps + 1, 1))\n missing = num_points - lidar.shape[0]\n lidar = np.concatenate((lidar, lidar[:missing]), axis=0)\n upsample_time_end = time.time()\n #print 'Total upsample time: %0.3f ms' % ((upsample_time_end - upsample_time_start) * 1000.0)\n\n elif num_points < lidar_size:\n subsample_time_start = time.time()\n lidar = DidiTracklet._lidar_subsample(lidar, num_points)\n subsample_time_end = time.time()\n #print 'Total subsample time: %0.3f ms' % ((subsample_time_end - subsample_time_start) * 1000.0)\n\n return lidar\n\n @staticmethod\n def filter_lidar(lidar, num_points = None, remove_capture_vehicle=True, max_distance = None, angle_cone = None, rings = None):\n\n if rings is not None:\n # keep only points from the selected laser rings (ring id is column 4)\n rlidar = np.empty((0, 5), dtype=np.float32)\n for ring in rings:\n l = lidar[lidar[:,4] == ring]\n rlidar = np.concatenate((rlidar, l), axis=0)\n lidar = rlidar\n\n if remove_capture_vehicle:\n lidar = DidiTracklet._remove_capture_vehicle(lidar)\n\n if max_distance is not None:\n lidar = lidar[(lidar[:,0] ** 2 + lidar[:,1] ** 2) <= (max_distance **2)]\n\n if angle_cone is not None:\n min_angle = angle_cone[0]\n max_angle = angle_cone[1]\n angles = np.arctan2(lidar[:,1], lidar[:,0])\n lidar = lidar[(angles >= min_angle) & (angles <= max_angle)]\n\n if num_points is not None:\n lidar = DidiTracklet.resample_lidar(lidar, num_points)\n\n return lidar\n
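\n # (sketch, assumed usage) filter_lidar() takes a raw (N, 5) cloud of\n # x, y, z, intensity, ring rows; e.g. to keep rings 10-24 within 30m and\n # resample to a fixed-size cloud one could call:\n # lidar = DidiTracklet.filter_lidar(raw, num_points=16000, max_distance=30., rings=range(10, 25))\n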
\n def get_lidar_rings(self, frame, rings, points_per_ring, clip=None, clip_h=None,\n rotate=0., flipX = False, flipY=False, jitter=False,\n return_lidar_interpolated=False, return_lidar_deinterpolated=False,\n lidar_deinterpolate_random = False,\n return_angle_at_edges = False):\n if frame not in self.lidars:\n self._read_lidar(frame)\n assert frame in self.lidars\n lidar = self.lidars[frame]\n return DidiTracklet.filter_lidar_rings(lidar, rings, points_per_ring, clip=clip, clip_h = clip_h,\n rotate=rotate, flipX = flipX, flipY=flipY, jitter=jitter,\n return_lidar_interpolated = return_lidar_interpolated,\n return_lidar_deinterpolated = return_lidar_deinterpolated,\n lidar_deinterpolate_random = lidar_deinterpolate_random,\n return_angle_at_edges = return_angle_at_edges,\n )\n\n # Returns an array of shape (len(rings), points_per_ring, 3) => (distance XY, distance Z, intensity)\n @staticmethod\n def filter_lidar_rings(lidar, rings, points_per_ring, clip=None, clip_h = None,\n 
np.sin(angle_at_edges[:, 0] - angle_at_edges[:, 1]),\n np.cos(angle_at_edges[:, 0] - angle_at_edges[:, 1])))\n min_diff_idx = np.argmin(angle_diffs[angle_at_edges[:,2] > 0])\n ret += (angle_at_edges[min_diff_idx,0],)\n\n if len(ret) == 1:\n return ret[0]\n else:\n return ret\n\n def get_lidar(self, frame, num_points = None, remove_capture_vehicle=True, max_distance = None, angle_cone=None, rings=None):\n if frame not in self.lidars:\n self._read_lidar(frame)\n assert frame in self.lidars\n lidar = self.lidars[frame]\n return self.filter_lidar(lidar, num_points = num_points, remove_capture_vehicle=remove_capture_vehicle,\n max_distance = max_distance, angle_cone=angle_cone, rings=rings)\n\n def get_box(self, frame):\n assert self._boxes is not None\n assert len(self._boxes[frame]) == 1\n box = self._boxes[frame][0] # first box for now\n return box\n\n def get_box_centroid(self, frame):\n assert self._boxes is not None\n assert len(self._boxes[frame]) == 1\n box = self._boxes[frame][0] # first box for now\n return np.average(box, axis=1)\n\n def get_points_in_box(self, frame, ignore_z=True):\n if frame not in self.lidars:\n self._read_lidar(frame)\n assert frame in self.lidars\n assert self._boxes is not None\n assert len(self._boxes[frame]) == 1\n box = self._boxes[frame][0] # first box for now\n lidar = self.lidars[frame]\n return DidiTracklet.get_lidar_in_box(lidar, box, ignore_z=ignore_z)\n\n def get_number_of_points_in_box(self, frame, ignore_z=True):\n if frame not in self.lidars:\n self._read_lidar(frame)\n assert frame in self.lidars\n assert self._boxes is not None\n assert len(self._boxes[frame]) == 1\n box = self._boxes[frame][0] # first box for now\n lidar = self.lidars[frame]\n return len(DidiTracklet.get_lidar_in_box(lidar, box, ignore_z=ignore_z))\n\n def top_and_side_view(self, frame, with_boxes=True, lidar_override=None, SX=None, abl_overrides=None, zoom_to_box=False, distance=50.):\n tv = self.top_view(frame, with_boxes=with_boxes, lidar_override=lidar_override,\n SX=SX, zoom_to_box=zoom_to_box, distance=distance)\n sv = self.top_view(frame, with_boxes=with_boxes, lidar_override=lidar_override,\n SX=SX, zoom_to_box=zoom_to_box, distance=distance,\n side_view=True)\n return np.concatenate((tv, sv), axis=0)\n\n def refine_box(self,\n frame,\n remove_points_below_plane = True,\n search_ground_plane_radius = 20.,\n search_centroid_radius = 4.,\n look_back_last_refined_centroid=None,\n return_aligned_clouds=False,\n min_percent_first = 0.6,\n threshold_distance = 0.3,\n search_yaw=False):\n # this is imported here so everything else works even if not installed\n # install from https://github.com/strawlab/python-pcl\n import pcl\n\n if look_back_last_refined_centroid is None:\n assert self._boxes is not None\n box = self._boxes[frame][0] # first box for now\n cx = np.average(box[0, :])\n cy = np.average(box[1, :])\n cz = np.average(box[2, :])\n else:\n cx,cy,cz = look_back_last_refined_centroid\n print(\"Using last refined centroid\", cx,cy,cz)\n\n T, _ = self.get_box_TR(frame)\n print(\"averaged centroid\", cx,cy,cz, \" vs \",T[0], T[1], T[2])\n\n t_box = np.zeros((3))\n yaw_box = 0.\n\n if frame not in self.lidars:\n self._read_lidar(frame)\n assert frame in self.lidars\n lidar = self.lidars[frame]\n\n # get points close to the obstacle (d_range meters) removing capture car (2.6m x, 1m y) just in case\n # this will be handy when we find the ground plane around the obstacle later\n lidar_without_capture = DidiTracklet._remove_capture_vehicle(lidar)\n lidar_close = 
lidar_without_capture[( ((lidar_without_capture[:, 0] - cx) ** 2 + (lidar_without_capture[:, 1] - cy) ** 2) < search_ground_plane_radius ** 2) ]\n\n obs_isolated = []\n # at a minimum we need 4 points (3 ground plane points plus 1 obstacle point)\n if (lidar_close.shape[0] >= 4):\n\n p = pcl.PointCloud(lidar_close[:,0:3].astype(np.float32))\n seg = p.make_segmenter()\n seg.set_optimize_coefficients(True)\n seg.set_model_type(pcl.SACMODEL_PLANE)\n seg.set_method_type(pcl.SAC_RANSAC)\n seg.set_distance_threshold(0.25)\n indices, model = seg.segment()\n gp = np.zeros((lidar_close.shape[0]), dtype=np.bool)\n gp[indices] = True\n lidar = lidar_close[~gp]\n\n a, b, c, d = model\n if remove_points_below_plane and (len(lidar) > 1 ) and (len(model)== 4):\n\n # see http://mathworld.wolfram.com/HessianNormalForm.html\n # we can remove / dd because we're just interested in the sign\n # dd = np.sqrt(a ** 2 + b ** 2 + c ** 2)\n lidar = lidar[( lidar[:, 0]* a + lidar[:,1] * b + lidar[:,2] * c + d) >= 0 ]\n\n ground_z = (-d - a * cx - b * cy) / c\n print(\"Original centroid @ \" + str((cx,cy,cz)) + \" ground_z estimated @ \" + str(ground_z) )\n\n origin = np.array([cx, cy, ground_z])\n if lidar.shape[0] > 4:\n\n # obs_isolated is just lidar points centered around 0,0 and sitting on ground 0 (z=0)\n obs_isolated = lidar[:,0:3]-origin\n\n dd = np.sqrt(a ** 2 + b ** 2 + c ** 2)\n nx = a / dd\n ny = b / dd\n nz = c / dd\n print(\"Hessian normal\", nx,ny,nz)\n roll = np.arctan2(nx, nz)\n pitch = np.arctan2(ny, nz)\n print(\"ground roll | pitch \" + str(roll * 180. / np.pi) + \" | \" + str(pitch * 180. / np.pi))\n\n # rotate it so that it is aligned with our reference target\n obs_isolated = point_utils.rotZ(obs_isolated, self.get_yaw(frame))\n\n # correct ground pitch and roll\n print(\"z min before correction\", np.amin(obs_isolated[:,2]))\n\n obs_isolated = point_utils.rotate(obs_isolated, np.array([0., 1., 0.]), -roll) # along Y axis\n obs_isolated = point_utils.rotate(obs_isolated, np.array([1., 0., 0.]), -pitch) # along X axis\n print(\"z min after correction\", np.amin(obs_isolated[:,2]))\n\n # remove stuff beyond search_centroid_radius meters of the current centroid\n obs_cx = 0 #np.mean(obs_isolated[:,0])\n obs_cy = 0 #np.mean(obs_isolated[:,1])\n\n obs_isolated = obs_isolated[(((obs_isolated[:, 0] - obs_cx)** 2) + (obs_isolated[:, 1] - obs_cy) ** 2) <= search_centroid_radius ** 2]\n print(\"Isolated\", obs_isolated.shape)\n if (obs_isolated.shape[0] > 0):\n _t_box = self._align(\n obs_isolated,\n min_percent_first = min_percent_first,\n threshold_distance = threshold_distance,\n search_yaw=search_yaw)\n yaw_box = _t_box[3] if search_yaw else 0.\n _t_box[2] = 0\n t_box = -point_utils.rotZ(_t_box[:3], -self.get_yaw(frame))\n\n # if we didn't find it in the first place, check if we found it in the last frame and attempt to find it from there\n if (t_box[0] == 0.) and (t_box[1] == 0.) and (look_back_last_refined_centroid is None) and (self._last_refined_box is not None):\n print(\"Looking back\")\n t_box, _ = self.refine_box(frame,\n look_back_last_refined_centroid=self._last_refined_box,\n min_percent_first=min_percent_first,\n threshold_distance=threshold_distance,\n )\n t_box = -t_box\n\n new_ground_z = (-d - a * (cx+t_box[0]) - b * (cy+t_box[1])) / c\n print(\"original z centroid\", T[2], \"new ground_z\", new_ground_z)\n t_box[2] = new_ground_z + self.tracklet_data[0].size[0]/2. 
- T[2]\n\n if look_back_last_refined_centroid is not None:\n t_box[:2] = t_box[:2] + origin[:2] - T[:2]\n\n if (t_box[0] != 0.) or (t_box[1] != 0.):\n self._last_refined_box = T + t_box\n else:\n self._last_refined_box = None\n\n print(t_box)\n print(yaw_box)\n if return_aligned_clouds:\n return t_box, yaw_box, self.reference[:, 0:3], obs_isolated\n\n return t_box, yaw_box\n\n\n # return a top view of the lidar image for frame\n # draw boxes for tracked objects if with_boxes is True\n #\n # SX are horizontal pixels of resulting image (vertical pixels maintain AR),\n # useful if you want to stack lidar below or above camera image\n #\n def top_view(self, frame, with_boxes=True, lidar_override=None, SX=None,\n zoom_to_box=False, side_view=False, randomize=False, distance=50.,\n rings=None, num_points=None, points_per_ring=None, deinterpolate=False):\n\n if with_boxes and zoom_to_box:\n assert self._boxes is not None\n box = self._boxes[frame][0] # first box for now\n cx = np.average(box[0,:])\n cy = np.average(box[1,:])\n X_SPAN = 16.\n Y_SPAN = 16.\n else:\n cx = 0.\n cy = 0.\n X_SPAN = distance*2.\n Y_SPAN = distance*2.\n\n X_RANGE = (cx - X_SPAN / 2., cx + X_SPAN / 2.)\n Y_RANGE = (cy - Y_SPAN / 2., cy + Y_SPAN / 2.)\n\n if SX is None:\n RES = 0.2\n Y_PIXELS = int(Y_SPAN / RES)\n else:\n Y_PIXELS = SX\n RES = Y_SPAN / SX\n X_PIXELS = int(X_SPAN / RES)\n\n top_view = np.zeros(shape=(X_PIXELS, Y_PIXELS, 3), dtype=np.float32)\n\n # convert from lidar x y to top view X Y\n def toY(y):\n return int((Y_PIXELS - 1) - (y - Y_RANGE[0]) // RES)\n\n def toX(x):\n return int((X_PIXELS - 1) - (x - X_RANGE[0]) // RES)\n\n def toXY(x, y):\n return (toY(y), toX(x))\n\n def inRange(x, y):\n return (x >= X_RANGE[0]) and (x < X_RANGE[1]) and (y >= Y_RANGE[0]) and (y < Y_RANGE[1])\n\n if lidar_override is not None:\n lidar = lidar_override\n else:\n if frame not in self.lidars:\n self._read_lidar(frame)\n assert frame in self.lidars\n lidar = self.lidars[frame]\n if rings is not None:\n lidar = lidar[np.in1d(lidar[:,4],np.array(rings, dtype=np.float32))]\n if num_points is not None:\n lidar = DidiTracklet.resample_lidar(lidar, num_points)\n if points_per_ring is not None:\n _, lidar, angle_at_edge = DidiTracklet.filter_lidar_rings(\n lidar,\n rings = range(32) if rings is None else rings,\n points_per_ring = points_per_ring,\n clip = (0., distance),\n return_lidar_interpolated = True if deinterpolate is False else False,\n return_lidar_deinterpolated= deinterpolate,\n return_angle_at_edges=True)\n\n if randomize:\n centroid = self.get_box_centroid(frame)\n\n perturbation = (np.random.random_sample((lidar.shape[0], 5)) * 2. - np.array([1., 1., 1., 1., 1.])) * \\\n np.expand_dims(np.clip(\n np.sqrt(((lidar[:, 0] - centroid[0]) ** 2) + ((lidar[:, 1] - centroid[1]) ** 2)) - 5.,\n 0., 20.), axis=1) * np.array([[2. / 20., 2. / 20., 0.1 / 20., 4. / 20., 0.]])\n lidar += perturbation\n\n if side_view:\n lidar = lidar[(lidar[:,1] >= -1.) 
& (lidar[:,1] <= 1.)]\n rot90X = np.array([[1, 0, 0, 0, 0], [0, 0, -1., 0, 0], [0, 1, 0, 0, 0], [0, 0, 0, 1, 0], [0, 0, 0, 0, 1]], dtype=np.float32)\n lidar = np.dot(lidar, rot90X)\n\n\n # if we have camera calibration, use it to paint visible points in white, and not visible in gray\n # otherwise, just paint all white\n if self.kitti_data.calib is not None:\n in_img, outside_img = self.__project(lidar, return_projection=False, return_velo_in_img=True,\n return_velo_outside_img=True)\n else:\n in_img = lidar\n outside_img = []\n\n for point in in_img:\n x, y = point[0], point[1]\n if inRange(x, y):\n if point[4] in range(10,25):\n top_view[toXY(x, y)[::-1]] = np.ones(3)# * point[4] / 32.\n\n for point in outside_img:\n x, y = point[0], point[1]\n if inRange(x, y):\n c = (0.2, 0.2, 0.2)\n if (self.LIDAR_ANGLE is not None) and (np.arctan2(x, np.absolute(y)) >= self.LIDAR_ANGLE):\n c = (0.5, 0.5, 0.5)\n top_view[toXY(x, y)[::-1]] = c\n\n if with_boxes:\n assert self._boxes is not None\n boxes = self._boxes[frame]\n new_boxes = []\n if side_view:\n for box in boxes:\n new_boxes.append(np.dot(box.T, rot90X[0:3, 0:3]).T)\n else:\n new_boxes = boxes\n if side_view:\n order = [0,2,6,4]\n else:\n order = [0,1,2,3]\n\n for box in new_boxes:\n # bounding box in image coords (x,y) defined by a->b->c->d\n a = np.array([toXY(box[0, order[0]], box[1, order[0]])])\n b = np.array([toXY(box[0, order[1]], box[1, order[1]])])\n c = np.array([toXY(box[0, order[2]], box[1, order[2]])])\n d = np.array([toXY(box[0, order[3]], box[1, order[3]])])\n\n assert len(self.tracklet_data) == 1 # only one tracklet supported for now!\n t = self.tracklet_data[0]\n if t.states[frame - t.first_frame] == tracklets.STATE_UNSET:\n box_color = np.float32([0., 0., 1.])\n else:\n box_color = np.float32([1., 0., 0.])\n\n cv2.polylines(top_view, [np.int32((a, b, c, d)).reshape((-1, 1, 2))], True, box_color.tolist(), thickness=1)\n\n box_color *= 0.5\n\n cv2.polylines(top_view, [np.int32((a,b,(d+b)/2.)).reshape((-1, 1, 2))], True, box_color.tolist(), thickness=1)\n\n lidar_in_box = DidiTracklet.get_lidar_in_box(lidar, box)\n # lidar_in_box = self._lidar_in_box(frame, box)\n for point in lidar_in_box:\n x, y = point[0], point[1]\n if inRange(x, y):\n top_view[toXY(x, y)[::-1]] = (0., 1., 1.)\n\n return top_view\n\n def __box_to_2d_box(self, box):\n box_in_img = self.__project(box.T, return_projection=True, dim_limit=None, return_velo_in_img=False,\n return_velo_outside_img=False)\n # some boxes are behind the viewpoint (eg. 
frame 70 @ drive 0036 ) and would return empty set of points\n # so we return an empty box\n if box_in_img.shape[0] != 8:\n return (0, 0), (0, 0)\n # print(\"lidar box\", box.T,\"in img\", box_in_img)\n dim_limit = self.im_dim\n # clip 2d box corners within image\n box_in_img[:, 0] = np.clip(box_in_img[:, 0], 0, dim_limit[0])\n box_in_img[:, 1] = np.clip(box_in_img[:, 1], 0, dim_limit[1])\n # get 2d bbox\n bbox_l = (np.amin(box_in_img[:, 0]), np.amin(box_in_img[:, 1]))\n bbox_h = (np.amax(box_in_img[:, 0]), np.amax(box_in_img[:, 1]))\n return bbox_l, bbox_h\n\n def __lidar_in_2d_box(self, lidar, box):\n bbox_l, bbox_h = self.__box_to_2d_box(box)\n # print(\"2d clipping box\", bbox_l, bbox_h, \"filtering\", lidar.shape)\n lidar_in_2d_box = self.__project(lidar,\n return_projection=False, dim_limit=bbox_h, dim_limit_zero=bbox_l,\n return_velo_in_img=True, return_velo_outside_img=False)\n # print(\"got\", lidar_in_2d_box.shape, \"in box\")\n return lidar_in_2d_box\n\n # returns lidar points that are inside a given box, or just the indexes\n def _lidar_in_box(self, frame, box, ignore_z=False, jitter_centroid = (0.,0.,0.), scale_xy=1., dropout=0.):\n if frame not in self.lidars:\n self._read_lidar(frame)\n assert frame in self.lidars\n\n lidar = self.lidars[frame]\n return DidiTracklet.get_lidar_in_box(lidar, box, ignore_z=ignore_z,\n jitter_centroid = jitter_centroid, scale_xy=scale_xy, dropout=dropout)\n\n # returns lidar points that are inside a given box, or just the indexes\n @staticmethod\n def get_lidar_in_box(lidar, box, return_idx_only=False, ignore_z=False, jitter_centroid=(0.,0.,0.), scale_xy=0., dropout=0.):\n\n p = lidar[:, :3]\n\n _jitter_centroid = (np.random.rand(3) - 0.5 ) * jitter_centroid\n _box = box + np.array(_jitter_centroid).reshape((3,1))\n\n if scale_xy != 0.:\n _centroid = np.mean(_box[:2,:], axis=1).reshape((2,1))\n _box[:2,:] = ((np.random.rand() - 0.5 ) * scale_xy + 1. ) * (_box[:2,:] - _centroid) + _centroid\n\n # determine if points in M are inside a rectangle defined by AB AD (AB and AD are orthogonal)\n # tdlr: they are iff (0<AMโ‹…AB<ABโ‹…AB)โˆง(0<AMโ‹…AD<ADโ‹…AD)\n # http://math.stackexchange.com/questions/190111/how-to-check-if-a-point-is-inside-a-rectangle\n a = np.array([_box[0, 0], _box[1, 0]])\n b = np.array([_box[0, 1], _box[1, 1]])\n d = np.array([_box[0, 3], _box[1, 3]])\n ab = b - a\n ad = d - a\n abab = np.dot(ab, ab)\n adad = np.dot(ad, ad)\n\n amab = np.squeeze(np.dot(np.array([p[:, 0] - a[0], p[:, 1] - a[1]]).T, ab.reshape(-1, 2).T))\n amad = np.squeeze(np.dot(np.array([p[:, 0] - a[0], p[:, 1] - a[1]]).T, ad.reshape(-1, 2).T))\n\n if ignore_z:\n in_box_idx = np.where(\n (abab >= amab) & (amab >= 0.) & (amad >= 0.) & (adad >= amad))\n else:\n min_z = _box[2, 0]\n max_z = _box[2, 4]\n in_box_idx = np.where(\n (abab >= amab) & (amab >= 0.) & (amad >= 0.) & (adad >= amad) & (p[:, 2] >= min_z) & (p[:, 2] <= max_z))\n\n in_box_idx = in_box_idx[0]\n\n if (dropout != 0.) and in_box_idx.shape[0] > 2:\n in_box_idx = np.random.choice(in_box_idx, size=int(in_box_idx.shape[0]*(1. 
- np.random.rand() * dropout)), replace=False)\n\n if return_idx_only:\n return in_box_idx\n\n points_in_box = lidar[in_box_idx, :]\n\n #points_in_box = np.squeeze(lidar[in_box_idx, :], axis=0)\n return points_in_box\n\n # given array of points with shape (N_points) and projection matrix w/ shape (3,4)\n # projects points onto a 2d plane\n # returns projected points (N_F_points,2) and\n # their LIDAR counterparts (N_F_points,3) (unless return_velo_in_img is set to False)\n #\n # N_F_points is the total number of resulting points after filtering (only_forward Z>0 by default)\n # and optionally filtering points projected into the image dimensions spec'd by dim_limit:\n #\n # Optionally providing dim_limit (sx,sy) limits projections that end up within (0-sx,0-sy)\n # only_forward to only get points with Z >= 0\n #\n def __project(self, points,\n dim_limit=(-1, -1),\n dim_limit_zero=(0, 0),\n only_forward=True,\n return_projection=True,\n return_velo_in_img=True,\n return_velo_outside_img=False,\n return_append=None):\n\n if dim_limit == (-1, -1):\n dim_limit = self.im_dim\n\n assert return_projection or return_velo_in_img\n\n K = self.kitti_data.calib.K_cam0 # cam2 or cam0\n R = np.eye(4)\n R[0:3, 0:3] = K\n T = np.dot(R, self.kitti_data.calib.T_cam2_velo)[0:3]\n px = points\n\n if only_forward:\n only_forward_filter = px[:, 0] >= 0.\n px = px[only_forward_filter]\n if points.shape[1] < T.shape[1]:\n px = np.concatenate((px, np.ones(px.shape[0]).reshape(-1, 1)), axis=1)\n projection = np.dot(T, px.T).T\n\n norm = np.dot(projection[:, T.shape[0] - 1].reshape(-1, 1), np.ones((1, T.shape[0] - 1)))\n projection = projection[:, 0:T.shape[0] - 1] / norm\n\n if dim_limit is not None:\n x_limit, y_limit = dim_limit[0], dim_limit[1]\n x_limit_z, y_limit_z = dim_limit_zero[0], dim_limit_zero[1]\n only_in_img = (projection[:, 0] >= x_limit_z) & (projection[:, 0] < x_limit) & (\n projection[:, 1] >= y_limit_z) & (projection[:, 1] < y_limit)\n projection = projection[only_in_img]\n if return_velo_in_img:\n if return_velo_outside_img:\n _px = px[~ only_in_img]\n px = px[only_in_img]\n if return_append is not None:\n appended = return_append[only_forward_filter][only_in_img]\n assert return_projection and return_velo_in_img\n return (projection, np.concatenate((px[:, 0:3], appended.reshape(-1, 1)), axis=1).T)\n if return_projection and return_velo_in_img:\n return (projection, px)\n elif (return_projection is False) and (return_velo_in_img):\n if return_velo_outside_img:\n return px, _px\n else:\n return px\n return projection\n\n def build_height_features(self, point_cam_in_img):\n assert False # function not tested\n\n height_features = np.zeros(\n shape=(int((MAX_Z - MIN_Z) / HEIGHT_F_RES), int((MAX_X - MIN_X) / HEIGHT_F_RES), M + 2), dtype=np.float32)\n max_height_per_cell = np.zeros_like(height_features[:, :, 1])\n for p in point_cam_in_img.T:\n x = p[0]\n y = MAX_HEIGHT - np.clip(p[1], MIN_HEIGHT, MAX_HEIGHT)\n z = p[2]\n if (x >= MIN_X) and (x < MAX_X) and (z >= MIN_Z) and (z < MAX_Z):\n m = int(y // M_HEIGHT)\n xi = int((x + MIN_X) // HEIGHT_F_RES)\n zi = int((z - MIN_Z) // HEIGHT_F_RES)\n height_features[zi, xi, m] = max(y, height_features[zi, xi, m])\n if y >= max_height_per_cell[zi, xi]:\n max_height_per_cell[zi, xi] = y\n height_features[zi, xi, M] = p[3] # intensity\n height_features[zi, xi, M + 1] += 1\n log64 = np.log(64)\n height_features[:, :, M + 1] = np.clip(np.log(1 + height_features[:, :, M + 1]) / log64, 0., 1.)\n return height_features\n\n\ndef 
build_front_view_features(point_cam_in_img):\n delta_theta = 0.08 / (180. / np.pi) # horizontal resolution\n delta_phi = 0.4 / (180. / np.pi) # vertical resolution as per http://velodynelidar.com/hdl-64e.html\n\n c_projection = np.empty((point_cam_in_img.shape[1], 5)) # -> c,r,height,distance,intensity\n points = point_cam_in_img.T\n # y in lidar is [0] in cam (x)\n # x in lidar is [2] in cam (z)\n # z in lidar is [1] in cam (y)\n c_range = (-40 / (180. / np.pi), 40 / (180. / np.pi))\n r_range = (-2.8 / (180. / np.pi), 15.3 / (180. / np.pi))\n\n c_projection[:, 0] = np.clip(\n np.arctan2(points[:, 0], points[:, 2]),\n c_range[0], c_range[1]) # c\n c_projection[:, 1] = np.clip(\n np.arctan2(points[:, 1], np.sqrt(points[:, 2] ** 2 + points[:, 0] ** 2)),\n r_range[0], r_range[1]) # r\n c_projection[:, 2] = MAX_HEIGHT - np.clip(points[:, 1], MIN_HEIGHT, MAX_HEIGHT) # height\n c_projection[:, 3] = np.sqrt(points[:, 0] ** 2 + points[:, 1] ** 2 + points[:, 2] ** 2) # distance\n c_projection[:, 4] = points[:, 3]\n\n c_norm = np.zeros((C_H, C_W, 3))\n c_norm[np.int32((C_H - 1) * (c_projection[:, 1] - r_range[0]) // (r_range[1] - r_range[0])), np.int32(\n (C_W - 1) * (c_projection[:, 0] - c_range[0]) // (c_range[1] - c_range[0]))] = c_projection[:,\n 2:5] # .reshape(-1,3)\n\n return c_norm\n" }, { "alpha_fraction": 0.6115981340408325, "alphanum_fraction": 0.6115981340408325, "avg_line_length": 24.586206436157227, "blob_id": "b41dcab91efc2d4abb174f6dd2b033192e69eb3f", "content_id": "ce6bca0926426d252d2a944a86d6d4761fc10489", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1483, "license_type": "no_license", "max_line_length": 76, "num_lines": 58, "path": "/tracklets/python/didipykitti.py", "repo_name": "antorsae/didi-competition", "src_encoding": "UTF-8", "text": "\"\"\"Provides 'raw', which loads and parses raw KITTI data.\"\"\"\n\nimport datetime as dt\nimport glob\nimport os\nfrom collections import namedtuple\n\nimport numpy as np\n\n#import pykitti.utils as utils\n\n\nclass raw:\n \"\"\"Load and parse raw data into a usable format.\"\"\"\n\n def __init__(self, base_path, date, drive, frame_range=None):\n \"\"\"Set the path.\"\"\"\n self.path = os.path.join(base_path, date, drive)\n self.frame_range = frame_range\n self.velo = []\n\n def _load_calib_rigid(self, filename):\n assert False\n\n def _load_calib_cam_to_cam(self, velo_to_cam_file, cam_to_cam_file):\n assert False\n\n def load_calib(self):\n self.calib = None\n return # TODO\n\n def load_timestamps(self):\n assert False\n\n def _poses_from_oxts(self, oxts_packets):\n assert False\n\n def load_oxts(self):\n assert False\n\n def load_gray(self, **kwargs):\n assert False\n\n def load_rgb(self, **kwargs):\n assert False\n\n def load_velo(self):\n \"\"\"Load velodyne [x,y,z,reflectance] scan data from binary files.\"\"\"\n # Find all the Velodyne files\n velo_path = os.path.join(self.path, 'lidar', '*.npy')\n velo_files = sorted(glob.glob(velo_path))\n\n # Subselect the chosen range of frames, if any\n if self.frame_range:\n velo_files = [velo_files[i] for i in self.frame_range]\n\n for velo_file in velo_files:\n self.velo.append(np.load(velo_file))" }, { "alpha_fraction": 0.7069892287254333, "alphanum_fraction": 0.725806474685669, "avg_line_length": 73.4000015258789, "blob_id": "82ad0deaa5e9aa2c93cc9c4422b2e3053f652057", "content_id": "dc1a3f1bbc2608b97390394f8083d0e0ae8ce2d7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 
372, "license_type": "no_license", "max_line_length": 265, "num_lines": 5, "path": "/tracklets/python/README.md", "repo_name": "antorsae/didi-competition", "src_encoding": "UTF-8", "text": "### View tracklets\n\n```python tracklet_view.py -f 1/8_f -z -xi tracklet_labels_trainable.xml -sw 1.2```\n\nwill load `tracklet_labels_trainable.xml` from directory `../../../../didi-data/release2/Data-points-processed/1/8_f` and display it on screen (requires (pyqtgraph)[http://www.pyqtgraph.org/] as well as prints out the points belonging to the ground truth to stdout.\n" }, { "alpha_fraction": 0.5875537991523743, "alphanum_fraction": 0.6004634499549866, "avg_line_length": 38.400001525878906, "blob_id": "42d10e5f87de97327a8a0be5e8f055238221d6fc", "content_id": "0979af7f86d0a1d44aa6cd09faa077dc4e6d6066", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9063, "license_type": "no_license", "max_line_length": 198, "num_lines": 230, "path": "/tracklets/python/point_utils.py", "repo_name": "antorsae/didi-competition", "src_encoding": "UTF-8", "text": "import numpy as np\nfrom scipy.linalg import expm3, norm\nimport scipy.optimize\nfrom scipy.spatial.distance import cdist\n\nZ_SEARCH_SLICE = 0.02\n\ndef remove_orientation(yaw):\n yaw = np.fmod(yaw, np.pi)\n if yaw >= np.pi / 2.:\n yaw -= np.pi\n elif yaw <= -np.pi / 2.:\n yaw += np.pi\n assert (yaw <= (np.pi / 2.)) and (yaw >= (-np.pi / 2.))\n return yaw\n\ndef M(axis, theta):\n return expm3(np.cross(np.eye(3), axis / norm(axis) * theta))\n\ndef rotate(points, axis, theta):\n if points.ndim == 1:\n return np.squeeze(np.dot(np.expand_dims(points[0:3], axis=0), M(axis, theta)), axis=0)\n else:\n return np.dot(points[:, 0:3], M(axis, theta))\n\ndef nearest_neighbor(src, dst):\n '''\n Find the nearest (Euclidean) neighbor in dst for each point in src\n Input:\n src: Nx3 array of points\n dst: Nx3 array of points\n Output:\n distances: Euclidean distances of the nearest neighbor\n indices: dst indices of the nearest neighbor\n '''\n\n distances = np.empty(src.shape[0], dtype=np.float32)\n indices = np.empty(src.shape[0], dtype=np.int32)\n\n pending_points = np.array(src)\n\n while pending_points.shape[0] > 0:\n z = pending_points[0,2]\n points_in_slice = np.flatnonzero((src[:,2] > (z-Z_SEARCH_SLICE/2.)) & (src[:,2] < (z+Z_SEARCH_SLICE/2.)))\n pending_points_in_slice = (pending_points[:,2] > (z-Z_SEARCH_SLICE/2.)) & (pending_points[:,2] < (z+Z_SEARCH_SLICE/2.))\n src_slice = pending_points[pending_points_in_slice]\n dst_i = np.flatnonzero((dst[:,2] >= (z-Z_SEARCH_SLICE)) & (dst[:,2] <= (z+Z_SEARCH_SLICE)))\n dst_f = dst[dst_i]\n all_dists = cdist(src_slice, dst_f, 'euclidean')\n index = all_dists.argmin(axis=1)\n distances[points_in_slice] = all_dists[np.arange(all_dists.shape[0]), index]\n indices[points_in_slice] = dst_i[index]\n pending_points = pending_points[~pending_points_in_slice]\n\n return distances, indices\n\ndef rotZ(points, yaw):\n\n _rotMat = np.array([\n [np.cos(yaw), -np.sin(yaw)],\n [np.sin(yaw), np.cos(yaw)]])\n order = points.shape[points.ndim-1]\n rotMat = np.eye(order)\n rotMat[:2,:2] = _rotMat[:2,:2]\n return np.dot(points, rotMat)\n\ndef norm_nearest_neighbor(t, src, dst, search_yaw=False):\n _t = np.empty(3)\n _t[:2] = t[:2]\n _t[2] = 0\n if search_yaw:\n yaw = t[2]\n distances, _ = nearest_neighbor(_t + rotZ(src,yaw), dst)\n else:\n\n distances, _ = nearest_neighbor(_t + src, dst)\n return np.sum(distances) / src.shape[0]\n\ndef icp(A, B, search_yaw=False):\n result = 
scipy.optimize.minimize(\n norm_nearest_neighbor,\n np.array([0.,0.,3*np.pi/4.-np.pi/2.]) if search_yaw else np.array([0.,0.]),\n args=(A, B, search_yaw),\n method='Powell')\n if search_yaw:\n t = np.empty(4)\n t[:2] = result.x[:2]\n t[2] = 0\n t[3] = result.x[2]\n distances, _ = nearest_neighbor(t[:3] + rotZ(A, t[3]), B)\n else:\n t = np.empty(3)\n t[:2] = result.x[:2]\n t[2] = 0\n distances, _ = nearest_neighbor(t[:3] + A, B)\n return t, distances\n\nclass ICP(object):\n\n '''\n 2D linear least squares using the hesse normal form:\n d = x*sin(theta) + y*cos(theta)\n which allows you to have vertical lines.\n '''\n def __init__(self, search_yaw=False):\n self.search_yaw = search_yaw\n if search_yaw:\n print(\"YAW SEARCH\")\n\n def fit(self, first, reference):\n _icp = icp(first, reference, self.search_yaw)\n return _icp[0]\n\n def residuals(self, t, first, reference):\n if self.search_yaw:\n distances, _ = nearest_neighbor(rotZ(first,t[3]) + t[:3], reference)\n else:\n distances, _ = nearest_neighbor( first + t[:3], reference)\n\n return np.abs(distances)\n\n\ndef ransac(first, reference, model_class, min_percent_fist, threshold):\n '''\n Fits a model to data with the RANSAC algorithm.\n :param data: numpy.ndarray\n data set to which the model is fitted, must be of shape NxD where\n N is the number of data points and D the dimensionality of the data\n :param model_class: object\n object with the following methods implemented:\n * fit(data): return the computed model\n * residuals(model, data): return residuals for each data point\n * is_degenerate(sample): return boolean value if sample choice is\n degenerate\n see LinearLeastSquares2D class for a sample implementation\n :param min_samples: int\n the minimum number of data points to fit a model\n :param threshold: int or float\n maximum distance for a data point to count as an inlier\n :param max_trials: int, optional\n maximum number of iterations for random sample selection, default 1000\n :returns: tuple\n best model returned by model_class.fit, best inlier indices\n '''\n\n min_samples = int(min_percent_fist * first.shape[0])\n\n min_reference_z = np.amin(reference[:,2])\n max_reference_z = np.amax(reference[:,2])\n\n first_points = first.shape[0]\n\n # keep points that have a matching slices in the reference object\n first = first[(first[:,2] > (min_reference_z - Z_SEARCH_SLICE)) & (first[:,2] < (max_reference_z + Z_SEARCH_SLICE))]\n\n if first_points != first.shape[0]:\n print(\"Removed \" + str(first_points - first.shape[0]) + \" points due to Z cropping\")\n\n\n if first.shape[0] > 1:\n print(\"Fitting \" + str(first.shape[0]) + \" points to reference object\")\n else:\n print(\"No points to fit, returning\")\n return None, None\n\n best_model = None\n best_inlier_num = 0\n best_inliers = None\n best_model_inliers_residua = 1e100\n second_best_model = None\n second_best_inlier_num = 0\n second_best_inliers = None\n second_best_model_inliers_residua = 1e100\n second_best_score = 0\n\n first_idx = np.arange(first.shape[0])\n import scipy.cluster.hierarchy\n Z = scipy.cluster.hierarchy.linkage(first, 'single')\n max_d = 0.5\n clusters = scipy.cluster.hierarchy.fcluster(Z, max_d, criterion='distance')\n unique_clusters = np.unique(clusters)\n seen_clusters_idx = []\n for cluster in unique_clusters:\n sample_idx = np.where(clusters==cluster)\n sample_idx = sample_idx[0].tolist()\n sample = first[sample_idx]\n print(\"Trying cluster \" + str(cluster) + \" / \" + str(len(unique_clusters)) + \" with \" + str(len(sample_idx)) + \" 
points\")\n\n max_attempts = 10\n while max_attempts > 0:\n # check if we'e already looked at this set of points, if so, skip it\n if set(sample_idx) not in seen_clusters_idx:\n sample_model = model_class.fit(sample, reference)\n sample_model_residua = model_class.residuals(sample_model, first, reference)\n sample_model_inliers = first_idx[sample_model_residua<threshold]\n\n inlier_num = sample_model_inliers.shape[0]\n print(\"Inliers: \" + str(inlier_num) + \" / \" + str(first_points))\n sample_model_inliers_residua = np.sum(sample_model_residua[sample_model_residua<threshold]) / inlier_num\n if (inlier_num >= min_samples) and (sample_model_inliers_residua < best_model_inliers_residua):\n best_inlier_num = inlier_num\n best_inliers = sample_model_inliers\n best_model = sample_model\n best_model_inliers_residua = sample_model_inliers_residua\n elif (inlier_num / sample_model_inliers_residua) > second_best_score: #(inlier_num >= second_best_inlier_num) and (sample_model_inliers_residua < second_best_model_inliers_residua):\n second_best_score = inlier_num / sample_model_inliers_residua\n second_best_inlier_num = inlier_num\n second_best_inliers = sample_model_inliers\n second_best_model = sample_model\n second_best_model_inliers_residua = sample_model_inliers_residua\n\n seen_clusters_idx.append(set(sample_idx))\n\n # keep searching if there's enough inliers and there's other inliers than those\n # used to fit the model\n if (inlier_num < min_samples) or np.all(np.in1d(sample_model_inliers, sample_idx)):\n break\n else:\n sample_idx = sample_model_inliers\n sample = first[sample_idx]\n max_attempts -= 1\n\n if best_model is not None:\n model, inliers, inlier_num, residua = best_model, best_inliers, best_inlier_num, best_model_inliers_residua\n else:\n model, inliers, inlier_num, residua = second_best_model, second_best_inliers, second_best_inlier_num, second_best_model_inliers_residua\n\n print(\"Selected \" + str(inlier_num) + \" / \" + str(first_points) + \" inliers with \" + str(residua) + \" residua\")\n\n return model,inliers\n\n" } ]
15
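The point_utils.py listing in the record above ends with its fitting pipeline: icp() recovers a 2D translation (optionally a yaw) by minimizing the mean nearest-neighbour distance with Powell's method, and ransac() wraps that fit in a cluster-seeded consensus loop. A minimal, self-contained sketch of the translation-only idea follows; the synthetic 2D clouds and brute-force cdist matching are illustrative stand-ins for the file's z-sliced lidar search, not the author's implementation.

```python
# Illustration, not the repo's code: recover a rigid 2D translation by
# minimizing the mean nearest-neighbour distance, as icp() above does.
import numpy as np
from scipy.optimize import minimize
from scipy.spatial.distance import cdist

def mean_nn_distance(t, src, dst):
    # Average distance from each translated source point to its nearest
    # reference point (brute force; the original slices by z first).
    return cdist(src + t, dst).min(axis=1).mean()

rng = np.random.RandomState(0)
reference = rng.rand(200, 2) * 4.0                  # reference cloud
observed = reference[:120] + np.array([1.5, -0.7])  # shifted partial view

res = minimize(mean_nn_distance, x0=np.zeros(2),
               args=(observed, reference), method='Powell')
print("fitted offset:", res.x)           # ~ [-1.5, 0.7]
print("recovered translation:", -res.x)  # ~ the applied [1.5, -0.7]
```

The real code additionally restricts matches to thin z slices (Z_SEARCH_SLICE) around each query point and scores RANSAC candidates by inlier count and mean inlier residual, which is what makes it robust to partial, noisy lidar views.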
Lyken17/semantic-segmentation-pytorch
https://github.com/Lyken17/semantic-segmentation-pytorch
c57e2b3948e044b30c55c0dda65960f42c3baded
26377d418c2cdfec1b03a04a25622a87a2bc2e05
4207dd099cbf7d3d63127ea2f7930399546bf8da
refs/heads/master
2020-03-28T13:14:28.972453
2018-09-18T03:43:50
2018-09-18T03:43:50
148,377,263
0
0
null
2018-09-11T20:38:32
2018-09-11T12:28:14
2018-09-10T23:37:53
null
[ { "alpha_fraction": 0.507317066192627, "alphanum_fraction": 0.5634146332740784, "avg_line_length": 20.578947067260742, "blob_id": "0aa6e9e1597fd2f21b30fd85b7bed7beaf493f63", "content_id": "5ccfae0869c42b4bcfbf7f9851b33769780c4c65", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1230, "license_type": "permissive", "max_line_length": 80, "num_lines": 57, "path": "/test2.py", "repo_name": "Lyken17/semantic-segmentation-pytorch", "src_encoding": "UTF-8", "text": "import json\n\nimport torch\n\nfrom models.MobileNets import mobilenet_v2\nfrom models.resnet import resnet50\n\n\nfrom models.models import Resnet, ResnetDilated\n#\n# n1 = resnet50()\n# n11 = Resnet(n1)\n\nn2 = mobilenet_v2()\ndevice = \"cpu\"\nif torch.cuda.is_available():\n\tdevice = \"cuda\"\nprint(device)\nn2.to(device)\n# n22 = mnet(n2)\n#\n# data = torch.zeros(1, 3, 600, 454)\n#\n# o1 = n11(data)\n# o2 = n22(data)\n#\n# [print(_.size()) for _ in o1]\n# print(\"==============================================================\")\n# [print(_.size()) for _ in o2]\n#\n#\n# print(n2)\n\n# dummy_input = torch.zeros(1, 3, 224, 224)\n# torch.onnx.export(n2, dummy_input,\"export.onnx\", verbose=True, )\nbatch_sizes = [16, 32, 64, 128, 256]\n\nfor bs in batch_sizes:\n\tdummy = torch.randn(bs, 3, 224, 224).to(device)\n\n\t# warm up\n\timport time\n\tstart = time.time()\n\twarm_up_runs = 50\n\tfor i in range(warm_up_runs):\n\t\tout = n2(dummy)\n\tend = time.time()\n\tprint(\"%d duration %.4f \" % (bs, (end - start) / warm_up_runs))\n\n\n\tstart = time.time()\n\twarm_up_runs = 200\n\tfor i in range(warm_up_runs):\n\t\tout = n2(dummy)\n\tend = time.time()\n\tprint(\"%d duration %.4f \" % (bs, (end - start) / warm_up_runs))\n\tprint(\"======================================================================\")\n" } ]
1
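test2.py in the record above times forward passes with bare time.time() calls. On a GPU that is unreliable: CUDA kernel launches are asynchronous, so the timer can stop before the queued work has finished, and running with autograd enabled adds bookkeeping to every pass. A sketch of a tighter loop follows; torchvision's mobilenet_v2 stands in for the repo's models.MobileNets import (an assumption — it requires a torchvision build that ships that model).

```python
# Sketch only: synchronized, no-grad latency measurement. The model is a
# torchvision stand-in for the repo's own mobilenet_v2 (assumption).
import time
import torch
import torchvision

device = "cuda" if torch.cuda.is_available() else "cpu"
model = torchvision.models.mobilenet_v2().to(device).eval()

for bs in [16, 32, 64]:
    dummy = torch.randn(bs, 3, 224, 224, device=device)
    with torch.no_grad():
        for _ in range(10):              # warm-up, not timed
            model(dummy)
        if device == "cuda":
            torch.cuda.synchronize()     # drain queued kernels before timing
        start = time.time()
        runs = 50
        for _ in range(runs):
            model(dummy)
        if device == "cuda":
            torch.cuda.synchronize()     # wait for the last pass to finish
        print("bs %d: %.4f s/iter" % (bs, (time.time() - start) / runs))
```

Calling model.eval() also matters here: it switches batch norm to its stored statistics, so the timed passes reflect deployment behavior rather than training-mode normalization.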
RobRight/Personal
https://github.com/RobRight/Personal
7ac5f6920e36b4b2cb4c1db4d2c8cf71e52e5a86
bb1e7116b178a88229b6866068372edff42627ed
36226617c33ee467fbf1c70cbe05450dd1e29502
refs/heads/master
2017-10-07T12:27:05.033752
2017-04-12T00:39:42
2017-04-12T00:39:42
63,233,318
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.5501672029495239, "alphanum_fraction": 0.5692785382270813, "avg_line_length": 25.5, "blob_id": "faa118453607d1f694e122f49d64e64793ea50e5", "content_id": "3aafe31c883b8f8ccaa727138332b8133d785021", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 4186, "license_type": "no_license", "max_line_length": 111, "num_lines": 158, "path": "/_spark_car/main/vs_0_9_2/carSensor.cpp", "repo_name": "RobRight/Personal", "src_encoding": "UTF-8", "text": "/*\n * William Gregory \n *\n * Date created: 4.23.2015\n * Last updated: 6.18.2015\n * \n * Library for:\n * Spark Car distence sensor management and interpratation\n *\n * Objects:\n * function inturpretSensors() returns a move sugestion\n * varialbe far_wall is 1 for nearby objects or 0 if clear\n *\n * Movements: \n * if close front wall - move back or move back and turn or stop or just dont go forward?\n * if close back wall -\n * if close right wall - turn left\n * if close left wall - turn right\n * \n * Files:\n * carSensor.h\n * carSensor.cpp\n*/\n\n#include \"carSensor.h\"\n\nint sampleNumber = 100;\n\n// construct\nSensor::Sensor(int _val1, int _val2, int _val3, int _val4, int _val5)\n{\n dist_short_front_pin = _val1; // front short range distence sensor\n dist_long_front_pin = _val2; // front long range distence sensor\n dist_short_right_pin = _val3; // right short range distence sensor\n dist_short_left_pin = _val4; // left short range distence sensor\n dist_medium_back_pin = _val5; // back medeium range distence sensor\n \n setPinModes();\n}\n\n//-----------------------------------------------------------------------------------------------------\n\n// set pin modes\nvoid Sensor::setPinModes()\n{\n pinMode(dist_short_right_pin, INPUT);\n pinMode(dist_short_left_pin, INPUT);\n pinMode(dist_short_front_pin, INPUT);\n pinMode(dist_medium_back_pin, INPUT);\n pinMode(dist_long_front_pin, INPUT);\n}\n\n// get raw sensor values\nvoid Sensor::getValues()\n{\n raw_dist_short_right = analogRead(dist_short_right_pin);\n raw_dist_short_left = analogRead(dist_short_left_pin);\n raw_dist_short_front = analogRead(dist_short_front_pin);\n raw_dist_medium_back = analogRead(dist_medium_back_pin);\n raw_dist_long_front = analogRead(dist_long_front_pin);\n}\n\n\n// average the sensor values\nvoid Sensor::averageValues()\n{\n getValues();\n avg_dist_short_right = raw_dist_short_right;\n avg_dist_short_left = raw_dist_short_left;\n avg_dist_short_front = raw_dist_short_front;\n avg_dist_medium_back = raw_dist_medium_back;\n avg_dist_long_front = raw_dist_long_front;\n \n for(int x = 0; x < sampleNumber; x++)\n {\n getValues();\n avg_dist_short_right = avg_dist_short_right + raw_dist_short_right / 2;\n }\n \n for(int x = 0; x < sampleNumber; x++)\n {\n getValues();\n avg_dist_short_left = avg_dist_short_left + raw_dist_short_left / 2;\n }\n \n for(int x = 0; x < sampleNumber; x++)\n {\n getValues();\n avg_dist_short_front = avg_dist_short_front + raw_dist_short_front / 2;\n //Serial.println(avg_dist_short_front);\n }\n \n for(int x = 0; x < sampleNumber; x++)\n {\n getValues();\n avg_dist_medium_back = avg_dist_medium_back + raw_dist_medium_back / 2;\n }\n \n for(int x = 0; x < sampleNumber; x++)\n {\n getValues();\n avg_dist_long_front = avg_dist_long_front + raw_dist_long_front / 2;\n }\n}\n\n// Decision Tree and wall detection\n//-----------------------------------------------------------------------------------------------------\n\n// 0: coast stop, 1: brake stop, 2: forward(speed), 
3: backward(speed), 4: turnRight(speed), 5: turnLeft(speed)\nint Sensor::interpretSensors()\n{\n averageValues();\n \n // reset values\n move = -1;\n far_wall = 0;\n front_wall = 0;\n back_wall = 0;\n right_wall = 0;\n left_wall = 0;\n\n // front short sensor\n if(avg_dist_short_front > 300)\n {\n front_wall = 1;\n move = 4; // close front wall, turn right\n }\n \n // front long sensor\n if(avg_dist_long_front > 200) far_wall = 1; // far front wall\n \n // back medium\n if(avg_dist_medium_back > 600) // far back wall\n {\n far_wall = 1;\n if(avg_dist_medium_back > 1200)\n {\n back_wall = 1;\n move = 2; // close back wall, go forward\n }\n }\n \n // right short\n if(avg_dist_short_right > 300)\n {\n right_wall = 1;\n move = 5; // close right wall, go left\n }\n \n // left short\n if(avg_dist_short_left > 300)\n {\n left_wall = 1;\n move = 4; // close left wall, go right\n }\n \n return move;\n}" }, { "alpha_fraction": 0.6438356041908264, "alphanum_fraction": 0.6826484203338623, "avg_line_length": 19.904762268066406, "blob_id": "b08d06489fb87802a911b5b05a28a0f36d604893", "content_id": "b26d32028c8180e00f97a7bfe48d6a0f3a57bc0b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 438, "license_type": "no_license", "max_line_length": 79, "num_lines": 21, "path": "/hailstone_sequence/README.md", "repo_name": "RobRight/Personal", "src_encoding": "UTF-8", "text": "# Hailstone Sequence\n\n## Python 2.7 tested\n\n### Info:\n#### Hailstone sequence:\n* even: divide by two. (x/2)\n* odd: multiply by three and add one. (x*3+1) (x+1)?\n\nStarting with any positive integer, after n rounds you will get one (1). <br />\nUnproven but has never failed. Personally tested from 1 to 100,000,000\n\n### Settings:\n* start_num\n* end_num\n* use_threads\n* thread_num\n* print_while\n* print_after\n* max_round\n* progress_breaks" }, { "alpha_fraction": 0.5785256624221802, "alphanum_fraction": 0.5862179398536682, "avg_line_length": 22.276119232177734, "blob_id": "961d408436ae6e1335f12926d03499776a23dbdf", "content_id": "f7f674fd6437703c2cfb77777e225dfe03762487", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 3120, "license_type": "no_license", "max_line_length": 154, "num_lines": 134, "path": "/matrix/matrix.hpp", "repo_name": "RobRight/Personal", "src_encoding": "UTF-8", "text": "\n// Matrix Class\n\n#ifndef _MATRIX_CLASS_\n#define _MATRIX_CLASS_\n\n#include <vector>\n\n// matrix at(row).at(col)\n// r\n// r\n// r\n\nclass Matrix()\n{\npublic:\n\t// user define\n\tstd::vector <std::vector <double> > user_define_matrix();\n\t// display\n\tvoid display_matrix(std::vector <std::vector <double> >);\n\t// add and subtract\n\tstd::vector <std::vector <double> > math_add_sub_matrix(std::vector <std::vector <double> >, std::vector <std::vector <double> >, bool);\n\t// multipy and divide\n\tstd::vector <std::vector <double> > math_multipy_divide_matrix(std::vector <std::vector <double> >, std::vector <std::vector <double> >);\n};\n\nstd::vector <std::vector <double > > Matrix::user_define_matrix()\n{\n\tstd::vector <std::vector <double> > t_matrix;\n\n\tunsigned int t_col;\n\tunsigned int t_row;\n\t// column input\n\ttry {\n\t\tstd::cout << \"Enter matrix columns: \";\n\t\tstd::cin >> t_col;\n\t}\n\tcatch {\n\t\tstd::cout << \"Input must be a positive integer. 
Try again.\";\n\t}\n\n\t// row input\n\ttry {\n\t\tstd::cout << \"Enter matrix rows: \";\n\t\tstd::cin >> t_row;\n\t}\n\tcatch {\n\t\tstd::cout << \"Input must be a positive integer. Try again.\";\n\t}\n\n\t// matrix input\n\tfor (int r=0; r<t_row; r++)\n\t{ // rows\n\t\tstd::vector <double> t_row;\n\t\tfor (int c=0; c<t_col; c++)\n\t\t{ // columns\n\t\t\tdouble t_val;\n\t\t\ttry {\n\t\t\t\tstd::cout << \"Enter value for row \" << r << \" and column \" << c << \": \";\n\t\t\t\tstd::cin >> t_val;\n\t\t\t}\n\t\t\tcatch {\n\t\t\t\tstd::cout << \"Input must be a double. Try again.\";\n\t\t\t}\n\t\t\tt_row.push_back(t_val);\n\t\t}\n\t\tt_matrix.push_back(t_row);\n\t}\n\treturn t_matrix;\n}\n\n// display\nvoid Matrix::display_matrix(std::vector <std::vector <double> > in_matrix)\n{\n\tstd::cout << \"\\nmatrix: \" std::endl;\n\tfor (int r=0; r<in_matrix.size(); r++)\n\t{\n\t\tfor (int c=0; c<in_matrix.at(r).size(); c++)\n\t\t{\n\t\t\tstd::cout << in_matrix.at(r).at(c) << \" \";\n\t\t}\n\t\tstd::cout << std:endl;\n\t}\n}\n\n// add and subtract (m1, m2, mod)\n// mod (true:add, false:subtract)\nstd::vector <std::vector <double> > math_add_sub_matrix(std::vector <std::vector <double> > in_m1, std::vector <std::vector <double> > in_m2, bool in_mod)\n{\n\tstd::vector <std::vector <double> > t_matrix;\n\tfor (int r=0; r<in_matrix.size(); r++)\n\t{\n\t\tstd::vector <double> t_row;\n\t\tfor (int c=0; c<in_matrix.at(r).size(); c++)\n\t\t{\n\t\t\tdouble t_val;\n\t\t\tif (in_mod)\n\t\t\t\t// add\n\t\t\t\tt_val = in_m1.at(r).at(c) + in_m2.at(r).at(t);\n\t\t\telse\n\t\t\t\t// subtract\n\t\t\t\tt_val = in_m1.at(r).at(c) - in_m2.at(r).at(t);\n\t\t\tt_row.push_back(t_val);\n\t\t}\n\t\tt_matrix.push_back(t_row);\n\t}\n\treturn t_matrix;\n}\n\n// multipy (m1, m2, mod)\n// mod (true:multiply, false:divide)\nstd::vector <std::vector <double> > math_multipy_divide_matrix(std::vector <std::vector <double> > in_m1, std::vector <std::vector <double> > in_m2)\n{\n\tstd::vector <std::vector <double> > t_matrix;\n\tfor (int r=0; r<in_matrix.size(); r++)\n\t{\n\t\tstd::vector <double> t_row;\n\t\tfor (int c=0; c<in_matrix.at(r).size(); c++)\n\t\t{\n\t\t\tdouble t_val;\n\t\t\tif (in_mod)\n\t\t\t\t// multiply\n\t\t\t\tt_val = in_m1.at(r).at(c) * in_m2.at(r).at(t);\n\t\t\telse\n\t\t\t\t// divide\n\t\t\t\tt_val = in_m1.at(r).at(c) / in_m2.at(r).at(t);\n\t\t\tt_row.push_back(t_val);\n\t\t}\n\t\tt_matrix.push_back(t_row);\n\t}\n\treturn t_matrix;\n\n}\n\n#endif\n" }, { "alpha_fraction": 0.4863070547580719, "alphanum_fraction": 0.5037344694137573, "avg_line_length": 15.957746505737305, "blob_id": "4cf1046ca1dee5f4f3a1cd1a8b6377fca2547b47", "content_id": "e47f5ebe3f328824a5b81f9c44e6580cbf9fedb1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1205, "license_type": "no_license", "max_line_length": 47, "num_lines": 71, "path": "/test_code/cpp/pointers/main.cpp", "repo_name": "RobRight/Personal", "src_encoding": "UTF-8", "text": "\n// Poiner Exercise\n// WG091316\n\n#include <iostream>\n\n//\nstruct foo\n{\n\tint x;\n\tdouble y;\t\n};\n\nvoid dash_arrow_thing()\n{\n\tstruct foo var;\n\tstruct foo* pvar;\n\t\n\tvar.x = 5;\n\t(&var)->y = 15.5;\n\n\t//(*pvar).x = 10;\n\t//pvar->y = 10.1;\n\n\tstd::cout << \"x: \" << var.x << std::endl;\n\tstd::cout << \"y: \" << var.y << std::endl;\n\t//std::cout << \"px: \" << pvar->x << std::endl;\n\t//std::cout << \"py: \" << pvar->y << std::endl;\n}\n\n//\nvoid ab()\n{\n\tint a;\n\tint b;\n\tstd::cout << \"\\nEnter value a: \";\n\tstd::cin >> 
a;\n\tif (std::cin.fail()) {\n\t\tstd::cin.clear();\n\t\tstd::cin.ignore(256, '\\n');\n\t}\n\tstd::cout << \"Enter value b: \";\n\tstd::cin >> b;\n\tif (std::cin.fail()) {\n\t\tstd::cin.clear();\n\t\tstd::cin.ignore(256, '\\n');\n\t}\n\t\n\tint *a_ptr;\n\tint *b_ptr;\n\ta_ptr = &a;\n\tb_ptr = &b;\n\t\n\tstd::cout << \"\\nAddress of a: \";\n\tstd::cout << &a << std::endl;\n\tstd::cout << \"Address of b: \";\n\tstd::cout << &b << std::endl;\n\tstd::cout << \"Address of a_ptr: \";\n\tstd::cout << a_ptr << std::endl;\n\tstd::cout << \"Address of b_ptr: \";\n\tstd::cout << b_ptr << std::endl;\n\tstd::cout << \"Value of a_ptr: \";\n\tstd::cout << *a_ptr << std::endl;\n\tstd::cout << \"Value of b_ptr: \";\n\tstd::cout << *b_ptr << std::endl;\n}\n\nmain()\n{\n\tdash_arrow_thing();\n\t//ab();\n}\n" }, { "alpha_fraction": 0.5249999761581421, "alphanum_fraction": 0.574999988079071, "avg_line_length": 18.5, "blob_id": "292a99aef1a2e9ef127cc18ff6168da436f0341a", "content_id": "f630a36a8b3eebaca3706680f59173f5e6d7194d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Makefile", "length_bytes": 40, "license_type": "no_license", "max_line_length": 33, "num_lines": 2, "path": "/test_code/cpp/socket/server/Makefile", "repo_name": "RobRight/Personal", "src_encoding": "UTF-8", "text": "\nall:\n\tg++ -std=c++11 server.cpp -o run\n" }, { "alpha_fraction": 0.5058236122131348, "alphanum_fraction": 0.5108153223991394, "avg_line_length": 24, "blob_id": "ab5674d51cb53cca3584a26f930bca7666a52e27", "content_id": "657fc6545470b882115e3e0cd9a258b0cfde6549", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 601, "license_type": "no_license", "max_line_length": 82, "num_lines": 24, "path": "/connect_four_IDEA/cpp/main.cpp", "repo_name": "RobRight/Personal", "src_encoding": "UTF-8", "text": "\n#include \"connect_four.hpp\"\n\nint main() {\n\n CF g;\n g.setup();\n\n bool win = false;\n while (!win) {\n // human\n g.display_board(); // print board to screen\n g.move_one_human(); // interactive move selection\n g.move_two_human(); ///\n // bot\n Board state;\n state.import(g.return_board()); // return the board state\n bool good2 = g.move_one(m); // where m is a column to place the piece\n bool good1 = g.move_two(m); ///\n // \n win = g.check_win(true); // input: true (display text)\n }\n\n return 0;\n}\n" }, { "alpha_fraction": 0.7575757503509521, "alphanum_fraction": 0.7575757503509521, "avg_line_length": 7.25, "blob_id": "6b1d28c067279f7699c9545651dfeca4684e9ade", "content_id": "a62fb2d1668b7869fd36381fe33b8dcbfb670db4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 33, "license_type": "no_license", "max_line_length": 19, "num_lines": 4, "path": "/message_scrambler/README.md", "repo_name": "RobRight/Personal", "src_encoding": "UTF-8", "text": "# Message Scrambler\n\n\nno symbols\n" }, { "alpha_fraction": 0.6456211805343628, "alphanum_fraction": 0.657841145992279, "avg_line_length": 28.75757598876953, "blob_id": "da6c4cab30f0c65165188a18b91f823b467d1092", "content_id": "c42408e072334999875b9d8d7f8ac797223cd709", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 982, "license_type": "no_license", "max_line_length": 141, "num_lines": 33, "path": "/_spark_car/main/vs_0_8/carSensor.h", "repo_name": "RobRight/Personal", "src_encoding": "UTF-8", "text": "/*\n * William Gregory \n * 4.13.2015\n * \n * Library for:\n * Car sensor 
interpretation\n *\n * carSensor.h\n * carSensor.cpp\n*/\n\n#ifndef _CAR_SENSOR_MONITOR\n#define _CAR_SENSOR_MONITOR\n#include \"application.h\"\n\nclass Sensor\n{\n private:\n void setPinModes();\n void averageValues();\n float avg_dist_short_right, avg_dist_short_left, avg_dist_short_front, avg_dist_medium_back, avg_dist_long_front;\n float raw_dist_short_right, raw_dist_short_left, raw_dist_short_front, raw_dist_medium_back, raw_dist_long_front;\n float dist_short_right, dist_short_left, dist_short_front, dist_medium_back, dist_long_front;\n int move, sampleNumber;\n public:\n Sensor(int _val1, int _val2, int _val3, int _val4, int _val5);\n void getValues();\n void serialDeep();\n int interpretSensors();\n int right_wall, left_wall, front_wall, back_wall, far_front_wall, far_back_wall, close_right_wall, close_left_wall, close_front_wall;\n};\n\n#endif\n" }, { "alpha_fraction": 0.4545454680919647, "alphanum_fraction": 0.4828934371471405, "avg_line_length": 20.744680404663086, "blob_id": "d77a5f721ff8b2505b3c6ba4e5c8e9a784d148a4", "content_id": "dcc755a71a296bf5359c78ca1f760a7a97148964", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1023, "license_type": "no_license", "max_line_length": 104, "num_lines": 47, "path": "/matrix_world/matrix.cpp", "repo_name": "RobRight/Personal", "src_encoding": "UTF-8", "text": "//\n// Matrix - text display\n// \n// WG-090616\n//\n\n#include <iostream>\n#include <vector>\n#include <stdio.h>\n#include <stdlib.h>\n#include <unistd.h> // unix time sleep\n\ndouble delay_time = 10;\n\nbool change_color = true;\n\nstd::vector <char> char_options =\n{\n 'a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z'\n,'A','B','C','D','E','F','G','H','I','J','K','L','M','N','O','P','Q','R','S','T','U','V','W','X','Y','Z'\n,'0','1','2','3','4','5','6','7','8','9'\n,'!','@','#','$','%','^','&','*','(',')','-','_','=','+','[',']','{','}','|'\n};\n\nint matrix_output(int in_time)\n{\n\tsrand(time(NULL));\n\ttime_t begin = time(NULL);\n\tif (change_color) std::cout << \"\\033[32m\";\n\twhile(true)\n\t{\n\t\tint rand_id = rand() % char_options.size();\n\t\tchar char_rand = char_options.at(rand_id);\n\t\tstd::cout << char_rand;\n\t\tusleep(delay_time);\n\n\t\tif (time(NULL)-begin > in_time) break;\n\t}\n\tif (change_color) std::cout << \"\\033[0m\";\n\tstd::cout << \"\\n\";\n}\n\nint main()\n{\n\tmatrix_output(5);\n\treturn 0;\n}\n\n" }, { "alpha_fraction": 0.6463963985443115, "alphanum_fraction": 0.6689189076423645, "avg_line_length": 30.785715103149414, "blob_id": "ac4b675e7a2cf54c73ef9472073fd34f9e233041", "content_id": "99aab6eedbacae83ddea5202a8f0e0493eb1c910", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 444, "license_type": "no_license", "max_line_length": 115, "num_lines": 14, "path": "/_spark_car/particle/compile.py", "repo_name": "RobRight/Personal", "src_encoding": "UTF-8", "text": "from os import system\n\n# Verify and Compile to Particle Cloud current project\n# http://docs.particle.io/photon/cli/\n\n# Project directory to use\ncurrent_vs = '0_9_2'\n\ndef main():\n # ex particle compile main/vs_0.8 --saveTo firmware/spark_firmware_vs0.8_001.bin\n system('particle compile main/vs_' + current_vs + ' --saveTo main/firmware/firmware_vs_' + current_vs + '.bin')\n \nif __name__ == '__main__': # on load start main()\n main()" }, { "alpha_fraction": 0.59375, "alphanum_fraction": 0.59375, "avg_line_length": 
12.166666984558105, "blob_id": "760d31bd7a1b73b0cc20137205b305cd364b6bd1", "content_id": "1cdbfbf5efda51c4ff5a351f9343b3d5dfdd81ea", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 160, "license_type": "no_license", "max_line_length": 43, "num_lines": 12, "path": "/morse_code/main.cpp", "repo_name": "RobRight/Personal", "src_encoding": "UTF-8", "text": "\n#include <iostream>\n\n#include \"morse_code.hpp\"\n\nint main() {\n\n\tMorse::Morse m;\n\t\n\tstd::string t = m.to_morse(\"hello world\");\n\n\tstd::cout << t << std::endl;\n}\n\n" }, { "alpha_fraction": 0.6200000047683716, "alphanum_fraction": 0.6600000262260437, "avg_line_length": 24, "blob_id": "4d03988e1fe8a90cc4fc4056116cad61b0600de9", "content_id": "4034d64782fb04a785ab49c2c1080663718331ea", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Makefile", "length_bytes": 50, "license_type": "no_license", "max_line_length": 44, "num_lines": 2, "path": "/message_scrambler/Makefile", "repo_name": "RobRight/Personal", "src_encoding": "UTF-8", "text": "all:\n\tg++ -std=c++11 message_scrambler.cpp -o run\n" }, { "alpha_fraction": 0.6694045066833496, "alphanum_fraction": 0.6762491464614868, "avg_line_length": 37.47368240356445, "blob_id": "372b61b8a58aeeb21e01da440217b5f8efd70a73", "content_id": "c1647b23fc0985978df19c5172fe8732a360bfb3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1461, "license_type": "no_license", "max_line_length": 177, "num_lines": 38, "path": "/_spark_car/main/vs_0_9/serialManager.h", "repo_name": "RobRight/Personal", "src_encoding": "UTF-8", "text": "/*\n * William Gregory \n * 5.3.2015\n * \n * Library for:\n * Motor Contoller (TB6612FNG)\n *\n * serialManager.h\n * serialManager.cpp\n*/\n\n#ifndef _SERIAL_MANAGER_\n#define _SERIAL_MANAGER_\n\n#include \"application.h\"\n\nclass SerialManager\n{\n private:\n void serialMove();\n void compass();\n void waypoint();\n void sensor();\n void gps();\n void printFloat(float _val, unsigned int _precision);\n bool offlineMode, useMotorSerial, useCompassSerial, useWaypointSerial, useGPSSerial, useSensorSerial, compassError, isValidGPS;\n int mode, move, lastMove, nextWaypoint, right_wall, left_wall, front_wall, back_wall, close_right_wall, close_left_wall, close_front_wall, far_front_wall, far_back_wall;\n float heading, lat, lon, wplat, wplon, waypointDistance, waypointDirection;\n public:\n SerialManager();\n void call();\n void sync(int _mode, bool _offlineMode, bool _useMotorSerial, bool _useWaypointSerial, bool _useGPSSerial, bool _useCompassSerial, bool _useSensorSerial, int _move, \n int _lastMove, float _heading, float _compassError, float _lat, float _lon, bool _isValidGPS, int _right_wall, int _left_wall,\n int _front_wall, int _back_wall, int _close_right_wall, int _close_left_wall, int _close_front_wall, int _far_front_wall, int _far_back_wall, int _nextWaypoint,\n float _wplat, float _wplon, float _waypointDistance, float _waypointDirection);\n};\n\n#endif" }, { "alpha_fraction": 0.6151685118675232, "alphanum_fraction": 0.6404494643211365, "avg_line_length": 22.733333587646484, "blob_id": "75d859a3cf6f7b56ce6bde2ea36764ff5d561e99", "content_id": "edf197a20b8b1165ace26a9d0d8ffdd34d5ed284", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 712, "license_type": "no_license", "max_line_length": 64, "num_lines": 30, "path": "/test_code/python/threading.py", "repo_name": 
"RobRight/Personal", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n\n# Python 3.5.2 threading test\n# by William Gregory on August 12th 2016\n\n# http://www.tutorialspoint.com/python/python_multithreading.htm\n\n# ToDo: investigate mutlithreading\n\nfrom thread import start_new_thread\nfrom time import sleep, ctime, time\n\n# print time and thread name after a delay four times\ndef print_time(thread_name, delay):\n count = 0\n while count < 5:\n sleep(delay)\n count += 1\n print(thread_name + \" \" + ctime(time()))\n return\n# main code\ntry:\n\tstart_new_thread(print_time, (\"thread-1\", 1), )\n\tsleep(0.1)\n\tstart_new_thread(print_time, (\"thread-2\", 1), )\nexcept:\n\tprint(\"Error: unable to start thread\")\n\nwhile True:\n\tpass\n" }, { "alpha_fraction": 0.5164677500724792, "alphanum_fraction": 0.5555954575538635, "avg_line_length": 34.49044418334961, "blob_id": "f4e6df5e41f8c2187a3d4b130e0189e351950ea9", "content_id": "35557878d77f23e152d706f9fe2dbb9381ae8b6c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 11148, "license_type": "no_license", "max_line_length": 179, "num_lines": 314, "path": "/_spark_car/main/vs_0_9_2/trexController.cpp", "repo_name": "RobRight/Personal", "src_encoding": "UTF-8", "text": "/*\n * William Gregory \n *\n * Date created: 6.9.2015\n * Last updated: 6.18.2015\n * \n * Library for:\n * Trex Motor Controller ROB-12075\n *\n * Objects:\n * function moveSet(move, speed) with moves 0: coast stop, 1: brake stop, 2: forward(speed), 3: backward(speed), 4: turnRight, 5: turnLeft, 6: moveRight(speed), 7: moveLeft(speed)\n *\n * Files:\n * trexController.h\n * trexController.cpp\n*/\n\n#include \"trexController.h\"\n\nint sv[6]={0,0,0,0,0,0}; // servo positions: 0 = Not Used\nint sd[6]={0,0,0,0,0,0}; // servo sweep speed/direction\nbyte devibrate=50; // time delay after impact to prevent false re-triggering due to chassis vibration\nint sensitivity=50; // threshold of acceleration / deceleration required to register as an impact\nint lowbat=550; // adjust to suit your battery: 550 = 5.50V\nbyte i2caddr=7; // default I2C address of T'REX is 7. If this is changed, the T'REX will automatically store new address in EEPROM\nbyte i2cfreq=0; // I2C clock frequency. Default is 0=100kHz. 
Set to 1 for 400kHz\nbyte pfreq = 2;\nint voltage, lmcurrent, rmcurrent, lmencoder, rmencoder;\nint x, y, z, deltax, deltay, deltaz;\nint error;\nint lmspeed = 0;\nint rmspeed = 0;\n\nint turningSpeed = 100;\nint moveTurnChange = 20;\n\nTrex::Trex() {}\n\nvoid Trex::moveSet(int _move, int _speed)\n{\n switch(_move)\n {\n // 0: coast stop, 1: brake stop, 2: forward(speed), 3: backward(speed), 4: turnRight, 5: turnLeft, 6: moveRight(speed), 7: moveLeft(speed)\n case 0:\n // coast stop\n masterSend(0 ,0x00, 0, 0x00);\n break;\n case 1:\n // brake stop (ammount)\n masterSend(_speed, 0x01, _speed, 0x01);\n break;\n case 2:\n // forward (speed)\n masterSend(_speed, 0x00, _speed, 0x00);\n break;\n case 3:\n // backward(speed)\n masterSend(-_speed, 0x00, -_speed, 0x00);\n break;\n case 4:\n // turnRight\n masterSend(turningSpeed, 0x00, -turningSpeed, 0x00);\n break;\n case 5:\n // turnLeft\n masterSend(-turningSpeed, 0x00, turningSpeed, 0x00);\n break;\n case 6:\n // moveRight\n masterSend(_speed, 0x00, (_speed - moveTurnChange), 0x00);\n break;\n case 7:\n // moveLeft\n masterSend((_speed - moveTurnChange), 0x00, _speed, 0x00);\n break;\n default:\n //stop\n masterSend(100, 0x01, 100, 0x01);\n break;\n }\n}\n\nvoid Trex::test(int _val)\n{\n if(_val == 1) {\n if(lmspeed < 255) lmspeed ++;\n else if(lmspeed == 255) lmspeed = 50;\n rmspeed = lmspeed;\n masterSend(lmspeed, 0x00, rmspeed, 0x00);\n }\n else if(_val == 2) {\n moveSet(2, 100);\n delay(1000);\n moveSet(3, 100);\n delay(1000);\n moveSet(4, 100);\n delay(1000);\n moveSet(5, 100);\n delay(1000);\n moveSet(0, 0);\n delay(1000);\n }\n}\n\nvoid Trex::masterReceive()\n{\n byte d;\n int i=0;\n Wire.requestFrom(I2Caddress,24); // request 24 bytes from device 007\n \n while(Wire.available()<24) // wait for entire data packet to be received\n {\n if(i==0) Serial.print(\"Waiting for slave to send data.\"); // Only print message once (i==0)\n if(i>0) Serial.print(\".\"); // print a dot for every loop where buffer<24 bytes\n i++; // increment i so that message only prints once.\n if(i>120)\n {\n Serial.println(\"\");\n i=1;\n }\n }\n d=Wire.read(); // read start byte from buffer\n if(d!=startbyte) // if start byte not equal to 0x0F \n {\n Serial.print(d,DEC);\n while(Wire.available()>0) // empty buffer of bad data\n {\n d=Wire.read();\n }\n Serial.println(\" Wrong Start Byte\"); // error message\n return; // quit\n }\n\n error = (Wire.read(),DEC); // slave error report\n \n voltage=Wire.read()*256+Wire.read(); // T'REX battery voltage\n \n \n lmcurrent=Wire.read()*256+Wire.read(); // T'REX left motor current in mA\n \n lmencoder=Wire.read()*256+Wire.read(); // T'REX left motor encoder count\n \n rmcurrent=Wire.read()*256+Wire.read(); // T'REX right motor current in mA\n \n rmencoder=Wire.read()*256+Wire.read(); // T'REX right motor encoder count\n \n x=Wire.read()*256+Wire.read(); // T'REX X-axis\n \n y=Wire.read()*256+Wire.read(); // T'REX Y-axis\n \n z=Wire.read()*256+Wire.read(); // T'REX Z-axis\n \n deltax=Wire.read()*256+Wire.read(); // T'REX X-delta\n \n deltay=Wire.read()*256+Wire.read(); // T'REX Y-delta\n \n deltaz=Wire.read()*256+Wire.read(); // T'REX Z-delta\n}\n\n\nvoid Trex::masterSend(int _lmspeed, byte _lmbrake, int _rmspeed, byte _rmbrake)\n{\n Wire.beginTransmission(I2Caddress); // transmit data to 7\n Wire.write(startbyte); // start byte\n Wire.write(pfreq); // pwm frequency\n \n Wire.write(highByte(_lmspeed)); // MSB left motor speed\n Wire.write( lowByte(_lmspeed)); // LSB left motor speed\n Wire.write(_lmbrake); // left motor 
brake\n \n Wire.write(highByte(_rmspeed)); // MSB right motor speed\n Wire.write( lowByte(_rmspeed)); // LSB right motor speed\n Wire.write(_rmbrake); // right motor brake\n \n Wire.write(highByte(sv[0])); // MSB servo 0\n Wire.write( lowByte(sv[0])); // LSB servo 0\n \n Wire.write(highByte(sv[1])); // MSB servo 1\n Wire.write( lowByte(sv[1])); // LSB servo 1\n \n Wire.write(highByte(sv[2])); // MSB servo 2\n Wire.write( lowByte(sv[2])); // LSB servo 2\n \n Wire.write(highByte(sv[3])); // MSB servo 3\n Wire.write( lowByte(sv[3])); // LSB servo 3\n \n Wire.write(highByte(sv[4])); // MSB servo 4\n Wire.write( lowByte(sv[4])); // LSB servo 4\n \n Wire.write(highByte(sv[5])); // MSB servo 5\n Wire.write( lowByte(sv[5])); // LSB servo 5\n \n Wire.write(devibrate); // devibrate\n Wire.write(highByte(sensitivity)); // MSB impact sensitivity\n Wire.write( lowByte(sensitivity)); // LSB impact sensitivity\n \n Wire.write(highByte(lowbat)); // MSB low battery voltage 550 to 30000 = 5.5V to 30V\n Wire.write( lowByte(lowbat)); // LSB low battery voltage\n \n Wire.write(i2caddr); // I2C slave address for T'REX controller\n Wire.write(i2cfreq); // I2C clock frequency: 0=100kHz 1=400kHz\n Wire.endTransmission(); // stop transmitting\n \n delay(50);\n masterReceive();\n delay(50);\n}\n\nvoid Trex::debugSerial()\n{\n Serial.print(\"Slave Error Message:\");\n Serial.println(error);\n \n Serial.print(\"Battery Voltage:\\t\");\n Serial.print(int(voltage/10));Serial.println(\".\"); \n Serial.print(voltage-(int(voltage/10)*10));Serial.println(\"V\");\n \n Serial.print(\"Left Motor Current:\\t\");\n Serial.print(lmcurrent);Serial.println(\"mA\");\n \n Serial.print(\"Left Motor Encoder:\\t\");\n Serial.println(lmencoder); \n \n Serial.print(\"Right Motor Current:\\t\");\n Serial.print(rmcurrent);Serial.println(\"mA\");\n \n Serial.print(\"Right Motor Encoder:\\t\");\n Serial.println(rmencoder); \n \n Serial.print(\"X-axis:\\t\\t\");\n Serial.println(x); \n \n Serial.print(\"Y-axis:\\t\\t\");\n Serial.println(y);\n \n Serial.print(\"Z-axis:\\t\\t\");\n Serial.println(z); \n \n Serial.print(\"X-delta:\\t\\t\");\n Serial.println(deltax); \n \n Serial.print(\"Y-delta:\\t\\t\");\n Serial.println(deltay); \n \n Serial.print(\"Z-delta:\\t\\t\");\n Serial.println(deltaz);\n \n Serial.print(\"\\r\\n\\n\\n\");\n}\n\n/*\nOriginally from Dagu Hi-Tech Electronics\n\nThe T'REX sample code expects 27 bytes of data to be sent to it in a specific order, this is the \"command data packet\"\nIf you do not send the correct number of bytes in the correct sequence then the T'REX will ignore the data and set the error flag\nYour software can use this error flag to re-send datapackets that may have been corrupted due to electrical interferance\n\nMaster to Slave data packet - 27 bytes\n\nbyte Start 0xF0\nbyte PWMfreq 1=31.25kHz 7=30.5Hz\nint lmspeed\nbyte lmbrake\nint rmspeed\nbyte rmbrake\nint servo 0\nint servo 1\nint servo 2\nint servo 3\nint servo 4\nint servo 5\nbyte devibrate 0-255 default=50 (100mS)\nint impact sensitivity \nint low battery 550 to 30000 5.5V to 30V\nbyte IยฒC address\nbyte IยฒC clock frequency: 0=100kHz 1=400kHz\n\n\nWhen requested, the T'REX sample code will send a data packet reporting it's status\n\nSlave to Master data packet - 24 bytes\n\nbyte Start\nbyte errorflag\nint battery voltage\nint left motor current\nint left motor encoder\nint right motor current\nint right motor encoder\n\nint X-axis raw data from accelerometer X-axis \nint Y-axis raw data from accelerometer Y-axis\nint Z-axis raw data from 
accelerometer Z-axis\n\nint delta X change in X-axis over a period of 2ms\nint delta Y change in Y-axis over a period of 2ms\nint delta Z change in Z-axis over a period of 2ms\n\n\nIf the T'REX receives faulty data (possibly due to electrical interference) it will report the problem using the error flag\nError Flag Bits\n\nBIT0: wrong start byte or number of bytes received\nBIT1: incorrect PWM frequency specified - must be a byte with a value of 1 to 7\nBIT2: incorrect motor speed - left and right motor speeds must be an integer from -255 to +255\nBIT3: incorrect servo position given - all servo positions must be an integer between -2400 and +2400, negative values reverse the sense of the servo. 0 = no servo \nBIT4: incorrect impact sensitivity - must be an integer from 0 to 1023\nBIT5: incorrect lowbat value - minimum battery voltage must be an integer from 550 to 30000 (5.50V to 30.00V)\nBIT6: incorrect I²C address given - I²C address must be a byte with a value from 0 to 127 (7-bit address)\nBIT7: incorrect I²C clock frequency - must be a byte with a value of 0 or 1\n\nNote: All integers must be sent MSB first\n\n*/" }, { "alpha_fraction": 0.48479658365249634, "alphanum_fraction": 0.5280513763427734, "avg_line_length": 18.10077476501465, "blob_id": "50c3ecf37c226e6bd33a6f3b7ad76e82779aacf1", "content_id": "6deeb3117529e44dcc766800601e0b1cbae9708b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 2335, "license_type": "no_license", "max_line_length": 87, "num_lines": 129, "path": "/_spark_car/main/vs_0_8/carMovement.cpp", "repo_name": "RobRight/Personal", "src_encoding": "UTF-8", "text": "/*\n * William Gregory \n * 4.12.2015\n * \n * Library for:\n * Motor Controller (TB6612FNG)\n *\n * carMovement.h\n * carMovement.cpp\n*/\n\n#include \"carMovement.h\"\n\nMove::Move(int _stby, int _pwma, int _pwmb, int _ain1, int _ain2, int _bin1, int _bin2)\n{\n // Set internal variables for Motor Controller\n STBY = _stby;\n PWMA = _pwma;\n PWMB = _pwmb;\n AIN1 = _ain1;\n AIN2 = _ain2;\n BIN1 = _bin1;\n BIN2 = _bin2;\n \n setPinMode();\n}\n\n// Private------------------------------------------------------------\n\nvoid Move::setPinMode()\n{\n // Set pinModes for Motor Controller\n pinMode(STBY, OUTPUT);\n pinMode(PWMA, OUTPUT);\n pinMode(PWMB, OUTPUT);\n pinMode(AIN1, OUTPUT);\n pinMode(AIN2, OUTPUT);\n pinMode(BIN1, OUTPUT);\n pinMode(BIN2, OUTPUT);\n}\n\n// Execute movement\nvoid Move::masterMove(int _motor, int _speed, int _direction)\n{\n digitalWrite(STBY, HIGH);\n \n boolean inPin1 = LOW;\n boolean inPin2 = HIGH;\n\n if(_direction == 1)\n {\n inPin1 = HIGH;\n inPin2 = LOW;\n }\n\n if(_motor == 1)\n {\n digitalWrite(AIN1, inPin1);\n digitalWrite(AIN2, inPin2);\n analogWrite(PWMA, _speed);\n }\n else\n {\n digitalWrite(BIN1, inPin1);\n digitalWrite(BIN2, inPin2);\n analogWrite(PWMB, _speed);\n }\n}\n\n// Public-------------------------------------------------------------\n\n// Define movements\nvoid Move::moveForward(int _speed)\n{\n masterMove(0, _speed, 0);\n masterMove(1, _speed, 0);\n}\n\nvoid Move::moveBackward(int _speed)\n{\n masterMove(0, _speed, 1);\n masterMove(1, _speed, 1);\n}\n\nvoid Move::turnRight(int _speed)\n{\n masterMove(0, _speed, 1);\n masterMove(1, _speed, 0);\n}\n\nvoid Move::turnLeft(int _speed)\n{\n masterMove(0, _speed, 0);\n masterMove(1, _speed, 1);\n}\n\nvoid Move::moveRight(int _mode)\n{\n if(_mode == 0) // sharp turn\n {\n masterMove(0, 100, 0);\n masterMove(1, 180, 0);\n }\n else if(_mode == 1) // light turn\n 
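// note: mode 1 (\"light\") uses a smaller left/right speed split (100 vs 130) than mode 0's sharp turn (100 vs 180)\n 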
{\n masterMove(0, 100, 0);\n masterMove(1, 130, 0);\n }\n}\n\nvoid Move::moveLeft(int _mode)\n{\n if(_mode == 0)\n {\n masterMove(0, 180, 0);\n masterMove(1, 100, 0);\n }\n else if(_mode == 1)\n {\n masterMove(0, 130, 0);\n masterMove(1, 100, 0);\n }\n}\n\n// Stop movement\nvoid Move::stop()\n{\n digitalWrite(STBY, LOW);\n}\n" }, { "alpha_fraction": 0.5495935082435608, "alphanum_fraction": 0.5678861737251282, "avg_line_length": 25.03174591064453, "blob_id": "d88da03d98514d47060a4c3ca60e6e7ac91b06b3", "content_id": "f4be357c37eaf7d97cc5d046d64b942e74318841", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 4920, "license_type": "no_license", "max_line_length": 120, "num_lines": 189, "path": "/_spark_car/main/vs_0_8/carSensor.cpp", "repo_name": "RobRight/Personal", "src_encoding": "UTF-8", "text": "/*\n * William Gregory \n * 4.23.2015\n * \n * Library for:\n * Car sensor interpretation\n *\n * carSensor.h\n * carSensor.cpp\n*/\n\n#include \"carSensor.h\"\n\nint sampleNumber = 100;\n\n// construct\nSensor::Sensor(int _val1, int _val2, int _val3, int _val4, int _val5)\n{\n dist_short_right = _val1; // right short range distence sensor\n dist_short_left = _val2;\n dist_short_front = _val3;\n dist_medium_back = _val4;\n dist_long_front = _val5;\n \n setPinModes();\n}\n\n//-----------------------------------------------------------------------------------------------------\n\n// set pin modes\nvoid Sensor::setPinModes()\n{\n pinMode(dist_short_right, INPUT);\n pinMode(dist_short_left, INPUT);\n pinMode(dist_short_front, INPUT);\n pinMode(dist_medium_back, INPUT);\n pinMode(dist_long_front, INPUT);\n}\n\n// get raw sensor values\nvoid Sensor::getValues()\n{\n raw_dist_short_right = analogRead(dist_short_right);\n raw_dist_short_left = analogRead(dist_short_left);\n raw_dist_short_front = analogRead(dist_short_front);\n raw_dist_medium_back = analogRead(dist_medium_back);\n raw_dist_long_front = analogRead(dist_long_front);\n}\n\n\n// average the sensor values\nvoid Sensor::averageValues()\n{\n getValues();\n avg_dist_short_right = raw_dist_short_right;\n avg_dist_short_left = raw_dist_short_left;\n avg_dist_short_front = raw_dist_short_front;\n avg_dist_medium_back = raw_dist_medium_back;\n avg_dist_long_front = raw_dist_long_front;\n \n for(int x = 0; x < sampleNumber; x++)\n {\n getValues();\n avg_dist_short_right = avg_dist_short_right + raw_dist_short_right / 2;\n }\n \n for(int x = 0; x < sampleNumber; x++)\n {\n getValues();\n avg_dist_short_left = avg_dist_short_left + raw_dist_short_left / 2;\n }\n \n for(int x = 0; x < sampleNumber; x++)\n {\n getValues();\n avg_dist_short_front = avg_dist_short_front + raw_dist_short_front / 2;\n //Serial.println(avg_dist_short_front);\n }\n \n for(int x = 0; x < sampleNumber; x++)\n {\n getValues();\n avg_dist_medium_back = avg_dist_medium_back + raw_dist_medium_back / 2;\n }\n \n for(int x = 0; x < sampleNumber; x++)\n {\n getValues();\n avg_dist_long_front = avg_dist_long_front + raw_dist_long_front / 2;\n }\n}\n\n// Decision Tree and wall detection\n//-----------------------------------------------------------------------------------------------------\n\n// 0:stop, 1:forward, 2:backward, 3:turnRight, 4:turnLeft, 5:moveRight, 6:moveLeft, 7:followCourse, 8:allign to waypoint\nint Sensor::interpretSensors()\n{\n averageValues();\n move = -1;\n \n right_wall = 0;\n left_wall = 0;\n front_wall = 0;\n back_wall = 0;\n \n close_right_wall = 0;\n close_left_wall = 0;\n close_front_wall = 0;\n \n 
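// the far_* flags below come from the long-range front and medium-range back sensors\n 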
far_front_wall = 0;\n far_back_wall = 0;\n \n // short front\n if(avg_dist_short_front > 200) // wall\n {\n front_wall = 1;\n if(avg_dist_short_front > 600) close_front_wall = 1; // close front\n }\n else\n {\n // far dist sensor\n if(avg_dist_long_front > 800) far_front_wall = 1; // far front\n }\n \n \n // short right\n if(avg_dist_short_right > 300) // wall\n {\n right_wall = 1;\n if(avg_dist_short_right > 1000) close_right_wall = 1; // close right\n }\n \n \n // short left\n if(avg_dist_short_left > 300) // wall\n {\n left_wall = 1;\n if(avg_dist_short_left > 1200) close_left_wall = 1; // close left\n }\n \n \n // medium back\n if(avg_dist_medium_back > 300) // far wall\n {\n far_back_wall = 1; \n if(avg_dist_medium_back > 1200) back_wall = 1; // back wall\n }\n \n \n if(move == -1) move = 1; // no move sugestion, contiue\n \n return move;\n}\n\nvoid Sensor::serialDeep()\n{\n Serial.println(\"\");\n Serial.print(\"raw_dist_short_front=\");\n Serial.print(raw_dist_short_front);\n Serial.println(\"\");\n Serial.print(\"raw_dist_long_front=\");\n Serial.print(raw_dist_long_front);\n Serial.println(\"\");\n Serial.print(\"raw_dist_short_right=\");\n Serial.print(raw_dist_short_right);\n Serial.println(\"\");\n Serial.print(\"raw_dist_short_left=\");\n Serial.print(raw_dist_short_left);\n Serial.println(\"\");\n Serial.print(\"raw_dist_medium_back=\");\n Serial.print(raw_dist_medium_back);\n Serial.println(\"\");\n Serial.print(\"avg_dist_short_front=\");\n Serial.print(avg_dist_short_front);\n Serial.println(\"\");\n Serial.print(\"avg_dist_long_front=\");\n Serial.print(avg_dist_long_front);\n Serial.println(\"\");\n Serial.print(\"avg_dist_short_right=\");\n Serial.print(avg_dist_short_right);\n Serial.println(\"\");\n Serial.print(\"avg_dist_short_left=\");\n Serial.print(avg_dist_short_left);\n Serial.println(\"\");\n Serial.print(\"avg_dist_medium_back=\");\n Serial.print(avg_dist_medium_back);\n Serial.println(\"\");\n}\n" }, { "alpha_fraction": 0.5569000244140625, "alphanum_fraction": 0.5701526999473572, "avg_line_length": 17.961748123168945, "blob_id": "3e5026a72695ffe1c03ebb36ecff0f0481ffff68", "content_id": "5eb5b08b58ed209b385cff785c0c6ee4059f427e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 3471, "license_type": "no_license", "max_line_length": 67, "num_lines": 183, "path": "/test_code/cpp/abstract/main.cpp", "repo_name": "RobRight/Personal", "src_encoding": "UTF-8", "text": "\n// Copy of Logans code\n\n#include <iostream>\n\n//// shape (abstract class)\nclass shape {\nprotected:\n double area;\npublic:\n virtual void calc_area() = 0;\n virtual double get_area() = 0;\n void set_area(double a);\n\n const double pi = 3.14159265358979;\n};\n\nvoid shape::set_area(double a) {\n area = a;\n}\n\n\n//// triangle (instantiation of shape abstract class)\nclass triangle : public shape {\nprivate:\n double width;\n double height;\npublic:\n void calc_area(); // required for beign a shape\n double get_area(); // required for beign a shape\n void set_width(double);\n void set_height(double);\n};\n\nvoid triangle::calc_area() {\n area = 0.5 * width * height;\n}\n\ndouble triangle::get_area() {\n return area;\n}\n\nvoid triangle::set_width(double w) {\n width = w;\n}\n\nvoid triangle::set_height(double h) {\n height = h;\n}\n\n\n//// circle (instantiation of shape abstract class)\nclass circle : public shape {\nprivate:\n double radius;\n double diameter;\npublic:\n void calc_area();\n double get_area();\n void set_radius(double);\n 
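// diameter is cached by calc_diameter_from_radius() rather than recomputed inside get_diameter()\n 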
void calc_diameter_from_radius();\n double get_diameter();\n};\n\nvoid circle::calc_area() {\n area = pi * radius * radius;\n}\n\ndouble circle::get_area() {\n return area;\n}\n\nvoid circle::set_radius(double r) {\n radius = r;\n}\n\nvoid circle::calc_diameter_from_radius() {\n diameter = 2 * radius;\n}\n\ndouble circle::get_diameter() {\n return diameter;\n}\n\n\n// testing\nclass testing {\nprivate:\n triangle tt;\n bool test_cirlce();\n bool test_triangle();\npublic:\n bool suite();\n};\n\nbool testing::suite() {\n bool pass_indicator = false;\n bool tc_indicator = false;\n bool tt_indicator = false;\n\n std::cout << \"Testing Suite\" << std::endl;\n try {\n if (test_cirlce()) {\n tc_indicator = true;\n std::cout << \"Circle PASS\" << std::endl;\n }\n else throw 100;\n if (test_triangle()) {\n tt_indicator = true;\n std::cout << \"Triangle PASS\" << std::endl;\n }\n else throw 101;\n }\n catch (int a) {\n std::cout << \"ERROR:testing:suite: \" << a << std::endl;\n }\n std::cout << std::endl;\n return pass_indicator;\n}\n\nbool testing::test_cirlce() {\n bool pass_indicator = false;\n circle tc;\n tc.set_radius(4.0);\n tc.calc_diameter_from_radius();\n tc.calc_area();\n\n if (tc.get_area() == tc.pi * 16.0) {\n if (tc.get_diameter() == 8.0) {\n pass_indicator = true;\n }\n }\n return pass_indicator;\n}\n\nbool testing::test_triangle() {\n bool pass_indicator = false;\n triangle tt;\n tt.set_width(2.0);\n tt.set_height(5.0);\n tt.calc_area();\n\n if (tt.get_area() == 5.0) {\n pass_indicator = true;\n }\n return pass_indicator;\n}\n\ndouble general_area(shape* S) {\n double a;\n\n S->calc_area();\n a = S->get_area();\n std::cout << \"general_area returns: \" << a << std::endl;\n return a;\n}\n\nint main() {\n\n testing X;\n X.suite();\n\n triangle T;\n circle C;\n\n T.set_height(3.0);\n T.set_width(4.0);\n T.calc_area();\n T.set_area(T.get_area()); // why do this? 
test set area?\n std::cout << \"triangle area is: \" << T.get_area() << std::endl;\n\n C.set_radius(2.0);\n C.calc_diameter_from_radius();\n C.calc_area();\n C.set_area(C.get_area());\n std::cout << \"circle area is: \" << C.get_area() << std::endl;\n\n triangle* pT = &T;\n general_area(pT);\n\n std::cout << std::endl;\n\n return 0;\n}\n" }, { "alpha_fraction": 0.4994284510612488, "alphanum_fraction": 0.5198902487754822, "avg_line_length": 28.654237747192383, "blob_id": "ce99485cad66bcbb3b7a0d1ed53d4c860b643ec7", "content_id": "b19959896bc27de8a18816bbd86820457da70f46", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 8748, "license_type": "no_license", "max_line_length": 168, "num_lines": 295, "path": "/_spark_car/main/vs_0_8/serialManager.cpp", "repo_name": "RobRight/Personal", "src_encoding": "UTF-8", "text": "/*\n * William Gregory \n * 5.3.2015\n * \n * Library for:\n * Motor Contoller (TB6612FNG)\n *\n * serialManager.h\n * serialManager.cpp\n*/\n\n#include \"serialManager.h\"\n\nSerialManager::SerialManager() {}\n\nvoid SerialManager::sync(int _mode, bool _offlineMode, bool _useMotorSerial, bool _useWaypointSerial, bool _useGPSSerial, bool _useCompassSerial, bool _useSensorSerial,\n bool _useVoltageSerial, int _move, int _lastMove, float _heading, float _compassError, float _lat, float _lon, bool _isValidGPS, int _right_wall, int _left_wall,\n int _front_wall, int _back_wall, int _close_right_wall, int _close_left_wall, int _close_front_wall, int _far_front_wall, int _far_back_wall, int _nextWaypoint,\n float _wplat, float _wplon, float _waypointDistance, float _waypointDirection, float _batVoltage, int _batAvgReading)\n{\n mode = _mode;\n offlineMode = _offlineMode;\n useMotorSerial = _useMotorSerial;\n useCompassSerial = _useCompassSerial;\n useSensorSerial = _useSensorSerial;\n useWaypointSerial = _useWaypointSerial;\n useGPSSerial = _useGPSSerial;\n useVoltageSerial = _useVoltageSerial;\n move = _move;\n lastMove = _lastMove;\n heading = _heading;\n compassError = _compassError;\n lat = _lat;\n lon = _lon;\n isValidGPS = _isValidGPS;\n right_wall = _right_wall;\n left_wall = _left_wall;\n front_wall = _front_wall;\n back_wall = _back_wall;\n close_right_wall = _close_right_wall;\n close_left_wall = _close_left_wall;\n close_front_wall = _close_front_wall;\n far_front_wall = _far_front_wall;\n far_back_wall = _far_back_wall;\n nextWaypoint = _nextWaypoint;\n wplat = _wplat;\n wplon = _wplon;\n waypointDistance = _waypointDistance;\n waypointDirection = _waypointDirection;\n bat_voltage = _batVoltage;\n bat_avg_reading = _batAvgReading;\n}\n\nvoid SerialManager::call()\n{\n if(mode == 1)\n {\n if(offlineMode == 0)\n {\n Serial.println(\"\");\n Serial.print(\"?\");\n Serial.println(\"\");\n Serial.print(\"mode=\");\n Serial.println(mode);\n serialMove();\n compass();\n gps();\n waypoint();\n sensor();\n Serial.println(\"\");\n Serial.print(\"!\");\n }\n else\n {\n Serial1.println(\"\");\n Serial1.println(\"\");\n Serial1.println(\"--------------\");\n Serial1.print(\"move=\");\n Serial1.println(move);\n Serial1.println(\"--------------\");\n Serial1.print(\"GPS_lat=\");\n printFloat(lat, 1000000000);\n Serial1.print(\"GPS_lon=\");\n printFloat(lon, 1000000000);\n Serial1.print(\"GPS_valid=\");\n Serial1.println(isValidGPS);\n Serial1.println(\"--------------\");\n Serial1.print(\"WAY_currentWaypoint=\");\n Serial1.println((nextWaypoint - 1));\n Serial1.print(\"WAY_currentWaypointLAT=\");\n printFloat(wplat, 1000000000);\n 
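// printFloat's second argument sets the fractional precision (1000000000 = 9 decimal places)\n 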
Serial1.print(\"WAY_currentWaypointLON=\");\n printFloat(wplon, 1000000000);\n Serial1.print(\"WAY_waypointDistance=\");\n printFloat(waypointDistance, 1000000);\n Serial1.print(\"WAY_waypointDirection=\");\n Serial1.println(waypointDirection);\n Serial1.println(\"--------------\");\n if(compassError == true)\n {\n Serial1.print(\"COMP_heading=\");\n Serial1.println(heading);\n }\n else Serial1.println(\"COMP=LSM303 was NOT detected\");\n Serial1.println(\"--------------\");\n Serial1.print(\"BAT_voltage=\");\n Serial1.println(bat_voltage);\n Serial1.print(\"BAT_raw=\");\n Serial1.println(bat_avg_reading);\n Serial1.println(\"--------------\");\n }\n }\n else Serial.println(\"mode=0\");\n}\n\n//--------------------------------------------------\n\n\nvoid SerialManager::serialMove()\n{\n if(useMotorSerial)\n {\n if(lastMove != move)\n {\n // serialManager.move(move);\n switch(move)\n {\n case 0:\n Serial.println(\"MOVE=stop\");\n break;\n case 1:\n Serial.println(\"MOVE=forward\");\n break;\n case 2:\n Serial.println(\"MOVE=backward\");\n break;\n case 3:\n Serial.println(\"MOVE=turnRight\");\n break;\n case 4:\n Serial.println(\"MOVE=turnLeft\");\n break;\n case 5:\n Serial.println(\"MOVE=moveRight\");\n break;\n case 6:\n Serial.println(\"MOVE=moveLeft\");\n break;\n default:\n Serial.print(\"MOVE_errorMove=\");\n Serial.println(move);\n }\n Serial.println(\"\");\n }\n }\n}\n\nvoid SerialManager::compass()\n{\n if(useCompassSerial)\n {\n if(compassError == true)\n {\n Serial.print(\"COMP_heading=\");\n Serial.println(heading);\n Serial.println(\"\");\n }\n else Serial.println(\"COMP=LSM303 was NOT detected\");\n }\n}\n\nvoid SerialManager::sensor()\n{\n if (useSensorSerial)\n {\n Serial.print(\"SEN_far_front_wall=\");\n Serial.println(far_front_wall);\n Serial.println(\"\");\n Serial.print(\"front_wall=\");\n Serial.print(front_wall);\n Serial.println(\"\");\n Serial.print(\"close_front_wall=\");\n Serial.print(close_front_wall);\n Serial.println(\"\");\n Serial.print(\"back_wall=\");\n Serial.print(far_back_wall);\n Serial.println(\"\");\n Serial.print(\"close_back_wall=\");\n Serial.print(back_wall);\n Serial.println(\"\");\n Serial.print(\"right_wall=\");\n Serial.print(right_wall);\n Serial.println(\"\");\n Serial.print(\"close_right_wall=\");\n Serial.print(close_right_wall);\n Serial.println(\"\");\n Serial.print(\"left_wall=\");\n Serial.print(left_wall);\n Serial.println(\"\");\n Serial.print(\"close_left_wall=\");\n Serial.print(close_left_wall);\n Serial.println(\"\");\n }\n \n}\n\nvoid SerialManager::gps()\n{\n if(useGPSSerial)\n {\n if(isValidGPS == false) Serial.println(\"GPS=NOTvalid\");\n else\n {\n Serial.print(\"GPS_lat=\");\n printFloat(lat, 1000000000);\n Serial.print(\"GPS_lon=\");\n printFloat(lon, 1000000000);\n Serial.print(\"GPS_valid=\");\n Serial.println(isValidGPS);\n Serial.println(\"\");\n }\n }\n}\n\nvoid SerialManager::waypoint()\n{\n if(useWaypointSerial)\n {\n Serial.print(\"WAY_currentWaypoint=\");\n Serial.println((nextWaypoint - 1));\n Serial.print(\"WAY_currentWaypointLAT=\");\n printFloat(wplat, 1000000000);\n Serial.print(\"WAY_currentWaypointLON=\");\n printFloat(wplon, 1000000000);\n Serial.print(\"WAY_waypointDistance=\");\n printFloat(waypointDistance, 1000000);\n Serial.print(\"WAY_waypointDirection=\");\n Serial.println(waypointDirection);\n Serial.println(\"\");\n }\n}\n\nvoid SerialManager::voltage()\n{\n if(useVoltageSerial)\n {\n Serial.print(\"BAT_voltage=\");\n Serial.println(bat_voltage);\n Serial.print(\"BAT_raw=\");\n 
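// raw averaged ADC count, paired with the converted voltage printed above\n 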
Serial.println(bat_avg_reading);\n Serial.println(\"\");\n }\n}\n\n//--------------------------------------------------\n\nvoid SerialManager::printFloat(float _val, unsigned int _precision)\n{\n if(offlineMode == 0)\n {\n Serial.print (int(_val)); //prints the int part\n Serial.print(\".\"); // print the decimal point\n unsigned int frac;\n if(_val >= 0)\n frac = (_val - int(_val)) * _precision;\n else\n frac = (int(_val)- _val ) * _precision;\n int frac1 = frac;\n while( frac1 /= 10 )\n _precision /= 10;\n _precision /= 10;\n while( _precision /= 10)\n Serial.print(\"0\");\n\n Serial.println(frac,DEC) ;\n }\n else\n {\n Serial1.print (int(_val)); //prints the int part\n Serial1.print(\".\"); // print the decimal point\n unsigned int frac;\n if(_val >= 0)\n frac = (_val - int(_val)) * _precision;\n else\n frac = (int(_val)- _val ) * _precision;\n int frac1 = frac;\n while( frac1 /= 10 )\n _precision /= 10;\n _precision /= 10;\n while( _precision /= 10)\n Serial1.print(\"0\");\n\n Serial1.println(frac,DEC) ;\n }\n}\n" }, { "alpha_fraction": 0.5249999761581421, "alphanum_fraction": 0.574999988079071, "avg_line_length": 18.5, "blob_id": "e8fbfb91257f050c2020467d59e7ee7e79c1a77a", "content_id": "5d1fa83784a4598198aa837d9bcdcb5015582041", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Makefile", "length_bytes": 40, "license_type": "no_license", "max_line_length": 33, "num_lines": 2, "path": "/test_code/cpp/socket/client/Makefile", "repo_name": "RobRight/Personal", "src_encoding": "UTF-8", "text": "\nall:\n\tg++ -std=c++11 client.cpp -o run\n" }, { "alpha_fraction": 0.5703839063644409, "alphanum_fraction": 0.5776965022087097, "avg_line_length": 36.29545593261719, "blob_id": "fc9d437b2ee21f59e8441481dd21b53cf0865162", "content_id": "6ace21fa050a96180ef2db0d7e1440bc268da65e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1641, "license_type": "no_license", "max_line_length": 77, "num_lines": 44, "path": "/ethanol_drink_comparer/ethanol_drink_comparer.py", "repo_name": "RobRight/Personal", "src_encoding": "UTF-8", "text": "# mixed drink compared to another ethonal content\nprint(\"\\ndrink one compared to drink two mixed with another drink\")\nprint(\"created by William Gregory on July 23rd\")\nprint(\"note: do not type units just numbers\")\nprint(\"------------------------------------------------------------\")\n\n# user input\n# ----------\n# drink one (ex beer)\nprint(\"unmixed drink\")\none_name = input(\"Drink one name: \")\none_abv = float(input(one_name + \" ABV (%): \"))\none_vol = float(input(one_name + \" volume (mL): \"))\nprint()\n# drink two (ex liquer)\nprint(\"mixed drink\")\ntwo_name = input(\"Drink two name: \")\ntwo_abv = float(input(two_name + \" ABV (%): \"))\nprint()\n# mixed drink (ex powerade)\nmix_name = input(\"Mixer drink name: \")\nmix_vol = float(input(mix_name + \" volume when full (mL): \"))\nmix_ratio = float(input(two_name + \" per mix drink, replaceing mixer (%): \"))\nprint()\n\n# calculations\n# ------------\n# ethonal in drink one\none_ethonal_vol = one_vol * (one_abv / 100)\n# drink two volume in mix drink\nvol_two_in_mix = mix_vol * (mix_ratio / 100)\n# ethonal in mixed drink\nmixed_ethonal_vol = vol_two_in_mix * (two_abv / 100)\n# ratio of ethonal per drink one and mix\nethonal_ratio = mixed_ethonal_vol / one_ethonal_vol\n\n# result output\n# -------------\nprint(\"Ethonal volume in \" + one_name + \": \" + str(one_ethonal_vol) + \" mL\")\nprint(\"With \" + mix_name 
+ \" and \" + str(mix_ratio) + \"% \" + two_name)\nprint(\"Ethonal volume in mixed drink: \" + str(mixed_ethonal_vol) + \" mL\")\nprint(\"--------------------------------------\")\nprint(\"Ratio: \" + str(ethonal_ratio) + \" \" + one_name + \" per 1 mixed drink\")\nprint(\"------------------------------------------------------------\")\n" }, { "alpha_fraction": 0.5408163070678711, "alphanum_fraction": 0.5850340127944946, "avg_line_length": 16.235294342041016, "blob_id": "07f9024ac98611ef3a2514906865d6ed7ab2810a", "content_id": "f6971821e9a0c2ae16433af6c5a8da40d24bcd03", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 294, "license_type": "no_license", "max_line_length": 51, "num_lines": 17, "path": "/test_code/cpp/delta_time/main.cpp", "repo_name": "RobRight/Personal", "src_encoding": "UTF-8", "text": "\n#include <ctime>\n#include <iostream>\n\nint main() {\n\tclock_t begin = clock();\n\t\n\tfor (std::size_t i=0; i<10000000000; ++i) {\n\t\tstd::string boom = \"HI\";\n\t}\n\t\n\tclock_t end = clock();\n\tdouble dtime = double(end-begin) / CLOCKS_PER_SEC;\n\n\tstd::cout << \"time: \" << dtime << std::endl;\n\n\treturn 0;\n}\n" }, { "alpha_fraction": 0.6521739363670349, "alphanum_fraction": 0.6679160594940186, "avg_line_length": 28.021739959716797, "blob_id": "094c9e80913d638a92b8dcb1f4e2f7bc180ef81b", "content_id": "f24098f0c3c29d7270fb5a4f33bff29d0f2a5b9d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1334, "license_type": "no_license", "max_line_length": 121, "num_lines": 46, "path": "/_spark_car/main/vs_0_9_2/carSensor.h", "repo_name": "RobRight/Personal", "src_encoding": "UTF-8", "text": "/*\n * William Gregory \n *\n * Date created: 4.23.2015\n * Last updated: 6.18.2015\n * \n * Library for:\n * Spark Car distence sensor management and interpratation\n *\n * Objects:\n * function inturpretSensors() returns a move sugestion\n * varialbe far_wall is 1 for nearby objects or 0 if clear\n *\n * Movements: \n * if close front wall - move back or move back and turn or stop or just dont go forward?\n * if close back wall -\n * if close right wall - turn left\n * if close left wall - turn right\n * \n * Files:\n * carSensor.h\n * carSensor.cpp\n*/\n\n#ifndef _CAR_SENSOR_MONITOR\n#define _CAR_SENSOR_MONITOR\n#include \"application.h\"\n\nclass Sensor\n{\n private:\n void setPinModes();\n void averageValues();\n void getValues();\n float avg_dist_short_right, avg_dist_short_left, avg_dist_short_front, avg_dist_medium_back, avg_dist_long_front;\n float raw_dist_short_right, raw_dist_short_left, raw_dist_short_front, raw_dist_medium_back, raw_dist_long_front;\n int dist_short_right_pin, dist_short_left_pin, dist_short_front_pin, dist_medium_back_pin, dist_long_front_pin;\n signed int move;\n int sampleNumber;\n public:\n Sensor(int _val1, int _val2, int _val3, int _val4, int _val5);\n int interpretSensors();\n int front_wall, back_wall, right_wall, left_wall, far_wall;\n};\n\n#endif" }, { "alpha_fraction": 0.5347364544868469, "alphanum_fraction": 0.5520581007003784, "avg_line_length": 24.69377899169922, "blob_id": "dd1687b15850fbe538ec9b407886687f86a13c4a", "content_id": "76522f46710bdbc89e0650e1b9af1f6075ef4fa0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 5369, "license_type": "no_license", "max_line_length": 111, "num_lines": 209, "path": "/_spark_car/main/vs_0_9/carSensor.cpp", "repo_name": "RobRight/Personal", "src_encoding": 
"UTF-8", "text": "/*\n * William Gregory \n * 4.23.2015\n * \n * Library for:\n * Car sensor interpretation\n *\n * carSensor.h\n * carSensor.cpp\n*/\n\n#include \"carSensor.h\"\n\nint sampleNumber = 100;\n\n// construct\nSensor::Sensor(int _val1, int _val2, int _val3, int _val4, int _val5)\n{\n dist_short_right = _val1; // right short range distence sensor\n dist_short_left = _val2; // left short range distence sensor\n dist_short_front = _val3; // front short range distence sensor\n dist_medium_back = _val4; // back medeium range distence sensor\n dist_long_front = _val5; // front long range distence sensor\n \n setPinModes();\n}\n\n//-----------------------------------------------------------------------------------------------------\n\n// set pin modes\nvoid Sensor::setPinModes()\n{\n pinMode(dist_short_right, INPUT);\n pinMode(dist_short_left, INPUT);\n pinMode(dist_short_front, INPUT);\n pinMode(dist_medium_back, INPUT);\n pinMode(dist_long_front, INPUT);\n}\n\n// get raw sensor values\nvoid Sensor::getValues()\n{\n raw_dist_short_right = analogRead(dist_short_right);\n raw_dist_short_left = analogRead(dist_short_left);\n raw_dist_short_front = analogRead(dist_short_front);\n raw_dist_medium_back = analogRead(dist_medium_back);\n raw_dist_long_front = analogRead(dist_long_front);\n}\n\n\n// average the sensor values\nvoid Sensor::averageValues()\n{\n getValues();\n avg_dist_short_right = raw_dist_short_right;\n avg_dist_short_left = raw_dist_short_left;\n avg_dist_short_front = raw_dist_short_front;\n avg_dist_medium_back = raw_dist_medium_back;\n avg_dist_long_front = raw_dist_long_front;\n \n for(int x = 0; x < sampleNumber; x++)\n {\n getValues();\n avg_dist_short_right = avg_dist_short_right + raw_dist_short_right / 2;\n }\n \n for(int x = 0; x < sampleNumber; x++)\n {\n getValues();\n avg_dist_short_left = avg_dist_short_left + raw_dist_short_left / 2;\n }\n \n for(int x = 0; x < sampleNumber; x++)\n {\n getValues();\n avg_dist_short_front = avg_dist_short_front + raw_dist_short_front / 2;\n //Serial.println(avg_dist_short_front);\n }\n \n for(int x = 0; x < sampleNumber; x++)\n {\n getValues();\n avg_dist_medium_back = avg_dist_medium_back + raw_dist_medium_back / 2;\n }\n \n for(int x = 0; x < sampleNumber; x++)\n {\n getValues();\n avg_dist_long_front = avg_dist_long_front + raw_dist_long_front / 2;\n }\n}\n\n// Decision Tree and wall detection\n//-----------------------------------------------------------------------------------------------------\n\n// 0: coast stop, 1: brake stop, 2: forward(speed), 3: backward(speed), 4: turnRight(speed), 5: turnLeft(speed)\nint Sensor::interpretSensors()\n{\n averageValues();\n move = -1;\n \n right_wall = 0;\n left_wall = 0;\n front_wall = 0;\n back_wall = 0;\n \n close_right_wall = 0;\n close_left_wall = 0;\n close_front_wall = 0;\n \n far_front_wall = 0;\n far_back_wall = 0;\n\n // short front\n if(avg_dist_short_front > 200) // wall\n {\n front_wall = 1;\n if(avg_dist_short_front > 600) \n {\n close_front_wall = 1; // close front\n move = 3;\n }\n }\n else\n {\n // far dist sensor\n if(avg_dist_long_front > 800) far_front_wall = 1; // far front\n }\n \n // short right\n if(avg_dist_short_right > 300) // wall\n {\n right_wall = 1;\n if(avg_dist_short_right > 600) \n {\n close_right_wall = 1; // close right\n move = 5;\n }\n }\n \n \n // short left\n if(avg_dist_short_left > 300) // wall\n {\n left_wall = 1;\n if(avg_dist_short_left > 600) \n {\n close_left_wall = 1; // close left\n move = 4;\n }\n }\n \n \n // medium back\n 
if(avg_dist_medium_back > 500) // far wall\n {\n far_back_wall = 1; \n if(avg_dist_medium_back > 1200)\n {\n back_wall = 1; // back wall\n move = 2;\n }\n }\n \n \n if(move == -1) move = 2; // no move sugestion, contiue\n \n if(far_front_wall == 1 || far_back_wall == 1) close_wall = 1;\n else close_wall = 0;\n \n return move;\n}\n\n\n\nvoid Sensor::serialDeep()\n{\n Serial.println(\"\");\n Serial.print(\"raw_dist_short_front=\");\n Serial.print(raw_dist_short_front);\n Serial.println(\"\");\n Serial.print(\"raw_dist_long_front=\");\n Serial.print(raw_dist_long_front);\n Serial.println(\"\");\n Serial.print(\"raw_dist_short_right=\");\n Serial.print(raw_dist_short_right);\n Serial.println(\"\");\n Serial.print(\"raw_dist_short_left=\");\n Serial.print(raw_dist_short_left);\n Serial.println(\"\");\n Serial.print(\"raw_dist_medium_back=\");\n Serial.print(raw_dist_medium_back);\n Serial.println(\"\");\n Serial.print(\"avg_dist_short_front=\");\n Serial.print(avg_dist_short_front);\n Serial.println(\"\");\n Serial.print(\"avg_dist_long_front=\");\n Serial.print(avg_dist_long_front);\n Serial.println(\"\");\n Serial.print(\"avg_dist_short_right=\");\n Serial.print(avg_dist_short_right);\n Serial.println(\"\");\n Serial.print(\"avg_dist_short_left=\");\n Serial.print(avg_dist_short_left);\n Serial.println(\"\");\n Serial.print(\"avg_dist_medium_back=\");\n Serial.print(avg_dist_medium_back);\n Serial.println(\"\");\n}" }, { "alpha_fraction": 0.5384615659713745, "alphanum_fraction": 0.5897436141967773, "avg_line_length": 18.5, "blob_id": "47cb708cf304a103c9e024908c8c03bfb73a9f48", "content_id": "076fe7ec6d1a95d83682178081d516e2ba7129fc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Makefile", "length_bytes": 39, "license_type": "no_license", "max_line_length": 33, "num_lines": 2, "path": "/matrix_world/Makefile", "repo_name": "RobRight/Personal", "src_encoding": "UTF-8", "text": "all:\n\tg++ -std=c++11 matrix.cpp -o run\n" }, { "alpha_fraction": 0.47021082043647766, "alphanum_fraction": 0.47754353284835815, "avg_line_length": 22.717391967773438, "blob_id": "a8100ae08f76cf70ace3d72e1e7b9218c4e8666a", "content_id": "ce471a8372290f37089e4206b6e6ea47c0bbb6ea", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1091, "license_type": "no_license", "max_line_length": 61, "num_lines": 46, "path": "/_spark_car/other/serial_interpreter.py", "repo_name": "RobRight/Personal", "src_encoding": "UTF-8", "text": "import serial\nfrom time import sleep\n\nser = serial.Serial(port='COM3')\n\nserStart = \"?\"\nserStop = \"!\"\nserComment = \"#\"\nserEqual = \"=\"\n\nwhile True:\n while True:\n # reset values\n names = {} # names dictionary\n values = {} # variable dictionary\n iteration = 1 # iteration\n serData = [] # serial data\n \n line = (str(ser.readline().strip()))\n\n if serStart in line:\n while True:\n line = (str(ser.readline().strip().decode()))\n if serStop in line:\n break\n if line != \"\" and serComment not in line:\n serData.append(line)\n break\n for x in range(len(serData)):\n val = (serData[x]).split(serEqual)\n name = val[0]\n value = val[1]\n names[iteration] = name\n values[name] = value\n iteration += 1\n\n for x in range(1, len(names)+1):\n print(names[x] + \" = \" + values[names[x]])\n\n sleep(1)\n\n print(\"\")\n print(\"----------------------------------------\")\n print(\"\")\n\nser.close()\n" }, { "alpha_fraction": 0.6223506927490234, "alphanum_fraction": 0.6685934662818909, "avg_line_length": 
22.636363983154297, "blob_id": "a0151ad97f3d1c745c1c0e3d0a96a8fd386b77d2", "content_id": "d3c66724e71930da9108ba169cf3ebc5103fcdd2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 519, "license_type": "no_license", "max_line_length": 104, "num_lines": 22, "path": "/_spark_car/particle/upload.py", "repo_name": "RobRight/Personal", "src_encoding": "UTF-8", "text": "from os import system\nfrom random import uniform\n\n# Flash current project binary to current device\n# Note: Combile first to generate .bin file\n# http://docs.particle.io/photon/cli/\n\n# Device ID's\ndev_core = '53ff6e066667574845231267'\ndev_photon = ''\n\n# Project directory to use\ncurrent_vs = '0_9_2'\n\ncurrent_device = dev_core\n\ndef main():\n # ex \n system('particle flash ' + current_device + ' main/firmware/firmware_vs_' + current_vs + '.bin')\n \nif __name__ == '__main__': # on load start main()\n main()" }, { "alpha_fraction": 0.5637639760971069, "alphanum_fraction": 0.575129508972168, "avg_line_length": 35.48170852661133, "blob_id": "3161ff1cc970772810c3f8a3b33908210f553c9e", "content_id": "388fc805ce243e5f35f782d5f11a156b3bbfb7ab", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5983, "license_type": "no_license", "max_line_length": 107, "num_lines": 164, "path": "/hailstone_sequence/hailstone_sequence.py", "repo_name": "RobRight/Personal", "src_encoding": "UTF-8", "text": "# Collatz conjecture (hailstone sequence)\n# any starting positive integer will reach 1 eventually\n# written by William Gregory on August 10th 2016\n\n# ToDo:\n# - seperate into functions\n# - add threading - break up start:end into threads\n\nfrom math import floor\nfrom time import time, sleep\nfrom multiprocessing import Process\n\n# - settings -\n# good to change\nstart_num = 1 # must be a positive integer\nend_num = 10000000 # must be a positive integer\nuse_threads = True\nthread_num = 4\n# 100,000,000\nprint_while = False # print info during run\nprint_after = False # print info after number reached for each\n# not good to change\nmax_round = 10000 # exit if goal not reached before then\nprogress_breaks = 100 # number of percentage updates (ex: 10 or 100)\n# - settings end - \n# ---\n# - functions -\n\n# print master starting info\ndef print_starting_info():\n print(\"Hailstone sequence\")\n print(\"------------------\")\n print(\"Start: \" + str(start_num) + \"\\tEnd: \" + str(end_num))\n print(\"Max round: \" + str(max_round))\n print(\"\")\n print(\"Complete: \\tLast Avg Rounds: \\tTime Total: \\tTime Remaining:\")\n print(\"--------- \\t---------------- \\t----------- \\t---------------\")\n\n# converts input seconds and returns minutes and seconds\ndef seconds_to_minutes_seconds(in_seconds):\n stms_min = int(floor(info_delta_time/60))\n stms_sec = int(floor(info_delta_time-(temp_min*60)))\n return stms_min, stms_sec\n\n# calculate an aproximate time remaining\n# def calculate_time_remaining(in_avg_time, in_current_round, in_total_rounds):\n # pass\n\n# apply the hailstone sequence once using input and return result\ndef apply_hailstone_sequence_once(in_x):\n # even\n if in_x%2 == 0:\n return (in_x/2)\n # odd\n else:\n return (in_x*3 + 1)\n\n# run the sequence from input start to end and return pass/fail result\n# manage optional console output;\ndef hailstone_iterative_loop(in_start, in_end):\n if use_threads:\n print(\"Starting: \" + str(in_start) + \" to \" + str(in_end))\n info_start_time = time()\n 
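# the info_* counters below are local to this call, so each worker process keeps its own copy\n 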
info_max_round = 1\n info_all_pass = True\n info_total_rounds = 0\n temp_print_break = floor((in_end-in_start)/progress_breaks)\n temp_next_print = temp_print_break\n temp_total_rounds = in_end-in_start\n temp_last_avg_rounds = 0\n # in_start to in_end\n for x in range(in_start, in_end):\n current_num = x\n number_reached = False\n run_exit = False\n run_number = 1\n # run\n while run_exit is False:\n current_num = apply_hailstone_sequence_once(current_num)\n # --\n run_number += 1\n info_total_rounds += 1\n # print while\n if print_while is True:\n print(\"value: \" + str(current_num) + \"\\trun: \" + str(run_number))\n # info max round\n if run_number > info_max_round: info_max_round = run_number\n # 1 reached (exit) (pass)\n if current_num == 1:\n message = \"PASS\"\n run_exit = True\n # max rounds (exit)\n elif run_number >= max_round:\n message = \"max round reached\"\n print(\"number: \" + str(x) + \"\\tmessage: \" + message)\n info_all_pass = False\n run_exit = True\n # starting number again (exit)\n elif current_num == x and run_number != 1:\n message = \"WOAH INFINITE LOOP. TELL THE PEOPLE\"\n print(\"number: \" + str(x) + \"\\tmessage: \" + message)\n info_all_pass = False\n run_exit = True\n temp_last_avg_rounds += run_number\n # print after\n if print_after: print(\"start: \" + str(x) + \"\\trounds: \" + str(run_number) + \"\\tinfo: \" + message)\n # progress update\n temp_time_taken_total = int(time()-info_start_time)\n if temp_next_print == x and print_after is False:\n temp_next_print = x+temp_print_break\n temp_progress = int((x*100)/temp_total_rounds)\n temp_time_remaining = temp_time_taken_total*(temp_total_rounds-x)/x\n temp_last_avg_rounds = int(floor(temp_last_avg_rounds/temp_print_break))\n if use_threads is False:\n print(\"done: \" + str(temp_progress) + \"%\" +\n \"\\tlavgr: \" + str(temp_last_avg_rounds) +\n \"\\tttotal: \" + str(temp_time_taken_total) +\n \"\\ttremain: \" + str(temp_time_remaining))\n temp_last_avg_rounds = 0\n print(\"all pass: \" + str(info_all_pass))\n\ndef print_ending_info():\n print(\"\")\n print(\"run complete\")\n print(\"------------\")\n # pass/fail\n if info_all_pass is True:\n info_pass_message = \"PASS\"\n else:\n info_pass_message = \"FAIL\"\n print(\"all pass: \" + info_pass_message)\n # max rounds\n print(\"max round: \" + str(info_max_round))\n # time\n info_delta_time = time()-info_start_time\n if info_delta_time > 60:\n temp_min, temp_sec = seconds_to_minutes_seconds(info_delta_time)\n print(\"time: \" + str(temp_min) + \" min\\t\" + str(temp_sec) + \" sec\")\n else:\n print(\"time: \" + str(floor(info_delta_time)) + \" seconds\")\n # total rounds\n print(\"total rounds: \" + str(info_total_rounds))\n\n# - functions end -\n# ---\n# - main code -\nif __name__ == '__main__':\n if use_threads is False: print_starting_info() # print intro info\n if use_threads:\n threads_array = []\n divide_max = (end_num - start_num) // thread_num\n for x in range(0,thread_num):\n print(\"start \" + str(x))\n p = Process(target=hailstone_iterative_loop, args=(divide_max*x+1, divide_max*x+divide_max+1,))\n p.start()\n threads_array.append(p)\n sleep(0.1)\n for x in range(len(threads_array)):\n threads_array[x].join()\n else:\n hailstone_iterative_loop(start_num, end_num)\n if use_threads is False: print_ending_info()\n# - main code end -\n" }, { "alpha_fraction": 0.5423423647880554, "alphanum_fraction": 0.5615615844726562, "avg_line_length": 18.337209701538086, "blob_id": "1bd770f2cbca917120c419f902d7c9d56074ab0a", "content_id": 
"0c30ea5e7f75e2fa33ba9375e6a25167b1379da1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1665, "license_type": "no_license", "max_line_length": 81, "num_lines": 86, "path": "/test_code/cpp/openssl/secured/main.cpp", "repo_name": "RobRight/Personal", "src_encoding": "UTF-8", "text": "\n// OpenSSL test\n\n#include <iostream>\n\n#include \"/usr/include/openssl/bio.h\"\n#include \"/usr/include/openssl/ssl.h\"\n#include \"/usr/include/openssl/err.h\"\n\nint main() {\n\n\tSSL_load_error_strings();\n\tERR_load_BIO_strings();\n\tOpenSSL_add_all_algorithms();\n\n\tBIO * bio;\n\t\n\tSSL_CTX * ctx = SSL_CTX_new(SSLv23_client_method());\n\tSSL * ssl;\n\n\t// load trust store\n\t// do not have trust store file\n\t//if (! SSL_CTX_load_verify_locations(ctx, \"/usr/include/TrustStore.pem\", NULL))\n\t//{\n\t//\tstd::cout << \"failed to load trust store\" << std::endl;\n\t//}\n\n\n\t\n\t// connect\n\n\tstd::cout << \"connecting\" << std::endl;\n\t\n\t//bio = BIO_new_connect(\"68.229.7.130:80\");\n\tbio = BIO_new_connect(\"www.ibm.com:80\");\n\t\n\tstd::cout << \"connected\" << std::endl;\n\t\n\tif (bio == NULL) {\n\t\tstd::cout << \"fail_1\" << std::endl;\n\t} else {\n\t\tstd::cout << \"pass_1\" << std::endl;\n\t}\n\tif (BIO_do_connect(bio) <= 0) {\n\t\tstd::cout << \"fail_2\" << std::endl;\n\t} else {\n\t\tstd::cout << \"pass_2\" << std::endl;\n\t}\n\t\n\tstd::cout << \"done\" << std::endl;\n\n\n\t// read\n/*\n\tchar buff[1536] = {};\n\n\tint x = BIO_read(bio, buff, sizeof(buff));\n\tif (x == 0) {\n\t\tstd::cout << \"closed\" << std::endl;\n\t} else if (x < 0) {\n\t\tif (! BIO_should_retry(bio)) {\n\t\t\tstd::cout << \"failed\" << std::endl;\n\t\t}\n\t\tstd::cout << \"retry\" << std::endl;\n\t}\n\tstd::cout << \"read done\" << std::endl;\n*/\n\t// write\n\n\tchar buff_w[1536] = {};\n\n\tint w = BIO_write(bio, buff_w, sizeof(buff_w));\n\tif (w < 0) {\n\t\tif (! 
BIO_should_retry(bio)) {\n\t\t\tstd::cout << \"failed\" << std::endl;\n\t\t}\n\t\tstd::cout << \"retry\" << std::endl;\n\t}\n\n\tstd::cout << \"write done\" << std::endl;\n\n\t// close\n\tBIO_free_all(bio);\n\tstd::cout << \"connecion closed\" << std::endl;\n\t\n\treturn 0;\n}\n\n" }, { "alpha_fraction": 0.5162000060081482, "alphanum_fraction": 0.5407999753952026, "avg_line_length": 32.40979766845703, "blob_id": "56b74c2d5dd3932851eb243ed166b184bbe81417", "content_id": "24a7b047b1539adb8a028151e922c8b2f147dd4b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 15000, "license_type": "no_license", "max_line_length": 136, "num_lines": 449, "path": "/_spark_car/main/vs_0_9_2/carControl.ino", "repo_name": "RobRight/Personal", "src_encoding": "UTF-8", "text": "/*\n * William Gregory \n * 4.13.2015\n * 6.16.2015 \n *\n * Main code for Spark Car - see about.txt\n *\n * Bare bones vs 0.9.2 - object avoidance, waypoint navigation, rc control, cloud commands\n * \n*/\n\n//SYSTEM_MODE(SEMI_AUTOMATIC); // dont connect to Particle Cloud\n\n#include \"trexController.h\" // motor controller library\n#include \"carSensor.h\" // dist sensor manager library\n#include \"lsm303.h\" // compass library\n#include \"tinyGPS.h\" // gps library\n#include \"serialManager.h\" // serial library\n#include \"math.h\"\n\n// Enable/Desable general settings\n//-----------------------------------------------------------------------------------------------------\n\n// Action\nboolean useMotors = FALSE; // engage motors\nboolean useSensors = FALSE; // use distence sensors to avoid ojects\nboolean useWaypoint = FALSE; // use GPS and compass sensors to find waypoint\nboolean rcmode = TRUE; // control from rc controller - useMotors must be TRUE to run\n\n// Serial\nboolean useSerial = FALSE; // general sensor data\nboolean useStorage = FALSE; // SD card log of data - not added yet\n\n// Particle Cloud\nboolean useCloud = TRUE; // use Particle cloud\n\n// Other\nboolean usingParticleDevice = TRUE;\n\n//-----------------------------------------------------------------------------------------------------\n//-----------------------------------------------------------------------------------------------------\n\n// Initialize libraries\nTrex moveCar; // motor control\nSensor sensor(A0, A1, A2, A3, A4); // dist sensor (pins)\nLSM303 compass; // compass/acceleromitor sensor\nTinyGPS gps; // GPS\nSerialManager serial; // serial for debug\n\n// Define\nuint8_t mode = 1; // control mode (0) off, (1) auto, (2) rc\nuint8_t offlinePin = 7; // Particle cloud online/ofline select pin\nconst float Pi = 3.14159265; // circumference / diameter\n\n// Move\nint8_t move = 0; // move to execute\nuint8_t lastMove; // previous move if needed\nuint8_t sensorMove; // move recomended by sensor library\nuint8_t speed = 200; // default driving speed\nuint8_t slow = 0; // half speed if slow is (1)\n\n// Compass\nfloat heading = 0.0; // compass heading\nconst uint8_t compAcuracy = 30; // number of itterations for avrage of compass value\nboolean compassError; // false if not found\nconst float headingTolerance = 10.0; // degrees until move\n\n// GPS\nboolean isValidGPS; // true when GPS data is valid\nfloat lat, lon; // GPS lat and lan\nfloat wplat, wplon; // waypoint lat and lon\nunsigned long age; // gps data age\n\n// Waypoint\nint nextWaypoint = 0; // next waypoint number to assign\nfloat waypointDistance, waypointDirection; // updating waypoint data\nconst float waypointOffsetAllowed = 0.00000001; // 
tolerance until waypoint reached\n\n// Serial\nunsigned long previousMillis = 0; // last print time\nunsigned long interval = 1000; // time between print\n\n// RC control\nint throtlePin = A5; // rc throtle pin (2 on reciever)\nint steeringPin = A6; // rc steering pin (1 on reciever)\nint stopPin = A7; // rc stop pin (7 on reciever)\nint rc_rmspeed, rc_lmspeed, throtle, steering, stop; // rc control values\n\n// Particle cloud\nboolean offlineMode;\nboolean lowPower = FALSE; // TRUE to turn off WiFi when offline\nint cloudMove(String command); // move function for cloud\n\n// Setup\n//-----------------------------------------------------------------------------------------------------\n\nvoid setup()\n{\n pinMode(offlinePin, INPUT); // set pin modes\n pinMode(throtle, INPUT);\n pinMode(steering, INPUT);\n pinMode(stopPin, INPUT);\n \n Serial.begin(9600); // serial usb debug\n Serial1.begin(9600); // gps rx\n Wire.begin(); // compass and Trex controller\n \n if(useWaypoint)\n {\n compassError = compass.init();\t// returns \"true\" if compass found\n\t compass.enableDefault(); // enable compass\n }\n \n Spark.function(\"carMove\", cloudMove); // setup cloud function called 'move'\n Spark.variable(\"move\", &move, INT); // opens variable to Particle cloud\n Spark.variable(\"sensorMove\", &sensorMove, INT); // opens variable to Particle cloud\n \n setWaypoint(); // set first waypoint\n \n delay(400);\n}\n\n// Main loop\n//-----------------------------------------------------------------------------------------------------\n//-----------------------------------------------------------------------------------------------------\n\nvoid loop()\n{\n manageDebug(); // manage debug\n manageCloud(); // manage cloud\n \n if(mode == 0) // mode(0) for all stop\n {\n delay(1000);\n }\n else if(mode == 1) // mode(1) for auto mode - avoid wall and fallow waypoint\n {\n if(useSensors) // Sensors\n {\n sensorMove = sensor.interpretSensors(); // check distance sensor library\n }\n\n if(useWaypoint) // Waypoint\n {\n checkCompass(); // get compass reading\n \n getGPS(); // get GPS reading\n \n findWaypoint(); // find waypoint\n \n if(abs(lat-wplat) < waypointOffsetAllowed) setWaypoint(); // within waypoint offset, set next waypoint\n }\n \n carDecideMove(); // decide move\n \n if(useMotors) moveCar.moveSet(move, speed); // execute move with Trex controller\n }\n else if(mode == 2) // mode(2) for rc control\n {\n rcControl(); // RC control function\n \n if(useMotors) moveCar.masterSend(rc_lmspeed, 0x00, rc_rmspeed, 0x00); // moveCar\n }\n else // defalut all stop\n {\n delay(1000);\n }\n}\n\n// Decide move\n//-----------------------------------------------------------------------------------------------------\n//-----------------------------------------------------------------------------------------------------\n\n// 0: coast stop, 1: brake stop, 2: forward(speed), 3: backward(speed), 4: turnRight(speed), 5: turnLeft(speed)\n\nvoid carDecideMove() // forward > waypoint > sensors > moveCar\n{\n lastMove = move; // set lastmove\n move = 2; // set default move as forward\n \n if(useWaypoint) // if use waypoint\n {\n if(isValidGPS) // if GPS valid turn to face heading if not already\n {\n if((waypointDirection - heading) > headingTolerance) move = 5; // heading greater than tolerance, go left\n else if((waypointDirection - heading) < -headingTolerance) move = 4; // heading less than tolerance, go right\n }\n else move = 0; // stop if GPS invalid\n }\n \n if(useSensors) // use sensor sugestion if there is one\n {\n 
if(sensorMove != 2) move = sensorMove; // if sensor has a sugestion\n }\n \n checkSpeed();\n}\n\n// Waypoint - set for course\n//-----------------------------------------------------------------------------------------------------\n//-----------------------------------------------------------------------------------------------------\n\n// set the next waypoint lat and long (read: nextWaypoint; set: nextwaypoint, wplat, wplon)\nvoid setWaypoint()\n{\n switch(nextWaypoint)\n {\n case 0:\n wplat = 39.550416;\n wplon = -119.809166;\n break;\n case 1:\n wplat = 39.550545;\n wplon = -119.808651;\n break;\n case 2:\n wplat = 39.550413;\n wplon = -119.808396;\n break;\n case 3:\n wplat = 39.550291;\n wplon = -119.808712;\n break;\n }\n nextWaypoint++;\n}\n//-----------------------------------------------------------------------------------------------------\n//-----------------------------------------------------------------------------------------------------\n\n\n// Compass\n//-----------------------------------------------------------------------------------------------------\n\n// finds compass heading (read: compassError, lsm303 compass reading; set: heading)\nvoid checkCompass()\n{\n if (compassError) // compass found\n\t{\n\t compass.read(); // read compass\n\t int mag_x = compass.m.x; // set values\n\t int mag_y = compass.m.y;\n\t // int mag_z = compass.m.z;\n\n\t for(int x = 0; x < compAcuracy; x++) // get avrage of values\n\t {\n\t\t compass.read();\n\t\t mag_x = mag_x + compass.m.x / 2;\n\t mag_y = mag_y + compass.m.y / 2;\n\t // mag_z = mag_z + compass.m.z / 2;\n\t }\n\t \n heading = (((atan2(mag_y, mag_x) * 180.0) / Pi) + 180.0); // find heading - heading\n\t}\n}\n\n\n// GPS\n//-----------------------------------------------------------------------------------------------------\n\n// check if GPS is valid (read: tinyGPS Serial1(GPS); set: isValidGPS)\nvoid checkGPSvalid()\n{\n isValidGPS = false;\n for (unsigned long start = millis(); millis() - start < 1000;)\n {\n // Check GPS data is available\n while (Serial1.available())\n {\n char c = Serial1.read();\n \n // parse GPS data\n if (gps.encode(c)) isValidGPS = true;\n }\n }\n}\n\n// get GPS data (use: checkGPSvalid(), set: GPS (lat, lon, age))\nvoid getGPS()\n{\n checkGPSvalid(); // check if GPS is valid\n\n if (isValidGPS) gps.f_get_position(&lat, &lon, &age); // get position if GPS valid\n}\n\n\n// Waypoint - set for course\n//-----------------------------------------------------------------------------------------------------\n\n// find waypoint direction and distence from current location (read: wplat, wplon; set: waypointDistance, waypointDirection)\nvoid findWaypoint()\n{\n float TMPdlat = (wplat - lat);\n float TMPdlon = wplon - lon;\n waypointDistance = sqrt(pow(TMPdlat, 2) + pow(TMPdlon, 2)); // find waypoint distance\n \n TMPdlat = toRad(TMPdlat);\n TMPdlon = toRad(TMPdlon);\n float TMPlat = toRad(lat);\n float TMPwplat = toRad(wplat);\n \n float y = sin(TMPdlon) * cos(TMPwplat);\n float x = cos(TMPlat) * sin(TMPwplat) - sin(TMPlat) * cos(TMPwplat) * cos(TMPdlon);\n waypointDirection = toDeg(atan2(y, x)); // find waypoint direction\n \n if (waypointDirection < 0) waypointDirection = 360 - abs(waypointDirection);\n}\n\n// RC Control\n//-----------------------------------------------------------------------------------------------------\n\n// uses value from rc to set speed and then adds direction, then calls moveCar.masterSend\nvoid rcControl()\n{\n throtle = pulseIn(throtlePin, HIGH); // read values\n steering = 
pulseIn(steeringPin, HIGH);\n stop = pulseIn(stopPin, HIGH);\n throtle = constrain(throtle, 800, 1500); // constrain valuse\n steering = constrain(steering, 800, 1500);\n int gospeed = 0; // mapped value of throtle\n if(throtle > 1200) gospeed = map(throtle, 1200, 1500, 0, 255); // map forward speed\n else if(throtle < 1100) gospeed = map(throtle, 1100, 800, 0, -255); // backward\n int val = 0; // mapped value of steering\n if(steering > 1200) val = map(steering, 1200, 1500, 0, gospeed*2); // map steering\n else if(steering < 1100) val = map(steering, 1100, 800, 0, gospeed*2);\n rc_rmspeed = rc_lmspeed = gospeed;\n if (steering > 1200) rc_rmspeed = (rc_rmspeed - val); // adjust speed for steering\n else if (steering < 1100) rc_lmspeed = (rc_lmspeed - val);\n if(stop > 1000) { rc_lmspeed = 0; rc_rmspeed = 0; } // safety stop switch check\n}\n\n\n// Debug\n//-----------------------------------------------------------------------------------------------------\n\n// print debug serial once a second (add SD card option when ofline)\nvoid manageDebug()\n{\n if (useSerial)\n {\n unsigned long currentMillis = millis();\n if ((currentMillis - previousMillis) > interval) // once a second\n {\n previousMillis = currentMillis; // reset time\n serial.call(mode, move, lastMove, heading, lat, lon, wplat, wplon, waypointDistance, waypointDirection); // print serial\n }\n }\n}\n\n// manage Particle cloud tasks\nvoid manageCloud()\n{\n // check offline select pin\n int val = digitalRead(offlinePin);\n if(val == HIGH) offlineMode = TRUE;\n else offlineMode = FALSE;\n \n manageWiFi();\n \n if(Spark.connected()) // runs if online\n { \n cloudPublish();\n }\n}\n\n// publish to cloud\nvoid cloudPublish()\n{\n if(useCloud)\n {\n if(lastMove != move) Spark.publish(\"move\", String(move)); // publish move to Particle Cloud if move changed\n }\n}\n\n// called when move from cloud recieved\nint cloudMove(String command)\n{\n if(useCloud)\n {\n if(command == \"forward\") move = 2;\n else if(command == \"backward\") move = 3;\n else if(command == \"turnRight\") move = 4;\n else if(command == \"turnLeft\") move = 5;\n else if(command == \"stop\") move = 1;\n else move = 1; // unknown move - stop\n return 1;\n } else return -1;\n \n}\n\n// in lowPower mode turn off wifi when offline\nvoid manageWiFi()\n{\n if(lowPower) {\n if(offlineMode) WiFi.off(); // turn off WiFi\n else // turn on WiFi and connect to cloud\n {\n WiFi.on();\n while(WiFi.connecting()) {}\n WiFi.connect();\n while(!WiFi.ready()) {}\n Spark.connect();\n }\n }\n}\n\n\n// Other\n//-----------------------------------------------------------------------------------------------------\n\n// check for nearby objects, go slower if there are any\nvoid checkSpeed()\n{\n slow = sensor.far_wall;\n if(slow == 1) speed = 100; // if object\n else speed = 200; // if clear\n}\n\nfloat toRad(float val) { return val * Pi / 180; } // float to radians\n\nfloat toDeg(float val) { return val * 180 / Pi; } // float to degrees\n\n// Copyright (2014) Timothy Brown\n//-----------------------------------------------------------------------------------------------------\nunsigned long pulseIn(uint16_t pin, uint8_t state)\n{\n GPIO_TypeDef* portMask = (PIN_MAP[pin].gpio_peripheral); // Cache the target's peripheral mask to speed up the loops.\n uint16_t pinMask = (PIN_MAP[pin].gpio_pin); // Cache the target's GPIO pin mask to speed up the loops.\n unsigned long pulseCount = 0; // Initialize the pulseCount variable now to save time.\n unsigned long loopCount = 0; // Initialize the 
loopCount variable now to save time.\n unsigned long loopMax = 20000000; // Roughly just under 10 seconds timeout to maintain the Spark Cloud connection.\n \n // Wait for the pin to enter target state while keeping track of the timeout.\n while (GPIO_ReadInputDataBit(portMask, pinMask) != state) {\n if (loopCount++ == loopMax) {\n return 0;\n }\n }\n \n // Iterate the pulseCount variable each time through the loop to measure the pulse length; we also still keep track of the timeout.\n while (GPIO_ReadInputDataBit(portMask, pinMask) == state) {\n if (loopCount++ == loopMax) {\n return 0;\n }\n pulseCount++;\n }\n \n // Return the pulse time in microseconds by multiplying the pulseCount variable with the time it takes to run once through the loop.\n return pulseCount * 0.405; // Calculated the pulseCount++ loop to be about 0.405uS in length.\n}" }, { "alpha_fraction": 0.4961166977882385, "alphanum_fraction": 0.5540822148323059, "avg_line_length": 18.767789840698242, "blob_id": "2b6da5b142c06ec172defced027cd3056c91c1a6", "content_id": "258d6e68ae0e5f94fc841ae05467fe67693aadb0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 5279, "license_type": "no_license", "max_line_length": 68, "num_lines": 267, "path": "/test_code/cpp/opengl/main.cpp", "repo_name": "RobRight/Personal", "src_encoding": "UTF-8", "text": "\n#include <math.h>\n#include <vector>\n#include <GL/glut.h>\n\n// todo: add bias\n\n// for a (in,hn,1) network\n\n//float mo = 0.8; // node offset\nfloat radius = 0.13; // node radius\nfloat dl = 0.01; // defalut line width\n\nint in = 2; // input nodes\nint hn = 5; // hidden nodes\nint on = 1; // output nodes (must be 1)\n\nfloat PI = 3.14159;\n\nstd::vector <bool> input_state;\nstd::vector <double> input_val;\n\nstd::vector <bool> hidden_state;\nstd::vector <double> hidden_val;\n\nvoid renderScene(void) {\n\n\tglClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);\n\n\t// draw circles\n\tfor (int j=0; j<3; ++j) {\n\n\t\tglColor3f(1.0, 1.0, 1.0);\n\t\tglLineWidth(2.0);\n\n\t\t// x position\n\t\tfloat offset = 0.0;\n\t\t//if (j==0) offset = mo;\n\t\t//else if (j==2) offset = -mo;\n\n\t\t//for (int i=0; i<=360; ++i) {\n\t\t//\tfloat degInRad = i*PI/180;\n\t\t//\tglVertex2f(cos(degInRad)*radius-offset, sin(degInRad)*radius);\n\t\t//}\n\n\t\t// input\n\t\tif (j==0) {\n\t\t\tfor (int k=0; k<in; ++k) {\n\t\t\t\tglBegin(GL_LINE_LOOP);\n\t\t\t\tfloat to = mo/in;\n\t\t\t\tif (k==1) to = -to;\n\t\t\t\tfor (int i=0; i<=360; ++i) {\n\t\t\t\t\tfloat r = i*PI/180;\n\t\t\t\t\tglVertex2f(cos(r)*radius-offset, sin(r)*radius+to);\n\t\t\t\t}\n\t\t\t\tglEnd();\n\t\t\t}\n\t\t}\n\n\t\t// hidden\n\t\tif (j==1) {\n\t\t\tint half = 0;\n\t\t\tbool even = true;\n\t\t\toffset = 1.8/hn;\n\n\t\t\tfloat to = 0.0\n\n\t\t\tif (hn % 2 != 0) {\n\t\t\t\teven = false; // odd nodes\n\t\t\t\thalf = (hn-1)/2;\n\t\t\telse half = hn/2;\n\n\t\t\tfor (int k=0; k<hn; ++k) {\n\t\t\t\tglBegin(GL_LINE_LOOP);\n\n\t\t\t\tif (even && k==0) to = 0.0 // first node at zero\n\t\t\t\tif (even && k!=0) {\n\t\t\t\t\tif (k < half) {\n\t\t\t\t\t\t\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tif (!even && k==0) to = offset/2; // first node at offset/2 +,-\n\t\t\t\tif (!even && k==1) to = -offest/2;\n\n\t\t\t\tfor (int i=0; i<=360; ++i) {\n\t\t\t\t\tfloat d = i*PI/180;\n\t\t\t\t\tglVertex2f(cos(d)*radius-offset, sin(d)*radius+to);\n\t\t\t\t}\n\t\t\t\tglEnd();\n\t\t\t}\n\t\t}\n\n\t\t// output\n\t\tif (j==2) {\n\t\t\tfor (int k=0; k<on; ++k) {\n\t\t\t\tglBegin(GL_LINE_LOOP);\n\t\t\t\tfor (int i=0; i<=360; 
++i) {\n\t\t\t\t\tfloat d = i*PI/180;\n\t\t\t\t\tglVertex2f(cos(d)*radius-offset, sin(d)*radius);\n\t\t\t\t}\n\t\t\t\tglEnd();\n\t\t\t}\n\t\t}\n\n\t\t// create settings\n\t\t// input to hidden\n\t\tfor (int n=0; n<in; ++n) {\n\t\t\tstd::vector <bool> ts;\n\t\t\tstd::vector <double> tv;\n\t\t\tfor (int nn=0; nn<hn; ++n) {\n\t\t\t\tts.push_back(true);\n\t\t\t\ttv.push_back(0.0);\n\t\t\t}\n\t\t\tinput_state.push_back(ts);\n\t\t\tinput_val.push_back(tv);\n\t\t}\n\t\t// hidden to output\n\t\tfor (int n=0; n<hn; ++n) {\n\t\t\tstd::vector <bool> ts;\n\t\t\tstd::Vector <double> tv;\n\t\t\tts.push_back(true);\n\t\t\tts.push_back(0.0);\n\t\t\thidden_state(true);\n\t\t\thidden_val(0.0);\n\t\t}\n\n\t\t// draw lines\n\n\t\tdouble value = cos(PI/180)*radius;\n\n\t\t// --------------------------\n\t\t// line positive (true) , negititive (false)\n\n\t\tbool tt = true; // top top\n\t\tbool tm = true;\n\t\tbool tb = false;\n\t\tbool tbt = true; // top bias top\n\t\tbool tbm = true;\n\t\tbool tbb = false;\n\n\t\tbool bt = true; // bottom (hidden) top\n\t \tbool bm = true;\n\t\tbool bb = true;\n\t\tbool bbb = true; // bottom bias\n\n\t\t// line width\n\n\t\tfloat ttL = 0.0;\n\t\tfloat tmL = 10.0;\n\t\tfloat tbL = 0.0;\n\t\tfloat tbtL = 0.0;\n\t\tfloat tbmL = 0.0;\n\t\tfloat tbbL = 0.0;\n\n\t\tfloat btL = 0.0;\n\t\tfloat bmL = -1.0;\n\t\tfloat bbL = 0.0;\n\t\tfloat bbbL = 0.0;\n\t\t// ---------------------------\n\n\t\tglBegin(GL_LINES);\n\n\t\t// hidden to output\n\n\t\t// - top\n\t\tif (bt) glColor3f(1.0, 0.0, 0.0);\n\t\telse glColor3f(0.0, 0.0, 1.0);\n\t\tglLineWidth(dl+btL);\n\t\t// -\n\t\tglVertex2f(mo-value, 0.0);\n\t\tglVertex2f(0.0+value, mo);\n\n\t\t// - middle\n\t\tif (bm) glColor3f(1.0, 0.0, 0.0);\n\t\telse glColor3f(0.0, 0.0, 1.0);\n\t\tglLineWidth(dl+bmL);\n\t\t// -\n\t\tglVertex2f(mo-value, 0.0);\n\t\tglVertex2f(0.0+value, 0.0);\n\n\t\t// - bottom\n\t\tif (bb) glColor3f(1.0, 0.0, 0.0);\n\t\telse glColor3f(0.0, 0.0, 1.0);\n\t\tglLineWidth(dl+bbL);\n\t\t// -\n\t\tglVertex2f(mo-value, 0.0);\n\t\tglVertex2f(0.0+value, -mo);\n\n\t\t// - bias\n\t\tif (bbb) glColor3f(1.0, 0.0, 0.0);\n\t\telse glColor3f(0.0, 0.0, 1.0);\n\t\tglLineWidth(dl+bbbL);\n\t\t// -\n\t\t//glVertex2f(); ----------FIX-----------\n\t\t//glVertex2f();\n\n\t\t// input to hidden\n\n\t\t// - top\n\t\tif (tt) glColor3f(1.0, 0.0, 0.0);\n\t\telse glColor3f(0.0, 0.0, 1.0);\n\t\tglLineWidth(dl+ttL);\n\t\t// -\n\t\tglVertex2f(0.0-value, mo);\n\t\tglVertex2f(-mo+value, 0.0);\n\n\t\t// - middle\n\t\tif (tm) glColor3f(1.0, 0.0, 0.0);\n\t\telse glColor3f(0.0, 0.0, 1.0);\n\t\tglLineWidth(dl+tmL);\n\t\t// -\n\t\tglVertex2f(0.0-value, 0.0);\n\t\tglVertex2f(-mo+value, 0.0);\n\n\t\t// bottom\n\t\tif (tb) glColor3f(1.0, 0.0, 0.0);\n\t\telse glColor3f(0.0, 0.0, 1.0);\n\t\tglLineWidth(dl+tbL);\n\t\t// -\n\t\tglVertex2f(0.0-value, -mo);\n\t\tglVertex2f(-mo+value, 0.0);\n\n\t\t// bias\n\t\tif (tbb) glColor3f(1.0, 0.0, 0.0);\n\t\telse glColor3f(0.0, 0.0, 1.0);\n\t\tglLineWidth(dl+tbbL);\n\t\t// -\n\t\t//glVertex2f(); -----------FIX-----------\n\t\t//glVertex2f();\n\n\t\tglEnd();\n\t}\n\n\tglutSwapBuffers();\n}\n\n// issue: nothing displays (disabled)\nvoid changeSize(int w, int h) {\n\n\tif (h == 0) h = 1;\n\tfloat ratio = 1.0 * w / h;\n\n\tglMatrixMode(GL_PROJECTION);\n\tglLoadIdentity();\n\tglViewport(0, 0, w, h);\n\tgluPerspective(45, ratio, 1, 100);\n\tglMatrixMode(GL_MODELVIEW);\n}\n\nint main(int argc, char **argv) {\n\n\t// init GLUT and create window\n\tglutInit(&argc, argv);\n\tglutInitDisplayMode(GLUT_DEPTH | GLUT_DOUBLE | 
GLUT_RGBA);\n\tglutInitWindowPosition(100, 100);\n\tglutInitWindowSize(640, 640);\n\tglutCreateWindow(\"ProjectZero - NeuralNetwork\");\n\n\t// register callbacks\n\tglutDisplayFunc(renderScene);\n\t//glutReshapeFunc(changeSize);\n\n\t// enter GLUT processing cycle\n\tglutMainLoop();\n\n\treturn 1;\n}\n" }, { "alpha_fraction": 0.582456111907959, "alphanum_fraction": 0.6105263233184814, "avg_line_length": 18.066667556762695, "blob_id": "0941ee3ae81ac19264f15ee84f1e77a668db1887", "content_id": "29970fdf2eb2b1bd6e25dbf594667d7230a4f30b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 285, "license_type": "no_license", "max_line_length": 57, "num_lines": 15, "path": "/ethanol_drink_comparer/README.md", "repo_name": "RobRight/Personal", "src_encoding": "UTF-8", "text": "# Ethanol Drink Comparer\n\n## python 3.5.2\n\n### Info:\n* alcoholic drink 1 (ex: beer)\n* alcoholic drink 2 (ex: vodka)\n* mixing drink (ex: powerade)\n\ndrink 1 verses drink 2 replacing some of the mixing drink\n\n|drink 1\t|mixed ex\t|\n|:---------:|:---------:|\n|beer\t\t|vodka\t\t|\n|\t\t\t|powerade\t|" }, { "alpha_fraction": 0.7311828136444092, "alphanum_fraction": 0.7311828136444092, "avg_line_length": 17.600000381469727, "blob_id": "8339b58f58010acc68838677f1f8c2e5cc470e4a", "content_id": "e9ddc584575ec047f8072a8c799b1fe2f508c34d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 93, "license_type": "no_license", "max_line_length": 34, "num_lines": 5, "path": "/README.md", "repo_name": "RobRight/Personal", "src_encoding": "UTF-8", "text": "# Personal\n## collection of personal projects\n\n### Notice:\n* some projects moved to own repo\n" }, { "alpha_fraction": 0.45366600155830383, "alphanum_fraction": 0.4755600690841675, "avg_line_length": 18.828283309936523, "blob_id": "6901c3b05772f3abacd51202ed3b841c6f83709d", "content_id": "e3234a2de0d14ec55df21f8ccf910c436eef7aad", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1964, "license_type": "no_license", "max_line_length": 58, "num_lines": 99, "path": "/player/player.hpp", "repo_name": "RobRight/Personal", "src_encoding": "UTF-8", "text": "\n/*\n\nPlayer\nWilliam Gregory (c) 2075 - all rights reserved.\n\nrange: 0.0 to 100.0\n low high\n\nhow well can you survive?\n\n*/\n\n#include <vector>\n#include <iostream>\n\nnamespace Player {\n\n class Nutrition {\n private:\n double nutrition; // healthy range of food\n double hunger; // belly - hunger feeling\n double full; // full to overfull; usually zero\n public:\n Nutrition() {\n nutrition = 100;\n hunger = 50;\n full = 0;\n }\n };\n\n class Strength {\n private:\n double power_upper; // muscle strength upper body\n double power_lower; // i.e. 
legs\n double indurance_upper; // cardio\n double indurance_lower; \n public:\n Strength() {\n power_upper = 20;\n power_lower = 20;\n indurance_upper = 20;\n indurance_lower = 20;\n }\n };\n\n class Health {\n private:\n double health; // dead to very healthy\n double sore; // muscles sore\n double tired; // not tired to very tired\n double sick; // not sick to very sick\n public:\n Health() {\n health = 100;\n sore = 0;\n tired = 10;\n sick = 0;\n }\n };\n\n class Mind {\n private:\n double ability; // ability to learn\n double science; // knowledge\n double art; // knowledge\n public:\n Mind() {\n ability = 80;\n science = 40;\n art = 20;\n }\n }\n\n class Traits {\n private:\n double patience;\n double temper;\n double attentive;\n public:\n Traits() {\n patience = 60;\n temper = 30;\n attentive = 50;\n }\n }\n\n class Player {\n private:\n Nutrition n;\n Strength s;\n Health h;\n Mind m;\n public:\n Player() {\n std::cout << \"\\nwelcome\\n\" << std::endl;\n }\n };\n\n}\n" }, { "alpha_fraction": 0.5249999761581421, "alphanum_fraction": 0.574999988079071, "avg_line_length": 18.5, "blob_id": "1bf12b6288e5576725750a4cb743d78c1cd94e05", "content_id": "6ce95866755d33b7a0d59bf409df145fc43ecc79", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Makefile", "length_bytes": 40, "license_type": "no_license", "max_line_length": 33, "num_lines": 2, "path": "/matrix/Makefile", "repo_name": "RobRight/Personal", "src_encoding": "UTF-8", "text": "\nall:\n\tg++ -std=c++11 matrix.hpp -o run\n" }, { "alpha_fraction": 0.5374045968055725, "alphanum_fraction": 0.5674300193786621, "avg_line_length": 35.407405853271484, "blob_id": "5a24c00d42fa686b8bfef67225577b245a7a63bc", "content_id": "2940a3ad70b39878b0128824ac3ed37573362c0f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1965, "license_type": "no_license", "max_line_length": 179, "num_lines": 54, "path": "/_spark_car/main/vs_0_9_2/trexController.h", "repo_name": "RobRight/Personal", "src_encoding": "UTF-8", "text": "/*\n * William Gregory \n *\n * Date created: 6.9.2015\n * Last updated: 6.18.2015\n * \n * Library for:\n * Trex Motor Controller ROB-12075\n *\n * Objects:\n * function moveSet(move, speed) with moves 0: coast stop, 1: brake stop, 2: forward(speed), 3: backward(speed), 4: turnRight, 5: turnLeft, 6: moveRight(speed), 7: moveLeft(speed)\n *\n * Files:\n * trexController.h\n * trexController.cpp\n*/\n\n#ifndef _TREX_MOTOR_CONTROLLER\n#define _TREX_MOTOR_CONTROLLER\n#include \"application.h\"\n\n#define lowByte(w) ((uint8_t) ((w) & 0xff))\n#define highByte(w) (((uint8_t) ((w) >> 8) & 0xff))\n\n#define startbyte 0x0F\n#define I2Caddress 0x07\n\nclass Trex\n{\n private:\n int sv[6]; // servo positions: 0 = Not Used\n int sd[6]; // servo sweep speed/direction\n byte devibrate; // time delay after impact to prevent false re-triggering due to chassis vibration\n int sensitivity; // threshold of acceleration / deceleration required to register as an impact\n int lowbat; // adjust to suit your battery: 550 = 5.50V\n byte i2caddr; // default I2C address of T'REX is 7. If this is changed, the T'REX will automatically store new address in EEPROM\n byte i2cfreq; // I2C clock frequency. Default is 0=100kHz. 
Set to 1 for 400kHz\n byte pfreq;\n int lmspeed, rmspeed;\n int x, y, z;\n int turningSpeed, moveTurnChange;\n public:\n Trex();\n void moveSet(int _move, int _speed);\n void masterReceive();\n void masterSend(int _lmspeed, byte _lmbrake, int _rmspeed, byte _rmbrake);\n void debugSerial();\n void test(int _val);\n int voltage, lmcurrent, rmcurrent, lmencoder, rmencoder;\n int deltax, deltay, deltaz;\n int error;\n};\n\n#endif" }, { "alpha_fraction": 0.507547914981842, "alphanum_fraction": 0.545491635799408, "avg_line_length": 26.550561904907227, "blob_id": "526a582aeb2023d9a6fdb8c4f24bd89b44238583", "content_id": "b5be13b22e296034f4e4c88b90cbb61b86f70134", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 2451, "license_type": "no_license", "max_line_length": 180, "num_lines": 89, "path": "/_spark_car/main/vs_0_9_2/serialManager.cpp", "repo_name": "RobRight/Personal", "src_encoding": "UTF-8", "text": "/*\n * William Gregory \n * 5.3.2015\n * \n * Library for:\n * Motor Contoller (TB6612FNG)\n *\n * serialManager.h\n * serialManager.cpp\n*/\n\n#include \"serialManager.h\"\n\nSerialManager::SerialManager() {}\n\nvoid SerialManager::call(int _mode, int _move, int _lastMove, float _heading, float _lat, float _lon, float _wplat, float _wplon, float _waypointDistance, float _waypointDirection)\n{\n if(_mode == 1)\n {\n Serial1.println(\"\");\n Serial1.println(\"\");\n Serial1.println(\"--------------\");\n Serial1.print(\"move=\");\n serialMove(_move);\n Serial1.println(\"--------------\");\n Serial1.print(\"GPS_lat=\");\n printFloat(_lat, 1000000000);\n Serial1.print(\"GPS_lon=\");\n printFloat(_lon, 1000000000);\n Serial1.println(\"--------------\");\n Serial1.print(\"WAY_currentWaypointLAT=\");\n printFloat(_wplat, 1000000000);\n Serial1.print(\"WAY_currentWaypointLON=\");\n printFloat(_wplon, 1000000000);\n Serial1.print(\"WAY_waypointDistance=\");\n printFloat(_waypointDistance, 1000000);\n Serial1.print(\"WAY_waypointDirection=\");\n Serial1.println(_waypointDirection);\n Serial1.println(\"--------------\");\n Serial1.print(\"COMP_heading=\");\n Serial1.println(_heading);\n }\n}\n\nvoid SerialManager::serialMove(int _val)\n{\n switch(_val)\n {\n case 0:\n Serial.println(\"MOVE=stop\");\n break;\n case 1:\n Serial.println(\"MOVE=forward\");\n break;\n case 2:\n Serial.println(\"MOVE=backward\");\n break;\n case 3:\n Serial.println(\"MOVE=turnRight\");\n break;\n case 4:\n Serial.println(\"MOVE=turnLeft\");\n break;\n case 5:\n Serial.println(\"MOVE=moveRight\");\n break;\n case 6:\n Serial.println(\"MOVE=moveLeft\");\n break;\n default:\n Serial.print(\"MOVE_errorMove=\");\n Serial.println(_val);\n }\n Serial.println(\"\");\n}\n\nvoid SerialManager::printFloat(float _val, unsigned int _precision)\n{\n Serial.print (int(_val));\n Serial.print(\".\");\n unsigned int frac;\n if(_val >= 0) frac = (_val - int(_val)) * _precision;\n else frac = (int(_val)- _val) * _precision;\n int frac1 = frac;\n while(frac1 /= 10) _precision /= 10;\n _precision /= 10;\n while(_precision /= 10) Serial.print(\"0\");\n Serial.println(frac,DEC);\n}" }, { "alpha_fraction": 0.5745553970336914, "alphanum_fraction": 0.600547194480896, "avg_line_length": 21.15151596069336, "blob_id": "b34741d72466a503458d3f557b90bdc1e8203463", "content_id": "d996705814a3c6eb77b7ee32fdf1093128469326", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 731, "license_type": "no_license", "max_line_length": 90, "num_lines": 33, "path": 
"/_spark_car/main/vs_0_8/carMovement.h", "repo_name": "RobRight/Personal", "src_encoding": "UTF-8", "text": "/*\n * William Gregory \n * 4.12.2015\n * \n * Library for:\n * Motor Contoller (TB6612FNG)\n *\n * carMovement.h\n * carMovement.cpp\n*/\n\n#ifndef _CAR_MOVEMENT_LIBRARY\n#define _CAR_MOVEMENT_LIBRARY\n#include \"application.h\"\n\nclass Move\n{\n private:\n int STBY, PWMA, PWMB, AIN1, AIN2, BIN1, BIN2;\n void setPinMode();\n public:\n Move(int _stby, int _pwma, int _pwmb, int _ain1, int _ain2, int _bin1, int _bin2);\n void masterMove(int _motor, int _speed, int _direction);\n void moveForward(int _speed);\n void moveBackward(int _speed);\n void turnRight(int _speed);\n void turnLeft(int _speed);\n void moveRight(int _mode);\n void moveLeft(int _mode);\n void stop();\n};\n\n#endif\n" }, { "alpha_fraction": 0.6036983132362366, "alphanum_fraction": 0.6080493330955505, "avg_line_length": 16.851133346557617, "blob_id": "4e3012fcede9a06ba2f393454a4778948bf5e935", "content_id": "3dcea9937a20ac15280ae96170fcfd8ca12f19f2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 5516, "license_type": "no_license", "max_line_length": 94, "num_lines": 309, "path": "/message_scrambler/message_scrambler.cpp", "repo_name": "RobRight/Personal", "src_encoding": "UTF-8", "text": "/*\n * Encrypter\n * 7/16/2016\n *\n * William Gregory\n *\n * NO SYMBOLS\n *\n */\n\n#include <iostream>\n#include <string>\n#include <sstream>\n#include <vector>\n#include <algorithm>\n#include <fstream>\n\nusing namespace std;\n\n// key class\nstruct ekey\n{\n\tvector <char> value;\n\tvector <int> map;\n};\n\n\n// encrypter class\nclass encrypter\n{\nprivate:\n\tekey master_key;\n\t// import/export\n\tbool export_message(vector <int>);\n\tbool export_un_message(string);\n\tvector <int> import_message();\n\tbool export_key();\n\tbool import_key();\n\t// main functions\n\tint set_map(char);\n\tchar get_map(int);\npublic:\n\tencrypter();\n\tbool verbose;\n\tbool testing;\n\t// function handlers\n\tvector <int> encrypt(string);\n\tstring decrypt(vector <int>);\n};\n\n\nencrypter::encrypter()\n{\n\tverbose = false;\n\ttesting = false;\n}\n\n\n// export encrypted message to file\nbool encrypter::export_message(vector <int> in_message)\n{\n\tofstream file;\n\tfile.open(\"output/enmess.boss\", ios::out | ios::trunc);\n\tif (file.is_open())\n\t{\n\t\tfor (int i=0; i<in_message.size(); i++)\n\t\t{\n\t\t\tfile << in_message.at(i) << \"\";\n\t\t}\n\t\treturn true;\n\t}\n\treturn false;\n}\n\n\n// exports key to text file\nbool encrypter::export_key()\n{\n\tofstream file;\n\tfile.open(\"output/boss.key\", ios::out | ios::binary | ios::trunc);\n\tif (file.is_open())\n\t{\n\t\tfor (int i=0; i<master_key.value.size(); i++)\n\t\t{\n\t\t\tfile << master_key.value.at(i);\n\t\t}\n\t\tfile << \"\\n\";\n\t\tfor (int i=0; i<master_key.map.size(); i++)\n\t\t{\n\t\t\tfile << master_key.map.at(i);\n\t\t}\n\t\tfile << \"\\n\";\n\t\tfile.close();\n\t\treturn true;\n\t}\n\treturn false;\n}\n\n\n// import encrypted message from file\nvector <int> encrypter::import_message()\n{\n\tifstream file;\n\tfile.open(\"output/enmess.boss\");\n\tvector <int> out_mes;\n\tif (file.is_open())\n\t{\n\t\tstring s;\n\t\tchar ch;\n\t\tfstream fin(\"output/enmess.boss\", fstream::in);\n\t\twhile (fin >> noskipws >> ch)\n\t\t{\n\t\t\tint temp_val = ch - '0';\n\t\t\tout_mes.push_back(temp_val);\n\t\t}\n\t}\n\treturn out_mes;\n}\n\n\n// imports key from key file\nbool encrypter::import_key()\n{\n\tifstream 
file;\n\tfile.open(\"output/boss.key\");\n\tif (file.is_open())\n\t{\n\t\tmaster_key.value.clear();\n\t\tmaster_key.map.clear();\n\t\tstring line;\n\t\tint count = 0;\n\t\twhile (getline(file, line))\n\t\t{\n\t\t\tfor (int i=0; i<line.size(); i++)\n\t\t\t{\n\t\t\t\tif (count == 0) master_key.value.push_back(line.at(i));\n\t\t\t\telse if (count == 1) master_key.map.push_back(line.at(i)-'0');\n\t\t\t\telse return false;\n\t\t\t}\n\t\t\tcount++;\n\t\t}\n\t\t// testing\n\t\tif (verbose)\n\t\t{\n\t\t\tcout << endl;\n\t\t\tcout << \"import:\" << endl;\n\t\t\tfor (int i=0; i<master_key.value.size(); i++)\n\t\t\t{\n\t\t\t\tcout << master_key.value.at(i);\n\t\t\t\tcout << \":\";\n\t\t\t\tcout << master_key.map.at(i);\n\t\t\t\tcout << \" \";\n\t\t\t}\n\t\t\tcout << endl;\n\t\t}\n\t\t\n\t\tfile.close();\n\t\treturn true;\n\t}\n\treturn false;\n}\n\n\n// export decrypted message to file\nbool encrypter::export_un_message(string in_val)\n{\n\tofstream file;\n\tfile.open(\"output/message.txt\", ios::out | ios::trunc);\n\tif (file.is_open())\n\t{\n\t\tfile << in_val;\n\t\tfile.close();\n\t\treturn true;\n\t}\n\treturn false;\n}\n\n\n// manages raw to key\nint encrypter::set_map(char in_val)\n{\n\tint temp_mapped;\n\tif (master_key.value.empty()) temp_mapped = 0;\n\telse\n\t{\n\t\t// check if mapped\n\t\tvector <char>::iterator it = find(master_key.value.begin(), master_key.value.end(), in_val);\n\t\tif (it != master_key.value.end())\n\t\t{\n\t\t\tint temp_index = it - master_key.value.begin();\n\t\t\ttemp_mapped = master_key.map.at(temp_index);\n\t\t\treturn temp_mapped;\n\t\t}\n\t\telse // add map\n\t\t{\n\t\t\ttemp_mapped = master_key.map.back() + 1;\n\t\t}\n\t}\n\tmaster_key.value.push_back(in_val);\n\tmaster_key.map.push_back(temp_mapped);\n\treturn temp_mapped;\n}\n\n\n// manages key to raw\nchar encrypter::get_map(int in_val)\n{\n\tif (master_key.map.empty()) return '-';\n\telse\n\t{\n\t\tvector <int>::iterator it = find(master_key.map.begin(), master_key.map.end(), in_val);\n\t\tif (it != master_key.map.end())\n\t\t{\n\t\t\tint temp_index = it - master_key.map.begin();\n\t\t\treturn (char) master_key.value.at(temp_index);\n\t\t}\n\t\telse\n\t\t{\n\t\t\treturn '-';\n\t\t}\n\t}\n}\n\n\n// encrypt manager\n// input: string value to encrypt\nvector <int> encrypter::encrypt(string in_val)\n{\n\tif (verbose) cout << \"input size: \" << in_val.size() << endl;\n\tvector <int> out_val;\n\tfor (int i=0; i<in_val.size(); i++)\n\t{\n\t\t// get int equivalent to char\n\t\tint temp_val = (int) in_val.at(i);\n\t\tint temp_val_mapped = set_map(in_val.at(i));\n\t\tout_val.push_back(temp_val_mapped);\n\t}\n\tif (verbose) cout << \"output size: \" << master_key.value.size() << endl;\n\t\n\tif (testing)\n\t{\n\t\tcout << endl;\n\t\tcout << \"- encrypt -\" << endl;\n\t\tfor (int i=0; i<out_val.size(); i++)\n\t\t{\n\t\t\tcout << out_val.at(i) << \" \";\n\t\t}\n\t\tcout << endl;\n\t}\n\t\n\texport_message(out_val); // export message\n\texport_key(); // export key\n\treturn out_val;\n}\n\n\n// decrypt manager\nstring encrypter::decrypt(vector <int> in_val)\n{\n\tstring out_val;\n\t// import\n\timport_key();\n\tin_val = import_message();\n\tfor (int i=0; i<in_val.size(); i++)\n\t{\n\t\tchar temp_val = get_map(in_val.at(i));\n\t\tout_val.push_back(temp_val);\n\t}\n\texport_un_message(out_val);\n\t\n\tif (testing)\n\t{\n\t\tcout << endl;\n\t\tcout << \"- decrypt -\" << endl;\n\t\tfor (int i=0; i<out_val.size(); i++)\n\t\t{\n\t\t\tcout << out_val.at(i);\n\t\t}\n\t\tcout << endl << endl;;\n\t}\n\treturn out_val;\n}\n\n\n// 
autorun\nint main()\n{\n\t// - settings -\n\tbool verbose = false;\n\tbool testing = false;\n\t// - setting end -\n\t\n\tencrypter enc;\n\tenc.verbose = verbose;\n\tenc.testing = testing;\n\tstring in_string;\n\tif (testing) in_string = \"Hello World\";\n\telse\n\t{\n\t\tcout << endl;\n\t\tcout << \"Enter text to encrypt: \";\n\t\tgetline(cin, in_string); // whole line\n\t}\n\t\n\t// run\n\tvector <int> temp = enc.encrypt(in_string);\n\tenc.decrypt(temp);\n\treturn 0;\n}\n" }, { "alpha_fraction": 0.5675675868988037, "alphanum_fraction": 0.5810810923576355, "avg_line_length": 11.333333015441895, "blob_id": "165512e7df4ade4a54e99e71ea8b10103b9db4a8", "content_id": "9af2d6855ad949fab5706210a79ebf521dc7b1fe", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 74, "license_type": "no_license", "max_line_length": 21, "num_lines": 6, "path": "/player/main.cpp", "repo_name": "RobRight/Personal", "src_encoding": "UTF-8", "text": "\n#include \"player.hpp\"\n\nint main() {\n Player::Player p;\n return 0;\n}" }, { "alpha_fraction": 0.5918367505073547, "alphanum_fraction": 0.5918367505073547, "avg_line_length": 23, "blob_id": "2334065a45374fe14b1b65c5f823929342e55c08", "content_id": "e98f936700993309966e557b23196647a017e799", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Makefile", "length_bytes": 49, "license_type": "no_license", "max_line_length": 42, "num_lines": 2, "path": "/test_code/cpp/opengl/Makefile", "repo_name": "RobRight/Personal", "src_encoding": "UTF-8", "text": "\nall:\n\tg++ main.cpp -o run -lGL -lglut -lGLU -lm\n" }, { "alpha_fraction": 0.7210526466369629, "alphanum_fraction": 0.7210526466369629, "avg_line_length": 22.875, "blob_id": "728871199ccf0dc06a2c4d65fa7adaa3199cec68", "content_id": "9ff3bc61cc538afd6479a9d4e6657871c3e87e76", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 190, "license_type": "no_license", "max_line_length": 68, "num_lines": 8, "path": "/matrix_world/README.md", "repo_name": "RobRight/Personal", "src_encoding": "UTF-8", "text": "# Matrix World\n\n## Info:\n* displays text (with optional color) to look like the Martix movie.\n* eventually times out. 
recommended not to exit manually.\n\n### Image:\n![run image](run_image.png)" }, { "alpha_fraction": 0.5961538553237915, "alphanum_fraction": 0.6346153616905212, "avg_line_length": 25, "blob_id": "9248a50046bc5158ef65848dce8e748421e2eea8", "content_id": "65fad86fe8e237ef133bad83bd1a223e7e5c2372", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Makefile", "length_bytes": 52, "license_type": "no_license", "max_line_length": 46, "num_lines": 2, "path": "/morse_code/Makefile", "repo_name": "RobRight/Personal", "src_encoding": "UTF-8", "text": "all:\n\tg++ -std=c++11 morse_code.hpp main.cpp -o run\n" }, { "alpha_fraction": 0.62109375, "alphanum_fraction": 0.64453125, "avg_line_length": 20.94285774230957, "blob_id": "6490866a9b40456b5b458a49948d73d17331ccc0", "content_id": "c31382fc1e3996a939c97dea310be7c036b9775b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 768, "license_type": "no_license", "max_line_length": 66, "num_lines": 35, "path": "/test_code/python/multithreading.py", "repo_name": "RobRight/Personal", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n\n# Python 3.5.2 threading test\n# by William Gregory on August 12th 2016\n\n# http://www.tutorialspoint.com/python/python_multithreading.htm\n\n# ToDo: investigate multithreading\n\nfrom multiprocessing import Process\nimport time\n\n# print time and thread name after a delay four times\ndef print_time(thread_name, delay):\n    count = 0\n    while count < 5:\n        time.sleep(delay)\n        count += 1\n        print(thread_name + \" \" + time.ctime(time.time()))\n    return\n\n# main code\nthreads_array = []\ntry:\n\tp = Process(target=print_time, args=(\"1\", 1))\n\tp.start()\n\tthreads_array.append(p)\n\ttime.sleep(0.1)\n\tp = Process(target=print_time, args=(\"2\", 2))\n\tp.start()\n\tthreads_array.append(p)\nexcept:\n\tprint(\"Error: unable to start thread\")\n\nwhile True:\n\tpass\n" }, { "alpha_fraction": 0.4759671688079834, "alphanum_fraction": 0.4800703525543213, "avg_line_length": 23.054054260253906, "blob_id": "b4e540c4c8048efdecdc8871d1f288bc8961d9a7", "content_id": "1798ba33dc3e42e705f1274ba02fa9639258b883", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1706, "license_type": "no_license", "max_line_length": 74, "num_lines": 74, "path": 
not taken)\n // piece (1)\n }\n bool move_two(unsigned int move) {\n // move one but (2)\n }\n};\n\nclass CF {\nprivate:\n Board board;\npublic:\n void display_board() {\n board.display_board();\n }\n void move_one_human() {\n std::cout << \"enter move: \" << std::endl;\n unsigned int move;\n // get move\n bool t = move_one(move);\n if(!move) {\n // invalid move\n }\n }\n void move_two_human() {\n std::cout << \"enter move: \" << std::endl;\n unsigned int move;\n // get move\n bool t = move_two(move);\n if(!move) {\n // invalid move\n }\n }\n bool move_one(unsigned int move) {\n return board.move_one(move);\n }\n bool move_two(unsigned int move) {\n return board.move_two(move);\n }\n bool check_win() {\n return board.check_win();\n }\n};" }, { "alpha_fraction": 0.5659722089767456, "alphanum_fraction": 0.6232638955116272, "avg_line_length": 14.594594955444336, "blob_id": "9e044c31a36e1dbf60668af1fe5353a3c839d6d8", "content_id": "6a9bbd8940a2d9618faa4e3dd39650d74be67386", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 576, "license_type": "no_license", "max_line_length": 125, "num_lines": 37, "path": "/_spark_car/other/test/TrexMove/control.ino", "repo_name": "RobRight/Personal", "src_encoding": "UTF-8", "text": "/*\n * William Gregory\n * June 3, 2015\n * June 4, 2015\n * \n * Trex motion test\n * Particle with Trex chassis and Trex motor controller\n * new - test vs 0.01\n *\n * manual contorl over network\n *\n * To Do:\n *\n * add remote control\n * add serial debug\n *\n */\n\n#include \"trexController.h\"\n\nTrex move;\n\nint speed = 100; // 0-255\nint move = 0; // 0: coast stop, 1: brake stop, 2: forward(speed), 3: backward(speed), 4: turnRight(speed), 5: turnLeft(speed)\n\nvoid setup()\n{\n Serial.begin(9600); // debug\n Wire.begin(); // controller\n}\n\n\nvoid loop()\n{\n moveSet(move, speed);\n delay(50);\n}" }, { "alpha_fraction": 0.34814006090164185, "alphanum_fraction": 0.3625834584236145, "avg_line_length": 38.037235260009766, "blob_id": "cf07d8ae2911bf12ac3ce6257c7378555cf3d754", "content_id": "0b2c13815c995cb643b6021e2cea272b23191a7d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 7339, "license_type": "no_license", "max_line_length": 108, "num_lines": 188, "path": "/maze/maze_generation.cpp", "repo_name": "RobRight/Personal", "src_encoding": "UTF-8", "text": "\n/*\n\nMaze Generater\n\nexample key: \n#:0,-:1,x:2\n\n###########\n#-xxx----x#\n#-----xx--#\n#xx-xxxxx-#\n###########\n\n*/\n\nclass Cell() {\nprivate:\n bool type; // 0: border0, 1: path, 2: wall\n unsigned int row;\n unsigned int column;\n unsigned int visited;\npublic:\n Cell(bool t, unsigned int r; unsigned int c) {\n type = t;\n row = r;\n column = c;\n visited = 0;\n }\n}\n\nclass Generate() {\nprivate:\n std::vector <std::vector <Cell> maze;\n unsigned int size_x;\n unsigned int size_y;\n unsigned int start_x;\n unsigned int start_y;\n unsigned int end_x;\n unsigned int end_y;\n //\n void generate_blank_maze() {\n maze.clear();\n std::vector <std::vector <Cell> t_maze;\n for (std::size_t i=0; i<size_x; i++) {\n std::vector <Cell> t_row;\n for (std::size_t j=0; j<size_y; j++) {\n if (i==0 || i==size_x-1 || j=00 || j==size_y-1) Cell c(0, i, j);\n else Cell c(2, i, j);\n t_row.push_back(c);\n }\n t_maze.push_back(t_row);\n }\n return t_maze;\n }\npublic:\n Generate(unsigned int x, unsigned int y) {\n size_x = x;\n size_y = y;\n start_x = 0;\n start_y = 0;\n end_x = x-1;\n end_y = y-1;\n }\n void 
generate_maze() {\n generate_blank_maze();\n done = false;\n while (!done) {\n int current_x = 0;\n int current_y = 0;\n if (current_x == end_x && current_y == end_y) {\n done = true;\n } else {\n // check if tiles around are walls\n // check if tiles around have been visited (order: NSEW)\n // if not, move there\n // // check for space around (need wall between path)\n // // if valid for space, mark as space\n // // set as current location and search from here\n while (true) {\n // UNTESTED: PROBABLY TOTALY WRONG\n if (maze.at(current_x).at(current_y+1).type != 0) { // if not border\n if (maze.at(current_x).at(current_y+1).visited == false) { // if not visited\n t_y = current_y+1;\n t_x = current_x;\n // if space out 2 in each direction is not a path (ie: keep walls between paths)\n bool t = true;\n if (t_y+2 < size_y) if (maze.at(t_x).at(t_y+2).type == 1) t = false;\n if (t_x+2 < size_x) if (maze.at(t_x+2).at(t_y).type == 1) t = false;\n if (t_y-2 > 0) if (maze.at(t_x).at(t_y-2).type == 1) t = false;\n if (t_x-2 > 0) if (maze.at(t_x-2).at(t_y).type == 1) t = false;\n if (t) {\n current_y = t_y;\n current_x = t_x;\n maze.at(current_x).at(current_y).visited = true; // set visited\n maze.at(current_x).at(current_y).type = 1; // set path\n break;\n } else {\n // go backwards\n }\n }\n }\n if (maze.at(current_x+1).at(current_y).type != 0) {\n if (maze.at(current_x+1).at(current_y).visited == false) {\n t_y = current_y+1;\n t_x = current_x;\n bool t = true;\n if (t_y+2 < size_y) if (maze.at(t_x).at(t_y+2).type == 1) t = false;\n if (t_x+2 < size_x) if (maze.at(t_x+2).at(t_y).type == 1) t = false;\n if (t_y-2 > 0) if (maze.at(t_x).at(t_y-2).type == 1) t = false;\n if (t_x-2 > 0) if (maze.at(t_x-2).at(t_y).type == 1) t = false;\n if (t) {\n current_y = t_y;\n current_x = t_x;\n maze.at(current_x).at(current_y).visited = true;\n maze.at(current_x).at(current_y).type = 1;\n break;\n }\n }\n }\n if (maze.at(current_x).at(current_y-1).type != 0) {\n if (maze.at(current_x).at(current_y-1).visited == false) {\n t_y = current_y+1;\n t_x = current_x;\n bool t = true;\n if (t_y+2 < size_y) if (maze.at(t_x).at(t_y+2).type == 1) t = false;\n if (t_x+2 < size_x) if (maze.at(t_x+2).at(t_y).type == 1) t = false;\n if (t_y-2 > 0) if (maze.at(t_x).at(t_y-2).type == 1) t = false;\n if (t_x-2 > 0) if (maze.at(t_x-2).at(t_y).type == 1) t = false;\n if (t) {\n current_y = t_y;\n current_x = t_x;\n maze.at(current_x).at(current_y).visited = true;\n maze.at(current_x).at(current_y).type = 1;\n break;\n }\n }\n }\n if (maze.at(current_x-1).at(current_y).type != 0) {\n if (maze.at(current_x-1).at(current_y).visited == false) {\n t_y = current_y+1;\n t_x = current_x;\n bool t = true;\n if (t_y+2 < size_y) if (maze.at(t_x).at(t_y+2).type == 1) t = false;\n if (t_x+2 < size_x) if (maze.at(t_x+2).at(t_y).type == 1) t = false;\n if (t_y-2 > 0) if (maze.at(t_x).at(t_y-2).type == 1) t = false;\n if (t_x-2 > 0) if (maze.at(t_x-2).at(t_y).type == 1) t = false;\n if (t) {\n current_y = t_y;\n current_x = t_x;\n maze.at(current_x).at(current_y).visited = true;\n maze.at(current_x).at(current_y).type = 1;\n break;\n }\n }\n }\n }\n }\n }\n }\n void display_maze() {\n for (std::size_t i=0; i<maze.size(); i++) {\n for (std::size_t j=0; j<maze.at(i); j++) {\n unsigned int t = maze.at(i).at(j).type;\n if (t == 0) std::cout << \"#\";\n else if (t == 1) std::cout << \"-\";\n else if (t == 2) std::cout << \"x\";\n else std::cout << \"E\";\n }\n std::cout << \"/n/n\";\n }\n }\n void export_maze() {\n // TODO: export maze to file\n }\n 
std::vector <std::vector <Cell> return_maze() {\n return maze;\n }\n}\n\nint main() {\n\n Generate g;\n g.generate_blank_maze();\n g.generate_maze();\n g.display_maze();\n\n return 0;\n}" }, { "alpha_fraction": 0.6396396160125732, "alphanum_fraction": 0.6576576828956604, "avg_line_length": 19.592592239379883, "blob_id": "de47791a436fcd4190a4eb784897bfe0a8b25c53", "content_id": "f71e4de4e7b8852835f0629d304337545faf24f6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 555, "license_type": "no_license", "max_line_length": 174, "num_lines": 27, "path": "/_spark_car/main/vs_0_9_2/serialManager.h", "repo_name": "RobRight/Personal", "src_encoding": "UTF-8", "text": "/*\n * William Gregory \n * 5.3.2015\n * \n * Library for:\n * Motor Contoller (TB6612FNG)\n *\n * serialManager.h\n * serialManager.cpp\n*/\n\n#ifndef _SERIAL_MANAGER_\n#define _SERIAL_MANAGER_\n\n#include \"application.h\"\n\nclass SerialManager\n{\n private:\n void serialMove(int _val);\n void printFloat(float _val, unsigned int _precision);\n public:\n SerialManager();\n void call(int _mode, int _move, int _lastMove, float _heading, float _lat, float _lon, float _wplat, float _wplon, float _waypointDistance, float _waypointDirection);\n};\n\n#endif" }, { "alpha_fraction": 0.5053708553314209, "alphanum_fraction": 0.5063938498497009, "avg_line_length": 18.75757598876953, "blob_id": "f1a43f3a760d99e88e85c258e00200b2eee7a8d6", "content_id": "e69de84a1caddda50bdeb698f97bef718cd99adc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1955, "license_type": "no_license", "max_line_length": 79, "num_lines": 99, "path": "/school_manager/class_manager.hpp", "repo_name": "RobRight/Personal", "src_encoding": "UTF-8", "text": "#include <iostream>\n#include <math.h>\n#include <vector>\n#include <string>\n#include <ctime>\n\nusing namespace std;\n\nstruct school_class\n{\n string title;\n string id;\n int start_time;\n int end_time;\n};\n\nclass classes\n{\nprivate:\n // - variables\n vector <school_class> classes;\n // - functions\n void list_classes();\n void add_class(string, string, int, int);\n bool user_add_class();\npublic:\n // - variables\n // - functions\n school();\n bool run_manager();\n};\n\nschool::school()\n{\n cout << \"School Manager\" << endl;\n cout << endl;\n}\n\nvoid school::list_classes()\n{\n for (int i=0; i<classes.size(); i++)\n {\n cout << \" - \" << classes.at(i).title << endl;\n }\n cout << endl;\n}\n\nvoid school::add_class(string in_title, string in_id, int in_start, int in_end)\n{\n school_class temp;\n temp.title = in_title;\n temp.id = in_id;\n temp.start_time = in_start;\n temp.end_time = in_end;\n\n classes.push_back(temp);\n cout << \"Added class: \" << in_title << endl;\n cout << endl;\n}\n\nbool school::user_add_class()\n{\n string temp_title;\n string temp_id;\n int temp_start;\n int temp_end;\n // title\n cout << \"Enter class title: \" << endl;\n cin >> temp_title;\n // ID\n cout << \"Enter clas ID: \" << endl;\n cin >> temp_id;\n // start time\n cout << \"Enter start time: \" << endl;\n cin >> temp_start;\n // end time\n cout << \"Enter end time: \" << endl;\n cin >> temp_end;\n\n add_class(temp_title, temp_id, temp_start, temp_end);\n list_classes();\n\n return true;\n}\n\n// call to run\n// put run code here\nbool school::run_manager()\n{\n user_add_class();\n return true;\n}\n\nint main()\n{\n school sch;\n sch.run_manager();\n return 0;\n}" }, { "alpha_fraction": 0.5369318127632141, 
"alphanum_fraction": 0.5497159361839294, "avg_line_length": 14.30434799194336, "blob_id": "6f26246bc4454553991a1b9145918957851dd929", "content_id": "8e3a704f11c76211a144711f306cb788521cf474", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 704, "license_type": "no_license", "max_line_length": 46, "num_lines": 46, "path": "/connect_four_IDEA/java/ConnectFour.java", "repo_name": "RobRight/Personal", "src_encoding": "UTF-8", "text": "class Board {\n\n\t// [[row], [row], [row]]\n\tint[][] board = new int [6][7];\n\n\tvoid setup() {\n\n\t}\n\n\tvoid create_board() {\n\t\tfor (int i=0; i<board.length; i++) {\n\t\t\tfor (int j=0; j<board[i].length; j++) {\n\t\t\t\tboard[i][j] = 0;\n\t\t\t}\n\t\t}\n\t}\n\n\tvoid display_board() {\n\t\tSystem.out.println(\"|\");\n\t\tfor (int i=0; i<board.length; i++) {\n\t\t\tfor (int j=0; j<board[i].length; j++) {\n\t\t\t\tSystem.out.println(board[i][j])\n\t\t\t}\n\t\t\tSystem.out.println(System.lineSeparator());\n\t\t}\n\t}\n\n\tboolean add_peice(int row) {\n\n\t}\n}\n\npublic static class ConnectFour {\n\n\tvoid player_move() {\n\n\t}\n\n\tpublic static void main (String args[]) {\n\t\t// create board\n\t\t// while playing\n\t\t// - player 1\n\t\t// - player 2\n\t\t// - check for winner\n\t}\n}\n" }, { "alpha_fraction": 0.5691527724266052, "alphanum_fraction": 0.619840681552887, "avg_line_length": 24.822429656982422, "blob_id": "91ed032b23e19139e5d8745cb8803d2686dbc62a", "content_id": "18cda36fc380d3f83f036b087e0e64b9ae7897f7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2762, "license_type": "no_license", "max_line_length": 124, "num_lines": 107, "path": "/_spark_car/README.md", "repo_name": "RobRight/Personal", "src_encoding": "UTF-8", "text": "# SparkCar\n### vs 0.8\n\n## not complete\n## not currently supported\n\nThis is the code used in my Spark Core controlled 'autonomous car'. 
<br />\n\n```c++\n\n// From carControl.ino\n// Decide move\n//-----------------------------------------------------------------------------------------------------\n\nvoid carDecideMove()\n{\n lastMove = move;\n move = 2;\n \n if(useWaypoint)\n {\n if(isValidGPS)\n {\n if((waypointDirection - heading) > headingTolerance) move = 5; // heading greater than tolerance, go left\n else if((waypointDirection - heading) < -headingTolerance) move = 4; // heading less than tolerance, go right\n }\n else move = 0;\n }\n \n if(useSensors)\n {\n move = sensorMove;\n Serial.println(move);\n }\n slow = sensor.close_wall;\n if(slow == 1) speed = 100;\n else speed = 200;\n}\n\n\n// From carSensor.cpp\n// Medium back\n if(avg_dist_medium_back > 500) // far wall\n {\n far_back_wall = 1; \n if(avg_dist_medium_back > 1200)\n {\n back_wall = 1; // back wall\n move = 2;\n }\n }\n \n```\n## Files:\n\n### carControl (main):\n\n-- [carControl.ino](main/vs_0_9_2/carControl.ino) --\n\n### trexController Library: - see resources\n\n[trexController.cpp](main/vs_0_9_2/trexController.cpp)\n\n[trexController.h](main/vs_0_9_2/trexController.h)\n\n### carSensor Library:\n\n-- [carSensor.cpp](main/vs_0_9_2/carSensor.cpp) --\n\n[carSensor.h](main/vs_0_9_2/carSensor.h)\n\n### serialManager Library:\n\n[serialManager.cpp](main/vs_0_9_2/serialManager.cpp)\n\n[serialManager.h](main/vs_0_9_2/serialManager.h)\n\n## I do not take credit for the following libraries:\n\n### lsm303 Library:\n\n[lsm303.cpp](main/vs_0_9_2/lsm303.cpp)\n\n[lsm303.h](main/vs_0_9_2/lsm303.h)\n\n### tinyGPS Library:\n\n[tinyGPS.cpp](main/vs_0_9_2/tinyGPS.cpp)\n\n[tinyGPS.h](main/vs_0_9_2/tinyGPS.h)\n\n## About:\n\nSpark Core - http://www.spark.io/ <br />\nChassis - https://www.sparkfun.com/products/11056 <br />\nmotor controller - https://www.sparkfun.com/products/retired/12075 <br />\ncompass/acceleromitor (LSM303) - https://www.adafruit.com/product/1120 <br />\ngps (MTK3339) - https://www.adafruit.com/products/746 <br/>\n(3) short range distance sensors (2-15cm) - http://www.digikey.com/product-detail/en/0/425-2854-ND <br />\n(1) medium range distance sensor (10-80cm) - http://www.digikey.com/product-detail/en/0/28995-ND <br />\n(1) long range distance sensor (15-150cm) -https://www.sparkfun.com/products/8958 <br />\n(2) 7.4v 1000mAh lipo - https://www.sparkfun.com/products/11855 <br />\n\n## Resources:\n\nTinyGPS - http://arduiniana.org/libraries/tinygps/\nDagu Hi-Tech Electronics motor controller example code - https://sites.google.com/site/daguproducts/home/download-page<br />" }, { "alpha_fraction": 0.48154231905937195, "alphanum_fraction": 0.5030924081802368, "avg_line_length": 24.74378204345703, "blob_id": "af4fb8d78944153b7fe3a2f1c24cfd8326e78ae1", "content_id": "bb1911502b0305387011a45ec8d204a55a464720", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 10348, "license_type": "no_license", "max_line_length": 146, "num_lines": 402, "path": "/_spark_car/main/vs_0_8/carControl.ino", "repo_name": "RobRight/Personal", "src_encoding": "UTF-8", "text": "/*\n * William Gregory \n * 4.13.2015\n * 5.1.2015 \n *\n * Main code for spark car\n * \n * ToDo:\n * add degree of turn to movement\n * add complex movement. 
ex back,turn forward\n * integrate gps, compass, sensors\n*/\n\n// SYSTEM_MODE(SEMI_AUTOMATIC); // dont auto connect to cloud\n\n// Setup\n//-----------------------------------------------------------------------------------------------------\n\n#include \"carSensor.h\"\n#include \"carMovement.h\"\n#include \"lsm303.h\"\n#include \"tinyGPS.h\"\n#include \"serialManager.h\"\n#include \"math.h\"\n\n// Enable/Desable general settings\n//-----------------------------------------------------------------------------------------------------\n//-----------------------------------------------------------------------------------------------------\n// Action\n#define useMotors TRUE // engage motors\n#define useSensors TRUE\n#define useWaypoint TRUE // combine with compass and gps\n\n// Serial\n#define useMotorSerial TRUE\n#define useSensorSerial TRUE // (SENS)\n#define useSensorSerialDeep FALSE // indepth sensor data\n#define useCompassSerial TRUE // (COMP)\n#define useGPSSerial TRUE // (GPS)\n#define useWaypointSerial TRUE // (WAY)\n#define useVoltageSerial TRUE // battery voltage\n//-----------------------------------------------------------------------------------------------------\n//-----------------------------------------------------------------------------------------------------\n\n#define VOLTAGE_PIN A7\n#define CONNECT_PIN D7\n\n// Initialize libraries\nMove moveCar(D2, A0, A1, D5, D3, D6, D4); // motor control\nSensor sensor(A2, A3, A4, A5, A6); // dist sensor one\nLSM303 compass; // compass/acceleromitor sensor\nTinyGPS gps; // GPS\nSerialManager serial;\n\n// Define\nint mode = 1; // control mode (1) general ok (0) off\nint move = 0; // move to execute\nint speed = 100; // default driving speed\nconst float Pi = 3.14159265; // circumference / diameter\nint lastMove; // previous move if needed\nint sensorMove; // move defined by sensor library\nint offlineMode;\nuint8_t runNumber = 0x00;\nconst int runNumberAddress = 0; // EEPROM address for run number\n\n// Compass\nfloat heading = 0.0; // compass heading\nconst int compAcuracy = 30;\nconst float compassOffsetAllowed = 15;\nbool compassError; // false if not found\nconst float headingTolerance = 10.0; // degrees until move\n\n// GPS\nbool isValidGPS; // true when GPS data is valid\nfloat lat, lon; // GPS lat and lan\nfloat wplat, wplon; // waypoint lat and lon\nunsigned long age; // gps data age\n\n// Waypoint\nint nextWaypoint = 0; // next waypoint # to assign\nint newWaypoint = 1; // set (1) to assign next waypoint\nfloat waypointDistance, waypointDirection, offsetDirection;\nconst float waypointOffsetAllowed = 0.00000001; // tolerance until waypoint reached\n\n// Serial print\nunsigned long previousMillis = 0; // last print time\nunsigned long interval = 1000; // time between print\n\n// Battery\nfloat bat_voltage;\nint bat_avg_reading;\n\nvoid setup()\n{\n //runNumber = EEPROM.read(runNumberAddress); // Read last run number\n //runNumber++;\n EEPROM.write(runNumberAddress, runNumber); // Write current run number\n \n Serial.begin(9600); // debug\n Serial1.begin(9600); // gps\n Wire.begin(); // compass\n pinMode(VOLTAGE_PIN, INPUT);\n pinMode(CONNECT_PIN, INPUT);\n \n compassError = compass.init();\t // Returns \"true\" if compass found\n\tcompass.enableDefault();\n \n setWaypoint(); // set first waypoint\n newWaypoint = 0; // true if get new waypoint\n \n delay(400);\n \n // Serial1.print(\"runNumber=\");\n // Serial1.println(runNumber);\n}\n\n// main 
loop\n//-----------------------------------------------------------------------------------------------------\n\nvoid loop()\n{\n checkConnection();\n checkVoltage();\n \n // --MODE ONE--\n if(mode == 1) // mode(0) for all stop, (1) for ok\n {\n // Sensors\n if(useSensors)\n {\n sensor.interpretSensors(); // check distance sensor library\n }\n\n // Waypoint\n if(useWaypoint)\n {\n // Compass\n checkCompass();\n \n // GPS\n getGPS();\n \n // Waypoint\n if(abs(lat-wplat) < waypointOffsetAllowed) // close to waypoint\n {\n //newWaypoint = 1;\n }\n if(newWaypoint == 1) // set next waypoint\n {\n setWaypoint();\n newWaypoint = 0;\n }\n findWaypoint();\n }\n \n carDecideMove(); // decide move\n \n if(useMotors) setMove(); // execute move\n }\n else\n {\n delay(5000);\n }\n\n manageSerial();\n}\n\n// decide move\n//-----------------------------------------------------------------------------------------------------\n\nvoid carDecideMove()\n{\n lastMove = move;\n move = 1;\n \n if(useWaypoint)\n {\n if(isValidGPS)\n {\n if((waypointDirection - heading) > headingTolerance) move = 3; // heading greater than tolerance, go left\n else if((waypointDirection - heading) < -headingTolerance) move = 4; // heading less than tolerance, go right\n }\n else move = 0;\n }\n \n if(useSensors)\n {\n \n }\n}\n\n\n// move\n//-----------------------------------------------------------------------------------------------------\n\n// 0:stop, 1:forward, 2:backward, 3:turnRight, 4:turnLeft, 5:moveRight, 6:moveLeft, 7:followCourse, 8:allign to waypoint\nvoid setMove()\n{\n switch(move)\n {\n case 0:\n moveCar.stop();\n break;\n case 1:\n moveCar.moveForward(speed);\n break;\n case 2:\n moveCar.moveBackward(speed);\n break;\n case 3:\n moveCar.turnRight(speed);\n break;\n case 4:\n moveCar.turnLeft(speed);\n break;\n case 5:\n moveCar.moveRight(0);\n break;\n case 6:\n moveCar.moveLeft(0);\n break;\n default:\n moveCar.stop();\n }\n}\n\n// compass (COMP)\n//-----------------------------------------------------------------------------------------------------\n\nvoid checkCompass()\n{\n if (compassError)\n\t{\n\t compass.read();\n\t int mag_x = compass.m.x;\n\t int mag_y = compass.m.y;\n\t // int mag_z = compass.m.z;\n\n\t for(int x = 0; x < compAcuracy; x++)\n\t {\n\t\t compass.read();\n\t\t mag_x = mag_x + compass.m.x / 2;\n\t mag_y = mag_y + compass.m.y / 2;\n\t // mag_z = mag_z + compass.m.z / 2;\n\t }\n heading = (((atan2(mag_y, mag_x) * 180.0) / Pi) + 180.0);\n\t}\n}\n\n\n// GPS\n//-----------------------------------------------------------------------------------------------------\n\nvoid checkGPSvalid()\n{\n isValidGPS = false;\n for (unsigned long start = millis(); millis() - start < 1000;)\n {\n // Check GPS data is available\n while (Serial1.available())\n {\n char c = Serial1.read();\n \n // parse GPS data\n if (gps.encode(c))\n isValidGPS = true;\n }\n }\n}\n\nvoid getGPS()\n{\n checkGPSvalid();\n\n if (isValidGPS)\n {\n gps.f_get_position(&lat, &lon, &age);\n }\n}\n\n// waypoint\n//-----------------------------------------------------------------------------------------------------\n\nvoid setWaypoint()\n{\n switch(nextWaypoint)\n {\n case 0:\n wplat = 39.542652;\n wplon = -119.813440;\n break;\n case 1:\n wplat = 1.0;\n wplon = 1.0;\n break;\n case 2:\n wplat = 0.0;\n wplon = 0.0;\n break;\n case 3:\n wplat = 0.0;\n wplon = 0.0;\n break;\n }\n nextWaypoint++;\n}\n\nvoid findWaypoint()\n{\n float TMPdlat = (wplat - lat);\n float TMPdlon = wplon - lon;\n waypointDistance = sqrt(pow(TMPdlat, 2) + pow(TMPdlon, 
2));\n \n Serial.println(TMPdlat);\n Serial.println(TMPdlon);\n \n TMPdlat = toRad(TMPdlat);\n TMPdlon = toRad(TMPdlon);\n float TMPlat = toRad(lat);\n float TMPwplat = toRad(wplat);\n \n float y = sin(TMPdlon) * cos(TMPwplat);\n float x = cos(TMPlat) * sin(TMPwplat) - sin(TMPlat) * cos(TMPwplat) * cos(TMPdlon);\n waypointDirection = toDeg(atan2(y, x));\n \n if (waypointDirection < 0)\n {\n waypointDirection = 360 - abs(waypointDirection);\n }\n}\n\n\n// other\n//-----------------------------------------------------------------------------------------------------\n\nvoid manageSerial()\n{\n unsigned long currentMillis = millis();\n if ((currentMillis - previousMillis) > interval)\n {\n previousMillis = currentMillis;\n serial.sync(mode, offlineMode, useMotorSerial, useWaypointSerial, useGPSSerial, useCompassSerial, useSensorSerial, useVoltageSerial,\n move, lastMove, heading, compassError, lat, lon, isValidGPS, sensor.right_wall, sensor.left_wall, sensor.front_wall, sensor.back_wall,\n sensor.close_right_wall, sensor.close_left_wall, sensor.close_front_wall, sensor.far_front_wall, sensor.far_back_wall, nextWaypoint,\n wplat, wplon, waypointDistance, waypointDirection, bat_voltage, bat_avg_reading);\n serial.call();\n if (useSensorSerialDeep)\n {\n sensor.serialDeep();\n } \n }\n}\n\nfloat toRad(float val)\n{\n val = val * Pi / 180;\n return val;\n}\n\nfloat toDeg(float val)\n{\n val = val * 180 / Pi;\n return val;\n}\n\nvoid checkVoltage()\n{\n bat_avg_reading = analogRead(VOLTAGE_PIN);\n for (int x = 0; x < 40; x++)\n {\n bat_avg_reading = ((bat_avg_reading + analogRead(VOLTAGE_PIN)) / 2);\n }\n while (bat_avg_reading < 780) // low battery\n {\n mode = 0;\n bat_avg_reading = analogRead(VOLTAGE_PIN);\n for (int x = 0; x < 40; x++)\n {\n bat_avg_reading = ((bat_avg_reading + analogRead(VOLTAGE_PIN)) / 2);\n }\n }\n //mode = 1;\n \n if (bat_avg_reading > 1000); //high voltage\n \n bat_voltage = map(bat_avg_reading, 780.0, 880.0, 6.9, 7.9); // find voltage\n}\n\nvoid checkConnection()\n{\n if (digitalRead(CONNECT_PIN) == HIGH)\n {\n Spark.disconnect();\n offlineMode = 1;\n }\n \n if (digitalRead(CONNECT_PIN) == LOW)\n {\n Spark.connect();\n offlineMode = 0;\n }\n}" } ]
52
fuurin/meyer
https://github.com/fuurin/meyer
34233ff7e5b330d53010a14ca5c6eea139e52491
745e9f30f256da4ca5672a5ee59e478a37ba2d01
2bef79f8e07a1d6f65a5dfa34965b0be2de4f4b6
refs/heads/master
2021-01-23T22:24:21.235263
2018-01-25T18:05:03
2018-01-25T18:05:03
102,321,727
3
0
null
null
null
null
null
[ { "alpha_fraction": 0.556291401386261, "alphanum_fraction": 0.7284768223762512, "avg_line_length": 36.75, "blob_id": "c9f08e04d42613d43ff2637192b73fe988e0d62e", "content_id": "230ca6fb110e1e79604eefbffccdbc45e58c3888", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 151, "license_type": "no_license", "max_line_length": 129, "num_lines": 4, "path": "/doc/html/search/all_f.js", "repo_name": "fuurin/meyer", "src_encoding": "UTF-8", "text": "var searchData=\n[\n ['total',['total',['../namespacemeyer_1_1special__program.html#a16ef00406a38891967508e7b6b02cd9b',1,'meyer::special_program']]]\n];\n" }, { "alpha_fraction": 0.5204917788505554, "alphanum_fraction": 0.7090163826942444, "avg_line_length": 47.79999923706055, "blob_id": "89be25797ba88c887432ae0fbe0491032f3d5a83", "content_id": "23ac7a8d8fc7274319d59250343f7609e20252f6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 244, "license_type": "no_license", "max_line_length": 112, "num_lines": 5, "path": "/doc/html/search/variables_6.js", "repo_name": "fuurin/meyer", "src_encoding": "UTF-8", "text": "var searchData=\n[\n ['set',['Set',['../namespacemeyer_1_1program.html#ac9c465b4f9426d61a87d653e9f26dc36',1,'meyer::program']]],\n ['set_5f',['set_',['../namespacemeyer_1_1program.html#aa5506eff84f20ed482dae9ad8b143b22',1,'meyer::program']]]\n];\n" }, { "alpha_fraction": 0.6439887881278992, "alphanum_fraction": 0.6458527445793152, "avg_line_length": 20.918367385864258, "blob_id": "ba90e6aa02312ea2f1d0b435991fbdef10556a46", "content_id": "e542b9013f2c5086a7da14c6a1462419dfa61d4a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1073, "license_type": "no_license", "max_line_length": 62, "num_lines": 49, "path": "/meyer/contracted_program.py", "repo_name": "fuurin/meyer", "src_encoding": "UTF-8", "text": "# encoding: utf-8\nfrom z3 import And\nfrom .program import progs\n\ndef contracts(b, p):\n\treturn b < p\n\ndef require(pre, b, post):\n\treturn And(b.pre() >= pre, b.post()/pre <= post, +b)\n\nclass ContractedProgram():\n\t\"\"\"This class represents a notation of contracted program.\"\"\"\n\n\tdef __init__(self, s, b=None, p=None):\n\t\tif b is None and p is None:\n\t\t\tself.b, self.p = progs(s, 'b p')\n\t\telse:\n\t\t\tself.b, self.p = b, p\n\t\ts.add(contracts(self.b, self.p))\n\n\tdef pre(self):\n\t\treturn self.p.pre()\n\n\tdef b(self):\n\t\treturn self.b\n\n\tdef post(self):\n\t\treturn self.p.post()\n\n\tdef strongest_postcondition(self, C=None):\n\t\tif C is None:\n\t\t\treturn self.b.post() / self.pre()\n\t\telse:\n\t\t\treturn self.b.post() / C\n\n\tdef sp(self, C=None):\n\t\treturn self.strongest_postcondition(C)\n\n\tdef weakest_precondition(self, r=None):\n\t\tif r is None:\n\t\t\treturn self.b.dom() - (self.b.post() - self.post()).dom()\n\t\telse:\n\t\t\treturn self.b.dom() - (self.b.post() - r).dom()\n\n\tdef wp(self, r=None):\n\t\treturn self.weakest_precondition(r)\n\nclass CProg(ContractedProgram):\n\t\"\"\"This is short for ContractedProgram\"\"\"" }, { "alpha_fraction": 0.5700325965881348, "alphanum_fraction": 0.7394136786460876, "avg_line_length": 60.400001525878906, "blob_id": "a485000fb0c0f4c777d19a0c52f1c643a6f8257d", "content_id": "67fca53de683b012cf7a5e0c0e0b589b766df778", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 307, "license_type": "no_license", "max_line_length": 146, 
"num_lines": 5, "path": "/doc/html/search/functions_0.js", "repo_name": "fuurin/meyer", "src_encoding": "UTF-8", "text": "var searchData=\n[\n ['as_5flist',['as_list',['../namespacemeyer_1_1util_1_1z3py__util.html#a88b1476f6b8d614eb1907a94d711f40d',1,'meyer::util::z3py_util']]],\n ['atomic_5fconcurrency',['atomic_concurrency',['../namespacemeyer_1_1constructs.html#a0646fbb3ed8f783ab062b3d3dce3b170',1,'meyer::constructs']]]\n];\n" }, { "alpha_fraction": 0.7032617330551147, "alphanum_fraction": 0.7096260786056519, "avg_line_length": 34.94285583496094, "blob_id": "5d701b7bae4541a23f224ecd814ffe35c6f121cc", "content_id": "b89fad846cb555eb0654f939c9ea86f5177125e7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1257, "license_type": "no_license", "max_line_length": 116, "num_lines": 35, "path": "/meyer/feasibility.py", "repo_name": "fuurin/meyer", "src_encoding": "UTF-8", "text": "# encoding: utf-8\nfrom z3 import And, Not\n\n## @file feasibility.py\n# Module used to define the condition of feasibility on a program.\n# \n# A program is feasible if all the elements of its precondition are included in the left side of its postcondition.\n\n## Creates the assumption of feasibility on a program.\n# @param p The program that needs to be feasible.\n# @param strong Let it be True if you need pre_p = dom(post_p) assumption.\n# @return The assumption.\ndef is_feasible(p, strong=False):\n\tif strong:\n\t\treturn p.pre() == p.dom_post()\n\telse:\n\t\treturn p.pre() <= p.dom_post()\n\ndef feasible(*progs, strong=False):\n\tif len(progs) == 0:\n\t\traise Exception(\"feasible is receiving nothing.\")\n\tif len(progs) == 1:\n\t\treturn is_feasible(progs[0], strong)\n\treturn [is_feasible(p, strong) for p in list(progs)]\n\n## Creates the assumption of infeasibility on a program.\n# @param p The program that needs to be infeasible.\n# @param strong Let it be True if you need pre_p != dom(post_p) assumption.\n# @return The assumption.\ndef infeasible(*progs, strong=False):\n\tif len(progs) == 0:\n\t\traise Exception(\"feasible is receiving nothing.\")\n\tif len(progs) == 1:\n\t\treturn Not(is_feasible(progs[0], strong))\n\treturn [Not(is_feasible(p, strong)) for p in list(progs)]" }, { "alpha_fraction": 0.5625, "alphanum_fraction": 0.7083333134651184, "avg_line_length": 35, "blob_id": "ba7cab4a16204a0eb4feb6cc26efae014be6582c", "content_id": "2cddf1d750737f5efcdeed01314abc6dd61cd326", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 144, "license_type": "no_license", "max_line_length": 122, "num_lines": 4, "path": "/doc/html/search/all_11.js", "repo_name": "fuurin/meyer", "src_encoding": "UTF-8", "text": "var searchData=\n[\n ['white',['white',['../namespacemeyer_1_1util_1_1color.html#a54c3aa0aab3ccea89e2b6928b4e23d6d',1,'meyer::util::color']]]\n];\n" }, { "alpha_fraction": 0.5406162738800049, "alphanum_fraction": 0.5826330780982971, "avg_line_length": 12.692307472229004, "blob_id": "44cb14bc01bcbf4c46d61dfc4f385cf407d1c02e", "content_id": "911c02c952fa7b19cfc86c371d567f7784699af4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 357, "license_type": "no_license", "max_line_length": 30, "num_lines": 26, "path": "/doc/html/search/searchdata.js", "repo_name": "fuurin/meyer", "src_encoding": "UTF-8", "text": "var indexSectionsWithContent =\n{\n 0: \"_abcdefghimnprstuwyz\",\n 1: \"m\",\n 2: \"_cdefiprsz\",\n 3: \"acdefghimnprstu\",\n 4: 
\"abcgprswy\"\n};\n\nvar indexSectionNames =\n{\n 0: \"all\",\n 1: \"namespaces\",\n 2: \"files\",\n 3: \"functions\",\n 4: \"variables\"\n};\n\nvar indexSectionLabels =\n{\n 0: \"All\",\n 1: \"Namespaces\",\n 2: \"Files\",\n 3: \"Functions\",\n 4: \"Variables\"\n};\n\n" }, { "alpha_fraction": 0.6036036014556885, "alphanum_fraction": 0.6396396160125732, "avg_line_length": 26.75, "blob_id": "51a82e2278e45d47e90e21d33aa69ab10eb2c08d", "content_id": "5af1f723f1c44e0afad7b6fd7084ad342c85582e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 111, "license_type": "no_license", "max_line_length": 89, "num_lines": 4, "path": "/doc/html/search/files_8.js", "repo_name": "fuurin/meyer", "src_encoding": "UTF-8", "text": "var searchData=\n[\n ['special_5fprogram_2epy',['special_program.py',['../special__program_8py.html',1,'']]]\n];\n" }, { "alpha_fraction": 0.6459510326385498, "alphanum_fraction": 0.660075306892395, "avg_line_length": 23.251142501831055, "blob_id": "3c8aecb2688db22d943e0ba17704838eb1f16637", "content_id": "cdf143f983ef19499e90cc6886d9b88a366240b9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5310, "license_type": "no_license", "max_line_length": 83, "num_lines": 219, "path": "/meyer/util/z3py_set.py", "repo_name": "fuurin/meyer", "src_encoding": "UTF-8", "text": "# encoding: utf-8\nimport inspect\nfrom z3 import ArraySort, BoolSort, EnumSort, ForAll, Exists, And, Or, Not, Implies\nfrom .z3py_util import U, const, evaluate\n\nSORT = U\nSET_SORT = ArraySort(U, BoolSort())\n\ndef set_sort(sort):\n\tglobal SORT\n\tSORT = sort\n\tglobal SET_SORT\n\tSET_SORT = ArraySort(sort, BoolSort())\n\ndef get_sort():\n\tglobal SORT\n\tsort = SORT\n\treturn sort\n\nclass Set():\n\t\"\"\"Base class for set instance.\"\"\"\n\t\n\t# @param p A set instance created by Z3.py.\n\tdef __init__(self, s):\n\t\tself.s = s\n\n\tdef __call__(self, x):\n\t\treturn self.has(x)\n\n\tdef __neg__(self):\n\t\treturn Complement(self)\n\n\tdef __pow__(self, other):\n\t\treturn disjoint(self, other)\n\n\tdef __sub__(self, other):\n\t\treturn Subtraction(self, other)\n\n\tdef __and__(self, other):\n\t\treturn Intersection(self, other)\n\t\n\tdef __xor__(self, other):\n\t\tfrom .z3py_rel import Combination\n\t\treturn Combination(self, other)\n\n\tdef __or__(self, other):\n\t\treturn Union(self, other)\n\n\tdef __eq__(self, other):\n\t\treturn eq(self, other)\n\n\tdef __contains__():\n\t\treturn included(self, other)\n\n\tdef __le__(self, other):\n\t\treturn included(self, other)\n\n\tdef __ge__(self, other):\n\t\treturn includes(self, other)\n\n\tdef __ne__(self, other):\n\t\treturn Not(self.__eq__(other))\n\n\t# @param x An element that is included in this set.\n\t# @return The constraint that x is included in this set.\n\tdef has(self, x):\n\t\treturn self.s(x) if inspect.ismethod(self.s) else self.s[x]\n\n\t# @return A set instance created by Z3.py.\n\tdef z3(self):\n\t\treturn self.s\n\n## Creates a new set.\n# @param name The name of the created set.\n# @param name The sort of the created set.\n# @return The set instance created.\ndef set(name):\n\treturn Set(const(name, SET_SORT))\n\n## Creates multiple new sets.\n# @param names The names of the created sets, separated by space character.\n# @return a list of the set instances created.\ndef sets(names):\n\tnames = names.split(' ')\n\treturn [set(name) for name in names]\n\n## Returns a constraint that two sets are same.\n# @param s1 A set that 
will be same as s2.\n# @param s2 A set that will be same as s1.\n# @return A constraint that two sets are same.\ndef eq(s1, s2):\n\tx = const('x', get_sort())\n\treturn ForAll(x, s1(x) == s2(x))\n\n## Returns a constraint that two sets are same.\n# @param s1 A set that will be same as s2.\n# @param s2 A set that will be same as s1.\n# @return A constraint that two sets are same.\ndef includes(s1, s2):\n\tx = const('x', get_sort())\n\treturn ForAll(x, Implies(s2(x), s1(x)))\n\n## Returns a constraint that two sets are same.\n# @param s1 A set that will be same as s2.\n# @param s2 A set that will be same as s1.\n# @return A constraint that two sets are same.\ndef included(s1, s2):\n\tx = const('x', get_sort())\n\treturn ForAll(x, Implies(s1(x), s2(x)))\n\n## Prints a set\n# @param solver The solver in which the set is.\n# @param set The set that need to be printed.\ndef show_set(solver, set):\n\tif isinstance(set, Set):\n\t\tset = set.z3()\n\tif not str(set)[1] == '!':\n\t\tprint(\"content of\", set)\n\t\tif len(solver.model()[set].as_list()) > 1:\n\t\t\tprint(\" =\", solver.model()[set])\n\t\telse:\n\t\t\tprint(\" =\", evaluate(solver, solver.model()[set].as_list()[0]))\n\t\tprint()\n\ndef show_sets(solver, *sets):\n\tfor s in sets: show_set(solver, s)\n\ndef show_set_models(solver):\n\tis_set = lambda elt: elt.range() == SET_SORT\n\tsets = list(filter(is_set, solver.model()))\n\tshow_sets(solver, *sets)\n\nclass Union(Set):\n\t\"\"\"Union for set instances.\"\"\"\n\t\n\t# @param p A set instance created by Z3.py.\n\tdef __init__(self, s1, s2):\n\t\tself.s1 = s1\n\t\tself.s2 = s2\n\n\t# @param x An element that is included in this union of sets.\n\t# @return The constraint that x is included in this set.\n\tdef has(self, x):\n\t\treturn Or(self.s1(x), self.s2(x))\n\n\tdef z3(self):\n\t\treturn (self.s1, self.s2)\n\nclass Intersection(Set):\n\t\"\"\"Intersection for set instances.\"\"\"\n\t\n\t# @param p A set instance created by Z3.py.\n\tdef __init__(self, s1, s2):\n\t\tself.s1 = s1\n\t\tself.s2 = s2\n\n\t# @param x An element that is included in this Intersection of sets.\n\t# @return The constraint that x is included in this set.\n\tdef has(self, x):\n\t\treturn And(self.s1(x), self.s2(x))\n\n\tdef z3(self):\n\t\treturn (self.s1, self.s2)\n\nclass Inter(Intersection):\n\t\"\"\"This is short for Intersection\"\"\"\n\nclass Subtraction(Set):\n\t\"\"\"Subtraction for set instances.\"\"\"\n\t\n\tdef __init__(self, s1, s2):\n\t\tself.s1 = s1\n\t\tself.s2 = s2\n\n\t# @param x An element that is included in this Intersection of sets.\n\t# @return The constraint that x is included in this set.\n\tdef has(self, x):\n\t\treturn And(self.s1(x), Not(self.s2(x)))\n\n\tdef z3(self):\n\t\treturn (self.s1, self.s2)\n\nclass Sub(Subtraction):\n\t\"\"\"This is short for Subtraction\"\"\"\n\nclass Complement(Set):\n\t\"\"\"Complement for a set instance.\"\"\"\n\t\n\t# @param p A set instance created by Z3.py.\n\tdef __init__(self, s):\n\t\tself.s = s\n\n\t# @param x An element that is NOT included in this set.\n\t# @return The constraint that x is NOT included in this set.\n\tdef has(self, x):\n\t\treturn Not(self.s(x))\n\nclass Cpl(Complement):\n\t\"\"\"This is short name for Complement\"\"\"\n\nclass Empty(Set):\n\t\"\"\"An empty set\"\"\"\n\tdef __init__(self):\n\t\tself.sort = get_sort()\n\n\tdef has(self, x):\n\t\treturn False\n\nclass Universe(Set):\n\t\"\"\" An Universe set\"\"\"\n\tdef __init__(self):\n\t\tself.sort = get_sort()\n\n\tdef has(self, x):\n\t\treturn True\n\ndef disjoint(s1, s2):\n\tx 
= const('x', get_sort())\n\treturn Not(Exists(x, (s1 & s2)(x)))" }, { "alpha_fraction": 0.5567010045051575, "alphanum_fraction": 0.738831639289856, "avg_line_length": 57.20000076293945, "blob_id": "94aea7f810d3e5b87fe34a4d41701000a46e4644", "content_id": "7db0aa841806492c5e8d52fb2e9f3a06ea7bbe48", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 291, "license_type": "no_license", "max_line_length": 137, "num_lines": 5, "path": "/doc/html/search/functions_3.js", "repo_name": "fuurin/meyer", "src_encoding": "UTF-8", "text": "var searchData=\n[\n ['equivalent',['equivalent',['../namespacemeyer_1_1equivalence.html#a74265e413d441b88ad03f99cd367c4ea',1,'meyer::equivalence']]],\n ['evaluate',['evaluate',['../namespacemeyer_1_1util_1_1z3py__util.html#ad469c85dd6ea1c5d3641446ea0246166',1,'meyer::util::z3py_util']]]\n];\n" }, { "alpha_fraction": 0.6154115796089172, "alphanum_fraction": 0.6280210018157959, "avg_line_length": 21.66666603088379, "blob_id": "6979e5e7962b778278c3b0aa0391108520872d14", "content_id": "957b195382fa56f1f6097fc38860e6e4a3294181", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2855, "license_type": "no_license", "max_line_length": 72, "num_lines": 126, "path": "/meyer/basic_constructs.py", "repo_name": "fuurin/meyer", "src_encoding": "UTF-8", "text": "# encoding: utf-8\nfrom z3 import Or, And\nfrom .program import Program\n\nclass Choice(Program):\n\t\"\"\"Choice, performs like p1 or p2 or ...\"\"\"\n\tdef __init__(self, *p):\n\t\tself.p = list(p)\n\n\tdef _set(self, x):\n\t\treturn Or([p.set(x) for p in self.p])\n\n\tdef _pre(self, x):\n\t\treturn Or([p.pre(x) for p in self.p])\n\n\tdef _post(self, x, y):\n\t\t# return Or([p.post(x, y) for p in self.p])\n\t\treturn Or([(p.post()/p.pre())(x, y) for p in self.p])\n\n\nclass Choi(Choice):\n\t\"\"\"This is short name for Choice\"\"\"\n\n\n\nclass Composition(Program):\n\t\"\"\"Composition, performs like p1 then like p2.\"\"\"\n\tdef __init__(self, p1, p2):\n\t\tself.p1 = p1\n\t\tself.p2 = p2\n\n\tdef _set(self, x):\n\t\treturn Or(self.p1.set(x), self.p2.set(x))\n\n\tdef _pre(self, x):\n\t\treturn (self.p1.pre() & self.p1.post() << self.p2.pre())(x) \n\t\n\tdef _post(self, x, y):\n\t\treturn (self.p1.post() // self.p2.pre() ^ self.p2.post())(x, y)\n\t\nclass Comp(Composition):\n\t\"\"\"This is short name for Composition\"\"\"\n\n\n\nclass SoftComposition(Program):\n\t\"\"\"Composition, performs like p1 then like p2.\"\"\"\n\tdef __init__(self, p1, p2):\n\t\tself.p1 = p1\n\t\tself.p2 = p2\n\n\tdef _set(self, x):\n\t\treturn Or(self.p1.set(x), self.p2.set(x))\n\n\tdef _pre(self, x):\n\t\treturn (self.p1.post() << self.p2.pre())(x)\n\n\tdef _post(self, x, y):\n\t\treturn (self.p1.post() ^ self.p2.post())(x, y)\n\nclass SComp(SoftComposition):\n\t\"\"\"This is short name for Composition\"\"\"\n\n\nclass Restriction(Program):\n\t\"\"\"Restriction, performs like a set c on program p.\"\"\"\n\tdef __init__(self, c, p):\n\t\tself.c = c\n\t\tself.p = p\n\n\tdef _set(self, x):\n\t\treturn self.p.set(x)\n\n\tdef _pre(self, x):\n\t\t# return self.p.pre(x) # This causes counter example in P6\n\t\t# return And(self.p.pre(x)) # interesting result; this causes unknown\n\t\treturn And(self.p.pre(x), self.c(x))\n\n\tdef _post(self, x, y):\n\t\treturn And(self.p.post(x, y), self.c(x))\n\nclass Rest(Restriction):\n\t\"\"\"This is short name for Restriction\"\"\"\n\n\nclass MeyerRestriction(Program):\n\t\"\"\"Restriction on Meyer's 
paper, performs like a set c on program p.\"\"\"\n\tdef __init__(self, c, p):\n\t\tself.c = c\n\t\tself.p = p\n\n\tdef _set(self, x):\n\t\treturn self.p.set(x)\n\n\tdef _pre(self, x):\n\t\treturn self.p.pre(x) # This causes counter example in P6\n\t\t# return And(self.p.pre(x)) # interesting result; this causes unknown\n\t\t# return And(self.p.pre(x), self.c(x))\n\n\tdef _post(self, x, y):\n\t\treturn And(self.p.post(x, y), self.c(x))\n\nclass MRest(MeyerRestriction):\n\t\"\"\"This is short name for MeyerRestriction\"\"\"\n\t\n\nclass Corestriction(Program):\n\t\"\"\"\n\tCorestriction, performs like \n\tp applied only when results satisfy a set C.\n\t\"\"\"\n\tdef __init__(self, p, c):\n\t\tself.p = p\n\t\tself.c = c\n\n\tdef _set(self, x):\n\t\treturn self.p.set(x)\n\n\tdef _pre(self, x):\n\t\treturn (self.p.pre() & self.p.post() << self.c)(x)\n\n\tdef _post(self, x, y):\n\t\treturn And(self.p.post(x, y), self.c(y))\n\nclass Corest(Corestriction):\n\t\"\"\"This is short name for Corestriction\"\"\"" }, { "alpha_fraction": 0.747787594795227, "alphanum_fraction": 0.752212405204773, "avg_line_length": 42.095237731933594, "blob_id": "ab89d40dbc4338d8b3b103a4c1ee027823c669ad", "content_id": "c70faabd5c488348361fac2f8a3b2f63631c8870", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 904, "license_type": "no_license", "max_line_length": 98, "num_lines": 21, "path": "/meyer/meyer.py", "repo_name": "fuurin/meyer", "src_encoding": "UTF-8", "text": "# encoding: utf-8\nfrom z3 import Not, sat\nfrom .util.z3py_util import proof as util_proof, U\nfrom .util.z3py_set import show_set_models\nfrom .program import show_prog_models\n\n## Proof of the conclusion which will be nagated and added to the solver.\n# @param solver The solver which contains all the premises.\n# @param conclusion The conclusion constraint that you'd like to proof.\n# @param title The title of the theorem.\n# @param reset Boolean that indicates if the solver will be reset after the proof, true is reset.\n# @return The result (sat, unsat or unknown) of the proof.\ndef conclude(solver, conclusion, title=None, reset=True, show_solver=False):\n\tsolver.add(Not(conclusion))\n\tresult = util_proof(solver, title=title, reset=False, show_solver=show_solver, show_model=False)\n\tif result == sat:\n\t\tshow_set_models(solver)\n\t\tshow_prog_models(solver)\n\tif reset:\n\t\tsolver.reset()\n\t# return result" }, { "alpha_fraction": 0.5347222089767456, "alphanum_fraction": 0.7083333134651184, "avg_line_length": 35, "blob_id": "5f3450ae3ccfc11786924fc6d395c563a1a348fb", "content_id": "23a0e7d28c4a5b4d58b2d632070863f0a24dd7f3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 144, "license_type": "no_license", "max_line_length": 122, "num_lines": 4, "path": "/doc/html/search/variables_3.js", "repo_name": "fuurin/meyer", "src_encoding": "UTF-8", "text": "var searchData=\n[\n ['green',['green',['../namespacemeyer_1_1util_1_1color.html#a408ce5a7a281f56a95ee22c3b30917ce',1,'meyer::util::color']]]\n];\n" }, { "alpha_fraction": 0.51408451795578, "alphanum_fraction": 0.716901421546936, "avg_line_length": 77.88888549804688, "blob_id": "70f652b9d379f768d08986d974efaafdaba3dcc1", "content_id": "2f6f07963babdf462e0bfdd225252254afeb4080", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 710, "license_type": "no_license", "max_line_length": 124, "num_lines": 9, "path": 
"/doc/html/search/variables_4.js", "repo_name": "fuurin/meyer", "src_encoding": "UTF-8", "text": "var searchData=\n[\n ['post',['Post',['../namespacemeyer_1_1program.html#a18877460818246ee839f29d7e6dc5eed',1,'meyer::program']]],\n ['post_5f',['post_',['../namespacemeyer_1_1program.html#a3222d9981c3064528ea694bbadf9c67a',1,'meyer::program']]],\n ['pre',['Pre',['../namespacemeyer_1_1program.html#a18582ffc9743be82070d174cf1dd0def',1,'meyer::program']]],\n ['pre_5f',['pre_',['../namespacemeyer_1_1program.html#a6f2f5087d48ccd14f64219bc08379388',1,'meyer::program']]],\n ['prog',['Prog',['../namespacemeyer_1_1program.html#a314c908a305b7a9bf8e7bc6b9cde2e02',1,'meyer::program']]],\n ['purple',['purple',['../namespacemeyer_1_1util_1_1color.html#acb70a71abf2319da1f0086bc3c7c4315',1,'meyer::util::color']]]\n];\n" }, { "alpha_fraction": 0.6284095644950867, "alphanum_fraction": 0.6390829086303711, "avg_line_length": 21, "blob_id": "887df6961e123cefe66941fb347cba0b132007d8", "content_id": "fb4902c64164842633e6dd9f6d12189ef66c40b0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7589, "license_type": "no_license", "max_line_length": 83, "num_lines": 345, "path": "/meyer/util/z3py_rel.py", "repo_name": "fuurin/meyer", "src_encoding": "UTF-8", "text": "# encoding: utf-8\nimport inspect\nfrom z3 import ArraySort, BoolSort, EnumSort, ForAll, Exists, And, Or, Not, Implies\nfrom .z3py_set import Set\nfrom .z3py_util import U, const, unveil, model\n\nSORT_DOM = U\nSORT_RAN = U\nREL_SORT = ArraySort(U, ArraySort(U, BoolSort()))\n\ndef set_sort(sort_dom, sort_ran):\n\tglobal SORT_DOM\n\tSORT_DOM = sort_dom\n\tglobal SORT_RAN\n\tSORT_RAN = sort_ran\n\tglobal REL_SORT\n\tREL_SORT = ArraySort(\n\t\tsort_dom, \n\t\tArraySort(sort_ran, BoolSort())\n\t)\n\ndef get_sort_dom():\n\tglobal SORT_DOM\n\tsort_dom = SORT_DOM\n\treturn sort_dom\n\ndef get_sort_ran():\n\tglobal SORT_RAN\n\tsort_ran = SORT_RAN\n\treturn sort_ran\n\nclass Relation():\n\t\"\"\"Base class for relation instance.\"\"\"\n\t\n\t# @param r A relation instance created by Z3.py.\n\tdef __init__(self, r):\n\t\tself.r = r\n\n\tdef __call__(self, x, y):\n\t\treturn self.has(x, y)\n\n\tdef __neg__(self):\n\t\treturn Complement(self)\n\n\tdef __truediv__(self, other):\n\t\treturn Restriction(self, other)\n\n\tdef __floordiv__(self, other):\n\t\treturn Corestriction(self, other)\n\n\tdef __sub__(self, other):\n\t\treturn Subtraction(self, other)\n\n\tdef __lshift__(self, other):\n\t\treturn InverseImage(self, other)\n\n\tdef __rshift__(self, other):\n\t\treturn Image(self, other)\n\n\tdef __and__(self, other):\n\t\treturn Intersection(self, other)\n\n\tdef __or__(self, other):\n\t\treturn Union(self, other)\n\n\tdef __xor__(self, other):\n\t\treturn Composition(self, other)\n\n\tdef __eq__(self, other):\n\t\treturn eq(self, other)\n\n\tdef __contains__():\n\t\treturn included(self, other)\n\n\tdef __le__(self, other):\n\t\treturn included(self, other)\n\n\tdef __ge__(self, other):\n\t\treturn includes(self, other)\n\n\tdef __ne__(self, other):\n\t\treturn Not(self.__eq__(other))\n\n\tdef dom(self, x=None):\n\t\treturn Set(self._dom) if x is None else self._dom(x)\n\n\tdef _dom(self, x):\n\t\ty = const('x', get_sort_ran())\n\t\treturn Exists(y, self.has(x, y))\n\n\tdef ran(self, y=None):\n\t\treturn Set(self._ran) if y is None else self._ran(x)\n\t\n\tdef _ran(self, y):\n\t\tx = const('x', get_sort_dom())\n\t\treturn Exists(x, self.has(x, y))\n\n\tdef has(self, x, y):\n\t\treturn self.r(x, y) if 
inspect.ismethod(self.r) else self.r[x][y]\n\n\tdef z3(self):\n\t\treturn self.r\n\nclass Rel(Relation):\t\n\t\"\"\"This is short for Relation\"\"\"\n\n## Creates a new relation.\n# @param name The name of the created set.\n# @param name The sort of the created set.\n# @return The set instance created.\ndef rel(name):\n\treturn Rel(const(name, REL_SORT))\n\n## Creates multiple new sets.\n# @param names The names of the created sets, separated by space character.\n# @return a list of the set instances created.\ndef rels(names):\n\tnames = names.split(' ')\n\treturn [rel(name) for name in names]\n\n## Returns a constraint that two sets are same.\n# @param s1 A set that will be same as s2.\n# @param s2 A set that will be same as s1.\n# @return A constraint that two sets are same.\ndef eq(r1, r2):\n\tx = const('x', get_sort_dom())\n\ty = const('y', get_sort_ran())\n\treturn ForAll([x, y], r1(x, y) == r2(x, y))\n\n## Returns a constraint that two sets are same.\n# @param s1 A set that will be same as s2.\n# @param s2 A set that will be same as s1.\n# @return A constraint that two sets are same.\ndef includes(r1, r2):\n\tx = const('x', get_sort_dom())\n\ty = const('y', get_sort_ran())\n\treturn ForAll([x, y], Implies(r2(x, y), r1(x, y)))\n\n## Returns a constraint that two sets are same.\n# @param s1 A set that will be same as s2.\n# @param s2 A set that will be same as s1.\n# @return A constraint that two sets are same.\ndef included(r1, r2):\n\tx = const('x', get_sort_dom())\n\ty = const('y', get_sort_ran())\n\treturn ForAll([x, y], Implies(r1(x, y), r2(x, y)))\n\n## Prints a relation\n# @param solver The solver in which the set is.\n# @param set The set that need to be printed.\ndef show_rel(solver, rel):\n\tif isinstance(rel, Relation):\n\t\trel = rel.z3()\n\tif not str(rel)[1] == '!': \n\t\tprint(\"content of\", rel)\n\t\tcontent = model(solver, rel).as_list()\n\t\tunveil(solver, content)\n\t\tprint()\n\ndef show_rels(solver, *rels):\n\tfor r in rels: show_rel(solver, r)\n\ndef show_rel_models(solver):\n\tis_rel = lambda elt: elt.range() == REL_SORT\n\trels = list(filter(is_rel, solver.model()))\n\tshow_rels(solver, *rels)\n\nclass Union(Rel):\n\t\"\"\"Union for relation instances.\"\"\"\n\t\n\tdef __init__(self, r1, r2):\n\t\tself.r1 = r1\n\t\tself.r2 = r2\n\n\tdef has(self, x, y):\n\t\treturn Or(self.r1(x, y), self.r2(x, y))\n\n\tdef z3(self):\n\t\treturn (self.r1, self.r2)\n\nclass Intersection(Rel):\n\t\"\"\"Intersection for relation instances.\"\"\"\n\t\n\tdef __init__(self, r1, r2):\n\t\tself.r1 = r1\n\t\tself.r2 = r2\n\n\tdef has(self, x, y):\n\t\treturn And(self.r1(x, y), self.r2(x, y))\n\n\tdef z3(self):\n\t\treturn (self.r1, self.r2)\n\nclass Inter(Intersection):\n\t\"\"\"This is short for Intersection\"\"\"\n\nclass Subtraction(Rel):\n\t\"\"\"Subtraction for relation instances.\"\"\"\n\t\n\tdef __init__(self, r1, r2):\n\t\tself.r1 = r1\n\t\tself.r2 = r2\n\n\tdef has(self, x, y):\n\t\treturn And(self.r1(x, y), Not(self.r2(x, y)))\n\n\tdef z3(self):\n\t\treturn (self.r1, self.r2)\n\nclass Sub(Subtraction):\n\t\"\"\"This is short for Subtraction\"\"\"\n\nclass Restriction(Rel):\n\t\"\"\"Restriction for a relation instance and a set instance.\"\"\"\n\t\n\tdef __init__(self, r, s):\n\t\tself.r = r\n\t\tself.s = s\n\n\tdef has(self, x, y):\n\t\treturn And(self.r(x, y), self.s(x))\n\n\tdef z3(self):\n\t\treturn (self.r, self.s)\n\nclass Rest(Restriction):\n\t\"\"\"This is short for Restriction\"\"\"\n\nclass Corestriction(Rel):\n\t\"\"\"Corestriction for a relation instance and a set 
instance.\"\"\"\n\t\n\tdef __init__(self, r, s):\n\t\tself.r = r\n\t\tself.s = s\n\n\tdef has(self, x, y):\n\t\treturn And(self.r(x, y), self.s(y))\n\n\tdef z3(self):\n\t\treturn (self.r, self.s)\n\nclass Corest(Corestriction):\n\t\"\"\"This is short for Corestriction\"\"\"\n\nclass Composition(Relation):\n\t\"\"\"Composition for relation instances. r;s (= s o r)\"\"\"\n\t\n\tdef __init__(self, r, s):\n\t\tself.r = r\n\t\tself.s = s\n\n\tdef has(self, x, y):\n\t\ta = const('a', get_sort_ran())\n\t\treturn Exists(a, And(self.r(x, a), self.s(a, y)))\n\n\tdef z3(self):\n\t\treturn (self.r, self.s)\n\nclass Comp(Composition):\n\t\"\"\"This is short for Composition\"\"\"\n\nclass Image(Set):\n\t\"\"\"Image for a relation instance and a set instance.\"\"\"\n\t\n\tdef __init__(self, r, s):\n\t\tself.r = r\n\t\tself.s = s\n\n\tdef has(self, y):\n\t\tx = const('x', get_sort_dom())\n\t\treturn Exists(x, And(self.r(x, y), self.s(x)))\n\n\tdef z3(self):\n\t\treturn (self.r, self.s)\n\nclass Img(Image):\n\t\"\"\"This is short for Img\"\"\"\n\nclass InverseImage(Set):\n\t\"\"\"Inverse image for a relation instance and a set instance.\"\"\"\n\t\n\tdef __init__(self, r, s):\n\t\tself.r = r\n\t\tself.s = s\n\n\tdef has(self, x):\n\t\ty = const('y', get_sort_ran())\n\t\treturn Exists(y, And(self.r(x, y), self.s(y)))\n\n\tdef z3(self):\n\t\treturn (self.r, self.s)\n\nclass Inv(InverseImage):\n\t\"\"\"This is short for InverseImage\"\"\"\n\nclass Complement(Rel):\n\t\"\"\"Complement for a set instance.\"\"\"\n\t\n\tdef __init__(self, r):\n\t\tself.r = r\n\n\tdef has(self, x, y):\n\t\treturn Not(self.r(x, y))\n\nclass Cpl(Complement):\n\t\"\"\"This is short name for Complement\"\"\"\n\nclass Combination(Rel):\n\t\"\"\"Combination made of two set instances.\"\"\"\n\t\n\tdef __init__(self, s1, s2):\n\t\tself.s1 = s1\n\t\tself.s2 = s2\n\n\tdef has(self, x, y):\n\t\treturn And(self.s1(x), self.s2(y))\n\n\tdef z3(self):\n\t\treturn (self.s1, self.s2)\n\nclass Comb(Combination):\n\t\"\"\"This is short for Combination\"\"\"\n\nclass Empty(Relation):\n\t\"\"\"An empty set\"\"\"\n\tdef __init__(self):\n\t\tself.sort_dom = get_sort_dom()\n\t\tself.sort_ran = get_sort_ran()\n\n\tdef has(self, x, y):\n\t\treturn False\n\nclass Universe(Relation):\n\t\"\"\" An Universe set\"\"\"\n\tdef __init__(self):\n\t\tself.sort_dom = get_sort_dom()\n\t\tself.sort_ran = get_sort_ran()\n\n\tdef has(self, x, y):\n\t\treturn True\n\ndef well_founded(r):\n\tx = const('x', get_sort_dom())\n\ty = const('y', get_sort_ran())\n\treturn ForAll([x, y], Implies(r(x, y), x != y))" }, { "alpha_fraction": 0.5802469253540039, "alphanum_fraction": 0.6370370388031006, "avg_line_length": 24.375, "blob_id": "e846755e6d6aca18cf231703aa25c76ad3c12ba9", "content_id": "94368a57d402e203c4edfa673a6cf0d2b609288f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 405, "license_type": "no_license", "max_line_length": 52, "num_lines": 16, "path": "/meyer/state.py", "repo_name": "fuurin/meyer", "src_encoding": "UTF-8", "text": "# encoding: utf-8\nfrom z3 import ForAll, Exists, Not\nfrom .meyer import U\nfrom .util.z3py_util import const, consts\n\ndef trivial(s, post):\n\ts1 = const('s1', U)\n\treturn ForAll(s1, post(s, s1))\n\ndef irrelevant(s, post):\n\ts1, s2 = consts('s1 s2', U)\n\treturn ForAll([s1, s2], post(s, s1) == post(s, s2))\n\ndef relevant(s, post):\n\ts1, s2 = consts('s1 s2', U)\n\treturn Exists([s1, s2], post(s, s1) != post(s, s2))" }, { "alpha_fraction": 0.6451612710952759, "alphanum_fraction": 
0.6827957034111023, "avg_line_length": 13.34615421295166, "blob_id": "60f78776c5f898c3f3b2f02e68227cc0db180b82", "content_id": "e9224996dcdc5032b86778ccecb73b2da2e13b53", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 372, "license_type": "no_license", "max_line_length": 69, "num_lines": 26, "path": "/meyer/conditions.py", "repo_name": "fuurin/meyer", "src_encoding": "UTF-8", "text": "from .util.z3py_set import Empty, Universe\nfrom .util.z3py_rel import Empty as RelEmpty, Universe as RelUniverse\n\ndef And(s1, s2):\n\treturn s1 & s2\n\ndef Or(s1, s2):\n\treturn s1 | s2\n\ndef Not(s):\n\treturn -s\n\ndef Implies(s1, s2):\n\treturn s1 <= s2\n\ndef true():\n\treturn Universe()\n\ndef false():\n\treturn Empty()\n\ndef havoc():\n\treturn RelUniverse()\n\ndef fail():\n\treturn RelEmpty()" }, { "alpha_fraction": 0.6047297120094299, "alphanum_fraction": 0.6081081032752991, "avg_line_length": 21.846153259277344, "blob_id": "3da77f8425e58ddea35a5c6c9764b92dd2dc2e15", "content_id": "bae4d21267e07124d233526a2be47aecdb824d65", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 296, "license_type": "no_license", "max_line_length": 40, "num_lines": 13, "path": "/meyer/invariant.py", "repo_name": "fuurin/meyer", "src_encoding": "UTF-8", "text": "# encoding: utf-8\ndef is_invariant_of(I, p):\n\treturn p.post() >> (I & p.dom()) <= I\n\ndef is_ivr_of(I, p):\n\treturn is_invariant_of(I, p)\n\n# Actually this doesn't use C, b\ndef is_loop_invariant_of(I, a, C, b):\n\treturn I <= a.ran\n\ndef is_livr_of(I, a, C, b):\n\treturn is_loop_invariant_of(I, a, C, b)" }, { "alpha_fraction": 0.5364963412284851, "alphanum_fraction": 0.7189781069755554, "avg_line_length": 53.79999923706055, "blob_id": "44f36d0170bb9ad88fe9b4630710cfeb8b2eb72e", "content_id": "6475095655b63015ff06e7ed615fd06445362968", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 274, "license_type": "no_license", "max_line_length": 129, "num_lines": 5, "path": "/doc/html/search/all_7.js", "repo_name": "fuurin/meyer", "src_encoding": "UTF-8", "text": "var searchData=\n[\n ['getcolor',['getcolor',['../namespacemeyer_1_1util_1_1color.html#a777b068ba3d7bb397cb83871d691ecf5',1,'meyer::util::color']]],\n ['green',['green',['../namespacemeyer_1_1util_1_1color.html#a408ce5a7a281f56a95ee22c3b30917ce',1,'meyer::util::color']]]\n];\n" }, { "alpha_fraction": 0.5536231994628906, "alphanum_fraction": 0.7043478488922119, "avg_line_length": 56.5, "blob_id": "78ea16be3fb23a523ee8ed9bba0535742ff6828b", "content_id": "683d987e198067fa37135c82b5166d075ac6e791", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 345, "license_type": "no_license", "max_line_length": 130, "num_lines": 6, "path": "/doc/html/search/all_d.js", "repo_name": "fuurin/meyer", "src_encoding": "UTF-8", "text": "var searchData=\n[\n ['red',['red',['../namespacemeyer_1_1util_1_1color.html#a73b6737d01fe2c13bcb4b9ce85929329',1,'meyer::util::color']]],\n ['refinement_2epy',['refinement.py',['../refinement_8py.html',1,'']]],\n ['restriction',['restriction',['../namespacemeyer_1_1constructs.html#adcab11ce5ea89886576f593f6d2926f6',1,'meyer::constructs']]]\n];\n" }, { "alpha_fraction": 0.6672025918960571, "alphanum_fraction": 0.6768488883972168, "avg_line_length": 19.766666412353516, "blob_id": "a7a19e6f2dc534a14cc2f6714d39313ab2dadf87", "content_id": 
"14e1d6d779cd4bee0087df0d8e8fd4118ba0a178", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 622, "license_type": "no_license", "max_line_length": 55, "num_lines": 30, "path": "/meyer/loop.py", "repo_name": "fuurin/meyer", "src_encoding": "UTF-8", "text": "# encoding: utf-8\nfrom .special_programs import Skip\nfrom .basic_constructs import Choi\nfrom .util.z3py_rel import well_founded\n\nLOOP_NUM = 10\n\ndef fixed_repetition(p, i):\n\tif i==0:\n\t\treturn Skip() / p.dom()\n\telse:\n\t\treturn p ^ fixed_repetition(p, i-1)\n\ndef fix_rep(p, i):\n\treturn fixed_repetition(p, i)\n\ndef arbitrary_repetition(p):\n\treturn Choi(*[fix_rep(p, i) for i in range(LOOP_NUM)])\n\ndef arb_rep(p):\n\treturn arbitrary_repetition(p)\n\ndef while_loop(a, C, b):\n\treturn a ^ arb_rep(b / -C) // C\n\ndef wloop(a, C, b):\n\treturn while_loop(a, C, b)\n\ndef loop_variant_of(a, C, b):\n\tprint('loop invariant: under construction')" }, { "alpha_fraction": 0.59375, "alphanum_fraction": 0.625, "avg_line_length": 23, "blob_id": "e3823823bc5ce8c1a98b43c5d7030c74df02ffb5", "content_id": "5fc0afb273657affa2720e12cc6a610010fed6d0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 96, "license_type": "no_license", "max_line_length": 74, "num_lines": 4, "path": "/doc/html/search/files_2.js", "repo_name": "fuurin/meyer", "src_encoding": "UTF-8", "text": "var searchData=\n[\n ['determinacy_2epy',['determinacy.py',['../determinacy_8py.html',1,'']]]\n];\n" }, { "alpha_fraction": 0.617977499961853, "alphanum_fraction": 0.6207864880561829, "avg_line_length": 17.269229888916016, "blob_id": "7d14f6dec473dd225297654688807af65854d10b", "content_id": "1eb2d028bf27ef90cddf80ce233a379f0f601f57", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1424, "license_type": "no_license", "max_line_length": 73, "num_lines": 78, "path": "/meyer/special_programs.py", "repo_name": "fuurin/meyer", "src_encoding": "UTF-8", "text": "# encoding: utf-8\nfrom z3 import ForAll, And, Not\nfrom .util.z3py_util import elms\nfrom .util.z3py_set import Universe, Empty\nfrom .program import prog, Program\n\n## @file special_program.py\n# Module used to define special programs according to meyer's article.\n# \n# \n\n# Each classes are equivalence to restricted special program instances.\n# However, one way may cause unknown. 
Then try another way.\n\nclass Fail(Program):\n\tdef __init__(self):\n\t\tpass\n\n\tdef _set(self, x):\n\t\treturn False\n\n\tdef _pre(self, x):\n\t\treturn False\n\n\tdef _post(self, x, y):\n\t\treturn False\n\ndef fail(s):\n\tp = prog(s, \"fail\")\n\tx, y = elms(\"x y\")\n\ts.add(ForAll([x, y], Not(Or(p.set(x), p.pre(x), p.post(x, y)))))\n\treturn p\n\nclass Havoc(Program):\n\tdef __init__(self):\n\t\tpass\n\n\tdef _set(self, x):\n\t\treturn True\n\n\tdef _pre(self, x):\n\t\treturn True\n\n\tdef _post(self, x, y):\n\t\treturn True\n\ndef havoc(s):\n\tp = prog(s, \"havoc\")\n\tx, y = elms(\"x y\")\n\ts.add(ForAll([x, y], And(p.set(x), p.pre(x), p.post(x, y))))\n\treturn p\n\nclass Skip(Program):\n\tdef __init__(self):\n\t\tpass\n\n\tdef _set(self, x):\n\t\treturn True\n\n\tdef _pre(self, x):\n\t\treturn True\n\n\tdef _post(self, x, y):\n\t\treturn x == y\n\ndef skip(s):\n\tp = prog(s, \"skip\")\n\tx, y = elms(\"x y\")\n\ts.add(ForAll([x, y], And(p.set(x), p.pre(x), p.post(x, y) == (x == y))))\n\treturn p\n\ndef total(solver):\n\tp = prog(solver, 'total')\n\tsolver.add(p.pre() == Universe())\n\treturn p\n\ndef is_total(p):\n\treturn p.pre() == Universe()" }, { "alpha_fraction": 0.4920634925365448, "alphanum_fraction": 0.6984127163887024, "avg_line_length": 30.5, "blob_id": "53de485227931e46013637672748139a8353f5e9", "content_id": "f3b4f001fc07cf8b63a68294e025216dc2976c62", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 126, "license_type": "no_license", "max_line_length": 104, "num_lines": 4, "path": "/doc/html/search/variables_0.js", "repo_name": "fuurin/meyer", "src_encoding": "UTF-8", "text": "var searchData=\n[\n ['a',['A',['../namespacemeyer_1_1program.html#a5439166d754be3dc1dd22114889f26f2',1,'meyer::program']]]\n];\n" }, { "alpha_fraction": 0.7080873847007751, "alphanum_fraction": 0.7110389471054077, "avg_line_length": 35.04787063598633, "blob_id": "c4e833d6ddc6d9cf5d51658bca7df26e0374301d", "content_id": "fe1b5a99f7769ca31cd956f80b4c1e07474251de", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6776, "license_type": "no_license", "max_line_length": 159, "num_lines": 188, "path": "/meyer/util/z3py_util.py", "repo_name": "fuurin/meyer", "src_encoding": "UTF-8", "text": "# encoding: utf-8\nfrom .color import red, cyan, yellow\nfrom z3 import IntSort, EnumSort, Not\nfrom z3 import Const, Consts, Function, is_ast, is_bool, is_as_array\nfrom z3 import simplify, get_as_array_func\nfrom z3 import sat, unsat, unknown\n## @file z3py_util.py\n# This module is used to construct special variables that are needed in theorem proving.\n#\n# This module attributes an ID to each element used in the proof of a theorem. 
It also allows the construction of consts and functions, and permits printing them.\n\n# U = IntSort()\nU, (A, B, C) = EnumSort('U', ('A', 'B', 'C'))\n# U, UALL = EnumSort('U', ['U'+str(n) for n in range(1,9)])\n\n## Returns a string which contains information about the universe used.\n# @return The string which contains the universe.\ndef universe_state():\n\tif hasattr(U, 'num_constructors'):\n\t\tnum = str(U.num_constructors())\n\t\treturn 'Universe = U, has ' + num + ' element(s)'\n\treturn 'Universe = ' + str(U)\n\n## Sets universe state u to set module and bin-relation module.\ndef set_universe_state(u):\n\tglobal U\n\t# Assumes the set_sort helpers defined in z3py_set and z3py_rel;\n\t# imported lazily here to avoid a circular import.\n\tfrom .z3py_set import set_sort as set_set_sort\n\tfrom .z3py_rel import set_sort as set_rel_sort\n\tset_set_sort(u)\n\tset_rel_sort(u, u)\n\tU = u\n\n_ID = 0\n\n## Creates a new ID that will be incremented and given to each element in the theorem proving. It is used to avoid conflicts of the same name in Z3Py.\ndef _id():\n\tglobal _ID\n\twhile True:\n\t\t_ID += 1\n\t\tyield _ID\n\n## Concatenates the name of the element with its ID, and return the string result.\n# @param name Name of the element\n# @return The name concatenated with the current ID value.\ndef id_name(name):\n\tif ' ' in name:\n\t\traise ValueError(\"name mustn't include space\")\n\treturn name + '_' + str(next(_id()))\n\n## Concatenates the names of the elements with their IDs, and return the string result.\n# @param names Names of the element separated by space character.\n# @return The concatenated names of the element with the ID, all separated by space character.\ndef id_names(names):\n\tnames = names.split(\" \")\n\tfor idx, name in enumerate(names):\n\t\tnames[idx] = id_name(name)\n\treturn ' '.join(names)\n\n\n## Creates a new const element.\n# @param name Name of the const element.\n# @param sort Type of the const element.\n# @return A new const element.\ndef const(name, sort):\n\treturn Const(id_name(name), sort)\n\n## Creates multiple new const elements of the same type.\n# @param names Names of the const elements, separated by space character.\n# @param sort Type of the const elements (all consts must have the same type when using this function).\n# @return New const elements.\ndef consts(names, sort):\n\treturn Consts(id_names(names), sort)\n\ndef element(name):\n\treturn const(name, U)\n\ndef elm(name):\n\treturn element(name)\n\ndef elements(names):\n\treturn consts(names, U)\n\ndef elms(names):\n\treturn elements(names)\n\n## Creates a function element.\n# @param name Name of the function element.\n# @param sort Type of the function element.\n# @return A new function element.\ndef function(name, *sort):\n\treturn Function(id_name(name), *sort)\n\n## Creates multiple function elements.\n# @param names Names of the function elements, separated by space character.\n# @param sort Type of the function elements (all functions must have the same type when using this function).\n# @return New function elements.\ndef functions(names, *sort):\n\tnames = id_names(names).split(\" \")\n\treturn [function(name, *sort) for name in names]\n\n## Returns the model of a solver after checking its result.\n# @param solver The currently used solver.\n# @param instance The instance that is needed.\n# @return The model of the instance in the solver.\ndef model(solver, instance):\n\treturn solver.model()[instance]\n\n## Evaluates the content of an element.\n# @param solver The currently used solver.\n# @param function The function that needs to be evaluated\n# @return The evaluated function, in general it will return the value of the booleans in the function.\ndef evaluate(solver, function):\n\treturn 
solver.model().evaluate(function)\n\n## Returns the list corresponding to an array of an element of the solver.\n# @param solver The currently used solver.\n# @param as_array The array that needs to be converted in a list.\n# @return The list that corresponds to the array passed as parameter.\ndef as_list(solver, as_array):\n\tarray = get_as_array_func(as_array)\n\treturn model(solver, array).as_list()\n\n## Recursive function that prints the elements of an element.\n# @param solver The solver currently used.\n# @param item The item that needs to be printed\n# Recursive functions that can be used to print the composition of an item used in the theorem (such as a program)\nITE = ('if', 'then', 'else')\ndef unveil(solver, item, phase=0):\n\tif type(item) == list:\n\t\tfor i in item:\n\t\t\tindent = lambda n: ''.join(['\\t']*(n))\n\t\t\tif type(i) != list: i = [\"else\", i]\n\t\t\tif is_bool(i[1]):\n\t\t\t\tprint(indent(phase), i[0], '->', evaluate(solver, i[1]))\n\t\t\telse:\n\t\t\t\tprint(indent(phase), i[0], '->')\n\t\t\t\tunveil(solver, i[1], phase+1)\n\telse:\n\t\tif is_bool(item): \n\t\t\treturn item\n\t\telif is_as_array(item): \n\t\t\treturn unveil(solver, as_list(solver, item), phase)\n\t\telif item.decl().name() == 'if':\n\t\t\treturn unveil(solver, [list(i) for i in zip(ITE, item.children())], phase)\n\t\telif is_ast(item): \n\t\t\treturn unveil(solver, evaluate(solver, item), phase)\n\t\telse: \n\t\t\treturn \"#unspecified\"\n\n## Prints the name of an element and call unveil to print its content\n# @param solver The solver currently used.\n# @param record The name of the record.\n# @param element The element that needs to be printed.\ndef show_record_element(solver, record, element):\n\tprint(element, 'of', record)\n\telm = simplify(element(model(solver, record)))\n\telm_list = as_list(solver, elm)\n\tunveil(solver, elm_list)\n\tprint()\n\n## Checks if a solver instance is sat, unsat or unknown, returns the result.\n# @param solver The solver that is used to make the proof.\n# @param title The title of the theorem.\n# @param reset Indicates if the solver will be reset after the proof or not, True by default.\n# @return The result of the theorem (sat, unsat or unknown)\ndef proof(solver, title=None, reset=True, show_solver=False, show_model=True):\n\tif show_solver: print(solver)\n\tif title != None: print(yellow(title))\n\tprint(yellow(universe_state()))\n\n\tresult = solver.check()\n\tif result == unsat:\n\t\tprint(cyan(\"Holds: \" + str(result)), \"\\n\")\n\telse:\n\t\tprint(red(\"Unholds: \" + str(result)))\n\t\tif result == sat and show_model:\n\t\t\tfrom .z3py_set import show_set_models\n\t\t\tfrom .z3py_rel import show_rel_models\n\t\t\tshow_set_models(solver)\n\t\t\tshow_rel_models(solver)\n\t\tif result == unknown: \n\t\t\tprint(red(solver.reason_unknown()), \"\\n\")\n\t\n\tif reset: solver.reset()\n\t\n\treturn result\n\ndef conclude(solver, conclusion, title=None, reset=True, show_solver=False):\n\tsolver.add(Not(conclusion))\n\tproof(solver, title=title, reset=reset, show_solver=show_solver, show_model=True)" }, { "alpha_fraction": 0.614145040512085, "alphanum_fraction": 0.655326783657074, "avg_line_length": 68.8125, "blob_id": "7c8bdb5b6695c4e6bce5c55012b9be9c36f8d7ff", "content_id": "272919a1d4eedf2b725ccd24261427a2e06afea7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 1117, "license_type": "no_license", "max_line_length": 101, "num_lines": 16, "path": "/doc/html/search/namespaces_0.js", "repo_name": 
"fuurin/meyer", "src_encoding": "UTF-8", "text": "var searchData=\n[\n ['color',['color',['../namespacemeyer_1_1util_1_1color.html',1,'meyer::util']]],\n ['constructs',['constructs',['../namespacemeyer_1_1constructs.html',1,'meyer']]],\n ['determinacy',['determinacy',['../namespacemeyer_1_1determinacy.html',1,'meyer']]],\n ['equivalence',['equivalence',['../namespacemeyer_1_1equivalence.html',1,'meyer']]],\n ['feasibility',['feasibility',['../namespacemeyer_1_1feasibility.html',1,'meyer']]],\n ['functionality',['functionality',['../namespacemeyer_1_1functionality.html',1,'meyer']]],\n ['implementation',['implementation',['../namespacemeyer_1_1implementation.html',1,'meyer']]],\n ['meyer',['meyer',['../namespacemeyer.html',1,'']]],\n ['program',['program',['../namespacemeyer_1_1program.html',1,'meyer']]],\n ['refinement',['refinement',['../namespacemeyer_1_1refinement.html',1,'meyer']]],\n ['special_5fprogram',['special_program',['../namespacemeyer_1_1special__program.html',1,'meyer']]],\n ['util',['util',['../namespacemeyer_1_1util.html',1,'meyer']]],\n ['z3py_5futil',['z3py_util',['../namespacemeyer_1_1util_1_1z3py__util.html',1,'meyer::util']]]\n];\n" }, { "alpha_fraction": 0.6504064798355103, "alphanum_fraction": 0.6680216789245605, "avg_line_length": 20.735294342041016, "blob_id": "c7cac5a8a92ff8f038228e22f16e2e50070c122c", "content_id": "347d7fe20515cda9b19563238d91304522d0efac", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 738, "license_type": "no_license", "max_line_length": 48, "num_lines": 34, "path": "/meyer/conditionals.py", "repo_name": "fuurin/meyer", "src_encoding": "UTF-8", "text": "# encoding: utf-8\nfrom .program import Program\n\nclass GuardedConditional(Program):\n\tdef __init__(self, C1, p1, C2, p2):\n\t\tself.definition = p1 / C1 | p2 / C2\n\t\n\tdef _set(self, x):\n\t\treturn self.definition.set(x)\n\n\tdef _pre(self, x):\n\t\treturn self.definition.pre(x)\n\n\tdef _post(self, x, y):\n\t\treturn self.definition.post(x, y)\n\nclass GCond(GuardedConditional):\n\t\"\"\"This is short name for GuardedConditional\"\"\"\n\nclass IfThenElse(Program):\n\tdef __init__(self, C, p1, p2):\n\t\tself.definition = p1 / C | p2 / -C\n\t\n\tdef _set(self, x):\n\t\treturn self.definition.set(x)\n\n\tdef _pre(self, x):\n\t\treturn self.definition.pre(x)\n\n\tdef _post(self, x, y):\n\t\treturn self.definition.post(x, y)\n\nclass Ite(IfThenElse):\n\t\"\"\"This is short name for IfThenElse\"\"\"" }, { "alpha_fraction": 0.5489038825035095, "alphanum_fraction": 0.7293423414230347, "avg_line_length": 97.83333587646484, "blob_id": "36a51f94ad3befbf5e951ad1ee061d6edde51d72", "content_id": "d56ee4e78b98a4008ea6f65257253d0b55826507", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 1186, "license_type": "no_license", "max_line_length": 164, "num_lines": 12, "path": "/doc/html/search/all_e.js", "repo_name": "fuurin/meyer", "src_encoding": "UTF-8", "text": "var searchData=\n[\n ['set',['Set',['../namespacemeyer_1_1program.html#ac9c465b4f9426d61a87d653e9f26dc36',1,'meyer::program']]],\n ['set_5f',['set_',['../namespacemeyer_1_1program.html#aa5506eff84f20ed482dae9ad8b143b22',1,'meyer::program']]],\n ['show_5fprog',['show_prog',['../namespacemeyer_1_1program.html#a98fde47f6e0b55f0a853504c52f42695',1,'meyer::program']]],\n ['show_5frecord_5felement',['show_record_element',['../namespacemeyer_1_1util_1_1z3py__util.html#a94d6ccd0151825f3405c1ebccf8532b9',1,'meyer::util::z3py_util']]],\n 
['show_5fset',['show_set',['../namespacemeyer_1_1program.html#a4dd6b1d1b136379bcee2fa37a407eff9',1,'meyer::program']]],\n ['show_5fset_5felement',['show_set_element',['../namespacemeyer_1_1util_1_1z3py__util.html#a95c84051003c44372826c0350d2d4042',1,'meyer::util::z3py_util']]],\n ['skip',['skip',['../namespacemeyer_1_1special__program.html#acb9cc59171a05af2110ca447780028c4',1,'meyer::special_program']]],\n ['skolemized_5ffeasible',['skolemized_feasible',['../namespacemeyer_1_1feasibility.html#a60f0122f899a01e6c5b69f43d7f29727',1,'meyer::feasibility']]],\n ['special_5fprogram_2epy',['special_program.py',['../special__program_8py.html',1,'']]]\n];\n" }, { "alpha_fraction": 0.710749626159668, "alphanum_fraction": 0.7390381693840027, "avg_line_length": 39.42856979370117, "blob_id": "992de37d3537868604ef5168bf7a770f4b16419a", "content_id": "a752f46723b3056584872a3073e7454689e078b9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1414, "license_type": "no_license", "max_line_length": 83, "num_lines": 35, "path": "/meyer/implementation.py", "repo_name": "fuurin/meyer", "src_encoding": "UTF-8", "text": "# encoding: utf-8\nfrom z3 import And\n\n## @file implementation.py\n# Module used to define the operation of implementation between two programs.\n# \n# A program p' is an implementation of p if p' refines p and if p' is feasible.\n\n## Creates the operation of implementation between two programs.\n# @param p1 The program that is the implementation of p2.\n# @param p2 The program which is a contract of is p1.\n# @return The Z3 assumptions of the implementation operation.\ndef is_implementation_of(p1, p2):\n\treturn And(+p1, p1 <= p2)\n\n## This is short name for a relation is_implementation_of\n# @param p1 The program that is the implementation of p2.\n# @param p2 The program which is a contract of is p1.\n# @return The Z3 assumptions of the implementation operation.\ndef is_impl_of(p1, p2):\n\treturn is_implementation_of(p1, p2)\n\n## Creates the operation of contract (inverse implementation) between two programs.\n# @param p1 The program that is the contract of p2.\n# @param p2 The program which is an implementation of p1.\n# @return The Z3 assumptions of the contract operation.\ndef is_contract_of(p1, p2):\n\treturn And(+p2, p2 <= p1)\n\n## This is short name for a relation is_contract_of\n# @param p1 The program that is the contract of p2.\n# @param p2 The program which is an implementation of p1.\n# @return The Z3 assumptions of the contract operation.\ndef is_ctrt_of(p1, p2):\n\treturn is_contract_of(p1, p2)" }, { "alpha_fraction": 0.529411792755127, "alphanum_fraction": 0.7058823704719543, "avg_line_length": 37.25, "blob_id": "49db54326b86ea9b93ab5c2e862a861309ea8b45", "content_id": "a7f79108cbf1b2b554a09161c5c163d6cfe3ea28", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 153, "license_type": "no_license", "max_line_length": 131, "num_lines": 4, "path": "/doc/html/search/functions_8.js", "repo_name": "fuurin/meyer", "src_encoding": "UTF-8", "text": "var searchData=\n[\n ['model',['model',['../namespacemeyer_1_1util_1_1z3py__util.html#a8512b24ebe7ccd137b53ba52407415fb',1,'meyer::util::z3py_util']]]\n];\n" }, { "alpha_fraction": 0.5521796345710754, "alphanum_fraction": 0.7437252402305603, "avg_line_length": 93.625, "blob_id": "eb8fe774153fc1e66228b3af537a5a258580fa20", "content_id": "ebb536119e9a7feab9b57f9e3ea190d793522caa", "detected_licenses": [], "is_generated": false, 
"is_vendor": false, "language": "JavaScript", "length_bytes": 757, "license_type": "no_license", "max_line_length": 235, "num_lines": 8, "path": "/doc/html/search/functions_a.js", "repo_name": "fuurin/meyer", "src_encoding": "UTF-8", "text": "var searchData=\n[\n ['prog',['prog',['../namespacemeyer_1_1program.html#a2ec2977de2aefb152624161860abed00',1,'meyer::program']]],\n ['prog_5fconstraint',['prog_constraint',['../namespacemeyer_1_1program.html#a2229fec0fdbcba6e277f26f537088c21',1,'meyer::program']]],\n ['progs',['progs',['../namespacemeyer_1_1program.html#ae0fe8928ca6b44bea0484d83cb8b98dc',1,'meyer::program']]],\n ['progs_5fconstraint',['progs_constraint',['../namespacemeyer_1_1program.html#aa1b9b5b66171bc75ca42d795003ada41',1,'meyer::program']]],\n ['proof',['proof',['../namespacemeyer_1_1program.html#a1f3d5951c7b4c6aad277761ba776724b',1,'meyer.program.proof()'],['../namespacemeyer_1_1util_1_1z3py__util.html#a6d3bdd5583300633c2b4562123a3e876',1,'meyer.util.z3py_util.proof()']]]\n];\n" }, { "alpha_fraction": 0.5545350313186646, "alphanum_fraction": 0.7428243160247803, "avg_line_length": 95.77777862548828, "blob_id": "2663353bd8b7ee78a185d057d9ca757dc8fa04a3", "content_id": "5c5796e108391f96408d35171aa4da5444d13845", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 871, "license_type": "no_license", "max_line_length": 164, "num_lines": 9, "path": "/doc/html/search/functions_c.js", "repo_name": "fuurin/meyer", "src_encoding": "UTF-8", "text": "var searchData=\n[\n ['show_5fprog',['show_prog',['../namespacemeyer_1_1program.html#a98fde47f6e0b55f0a853504c52f42695',1,'meyer::program']]],\n ['show_5frecord_5felement',['show_record_element',['../namespacemeyer_1_1util_1_1z3py__util.html#a94d6ccd0151825f3405c1ebccf8532b9',1,'meyer::util::z3py_util']]],\n ['show_5fset',['show_set',['../namespacemeyer_1_1program.html#a4dd6b1d1b136379bcee2fa37a407eff9',1,'meyer::program']]],\n ['show_5fset_5felement',['show_set_element',['../namespacemeyer_1_1util_1_1z3py__util.html#a95c84051003c44372826c0350d2d4042',1,'meyer::util::z3py_util']]],\n ['skip',['skip',['../namespacemeyer_1_1special__program.html#acb9cc59171a05af2110ca447780028c4',1,'meyer::special_program']]],\n ['skolemized_5ffeasible',['skolemized_feasible',['../namespacemeyer_1_1feasibility.html#a60f0122f899a01e6c5b69f43d7f29727',1,'meyer::feasibility']]]\n];\n" }, { "alpha_fraction": 0.49462366104125977, "alphanum_fraction": 0.5698924660682678, "avg_line_length": 22.25, "blob_id": "bf56dbc984fd62f5fd8782d7c89645209188d34f", "content_id": "a055c6b9d781db694ccf8b78c77c72a2b9f65835", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 93, "license_type": "no_license", "max_line_length": 71, "num_lines": 4, "path": "/doc/html/search/all_13.js", "repo_name": "fuurin/meyer", "src_encoding": "UTF-8", "text": "var searchData=\n[\n ['z3py_5futil_2epy',['z3py_util.py',['../z3py__util_8py.html',1,'']]]\n];\n" }, { "alpha_fraction": 0.4959677457809448, "alphanum_fraction": 0.7056451439857483, "avg_line_length": 48.599998474121094, "blob_id": "e1867952b6de6d3a9474d063f5d72504eb41c3a2", "content_id": "7f28d10761fa12931908905d27680f58be9bd597", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 248, "license_type": "no_license", "max_line_length": 120, "num_lines": 5, "path": "/doc/html/search/variables_2.js", "repo_name": "fuurin/meyer", "src_encoding": "UTF-8", "text": "var 
searchData=\n[\n ['c',['C',['../namespacemeyer_1_1program.html#afa493796575f275536829e18ce904c1d',1,'meyer::program']]],\n ['cyan',['cyan',['../namespacemeyer_1_1util_1_1color.html#a6da67b9f01da86ac24097fa7e978526e',1,'meyer::util::color']]]\n];\n" }, { "alpha_fraction": 0.7175572514533997, "alphanum_fraction": 0.7480915784835815, "avg_line_length": 21, "blob_id": "74e2f90de899cfa094c8d5bfc4af4652f4573181", "content_id": "14a153ec7cdd50987c804e6757c308aa6c5efdc3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 131, "license_type": "no_license", "max_line_length": 31, "num_lines": 6, "path": "/meyer/util/z3py_proof_kit.py", "repo_name": "fuurin/meyer", "src_encoding": "UTF-8", "text": "from z3 import *\nfrom .z3py_util import conclude\nfrom .z3py_set import set, sets\nfrom .z3py_rel import rel, rels\n\nsolver = Solver()" }, { "alpha_fraction": 0.7175463438034058, "alphanum_fraction": 0.7218259572982788, "avg_line_length": 32.42856979370117, "blob_id": "22cfbf28a76be3b1796686c524700b03a625219c", "content_id": "5840c12aabfb28387544d24c1c08ac45c01c13d4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 701, "license_type": "no_license", "max_line_length": 132, "num_lines": 21, "path": "/meyer/functionality.py", "repo_name": "fuurin/meyer", "src_encoding": "UTF-8", "text": "# encoding: utf-8\nfrom z3 import And, Not\nfrom .util.z3py_set import set\n\n## @file functionality.py\n# Module used to define the condition of functionality on a program.\n# \n# A program is functional if every subset C of S is disjoint from post_p(C). A program that is not functional is called imperative.\n\n## Creates the assumption of functionality on a program.\n# @param p The program that needs to be functional.\n# @return The assumption.\ndef functional(p):\n\tc = set('c')\n\treturn\tAnd(c <= p.set, c ** (p.post() >> c))\n\n## Creates the assumption of an imperative program.\n# @param p The program that needs to be imperative.\n# @return The assumption.\ndef imperative(p):\n\treturn Not(functional(p))" }, { "alpha_fraction": 0.6450276374816895, "alphanum_fraction": 0.6843922734260559, "avg_line_length": 30.5, "blob_id": "c0ff33f3943e374e4d5e0571b80ed4aa594001e2", "content_id": "56cdbd258823502b0e686a40c4ec666dfdef3651", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1454, "license_type": "no_license", "max_line_length": 74, "num_lines": 46, "path": "/meyer/refinement.py", "repo_name": "fuurin/meyer", "src_encoding": "UTF-8", "text": "# encoding: utf-8\nfrom z3 import And\n\n## @file refinement.py\n# Module used to define the operation of refinement between two programs.\n# \n# A program p2 refines a program p1 if:\n# Set_p2 ⊇ Set_p1\n# Pre_p2 ⊇ Pre_p1\n# (Post_p2 / Pre_p1) ⊆ Post_p1\n\n## Creates the operation of refinement between two programs.\n# @param p2 The program that refines p1.\n# @param p1 The program that specifies (or abstracts) p2.\n# @return The Z3 assumption used for refinement.\ndef is_refinement_of(p2, p1):\n\treturn And(\n\t\tp2.set() >= p1.set(),\n\t\tp2.pre() >= p1.pre(),\n\t\tp2.post() / p1.pre() <= p1.post()\n\t)\n\n## This is short name for a relation is_refinement_of\n# @param p2 The program that refines p1.\n# @param p1 The program that specifies (or abstracts) p2.\n# @return The Z3 assumption used for refinement.\ndef is_ref_of(p2, p1):\n\treturn is_refinement_of(p2, p1)\n\n## Creates the operation of abstract 
between two programs.\n# @param p1 The program that specifies (or abstracts) p2.\n# @param p2 The program that refines p1.\n# @return The Z3 assumption used for abstract.\ndef is_abstract_of(p1, p2):\n\treturn And(\n\t\tp2.set() >= p1.set(),\n\t\tp2.pre() >= p1.pre(),\n\t\tp2.post() / p1.pre() <= p1.post()\n\t)\n\n## This is short name for a relation is_abstract_of\n# @param p1 The program that specifies (or abstracts) p2.\n# @param p2 The program that refines p1.\n# @return The Z3 assumption used for abstract.\ndef is_abs_of(p1, p2):\n\treturn is_abstract_of(p1, p2)" }, { "alpha_fraction": 0.6443113684654236, "alphanum_fraction": 0.6670658588409424, "avg_line_length": 21.594594955444336, "blob_id": "f9f8b049f5a7b10f0ffcd58846733f1703613532", "content_id": "9ef8a603e555839ecaa9240c51bc5c9b0169745b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 835, "license_type": "no_license", "max_line_length": 59, "num_lines": 37, "path": "/meyer/concurrency.py", "repo_name": "fuurin/meyer", "src_encoding": "UTF-8", "text": "# encoding: utf-8\nfrom .program import Program\n\nclass AtomicConcurrency(Program):\n\tdef __init__(self, p1, p2):\n\t\tself.definition = (p1 ^ p2) | (p2 ^ p1)\n\t\n\tdef _set(self, x):\n\t\treturn self.definition.set(x)\n\n\tdef _pre(self, x):\n\t\treturn self.definition.pre(x)\n\n\tdef _post(self, x, y):\n\t\treturn self.definition.post(x, y)\n\nclass Atom(AtomicConcurrency):\n\t\"\"\"This is short name for AtomicConcurrency\"\"\"\n\nclass NonAtomicConcurrency(Program):\n\tdef __init__(self, p1, p2, q): \n\t\tself.definition = (Atom(p1, q) ^ p2) | (p1 ^ Atom(p2, q))\n\t\n\tdef _set(self, x):\n\t\treturn self.definition.set(x)\n\n\tdef _pre(self, x):\n\t\treturn self.definition.pre(x)\n\n\tdef _post(self, x, y):\n\t\treturn self.definition.post(x, y)\n\nclass NAtom(NonAtomicConcurrency):\n\t\"\"\"This is short name for NonAtomicConcurrency\"\"\"\n\ndef commute(p1, p2):\n\treturn p1 ^ p2 == p2 ^ p1" }, { "alpha_fraction": 0.5536332130432129, "alphanum_fraction": 0.7301037907600403, "avg_line_length": 56.79999923706055, "blob_id": "b1b8d2eaabf379717dba8ae013c4c83bba928380", "content_id": "725858d3b50852f7a5e38c87b1b35cb6d92e12e2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 289, "license_type": "no_license", "max_line_length": 133, "num_lines": 5, "path": "/doc/html/search/functions_e.js", "repo_name": "fuurin/meyer", "src_encoding": "UTF-8", "text": "var searchData=\n[\n ['universe_5fstate',['universe_state',['../namespacemeyer_1_1program.html#a48c262752e496209621f5c6e0da3f8ad',1,'meyer::program']]],\n ['unveil',['unveil',['../namespacemeyer_1_1util_1_1z3py__util.html#a4eea2be8cfebc20260a595ef0152781c',1,'meyer::util::z3py_util']]]\n];\n" }, { "alpha_fraction": 0.47940075397491455, "alphanum_fraction": 0.5380774140357971, "avg_line_length": 20.105262756347656, "blob_id": "60f7112534bc1b2b24fbf90443474ae83b5edb5a", "content_id": "53a7cdd041bd6433bf7c30b8a001a11abe7e7db2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 801, "license_type": "no_license", "max_line_length": 54, "num_lines": 38, "path": "/meyer/util/color.py", "repo_name": "fuurin/meyer", "src_encoding": "UTF-8", "text": "# -*- coding: utf8 -*-\n#color.py\n#\ndef getcolor(colorname):\n colors = {\n 'clear': '\\033[0m',\n 'black': '\\033[30m',\n 'red': '\\033[31m',\n 'green': '\\033[32m',\n 'yellow': '\\033[33m',\n 'blue': '\\033[34m',\n 
'purple': '\\033[35m',\n        'cyan': '\\033[36m',\n        'white': '\\033[37m'\n    }\n    def func2(c):\n        return colors[colorname] + c + colors['clear']\n\n    return func2\n\nblack = getcolor('black')\nred = getcolor('red')\ngreen = getcolor('green')\nyellow = getcolor('yellow')\nblue = getcolor('blue')\npurple = getcolor('purple')\ncyan = getcolor('cyan')\nwhite = getcolor('white')\n\n'''\nexample\nfrom color import *\na = \"this is a test message\"\nprint(red(a))\nprint(white(a))\nprint(cyan(a))\nprint(purple(\"test \") + yellow(\"message\"))\n'''" }, { "alpha_fraction": 0.7367619276046753, "alphanum_fraction": 0.7439733743667603, "avg_line_length": 36.85714340209961, "blob_id": "8e0209a2fc81f791862efce006c7271fc72f9a91", "content_id": "1566ffd051ea600999563d5fbe5964d8980ab90b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2384, "license_type": "no_license", "max_line_length": 92, "num_lines": 63, "path": "/meyer/equivalence.py", "repo_name": "fuurin/meyer", "src_encoding": "UTF-8", "text": "# encoding: utf-8\nfrom z3 import And\n\n## @file equivalence.py\n# Module used to define the condition of equivalence between two programs.\n# \n# Two programs are equivalent if their preconditions and their postconditions are the same.\n\n## Creates the assumption of equivalence between two programs.\n# @param p1 The first program.\n# @param p2 The second program.\n# @return The assumption of equivalence between two programs.\ndef equivalent(p1, p2):\n\treturn And(eq_pre(p1, p2), eq_actual_post(p1, p2))\n\n## Short name for equivalent.\n# @param p1 The first program.\n# @param p2 The second program.\n# @return The assumption of equivalence between two programs.\ndef eq(p1, p2):\n\treturn equivalent(p1, p2)\n\n## Creates the assumption of equality between two programs.\n# @param p1 The first program.\n# @param p2 The second program.\n# @return The assumption of equality between two programs.\ndef equal(p1, p2):\n\treturn And(eq_set(p1, p2), eq_pre(p1, p2), eq_post(p1, p2))\n\n## Creates the assumption of equality between two state sets of programs.\n# @param p1 The first program.\n# @param p2 The second program.\n# @return The assumption of equality between two state sets of programs.\ndef eq_set(p1, p2):\n\treturn p1.set() == p2.set()\n\n## Creates the assumption of equality between two preconditions of programs.\n# @param p1 The first program.\n# @param p2 The second program.\n# @return The assumption of equality between two preconditions of programs.\ndef eq_pre(p1, p2):\n\treturn p1.pre() == p2.pre()\n\n## Creates the assumption of equality between two postconditions of programs.\n# @param p1 The first program.\n# @param p2 The second program.\n# @return The assumption of equality between two postconditions of programs.\ndef eq_post(p1, p2):\n\treturn p1.post() == p2.post()\n\n## Creates the assumption of equality between two ranges of postconditions of programs.\n# @param p1 The first program.\n# @param p2 The second program.\n# @return The assumption of equality between two ranges of postconditions of programs.\ndef eq_ran(p1, p2):\n\treturn p1.ran() == p2.ran()\n\n## Creates the assumption of equality between two postconditions of actual program I/O.\n# @param p1 The first program.\n# @param p2 The second program.\n# @return The assumption of equality between two postconditions of feasible programs.\ndef eq_actual_post(p1, p2):\n\treturn p1.post() / p1.pre() == p2.post() / p2.pre()" }, { "alpha_fraction": 0.556291401386261, "alphanum_fraction": 
0.7284768223762512, "avg_line_length": 36.75, "blob_id": "9930e2921efecbb9ae890da59ed05f67e2e7e3a5", "content_id": "9524f9bead76a437626639d93cfc422971ca0f7a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 151, "license_type": "no_license", "max_line_length": 129, "num_lines": 4, "path": "/doc/html/search/functions_6.js", "repo_name": "fuurin/meyer", "src_encoding": "UTF-8", "text": "var searchData=\n[\n ['havoc',['havoc',['../namespacemeyer_1_1special__program.html#a82762750ef41bb2f8580570bb8905df5',1,'meyer::special_program']]]\n];\n" }, { "alpha_fraction": 0.5680190920829773, "alphanum_fraction": 0.7482100129127502, "avg_line_length": 92.11111450195312, "blob_id": "64235b878b2c514a66b70b91dc2b879840038367", "content_id": "e287abb1039e8d21ac3a214cb33fdff9a82b0401", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 838, "license_type": "no_license", "max_line_length": 159, "num_lines": 9, "path": "/doc/html/search/functions_1.js", "repo_name": "fuurin/meyer", "src_encoding": "UTF-8", "text": "var searchData=\n[\n ['choice',['choice',['../namespacemeyer_1_1constructs.html#ae659fbbcaf6239c8c8a1e79ee97b7134',1,'meyer::constructs']]],\n ['composition',['composition',['../namespacemeyer_1_1constructs.html#a8bb4c363d27270f0a745f816d1e75814',1,'meyer::constructs']]],\n ['composition_5fis_5ffeasible',['composition_is_feasible',['../namespacemeyer_1_1constructs.html#ac7888a6a7eeec7bbd29aad3bc2791619',1,'meyer::constructs']]],\n ['const',['const',['../namespacemeyer_1_1util_1_1z3py__util.html#a09fd9b270cf6714742578ca63a5cbb42',1,'meyer::util::z3py_util']]],\n ['consts',['consts',['../namespacemeyer_1_1util_1_1z3py__util.html#a8565362aa72ebb1f4f22eb0950d08609',1,'meyer::util::z3py_util']]],\n ['corestriction',['corestriction',['../namespacemeyer_1_1constructs.html#ad3cf2dbc20a62597059a34e860d64515',1,'meyer::constructs']]]\n];\n" }, { "alpha_fraction": 0.6903602480888367, "alphanum_fraction": 0.6924699544906616, "avg_line_length": 30.932641983032227, "blob_id": "0fe41bd803b253a1831609fd241264e466e4b3f5", "content_id": "ee12a71a5f0d1c34a2c5029cab61c4e30dbc25bd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6162, "license_type": "no_license", "max_line_length": 84, "num_lines": 193, "path": "/meyer/program.py", "repo_name": "fuurin/meyer", "src_encoding": "UTF-8", "text": "# encoding: utf-8\nfrom z3 import Datatype, BoolSort, IntSort, ArraySort, And, Not\nfrom .meyer import U\nfrom .util.z3py_set import Set\nfrom .util.z3py_rel import Rel\nfrom .util.z3py_util import const, consts, show_record_element\n\n\n## @file program.py\n# This module can be used for definition of specification/program instance\n#\n#\n\nSET = ArraySort(U, BoolSort())\n# OOPSet = ArraySort(IntSort(), ArraySort(U, BoolSort()))\nPRE = ArraySort(U, BoolSort())\nPOST = ArraySort(U, ArraySort(U, BoolSort()))\n\nPROG = Datatype('Prog')\nPROG.declare('mk_prog', ('set', SET), ('pre', PRE), ('post', POST))\nPROG = PROG.create()\nset_ = PROG.set\npre_ = PROG.pre\npost_ = PROG.post\n\nclass Program():\n\t\"\"\"Base class for Program instance.\"\"\"\n\t# @param p A program instance created by Z3.py.\n\tdef __init__(self, p):\n\t\tself.p = p\n\n\t# @param x An element that is included in Set of this program.\n\t# @return The constraint that x is included in Set of this program.\n\tdef set(self, x=None):\n\t\treturn Set(self._set) if x is None else 
self._set(x)\n\n\tdef _set(self, x):\n\t\treturn set_(self.p)[x]\n\n\t# @param x An element that is included in Pre of this program.\n\t# @return The constraint that x is included in Pre of this program.\n\tdef pre(self, x=None):\n\t\treturn Set(self._pre) if x is None else self._pre(x) \n\n\tdef _pre(self, x):\n\t\treturn pre_(self.p)[x]\n\t\n\t# @param x An element that is included in post of this program.\n\t# @param y An element that is included in the range of this program.\n\t# @return The constraint that x is included in Set of this program.\n\tdef post(self, x=None, y=None):\n\t\treturn Rel(self._post) if x is None and y is None else self._post(x, y) \n\n\tdef _post(self, x, y):\n\t\treturn post_(self.p)[x][y]\n\n\t# @param x An element that is included in the domain of this program.\n\t# @return The constraint that x is included in the domain of this program.\n\tdef dom(self, x=None):\n\t\treturn self.pre(x)\n\n\t# @param y An element that is included in the range of this program.\n\t# @return The constraint that x is included in the range of this program.\n\tdef ran(self, y=None):\n\t\treturn Set(self._ran) if y is None else self._ran(y) \n\t\t\n\tdef _ran(self, y):\n\t\treturn (self.post() >> self.dom())(y)\n\t\t\n\t# @param x An element that is included in the domain of post of this program.\n\t# @return The constraint that x is included in the domain of post of this program.\n\tdef dom_post(self, x=None):\n\t\treturn Set(self._dom_post) if x is None else self._dom_post(x) \n\n\tdef _dom_post(self, x):\n\t\ty = const('y', U)\n\t\treturn self.post().dom(x)\n\n\t# @param y An element that is included in the range of post of this program.\n\t# @return The constraint that x is included in the range of post of this program.\n\tdef ran_post(self, y=None):\n\t\treturn Set(self._ran_post) if y is None else self._ran_post(y) \n\t\n\tdef _ran_post(self, y):\n\t\treturn self.post().ran(y)\n\n\tdef __pos__(self):\n\t\tfrom .feasibility import feasible\n\t\treturn feasible(self)\n\n\tdef __invert__(self):\n\t\tfrom .feasibility import feasible\n\t\treturn feasible(self, strong=True)\t\t\n\n\tdef __truediv__(self, C):\n\t\tfrom .basic_constructs import Restriction\n\t\treturn Restriction(C, self)\n\n\tdef __floordiv__(self, C):\n\t\tfrom .basic_constructs import Corestriction\n\t\treturn Corestriction(self, C)\n\n\tdef __and__(self, p):\n\t\tfrom .concurrency import AtomicConcurrency\n\t\treturn AtomicConcurrency(self, p)\n\n\tdef __or__(self, p):\n\t\tfrom .basic_constructs import Choice\n\t\treturn Choice(self, p)\n\n\tdef __xor__(self, p):\n\t\tfrom .basic_constructs import Composition, SoftComposition\n\t\t# return SoftComposition(self, p)\n\t\treturn Composition(self, p)\n\n\tdef __lt__(self, p):\n\t\tfrom .implementation import is_implementation_of\n\t\treturn is_implementation_of(self, p)\n\n\tdef __gt__(self, p):\n\t\tfrom .implementation import is_contract_of\n\t\treturn is_contract_of(self, p)\n\n\tdef __le__(self, p):\n\t\tfrom .refinement import is_refinement_of\n\t\treturn is_refinement_of(self, p)\n\n\tdef __ge__(self, p):\n\t\tfrom .refinement import is_abstract_of\n\t\treturn is_abstract_of(self, p)\n\t\n\tdef __eq__(self, p):\n\t\tfrom .equivalence import equivalent\n\t\treturn equivalent(self, p)\n\n\tdef __ne__(self, p):\n\t\treturn Not(self.__eq__(p))\n\n## Use prog/progs _constraint to Prog constants.\n# @param prog The prog that needs constraints.\n# @return The constraints linked to a program.\ndef prog_constraint(p):\n\t# return And(p.pre() <= p.set(), p.post() <= 
p.set() ^ p.set())\n\tfrom z3 import ForAll, And, Implies\n\tx, y = consts('x y', U)\n\treturn ForAll([x, y], And(\n\t\tImplies(p.pre(x), p.set(x)),\n\t\tImplies(p.post(x, y), And(p.set(x), p.set(y))),\n\t))\n\t\n## Maps the constraints of progs to the progs.\n# @param progs A list of progs that is used.\n# @return The Z3 And condition that maps constraints and progs.\ndef progs_constraint(*progs):\n\treturn And([prog_constraint(p) for p in progs])\n\n## Creates a new program, adding the constraints to the solver.\n# @param solver The solver in which the program will be added.\n# @param name The name of the created program.\n# @return The program instance created.\ndef prog(solver, name):\n\tp = Program(const(name, PROG))\n\tsolver.add(prog_constraint(p))\n\treturn p\n\n## Creates multiple new programs, adding the constraints to the solver.\n# @param solver The solver in which the program will be added.\n# @param names The names of the created programs, separated by space character.\n# @return The map of the program instances created.\ndef progs(solver, names):\n\tps = [prog(solver, name) for name in names.split(' ')]\n\treturn ps[0] if len(ps) == 1 else ps\n\n## Prints a program\n# @param solver The solver in which the program is.\n# @param prog The program that needs to be printed.\ndef show_prog(solver, prog):\n\tif isinstance(prog, Program):\n\t\tshow_record_element(solver, prog.z3(), set_)\n\t\tshow_record_element(solver, prog.z3(), pre_)\n\t\tshow_record_element(solver, prog.z3(), post_)\n\telse:\n\t\tshow_record_element(solver, prog, set_)\n\t\tshow_record_element(solver, prog, pre_)\n\t\tshow_record_element(solver, prog, post_)\n\ndef show_progs(solver, *progs):\n\tfor p in progs: show_prog(solver, p)\n\ndef show_prog_models(solver):\n\tis_prog = lambda elt: elt.range() == PROG\n\tprogs = list(filter(is_prog, solver.model()))\n\tshow_progs(solver, *progs)" }, { "alpha_fraction": 0.5569985508918762, "alphanum_fraction": 0.7417027354240417, "avg_line_length": 85.625, "blob_id": "ba6e6ec77d404c7b7880526f7f811c44f476fe05", "content_id": "a037e416e3ea85782b0fe00ff63a06ef3f749968", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 693, "license_type": "no_license", "max_line_length": 139, "num_lines": 8, "path": "/doc/html/search/functions_4.js", "repo_name": "fuurin/meyer", "src_encoding": "UTF-8", "text": "var searchData=\n[\n  ['fail',['fail',['../namespacemeyer_1_1special__program.html#a1ca0361ff0637d102b69809d581c571e',1,'meyer::special_program']]],\n  ['feasible',['feasible',['../namespacemeyer_1_1feasibility.html#a784d705bc651829bdb0857446104635a',1,'meyer::feasibility']]],\n  ['function',['function',['../namespacemeyer_1_1util_1_1z3py__util.html#a347859270b8ef0d7bb63d7840aa5f4aa',1,'meyer::util::z3py_util']]],\n  ['functional',['functional',['../namespacemeyer_1_1functionality.html#abd59b387495be24dc003d05a4c1f8a9e',1,'meyer::functionality']]],\n  ['functions',['functions',['../namespacemeyer_1_1util_1_1z3py__util.html#a6cf5a3dfdff69d332f989d1b8f73119b',1,'meyer::util::z3py_util']]]\n];\n" }, { "alpha_fraction": 0.7069154977798462, "alphanum_fraction": 0.710208535194397, "avg_line_length": 31.571428298950195, "blob_id": "440cfcb76b7089b05bae1d0e81839471682bb303", "content_id": "801d52f60b03669169d2c3f1a22a360d26d5cf34", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 911, "license_type": "no_license", "max_line_length": 71, "num_lines": 28, "path": 
"/meyer/determinacy.py", "repo_name": "fuurin/meyer", "src_encoding": "UTF-8", "text": "# encoding: utf-8\nfrom z3 import ForAll, Exists, Implies, And, Not\nfrom .meyer import U\nfrom .util.z3py_util import consts\n\n## @file determinacy.py\n# This module is used to create deterministic assumptions on programs.\n#\n# A program is deterministic is its postcondition is a function.\n\n## Returns the assumption that makes a program deterministic.\n# @param p The program that needs to be deterministic.\n# @return The assumption linked to determinism in a program.\ndef deterministic(p):\n\tx,y,z = consts('x y z', U)\n\treturn\tAnd(\n\t\t\t\tForAll(x, Exists(y, p.post(x, y))),\n\t\t\t\tForAll([x,y,z], Implies(\n\t\t\t\t\tAnd(p.post(x, y), p.post(x, z)),\n\t\t\t\t\ty == z\n\t\t\t\t))\n\t\t\t)\n\n## Returns the assumption that makes a program non-deterministic.\n# @param p The program that needs to be non-deterministic.\n# @return The assumption linked to non-determinism in a program.\ndef non_deterministic(p):\n\treturn Not(deterministic(p))" } ]
46
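The fuurin/meyer record above models programs in Z3, and its determinacy.py entry encodes determinism as "the postcondition is total and single-valued." As a minimal standalone sketch (not part of the repo — it uses a toy integer relation instead of the repo's U sort and Program class, and the names post, x, y, z are illustrative), the same constraint can be stated and checked with plain z3py:

```python
from z3 import (Solver, Function, IntSort, BoolSort, Ints,
                ForAll, Exists, Implies, And)

# Toy stand-in for the repo's post relation: post(x, y) over integers.
post = Function('post', IntSort(), IntSort(), BoolSort())
x, y, z = Ints('x y z')

# Determinacy as in determinacy.py: post is total and single-valued.
deterministic = And(
    ForAll(x, Exists(y, post(x, y))),
    ForAll([x, y, z], Implies(And(post(x, y), post(x, z)), y == z)),
)

s = Solver()
s.add(deterministic)
print(s.check())  # typically sat, e.g. with the model post(x, y) := (y == x)
```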
thy-x/notes
https://github.com/thy-x/notes
5fe766e7aa3dd85b437c77559f6ef7e5f487f4fa
2db6ce88db0d2a486f9098c1543fe52c1fbef571
8a30ce1a0a37d94acf8ba54a68a2f1ea93e888a2
refs/heads/main
2023-08-07T05:23:29.115284
2022-08-04T05:12:54
2022-12-17T02:30:44
413,013,664
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7285041213035583, "alphanum_fraction": 0.7367491126060486, "avg_line_length": 28.789474487304688, "blob_id": "d91c2576a8b6f9d9b3f86fbde97312950c2ec027", "content_id": "b5ada7a892c8acd062ce5569548495b75b2df2d5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1710, "license_type": "no_license", "max_line_length": 292, "num_lines": 57, "path": "/src/python/misc/Mutable Default Arguments/index.md", "repo_name": "thy-x/notes", "src_encoding": "UTF-8", "text": "# Mutable Default Arguments\n\nforked from <https://docs.python-guide.org/writing/gotchas/#mutable-default-arguments>\n\nSeemingly the most common surprise new Python programmers encounter is Pythonโ€™s treatment of mutable default arguments in function definitions.\n\n**What You Wrote**\n```python\ndef append_to(element, to=[]):\n to.append(element)\n return to\n```\n\n**What You Might Have Expected to Happen**\n\n```python\nmy_list = append_to(12)\nprint(my_list)\n\nmy_other_list = append_to(42)\nprint(my_other_list)\n```\n\nA new list is created each time the function is called if a second argument isnโ€™t provided, so that the output is:\n\n```python\n[12]\n[42]\n```\n\n**What Actually Happens**\n```python\n[12]\n[12, 42]\n```\n\nA new list is created once when the function is defined, and the same list is used in each successive call.\n\nPythonโ€™s default arguments are evaluated once when the function is defined, not each time the function is called (like it is in say, Ruby). This means that if you use a mutable default argument and mutate it, you will and have mutated that object for all future calls to the function as well.\n\n**What You Should Do Instead**\n\nCreate a new object each time the function is called, by using a default arg to signal that no argument was provided (None is often a good choice).\n\n```python\ndef append_to(element, to=None):\n if to is None:\n to = []\n to.append(element)\n return to\n```\n\nDo not forget, you are passing a list object as the second argument.\n\n**When the Gotcha Isnโ€™t a Gotcha**\n\nSometimes you can specifically โ€œexploitโ€ (read: use as intended) this behavior to maintain state between calls of a function. 
This is often done when writing a caching function.\n" }, { "alpha_fraction": 0.5724138021469116, "alphanum_fraction": 0.5724138021469116, "avg_line_length": 6.25, "blob_id": "131264565276ed96687a337f74986b6c63c4fe8c", "content_id": "05220d74dea4d8700dd87faf83ab2efae4e82e4a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 145, "license_type": "no_license", "max_line_length": 14, "num_lines": 20, "path": "/src/kitchensink/index.md", "repo_name": "thy-x/notes", "src_encoding": "UTF-8", "text": "---\norphan: true\n---\n\nKitchen Sink \n\n```{toctree}\n:caption: Thy\n:glob:\n\n*/index\n```\n\n```{toctree}\n:caption: Furo\n:titlesonly:\n:glob:\n\nfuro/*\n```\n" }, { "alpha_fraction": 0.5514018535614014, "alphanum_fraction": 0.5514018535614014, "avg_line_length": 25.75, "blob_id": "4ac5efe601f421bd396169ec08a2f0881ecc596f", "content_id": "4429b175f16b599db810f8f7a3af5b817825410b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 107, "license_type": "no_license", "max_line_length": 37, "num_lines": 4, "path": "/src/quick_notes/clipboard/index.rst", "repo_name": "thy-x/notes", "src_encoding": "UTF-8", "text": "Collection of Clipboard templates\n#################################\n\n- :download:`restructured.clips.yaml`\n" }, { "alpha_fraction": 0.5918367505073547, "alphanum_fraction": 0.5918367505073547, "avg_line_length": 6.142857074737549, "blob_id": "799fa12faf14075f842f097fda70b3e8c35aaf0e", "content_id": "7013c98708c902b9515eb2aa22360138f034c912", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 49, "license_type": "no_license", "max_line_length": 15, "num_lines": 7, "path": "/src/python/misc/index.md", "repo_name": "thy-x/notes", "src_encoding": "UTF-8", "text": "# Miscellaneous\n\n```{toctree}\n:glob:\n\n*/index\n```" }, { "alpha_fraction": 0.7369077205657959, "alphanum_fraction": 0.7439733743667603, "avg_line_length": 29.820512771606445, "blob_id": "e4ff0b40695c99a658561c2321e0410b5f428f73", "content_id": "1967c2f9092446b85d481ca8e61b3d2ba9612467", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2414, "license_type": "no_license", "max_line_length": 285, "num_lines": 78, "path": "/src/python/misc/Late Binding Closures/index.md", "repo_name": "thy-x/notes", "src_encoding": "UTF-8", "text": "# Late Binding Closures\nforked from <https://docs.python-guide.org/writing/gotchas/#late-binding-closures>\n\nAnother common source of confusion is the way Python binds its variables in closures (or in the surrounding global scope).\n\n**What You Wrote**\n```python\ndef create_multipliers():\n return [lambda x : i * x for i in range(5)]\n```\n\n**What You Might Have Expected to Happen**\n```python\nfor multiplier in create_multipliers():\n print(multiplier(2))\n```\n\nA list containing five functions that each have their own closed-over i variable that multiplies their argument, producing:\n\n```python\n0\n2\n4\n6\n8\n```\n\n**What Actually Happens**\n```python\n8\n8\n8\n8\n8\n```\n\nFive functions are created; instead all of them just multiply x by 4.\n\nPythonโ€™s closures are late binding. This means that the values of variables used in closures are looked up at the time the inner function is called.\n\nHere, whenever any of the returned functions are called, the value of i is looked up in the surrounding scope at call time. 
By then, the loop has completed and i is left with its final value of 4.\n\nWhatโ€™s particularly nasty about this gotcha is the seemingly prevalent misinformation that this has something to do with lambdas in Python. Functions created with a lambda expression are in no way special, and in fact the same exact behavior is exhibited by just using an ordinary def:\n\n```python\ndef create_multipliers():\n multipliers = []\n\n for i in range(5):\n def multiplier(x):\n return i * x\n multipliers.append (multiplier)\n\n return multipliers\n```\n\n**What You Should Do Instead**\n\nThe most general solution is arguably a bit of a hack. Due to Pythonโ€™s aforementioned behavior concerning evaluating default arguments to functions (see Mutable Default Arguments), you can create a closure that binds immediately to its arguments by using a default arg like so:\n\n```python\ndef create_multipliers():\n return [lambda x, i=i : i * x for i in range(5)]\n```\n\nAlternatively, you can use the functools.partial function:\n\n```python\nfrom functools import partial\nfrom operator import mul\n\ndef create_multipliers():\n return [partial(mul, i) for i in range(5)]\n```\n\n**When the Gotcha Isnโ€™t a Gotcha**\n\nSometimes you want your closures to behave this way. Late binding is good in lots of situations. Looping to create unique functions is unfortunately a case where they can cause hiccups.\n\n\n" }, { "alpha_fraction": 0.6185243129730225, "alphanum_fraction": 0.6279434561729431, "avg_line_length": 20.233333587646484, "blob_id": "689c1f182f878c2d15397095e99bc8ee01be3bc4", "content_id": "5be250df863a3dac3e6f81804018821babf86932", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 637, "license_type": "no_license", "max_line_length": 104, "num_lines": 30, "path": "/src/conf.py", "repo_name": "thy-x/notes", "src_encoding": "UTF-8", "text": "# https://www.sphinx-doc.org/en/master/usage/configuration.html\n\nimport os\nimport sys\nfrom pathlib import Path\n\nsys.path.insert(0, str(Path(__file__).resolve().parent))\nfrom conf_global import *\n\nhtml_title = \"Thy\"\nproject = \"Thy's Notes\"\nproject_copyright = '2022, Thierry Humphrey'\nauthor = 'Thierry Humphrey'\n\nintersphinx_mapping_enabled = (\n # 'thy_main',\n # 'thy_python',\n # 'thy_azure',\n # 'thy_devops',\n # 'thy_qknotes',\n # 'thy_systems',\n # 'thy_misc',\n\n 'py3',\n 'pydevguide',\n 'rtfd',\n 'sphinx',\n 'furo',\n)\nintersphinx_mapping = {k: v for k, v in intersphinx_mapping.items() if k in intersphinx_mapping_enabled}\n" }, { "alpha_fraction": 0.769340991973877, "alphanum_fraction": 0.7722063064575195, "avg_line_length": 44.01612854003906, "blob_id": "02988b094a8ddeefb7a5386369b73ddcc1cfba6c", "content_id": "d2f9570cab0f5e93bc27d2790d8783098a1b3b4c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2792, "license_type": "no_license", "max_line_length": 232, "num_lines": 62, "path": "/src/systems/linux/index.md", "repo_name": "thy-x/notes", "src_encoding": "UTF-8", "text": "# Linux\n\n## Terminology (TODO: Remove it)\n\n- Kernel \n The kernel is the program that manages the system, including (depending on the kernel model) hardware devices, memory, and CPU scheduling. It runs in a privileged CPU mode that allows direct access to hardware, called kernel mode.\n- Process \n An OS abstraction and environment for executing a program. 
The program runs in user mode, with access to kernel mode (e.g., for performing device I/O) via system calls or traps into the kernel.\n- Thread \n An executable context that can be scheduled to run on a CPU. The kernel has multiple threads, and a process contains one or more.\n- Task \n A Linux runnable entity, which can refer to a process (with a single thread), a thread from a multithreaded process, or kernel threads.\n- Virtual memory \n An abstraction of main memory that supports multitasking and oversubscription. It is, practically, an infinite resource.\n- Kernel space \n The virtual memory address space for the kernel.\n- User space \n The virtual memory address space for processes.\n- User land \n User-level programs and libraries (/usr/bin, /usr/lib...).\n- Context switch \n A switch from running one thread or process to another.\n- Mode switch \n A switch between kernel and user modes.\n- Trap \n A signal sent to the kernel to request a system routine (privileged action). Trap types include system calls, processor exceptions, and interrupts.\n- Hardware interrupt \n A signal sent by physical devices to the kernel, usually to request servicing of I/O. An interrupt is a type of trap.\n \n\n## Kernel and User Modes\n\nKernel mode allows full access to devices and the execution of privileges instructions. User mode request privileges operations to kernel via system calls.\n\nSwitching between user and kernel modes is a mode switch.\n\n## Protection Ring\n\nmechanisms to protect data and functionality from faults (by improving fault tolerance) and malicious behavior (by providing computer security)\n![](https://upload.wikimedia.org/wikipedia/commons/2/2f/Priv_rings.svg)<https://en.wikipedia.org/wiki/Protection_ring>\n\n## Context Switch\n\nProcess of storing the system state for one task, so that task can be paused and another task resumed.\n\nThere are three potential triggers for a context switch:\n\n1. Multitasking\n2. Interrupt handling\n3. User and Kernel mode switching\n\n\n- <https://en.wikipedia.org/wiki/Context_switch>\n\n## System Calls\n\nProgrammatic way in which a computer program requests a service from the kernel. 
System calls provide an essential interface between a process and the operating system.\n\nSystem calls are generally not invoked directly, but rather via wrapper functions in glibc.\n\n- <https://en.wikipedia.org/wiki/System_call>\n- <https://man7.org/linux/man-pages/man2/syscalls.2.html>\n\n" }, { "alpha_fraction": 0.84375, "alphanum_fraction": 0.84375, "avg_line_length": 12.714285850524902, "blob_id": "fdfeca93f522d923f630f9930696bbc790539cf2", "content_id": "09bae70f4e88ff30466590833b95462a20d91fc0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 96, "license_type": "no_license", "max_line_length": 20, "num_lines": 7, "path": "/requirements.txt", "repo_name": "thy-x/notes", "src_encoding": "UTF-8", "text": "sphinx\nsphinx-copybutton\nsphinx-design\nsphinx-inline-tabs\nsphinx-tabs\nmyst-parser[linkify]\nfuro\n" }, { "alpha_fraction": 0.6411764621734619, "alphanum_fraction": 0.658823549747467, "avg_line_length": 7.947368621826172, "blob_id": "216f93181c56ea205cb7eb35f6b355b87856b0d5", "content_id": "da6072e82c6a4387d4c437c88f9ee5b4bf3e409e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 170, "license_type": "no_license", "max_line_length": 17, "num_lines": 19, "path": "/src/index.md", "repo_name": "thy-x/notes", "src_encoding": "UTF-8", "text": "\n```{toctree}\n:maxdepth: 3\n\npython/index\n```\n\n```{toctree}\n:maxdepth: 2\n\nsystems/index\njavascript/index\nquick_notes/index\n```\n\n```{toctree}\n:maxdepth: 1\n\ndevops/index\n```" }, { "alpha_fraction": 0.5, "alphanum_fraction": 0.5, "avg_line_length": 13, "blob_id": "859ab6865de0e1faa1e45d9c17bdfb0472eeec23", "content_id": "de60b20f51974199b03180d3ac6b3eabef80eaa4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 18, "license_type": "no_license", "max_line_length": 13, "num_lines": 1, "path": "/README.md", "repo_name": "thy-x/notes", "src_encoding": "UTF-8", "text": "# Thy's Notes\n " }, { "alpha_fraction": 0.5344827771186829, "alphanum_fraction": 0.5344827771186829, "avg_line_length": 8.833333015441895, "blob_id": "c78a74b448741474cf08d81c646f882de5ee8d67", "content_id": "d79c1dffc1ca5816d4872185219c2eeebd622aa7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 58, "license_type": "no_license", "max_line_length": 19, "num_lines": 6, "path": "/src/quick_notes/index.rst", "repo_name": "thy-x/notes", "src_encoding": "UTF-8", "text": "Quick Notes\n###########\n\n.. 
toctree::\n\n clipboard/index" }, { "alpha_fraction": 0.5619085431098938, "alphanum_fraction": 0.5652602314949036, "avg_line_length": 35.228572845458984, "blob_id": "1dd36a1bcb07ce9397f7b01fd682bfcc34a78284", "content_id": "2067e2d225365f6ad1e9922c245705b18ef2742d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5072, "license_type": "no_license", "max_line_length": 119, "num_lines": 140, "path": "/src/conf_global.py", "repo_name": "thy-x/notes", "src_encoding": "UTF-8", "text": "html_title = \"<a href='https://thy.readthedocs.io'>home</a>\"\n\n# https://www.sphinx-doc.org/en/master/usage/extensions/intersphinx.html\n# https://www.sphinx-doc.org/en/master/examples.html\nintersphinx_mapping = {\n # 'thy_main' : ('https://thy.readthedocs.io/', None),\n # 'thy_python' : ('https://thy-python.readthedocs.io/', None),\n # 'thy_azure' : ('https://thy-azure.readthedocs.io/', None),\n # 'thy_devops' : ('https://thy-devops.readthedocs.io/', None),\n # 'thy_qknotes' : ('https://thy-qknotes.readthedocs.io/', None),\n # 'thy_systems' : ('https://thy-systems.readthedocs.io/', None),\n # 'thy_misc' : ('https://thy-misc.readthedocs.io/', None),\n\n 'sphinx' : ('https://www.sphinx-doc.org/en/master/', None),\n 'jupyterbook' : ('https://jupyterbook.org/en/stable/', None),\n 'myst_parser' : ('https://myst-parser.readthedocs.io/en/latest/', None),\n 'myst_nb' : ('https://myst-nb.readthedocs.io/en/latest/', None),\n 'sphinx_book_theme': ('https://sphinx-book-theme.readthedocs.io/en/stable/', None),\n 'furo' : ('https://pradyunsg.me/furo/', None),\n\n 'py3' : ('https://docs.python.org/3/', None),\n 'numpy' : ('https://numpy.org/doc/stable/', None),\n 'pandas' : ('https://pandas.pydata.org/pandas-docs/stable', None),\n 'pydevguide' : ('https://devguide.python.org/', None),\n 'python_guide_org' : ('https://docs.python-guide.org/', None),\n 'django' : ('https://docs.djangoproject.com/en/3.2/', 'https://docs.djangoproject.com/en/3.2/_objects/'),\n 'jinja' : ('https://jinja.palletsprojects.com/en/3.1.x/', None),\n 'requests' : ('https://requests.readthedocs.io/en/latest/', None),\n 'podman' : ('https://docs.podman.io/en/stable/', None),\n 'molecule' : ('https://molecule.readthedocs.io/en/stable/', None),\n 'bs4' : ('https://www.crummy.com/software/BeautifulSoup/bs4/doc/', None),\n 'buildbot' : ('https://docs.buildbot.net/latest/', None),\n 'pypa' : ('https://www.pypa.io/en/latest/', None),\n 'waf' : ('https://waf.io/apidocs/', None),\n 'setuptools' : ('https://setuptools.pypa.io/en/latest/', None),\n 'ansible' : ('https://docs.ansible.com/ansible/latest/', None),\n 'conda' : ('https://conda.io/en/latest/', None),\n 'dnf' : ('https://dnf.readthedocs.io/en/latest/', None),\n 'pip' : ('https://pip.pypa.io/en/stable/', None),\n 'pelican' : ('https://docs.getpelican.com/en/latest/', None),\n 'rtfd' : ('https://docs.readthedocs.io/en/stable/', None),\n 'wagtail' : ('https://docs.wagtail.org/en/stable/', None),\n 'boto3' : ('https://boto3.amazonaws.com/v1/documentation/api/latest/', None),\n 'ceph' : ('https://docs.ceph.com/en/latest/', None),\n 'psycopg' : ('https://www.psycopg.org/docs/', None),\n 'sqlalchemy' : ('https://docs.sqlalchemy.org/en/14/', None),\n\n 'blender' : ('https://docs.blender.org/manual/en/latest/', None),\n 'blenderapi' : ('https://docs.blender.org/api/current/', None),\n\n 'circuitpython' : ('https://docs.circuitpython.org/en/latest/', None),\n 'micropython' : ('https://docs.micropython.org/en/latest/', None),\n\n # '' : ('', None),\n # '' : 
('', None),\n\n # objects.inv is not found\n # 'pygments' : ('https://pygments.org/docs/', None),\n # 'selenium' : ('https://www.selenium.dev/documentation/', None),\n # 'opencv' : ('https://docs.opencv.org/4.x/', None),\n}\n\nmanpages_url = 'https://linux.die.net/man/{section}/{page}'\nnumfig = True\n\nrst_prolog = \"\"\"\n.. role:: python(code)\n :language: python\n\n\"\"\"\n\nextensions = [\n 'sphinx.ext.autodoc',\n 'sphinx.ext.doctest',\n 'sphinx.ext.intersphinx',\n # 'sphinx.ext.viewcode',\n 'sphinx.ext.extlinks',\n 'sphinx.ext.graphviz',\n 'sphinx.ext.githubpages',\n 'sphinx.ext.autosectionlabel',\n 'sphinx.ext.todo',\n\n 'myst_parser',\n\n \"sphinx_copybutton\",\n \"sphinx_design\",\n \"sphinx_inline_tabs\",\n\n]\n\ntemplates_path = [\n '_global/_templates',\n '_templates',\n]\n\nexclude_patterns = []\n\nhtml_show_sphinx = False\nhtml_show_copyright = False\n\n# https://pradyunsg.me/furo/\nhtml_theme = 'furo'\n\nhtml_theme_options = {\n 'navigation_with_keys': True,\n # 'sidebar_hide_name' : True,\n}\n\nhtml_css_files = [\n '_global/custom.css',\n]\nhtml_js_files = [\n '_global/custom.js',\n]\nhtml_show_sourcelink = False\nhtml_static_path = ['_static']\nhtml_extra_path = []\n\nsource_suffix = {\n '.rst': 'restructuredtext',\n '.txt': 'restructuredtext',\n '.md' : 'markdown',\n}\n\nmyst_enable_extensions = [\n \"amsmath\",\n \"colon_fence\",\n \"deflist\",\n \"dollarmath\",\n \"html_admonition\",\n \"html_image\",\n \"linkify\",\n \"replacements\",\n \"smartquotes\",\n \"substitution\",\n \"tasklist\",\n]\nmyst_heading_anchors = 3\n\nautosectionlabel_prefix_document = True\n" }, { "alpha_fraction": 0.7367619276046753, "alphanum_fraction": 0.7428571581840515, "avg_line_length": 47.592594146728516, "blob_id": "284d373ae6f53303ee56bf13a8dee6b4be16daa4", "content_id": "93b8c27f7bcd305a00d11f0d032c36ec01ed347b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2625, "license_type": "no_license", "max_line_length": 247, "num_lines": 54, "path": "/src/python/misc/Kerberos Module/index.md", "repo_name": "thy-x/notes", "src_encoding": "UTF-8", "text": "# Kerberos Module\n\nhihi hahaha\n\n## test123\n\nThe basic flow of a typical Kerberos authentication is as follows:\n\n- Client sends an unauthenticated request to the server\n- Server sends back a 401 response with a `WWW-Authenticate: Negotiate` header with no authentication details\n- Client sends a new request with an `Authorization: Negotiate` header\n- Server checks the `Authorization` header against the Kerberos infrastructure and either allows or denies access accordingly. 
If access is allowed, it should include a `WWW-Authenticate: Negotiate` header with authentication details in the reply.\n- Client checks the authentication details in the reply to ensure that the request came from the server\n\nMy Sample Python code using kerberos module (You can use [requests-kerberos](https://github.com/requests/requests-kerberos) too):\n```python\nimport requests\nimport kerberos\nimport dns.resolver\n\nfrom requests.compat import urlparse\n\ndef myrequests_request(method, url, client_principal=None, **kwargs):\n req = requests.request(method, url, **kwargs)\n if \"Negotiate\" in req.headers.get(\"www-authenticate\", \"\"):\n hostname = urlparse(req.url).hostname\n canonical_name = dns.resolver.Resolver().query(hostname).canonical_name\n ret_code, context = kerberos.authGSSClientInit(f\"HTTP@{canonical_name}\", principal=client_principal)\n kerberos.authGSSClientStep(context, \"\")\n kwargs[\"headers\"] = {**kwargs.get(\"headers\", {}), \n **{\"Authorization\": f\"Negotiate {kerberos.authGSSClientResponse(context)}\"}}\n req = requests.request(method, req.url, **kwargs)\n return req\n\nmyrequests_get = lambda url, **kwargs: myrequests_request('GET', url, **kwargs)\nmyrequests_post = lambda url, **kwargs: myrequests_request('POST', url, **kwargs)\n\nreq = myrequests_get(\"http://your.server.com/\")\n```\nBefore running above script, you need to obtain and cache Kerberos ticket-granting tickets (using kinit)\n\nHow to create keytab file and run kinit with it\n```shell\nkutil -v -k your.keytab.kt add -p User@your.domain -V 0 -e arcfour-hmac-md5\nkinit -kt your.keytab.kt User@your.domain\n```\n\nReferences:\n- [Using the Python Kerberos Module](http://python-notes.curiousefficiency.org/en/latest/python_kerberos.html)\n- [requests-kerberos](https://github.com/requests/requests-kerberos) \n- [rfc4559: SPNEGO-based Kerberos and NTLM HTTP Authentication in Microsoft Windows](https://tools.ietf.org/html/rfc4559)\n- [apple/ccs-pykerberos/pysrc/kerberos.py](https://raw.githubusercontent.com/apple/ccs-pykerberos/master/pysrc/kerberos.py)\n- <https://web.mit.edu/kerberos/>\n- <https://kb.iu.edu/d/aumh>\n\n" }, { "alpha_fraction": 0.5729166865348816, "alphanum_fraction": 0.5729166865348816, "avg_line_length": 11, "blob_id": "e92f35cdf5108e52d9419ef9d7bf5bcd1e25fbd9", "content_id": "6c5dc9ac0027f2aac56b4f63b227e84ff0fae1c2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 96, "license_type": "no_license", "max_line_length": 24, "num_lines": 8, "path": "/src/devops/index.md", "repo_name": "thy-x/notes", "src_encoding": "UTF-8", "text": "# DevOps / GitOps / SRE \n\n```{eval-rst}\n.. 
toctree::\n\n git/howto\n thy_dev_machine/index\n```\n" }, { "alpha_fraction": 0.5844032168388367, "alphanum_fraction": 0.5929624438285828, "avg_line_length": 55.83783721923828, "blob_id": "1f434394490d33aa8b7961436908de1a3dd14fc3", "content_id": "54e45ab80107d207010eadc0a5ffe2ac3398e54b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2103, "license_type": "no_license", "max_line_length": 258, "num_lines": 37, "path": "/src/devops/thy_dev_machine/index.md", "repo_name": "thy-x/notes", "src_encoding": "UTF-8", "text": "# Thy's desktop environment\n\nHow I configure my desktop environment\n\n## Operating System\n\nAs of writing (Mar 2021), I use [Linux Mint 20 Cinnamon](https://linuxmint.com/rel_ulyana_cinnamon.php) (codename Ulyana) which is based on [Ubuntu 20.04](https://wiki.ubuntu.com/FocalFossa/ReleaseNotes). \n\n[Cinnamon Desktop Environment](https://en.wikipedia.org/wiki/Cinnamon_(desktop_environment)) derives from [GNOME 3](https://en.wikipedia.org/wiki/GNOME_3) but follows traditional [desktop metaphor](https://en.wikipedia.org/wiki/Desktop_metaphor) conventions.\n\n## Replicate Windows Shortcuts\n\nI'm a Linux user who are `forced` to use Windows at work. \n\nHere's how to replicate Windows shortcuts to my Development Machine.\n\nTODO: put the `dconf load` command\n\n## Jetbrains PyCharm shortcuts conflict\n\nSome shortcuts conflict with global system actions. To fix these conflicts, I reassign or disable the conflicting shortcut.\n\nTaken from [jetbrains](https://www.jetbrains.com/help/pycharm/configuring-keyboard-and-mouse-shortcuts.html#conflicts), here are a few examples of system shortcut conflicts with the default keymap in PyCharm\n\n| Shortcut | System action | IntelliJ IDEA action |\n|---------------------|------------------------------------------|---------------------------------|\n| Ctrl+Alt+S | Shade window | Open the Settings dialog |\n| Ctrl+Alt+L | Lock screen | Reformat Code |\n| Ctrl+Alt+T | Launch Terminal | Surround With |\n| Ctrl+Alt+F12 | Open the tty12 virtual console\tFile path | |\n| Ctrl+Alt+Left/Right | Switch between Workspaces | Undo/redo navigation operations |\n| Alt+F7 | Move window | Find Usages |\n | Alt+F8 | Resize window | Evaluate Expression |\n\n## References\n\n- <https://github.com/webpro/awesome-dotfiles>\n" }, { "alpha_fraction": 0.6245954632759094, "alphanum_fraction": 0.6504854559898376, "avg_line_length": 15.052631378173828, "blob_id": "1691062903fe4ed8ece4e7c0626425c5c108114b", "content_id": "031adf14081ce3e6a9f49e9a75558713b6280c24", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 309, "license_type": "no_license", "max_line_length": 36, "num_lines": 19, "path": "/src/kitchensink/include_download_python/index.md", "repo_name": "thy-x/notes", "src_encoding": "UTF-8", "text": "\n# Include and Download Python script\n\n````markdown\n```{literalinclude} test1.py\n:linenos:\n```\n{download}`test1.py` \n{download}`script <test1.py>` \n[script](test1.py) \n````\n\nResulting:\n\n```{literalinclude} test1.py\n:linenos:\n```\n{download}`test1.py` \n{download}`script <test1.py>` \n[script](test1.py) \n\n" }, { "alpha_fraction": 0.5405405163764954, "alphanum_fraction": 0.5540540814399719, "avg_line_length": 8.25, "blob_id": "22593539dd3053121a2fd98c1f531e95a323074e", "content_id": "b9c2d1d1df2635de6d59b791a6b918b5fd839edf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", 
"length_bytes": 74, "license_type": "no_license", "max_line_length": 15, "num_lines": 8, "path": "/src/javascript/index.md", "repo_name": "thy-x/notes", "src_encoding": "UTF-8", "text": "# Javascript\n\n```{eval-rst}\n.. toctree::\n :maxdepth: 2\n\n quickref\n```\n" }, { "alpha_fraction": 0.7216867208480835, "alphanum_fraction": 0.7240963578224182, "avg_line_length": 18.785715103149414, "blob_id": "19bb5c64933339f37217af36c836187de572055f", "content_id": "963ecb188ff87f4c6e28c4813db68285f1f41531", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 830, "license_type": "no_license", "max_line_length": 74, "num_lines": 42, "path": "/src/devops/git/howto.md", "repo_name": "thy-x/notes", "src_encoding": "UTF-8", "text": "# Git HowTos\n\n## Reset commit history \n\nSometimes I don't care about commit history and want to remove all of it. \n\nThere are two options:\n\n1) Create a new orphan branch and replace the branch with it\n```shell\n# Create an orphan branch\ngit checkout --orphan tmp_branch\ngit add -A\ngit commit -m \"Initial Commit\"\n\n# Deletes the master branch\ngit branch -D master \n\n# Rename the current branch to master\ngit branch -m master \n\n# Force push master branch to origin\ngit push -f origin master \n\n# optional, remove the old files\ngit gc --aggressive --prune=all\n```\n\n2) Remove .git folder and re-push it\n```shell\n# Remove .git folder\nrm -rf .git\n\n# recreate the repo\ngit init\ngit add .\ngit commit -m \"Initial commit\"\n\n# push to github \ngit remote add origin git@github.com:<YOUR ACCOUNT>/<YOUR REPOS>.git\ngit push -u --force origin master\n```" }, { "alpha_fraction": 0.5355122685432434, "alphanum_fraction": 0.565053403377533, "avg_line_length": 16.09677505493164, "blob_id": "17f6601fc96a36b8353c6d734a8586b45f2fa3c8", "content_id": "304ba528b68979157c4797b205f4d3518a3d2007", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1591, "license_type": "no_license", "max_line_length": 91, "num_lines": 93, "path": "/src/javascript/quickref.md", "repo_name": "thy-x/notes", "src_encoding": "UTF-8", "text": "# Quick Reference Javascript\n\n## 1 Lexical Structure\n\n### 1.1 Comments\n\n```javascript\n// single line comment\n\n/*\n Multi-line comment\n */\n```\n\n### 1.2 Literals\n\n```javascript\n1 // Number one\n1.5 // Number one point five\n'Hello' // String\ntrue // Boolean\nfalse // Boolean\nnull // Null object\n```\n\n### 1.3 Declarations\n\n| | |\n|-------|---------------------------------------------------------------------------------|\n| var | Declares a variable, optionally initializing it to a value |\n| let | Declares a block-scoped, local variable, optionally initializing it to a value. |\n| const | Declares a block-scoped, read-only named constant. |\n\n## 2 Numbers\nhttps://developer.mozilla.org/en-US/docs/Web/JavaScript/Guide/Numbers_and_dates\n\n### 2.1 Integer\n\nbase-10 integer\n```javascript\n1\n10\n123\n1000\n```\n\nhexadecimal literal begins with `0x` or `0X`\n```javascript\n0xff // 255\n0XABCD // 43981\n```\n\nIn ES6, binary (base 2) or octal (base 8) using prefixes 0b/0B and 0o/0O\n\n### 2.2 Floating-point\n\n\n## Quick Notes\n\n**Property Access** \n\n```javascript\nexpression . identifier\nexpression [ expression ]\n```\n\nExamples\n\n```javascript\nlet team = {size:5}\nteam.size\nteam[\"size\"]\n\n```\n\n**Conditional Property Access**\n\n```javascript\nexpression ?. 
identifier\nexpression ?.[ expression ]\n```\n\nExamples\n\n```javascript\nlet a = {}\na?.b // undefined\n```\n\n\n## References\n\nhttps://developer.mozilla.org/en-US/docs/Web/JavaScript\n\n" }, { "alpha_fraction": 0.4749999940395355, "alphanum_fraction": 0.48750001192092896, "avg_line_length": 7.888888835906982, "blob_id": "1be12c67758c43325c60e6a627f2330e8e2a4019", "content_id": "314658149446149a91a2304dbdc56eb7ef300e6a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 80, "license_type": "no_license", "max_line_length": 15, "num_lines": 9, "path": "/src/systems/index.md", "repo_name": "thy-x/notes", "src_encoding": "UTF-8", "text": "# Systems\n\n```{eval-rst}\n.. toctree::\n :maxdepth: 2\n :glob:\n\n */index\n```\n" }, { "alpha_fraction": 0.5652173757553101, "alphanum_fraction": 0.5652173757553101, "avg_line_length": 5.5714287757873535, "blob_id": "a174a566427d690667abf32c31867717013c6e21", "content_id": "343668250881518e8f0f0fa9bb1a9c9f3bc641ee", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 46, "license_type": "no_license", "max_line_length": 12, "num_lines": 7, "path": "/src/python/index.md", "repo_name": "thy-x/notes", "src_encoding": "UTF-8", "text": "# Python\n\n```{toctree}\n:glob:\n\nmisc/index\n```\n" }, { "alpha_fraction": 0.6064516305923462, "alphanum_fraction": 0.6064516305923462, "avg_line_length": 50.66666793823242, "blob_id": "7dd83c05b5e8b6b37bb5d6ab791289ba63b23ab7", "content_id": "c10160bb400e870bf0f7285c3a8db1bc7ba5af19", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 155, "license_type": "no_license", "max_line_length": 119, "num_lines": 3, "path": "/src/_global/custom.js", "repo_name": "thy-x/notes", "src_encoding": "UTF-8", "text": "$(document).ready(function () {\n $(\"a.external:not([href^='https://thy'],[href^='https://thump'])\").attr(\"target\", \"_blank\").addClass(\"external_link\")\n})\n" } ]
22
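The two "gotcha" pages in the thy-x/notes record above both hinge on when Python evaluates things: default arguments once at def time, closure variables at call time. A small standalone check (not part of the notes) makes the shared default directly visible through the function's __defaults__ attribute:

```python
def append_to(element, to=[]):  # same signature as in the note
    to.append(element)
    return to

print(append_to.__defaults__)  # ([],) -- one list, created at def time
append_to(12)
append_to(42)
print(append_to.__defaults__)  # ([12, 42],) -- the same default was mutated
```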
abacusv/pythonFun
https://github.com/abacusv/pythonFun
e4e879a2976327d64d3a67776be0a611da79a075
c19a6941155be49e2e0abef3d412f30457710963
d60047feb9a2d88decc1d9f4187528e44e5c9937
refs/heads/master
2020-12-25T18:32:00.664376
2013-07-07T18:05:00
2013-07-07T18:05:00
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5984975099563599, "alphanum_fraction": 0.6060100197792053, "avg_line_length": 31.324323654174805, "blob_id": "4e1a4755ffb47e8f72dc71c3e724f31e2aafa5c3", "content_id": "04d92f85f0dee958b5ef03e87af1841a1934645c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1198, "license_type": "no_license", "max_line_length": 74, "num_lines": 37, "path": "/grey_scale.py", "repo_name": "abacusv/pythonFun", "src_encoding": "UTF-8", "text": "import sys\nimport os\nimport numpy as np\nimport cv2\n''' opencv2 opens a image file and creates a vector of type (x,y,[r,g,b]).\n To convert the image into grey scale we need to store only one value\n of the rgb. The best way to do so is to store in format (x,y). The\n color value at (x,y) should be avg of [r,g,b] value\n'''\ndef grey_scale(image):\n return (image.sum(2)/3).astype(np.uint8)\n\nexts = ['.bmp', '.pbm', '.pgm', '.ppm', '.sr', '.ras', '.jpeg', '.jpg', \n '.jpe', '.jp2', '.tiff', '.tif', '.png', '.JPG']\n\ndef main(imagepath):\n if not os.path.exists(imagepath):\n print \"Error: File not found\"\n return\n\n '''some native checking and converting steps '''\n print \"Loading image {}\".format(imagepath)\n dirpath = os.path.dirname(imagepath)\n fname = os.path.basename(imagepath)\n name, ext = os.path.splitext(fname)\n if ext not in exts:\n print \"Unsupported file type\"\n return\n ''' read the image file '''\n img = cv2.imread(imagepath)\n out_img = grey_scale(img)\n out_path = os.path.join(dirpath, \"{}_{}.{}\".format(name, \"grey\", ext))\n cv2.imwrite(out_path, out_img)\n\n\nif __name__ == \"__main__\":\n main(sys.argv[1])\n\n\n" } ]
1
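grey_scale.py in the record above converts to grayscale with a plain per-pixel channel mean. As a hedged alternative sketch (not in the repo; grey_luminosity is an illustrative name), a luminosity-weighted mean — assuming OpenCV's BGR channel order — is closer to what cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) computes:

```python
import numpy as np

def grey_luminosity(image):
    # cv2.imread returns channels in BGR order, so the ITU-R BT.601
    # weights (0.299 R + 0.587 G + 0.114 B) are applied in reverse.
    weights = np.array([0.114, 0.587, 0.299])
    return image[..., :3].dot(weights).astype(np.uint8)
```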
k3mlol/HardWAFStress
https://github.com/k3mlol/HardWAFStress
c1d0a79733e1b9ba5acb2a65a87b4f8a91c525ec
9ec620553ebcdb37ab0bbaa36808736a62015354
b4474952b52cbdcd3c4eef3a1f55c5775beb5f14
refs/heads/master
2020-04-16T02:50:43.303830
2019-01-11T08:48:46
2019-01-11T08:48:46
165,210,371
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7398843765258789, "alphanum_fraction": 0.7398843765258789, "avg_line_length": 16.399999618530273, "blob_id": "bde5420bb03e9251ab403397e6af24a741a1013d", "content_id": "73eb062ffaca5793904df51edd19b657130c6692", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 173, "license_type": "permissive", "max_line_length": 61, "num_lines": 10, "path": "/readme.md", "repo_name": "k3mlol/HardWAFStress", "src_encoding": "UTF-8", "text": "### description\n\nWAF may ban attackers' ip. what if I use proxy to attack.\n\nwhat if huge proxy ip server attack the waf at the same time.\n\n### Library\ngevent\n\nuse virtualenv" }, { "alpha_fraction": 0.6056910753250122, "alphanum_fraction": 0.6310975551605225, "avg_line_length": 23.600000381469727, "blob_id": "d18680b293b75055cba50fd1a108550281c44e8a", "content_id": "eff150ea8017043408d9f22e0e4314dd102b7552", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 984, "license_type": "permissive", "max_line_length": 87, "num_lines": 40, "path": "/hard_waf_stress.py", "repo_name": "k3mlol/HardWAFStress", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python2.7\n#encoding:utf-8\n#description:hard WAF Stress Test\n#author:k3m\n\nimport requests\nimport gevent\nimport json\nimport sys\n\ndef get_proxy():\n server_list = []\n file_obj = open('proxy_url.txt', 'r')\n url = file_obj.read()\n file_obj.close()\n response = requests.get(url)\n server_json = json.loads(response.text)\n for one in server_json[\"data\"][\"proxy_list\"]:\n tmp = {\"http\": \"http://\"+one}\n #print tmp\n server_list.append(tmp)\n return server_list\n\ndef attack_test(target, proxy):\n #timeout is 3 seconds\n\n #payload = {'id': 'select 1 from databses;'}\n try:\n r = requests.get(url=target + \"/.svn/\", timeout=3, verify=False, proxies=proxy)\n except:\n print \"error go on\"\ntarget = sys.argv[0]\n#target = \"http://121.15.129.226:9001/\"\n\nnum = 100\ni = 1\nfor i in range(num):\n server_list = get_proxy()\n tasks=[gevent.spawn(attack_test,target, proxy) for proxy in server_list]\n gevent.joinall(tasks)\n" } ]
2
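hard_waf_stress.py in the record above spawns one greenlet per proxy and joins them batch by batch. A minimal sketch (reusing the script's get_proxy, attack_test, and target names, which are assumptions about that scope) that caps concurrency with gevent's Pool instead of unbounded spawning:

```python
from gevent.pool import Pool

pool = Pool(20)  # at most 20 requests in flight at a time
for proxy in get_proxy():  # get_proxy() as defined in the script
    pool.spawn(attack_test, target, proxy)
pool.join()  # block until every greenlet has finished
```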
yangshuangs/building-tool
https://github.com/yangshuangs/building-tool
c79f03740283c3b38ecbf42efb1422ff79d39151
4981783fafd98fdf97bfe92b4175357899e7c1c6
95f998d8485c39d5b133acec31e0f47df9dfdbc6
refs/heads/master
2020-04-15T01:52:09.532066
2019-01-06T14:59:22
2019-01-06T14:59:22
164,294,537
0
0
null
2019-01-06T09:53:08
2019-01-06T09:53:11
2019-01-06T14:59:23
Python
[ { "alpha_fraction": 0.5847952961921692, "alphanum_fraction": 0.5847952961921692, "avg_line_length": 26.360000610351562, "blob_id": "32c849c1176e1fa5ebb026d6a828ae228edd774b", "content_id": "a39c3e591cb60286e7f61c0dc19a4e01510f8227", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 698, "license_type": "no_license", "max_line_length": 69, "num_lines": 25, "path": "/core/window/window.py", "repo_name": "yangshuangs/building-tool", "src_encoding": "UTF-8", "text": "import bmesh\nfrom .window_types import make_window\nfrom ...utils import (get_edit_mesh, kwargs_from_props)\n\n\nclass Window:\n\n @classmethod\n def build(cls, context, props):\n me = get_edit_mesh()\n bm = bmesh.from_edit_mesh(me)\n faces = [face for face in bm.faces if face.select] # ๆ‰พๅ‡บ่ขซ้€‰ไธญ็š„้ข\n\n if cls.validate(faces):\n make_window(bm, faces, **kwargs_from_props(props))\n bmesh.update_edit_mesh(me, True)\n return {'FINISHED'}\n return {'CANCELLED'}\n\n @classmethod\n def validate(cls, faces):\n if faces:\n if not any([f.normal.z for f in faces]):\n return True\n return False\n" }, { "alpha_fraction": 0.7076923251152039, "alphanum_fraction": 0.7076923251152039, "avg_line_length": 18.117647171020508, "blob_id": "a580648eb1e60e1555ca955fbc6ef6d364e4a8f5", "content_id": "c9f7a26b5132707d09b64093e7f9cdaa9baef865", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 325, "license_type": "no_license", "max_line_length": 39, "num_lines": 17, "path": "/core/door/__init__.py", "repo_name": "yangshuangs/building-tool", "src_encoding": "UTF-8", "text": "import bpy\n\nfrom .door import Door\nfrom .door_ops import DoorOperator\nfrom .door_props import DoorProperty\n\nclasses = (\n DoorProperty, DoorOperator\n)\n\ndef register_door():\n for cls in classes:\n bpy.utils.register_class(cls)\n\ndef unregister_door():\n for cls in classes:\n bpy.utils.unregister_class(cls)\n" }, { "alpha_fraction": 0.477297306060791, "alphanum_fraction": 0.49864864349365234, "avg_line_length": 21.155689239501953, "blob_id": "06b908a569f5db24d055bfc576cde5d3e4c67533", "content_id": "d24d877ebd67c2030bbaef9dc2a4a16dd6bf32fa", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3736, "license_type": "no_license", "max_line_length": 61, "num_lines": 167, "path": "/utils/util_material.py", "repo_name": "yangshuangs/building-tool", "src_encoding": "UTF-8", "text": "import bpy\r\r\rdef create_mat(name=\"Default Material\"):\r \"\"\"ๆ–ฐๅปบๆ่ดจ\"\"\"\r return bpy.data.materials.new(name)\r\r\rdef link_mat(obj, mat):\r \"\"\"ๆ่ดจๅ…ณ่”ๅˆฐๆจกๅž‹ไธŠ\"\"\"\r obj.data.materials.append(mat)\r\r\rdef set_defaults(mat, diffuse, diff_int, specular, spec_int):\r mat.diffuse_color = diffuse\r mat.diffuse_intensity = diff_int\r mat.specular_color = specular\r mat.specular_intensity = spec_int\r\r\rdef material_set_faces(obj, mat, faces):\r if not mat:\r return\r if mat.name not in obj.data.materials:\r link_mat(obj, mat)\r mat_index = list(obj.data.materials).index(mat)\r for face in faces:\r face.material_index = mat_index\r\r\rdef has_material(obj, name):\r return name in obj.data.materials.keys()\r\r\rdef template_create_materials(obj, mat_name, defaults):\r \"\"\"ๅˆ›ๅปบๆ่ดจๅฎžไพ‹\"\"\"\r if has_material(obj, mat_name):\r mat = obj.data.materials[mat_name]\r else:\r mat = create_mat(mat_name)\r link_mat(obj, mat)\r\r set_defaults(\r mat,\r defaults.get('diffuse'),\r 
defaults.get('diffuse_intensity'),\r defaults.get('specular'),\r defaults.get('specular_intensity')\r )\r return mat\r\r\rdef floor_mat_slab(obj):\r return template_create_materials(\r obj,\r \"material_slab\",\r {\r 'diffuse': (.4, .35, .3),\r 'diffuse_intensity': .8,\r 'specular': (1, 1, 1),\r 'specular_intensity': 0\r }\r )\r\r\rdef floor_mat_wall(obj):\r return template_create_materials(\r obj,\r \"material_wall\",\r {\r 'diffuse': (.3, .25, .13),\r 'diffuse_intensity': .8,\r 'specular': (1, 1, 1),\r 'specular_intensity': 0\r }\r )\r\r\rdef window_mat_frame(obj):\r return template_create_materials(\r obj,\r \"material_window_frame\",\r {\r 'diffuse': (.8, .8, .8),\r 'diffuse_intensity': 1,\r 'specular': (1, 1, 1),\r 'specular_intensity': 0\r }\r )\r\r\rdef window_mat_bars(obj):\r return template_create_materials(\r obj,\r \"material_window_bar\",\r {\r 'diffuse': (0, .6, 0),\r 'diffuse_intensity': 1,\r 'specular': (1, 1, 1),\r 'specular_intensity': 0\r }\r )\r\r\rdef window_mat_glass(obj):\r return template_create_materials(\r obj,\r \"material_window_glass\",\r {\r 'diffuse': (0, .1, .6),\r 'diffuse_intensity': 1,\r 'specular': (1, 1, 1),\r 'specular_intensity': 0\r }\r )\r\r\rdef door_mat_frame(obj):\r return template_create_materials(\r obj,\r \"material_door_frame\",\r {\r 'diffuse': (.8, .8, .8),\r 'diffuse_intensity': 1,\r 'specular': (1, 1, 1),\r 'specular_intensity': 0\r }\r )\r\r\rdef door_mat_pane(obj):\r return template_create_materials(\r obj,\r \"material_door_pane\",\r {\r 'diffuse': (.13, .05, 0),\r 'diffuse_intensity': 1,\r 'specular': (1, 1, 1),\r 'specular_intensity': 0\r }\r )\r\r\rdef door_mat_groove(obj):\r return template_create_materials(\r obj,\r \"material_door_groove\",\r {\r 'diffuse': (.13, .05, 0),\r 'diffuse_intensity': 1,\r 'specular': (1, 1, 1),\r 'specular_intensity': 0\r }\r )\r\r\rdef door_mat_glass(obj):\r return template_create_materials(\r obj,\r \"material_door_glass\",\r {\r 'diffuse': (0, .1, .6),\r 'diffuse_intensity': 1,\r 'specular': (1, 1, 1),\r 'specular_intensity': 0\r }\r )\r" }, { "alpha_fraction": 0.5991171002388, "alphanum_fraction": 0.6041622757911682, "avg_line_length": 26.339080810546875, "blob_id": "84c438f25857e7d423179132f8d33adf88faaaae", "content_id": "32b935a8c9eadfa7600c3103e6479d064347dd5c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5221, "license_type": "no_license", "max_line_length": 84, "num_lines": 174, "path": "/utils/util_mesh.py", "repo_name": "yangshuangs/building-tool", "src_encoding": "UTF-8", "text": "import bpy\nimport bmesh\nimport operator\nimport functools as ft\nfrom mathutils import Matrix, Vector\nfrom bmesh.types import BMVert\n\n\n# verts-้กถ็‚น, edges-่พน, faces-้ข\n# ๅฏน็ฝ‘ๆ ผ่ฟ›่กŒๆ“ไฝœ\n\ndef get_edit_mesh():\n \"\"\"่Žทๅ–็ผ–่พ‘ๆจกๅผไธ‹็š„็‰ฉไฝ“็ฝ‘ๆ ผๆ•ฐๆฎ\"\"\"\n return bpy.context.edit_object.data\n\n\ndef make_mesh(name):\n \"\"\"ๆ–ฐๅปบ็ฝ‘ๆ ผ\"\"\"\n return bpy.data.meshes.new(name)\n\n\ndef select(elements, val=True):\n \"\"\"้€‰ๆ‹ฉๅ…ƒ็ด \"\"\"\n for element in elements:\n element.select = val\n\n\ndef filter_geom(geom, _type):\n \"\"\"lambda--ๅŒฟๅๅ‡ฝๆ•ฐ\"\"\"\n return list(filter(lambda x: isinstance(x, _type), geom))\n\n\ndef filter_vertical_edges(edges, normal):\n \"\"\"\n round()ๆ–นๆณ•--่ฟ”ๅ›žๆตฎ็‚นๆ•ฐx็š„ๅ››่ˆไบ”ๅ…ฅๅ€ผ\n res--ๅฏนๅž‚็›ดๆ–นๅ‘็š„่พน่ฟ›่กŒๅ€ผ็ฒพ็กฎๅŒ–๏ผŒ่ฟ”ๅ›žไธบ่พน็š„ๆ•ฐ็ป„\n \"\"\"\n res = []\n rnd = lambda val: round(val, 3)\n\n for e in edges:\n if normal.x:\n s = set([rnd(v.co.y) for v in e.verts])\n 
else:\n s = set([rnd(v.co.x) for v in e.verts])\n\n if len(s) == 1:\n res.append(e)\n return res\n\n\ndef filter_horizontal_edges(edges, normal):\n \"\"\"ๅค„็†ๆฐดๅนณๆ–นๅ‘็š„่พน\"\"\"\n res = []\n rnd = lambda val: round(val, 3)\n\n for e in edges:\n if normal.z:\n s = set([rnd(v.co.y) for v in e.verts])\n else:\n s = set([rnd(v.co.z) for v in e.verts])\n\n if len(s) == 1:\n res.append(e)\n return res\n\n\ndef calc_edge_median(edge):\n \"\"\"่ฎก็ฎ—่พน็š„ไธญ็‚น\"\"\"\n # reduce() ๅฏ่ฟญไปฃ็‰ฉ็›ธๅŠ \n return ft.reduce(operator.add, [v.co for v in edge.verts])/len(edge.verts)\n\n\ndef calc_verts_median(verts):\n \"\"\"่ฎก็ฎ—ๅคš็‚นไธญๅฟƒ\"\"\"\n return ft.reduce(operator.add, [v.co for v in verts])/len(verts)\n\n\ndef calc_face_dimensions(face):\n \"\"\"่ฎก็ฎ—้ข็š„้•ฟๅ’Œๅฎฝ\"\"\"\n vertical = filter_vertical_edges(face.edges, face.normal)[-1]\n horizontal = filter_horizontal_edges(face.edges, face.normal)[-1]\n return horizontal.calc_length(), vertical.calc_length()\n\n\ndef face_with_verts(bm, verts, default=None):\n \"\"\"ๅˆฉ็”จ็ป™ๅฎš็š„้กถ็‚นๅฏปๆ‰พๅฏนๅบ”็š„้ข\"\"\"\n for face in bm.faces:\n if len(set(list(face.verts) + verts)) == len(verts):\n return face\n return default\n\n\ndef split_quad(bm, face, vertical=False, cuts=4):\n \"\"\"ๅฐ†ๅ››่พนๅฝข็š„่พน็ป†ๅˆ†ๆˆๅ‡ๅŒ€็š„ๆฐดๅนณ/ๅž‚็›ดๅˆ‡ๅ‰ฒ\"\"\"\n res = None\n if vertical:\n e = filter_horizontal_edges(face.edges, face.normal)\n res = bmesh.ops.subdivide_edges(bm, edges=e, cuts=cuts)\n else:\n e = filter_vertical_edges(face.edges, face.normal)\n res = bmesh.ops.subdivide_edges(bm, edges=e, cuts=cuts)\n return res\n\n\ndef split(bm, face, svertical, shorizontal, offx=0, offy=0, offz=0):\n \"\"\"ๅฐ†ไธ€ไธชๅ››ๅ…ƒ็ป„ๅˆ†ๆˆ่ง„ๅˆ™็š„ๅ››ไธช้ƒจๅˆ†๏ผˆ้€šๅธธๆ˜ฏไธ€ไธชๅชๆœ‰ๆฃฑ่ง’็›ด่พน็š„ๅ›พๅฝข๏ผ‰\"\"\"\n scale = 3\n svertical *= scale\n shorizontal *= scale\n do_vertical = svertical < scale\n do_horizontal = shorizontal < scale\n face.select = False\n median = face.calc_center_median()\n\n if not do_horizontal and not do_vertical:\n return face\n\n # ๆฐดๅนณๆ–นๅ‘ๅˆ†ๅ‰ฒ -- ่พน็š„้กถ็‚นๅ…ทๆœ‰็›ธๅŒ็š„Zๅๆ ‡\n if do_horizontal:\n horizontal = list(filter(\n lambda e: len(set([round(v.co.z, 1) for v in e.verts])) == 1, face.edges\n ))\n sp_res = bmesh.ops.subdivide_edges(bm, edges=horizontal, cuts=2)\n verts = filter_geom(sp_res['geom_inner'], BMVert)\n\n T = Matrix.Translation(-median)\n bmesh.ops.scale(bm, vec=(shorizontal, shorizontal, 1), verts=verts, space=T)\n\n # ๅž‚็›ดๆ–นๅ‘ๅˆ†ๅ‰ฒ -- ่พนไธŠ็š„้กถ็‚นๅ…ทๆœ‰็›ธๅŒ็š„x/yๅๆ ‡\n if do_vertical:\n bmesh.ops.remove_doubles(bm, verts=list(bm.verts))\n face = face_with_verts(bm, verts) if do_horizontal else face\n\n # ๅˆคๆ–ญๅž‚็›ด็š„่พน\n other = list(filter(\n lambda e: len(set([round(v.co.z, 1) for v in verts])) == 1,\n face.edges\n ))\n vertical = list(set(face.edges) - set(other))\n\n # ๅˆ‡ๅˆ†\n sp_res = bmesh.ops.subdivide_edges(bm, edges=vertical, cuts=2)\n verts = filter_geom(sp_res['geom_inner'], BMVert)\n\n # ่ฎก็ฎ—ๅˆ‡ๅˆ†็š„้ข\n T = Matrix.Translation(-median)\n bmesh.ops.scale(bm, vec=(1, 1, svertical), verts=verts, space=T)\n\n if do_horizontal and do_vertical:\n link_edges = [e for v in verts for e in v.link_edges]\n all_verts = list({v for e in link_edges for v in e.verts})\n bmesh.ops.translate(bm, verts=all_verts, vec=(offx, offy, 0))\n elif do_horizontal and not do_vertical:\n bmesh.ops.translate(bm, verts=verts, vec=(offx, offy, 0))\n\n bmesh.ops.translate(bm, verts=verts, vec=(0, 0, offz))\n\n face = face_with_verts(bm, verts)\n return face\n\n\ndef edge_split_offset(bm, edges, 
verts, offset, connect_verts=False):\n \"\"\"ๅฏน่พน่ฟ›่กŒๅˆ†ๅ‰ฒ๏ผŒๅ็งป้‡็”ฑ้กถ็‚น่ฎก็ฎ—ๅพ—ๅˆฐ\"\"\"\n new_verts = []\n for idx, e in enumerate(edges):\n vert = verts[idx]\n _, v = bmesh.utils.edge_split(e, vert, offset / e.calc_length())\n new_verts.append(v)\n\n if connect_verts:\n res = bmesh.ops.connect_verts(bm, verts=new_verts).get('edges')\n return res\n return new_verts\n" }, { "alpha_fraction": 0.5950633883476257, "alphanum_fraction": 0.6017345190048218, "avg_line_length": 22.793651580810547, "blob_id": "f83a180072d40bfa620497d087df01d5540a37ac", "content_id": "61791b376702376c79117fdd636ef17378bb9f8e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1525, "license_type": "no_license", "max_line_length": 56, "num_lines": 63, "path": "/__init__.py", "repo_name": "yangshuangs/building-tool", "src_encoding": "UTF-8", "text": "import bpy\nfrom .core import register_core, unregister_core\n\nbl_info = {\n \"name\": \"Test Build Tools\",\n \"author\": \"PxGeng\",\n \"version\": (0, 0, 1),\n \"blender\": (2, 79, 0),\n \"location\": \"View3D > Toolshelf > Test Build Tools\",\n \"description\": \"Building Test Tools\",\n \"warning\": \"\",\n \"wiki_url\": \"\",\n \"tracker_url\": \"\",\n \"category\": \"Cynthia\"\n}\n\n\nclass MeshGenerationPanel(bpy.types.Panel):\n \"\"\"ๅปบ็ญ‘ๆ“ไฝœไปฅๅŠๅฑžๆ€ง่ฎพ็ฝฎ็š„UI้ขๆฟ\"\"\"\n bl_idname = \"VIEW3D_PT_cynthia\"\n bl_label = \"Mesh Generation\"\n bl_space_type = \"VIEW_3D\"\n bl_region_type = \"TOOLS\"\n bl_category = \"Test Tools\"\n\n def draw(self, context):\n layout = self.layout\n active = context.object\n\n col = layout.column(align=True)\n col.operator(\"cynthia.add_floorplan\")\n col.operator(\"cynthia.add_floors\")\n\n row = col.row(align=True)\n row.operator(\"cynthia.add_window\")\n row.operator(\"cynthia.add_door\")\n\n row = col.row(align=True)\n row.operator(\"cynthia.add_railing\")\n row.operator(\"cynthia.add_balcony\")\n\n col.operator(\"cynthia.add_stairs\")\n col.operator(\"cynthia.add_roof\")\n\n\ndef register():\n bpy.utils.register_class(MeshGenerationPanel)\n register_core()\n\n\ndef unregister():\n bpy.utils.unregister_class(MeshGenerationPanel)\n unregister_core()\n\nif __name__ == \"__main__\":\n import os\n os.system(\"clear\")\n try:\n unregister()\n except Exception as e:\n pass\n finally:\n register()\n" }, { "alpha_fraction": 0.5924437046051025, "alphanum_fraction": 0.6028938889503479, "avg_line_length": 27.272727966308594, "blob_id": "935ae51a260d0a289163d5d127a15fb49c9fc980", "content_id": "6bd5f9835600c3e53ad0c1a16ae6f98fcc233694", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1244, "license_type": "no_license", "max_line_length": 63, "num_lines": 44, "path": "/core/balcony/balcony_props.py", "repo_name": "yangshuangs/building-tool", "src_encoding": "UTF-8", "text": "import bpy\nfrom bpy.props import (\n BoolProperty,\n EnumProperty,\n FloatProperty,\n PointerProperty)\n\nfrom ..generic import SizeOffsetProperty\nfrom ..rails import RailProperty\n\nclass BalconyProperty(bpy.types.PropertyGroup):\n width = FloatProperty(\n name=\"Balcony Width\", min=0.01, max=100.0, default=1.2,\n description=\"Width of balcony\")\n\n railing = BoolProperty(\n name=\"Add Railing\", default=True,\n description=\"Whether the balcony has railing\")\n\n open_items = [\n (\"NONE\", \"None\", \"\", 0),\n (\"FRONT\", \"Front\", \"\", 1),\n (\"LEFT\", \"Left\", \"\", 2),\n (\"RIGHT\", \"Right\", \"\", 3)\n ]\n\n open_side 
= EnumProperty(\n        name=\"Open Side\", items=open_items, default='NONE',\n        description=\"Sides of the balcony with no railing\")\n\n    rail = PointerProperty(type=RailProperty)\n    soff = PointerProperty(type=SizeOffsetProperty)\n\n    def draw(self, context, layout):\n        self.soff.draw(context, layout)\n\n        row = layout.row()\n        row.prop(self, 'width')\n\n        layout.prop(self, 'railing', toggle=True)\n        if self.railing:\n            box = layout.box()\n            box.prop(self, \"open_side\", text=\"Open\")\n            self.rail.draw(context, box)\n" }, { "alpha_fraction": 0.6831210255622864, "alphanum_fraction": 0.6831210255622864, "avg_line_length": 26.30434799194336, "blob_id": "7c76d6625cfd6ab50156f62ab58dedaf2d1b188b", "content_id": "04863bf4997c37ff1ab68a88753ac261efd68dee", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 648, "license_type": "no_license", "max_line_length": 73, "num_lines": 23, "path": "/core/window/window_ops.py", "repo_name": "yangshuangs/building-tool", "src_encoding": "UTF-8", "text": "import bpy\nfrom .window import Window\nfrom .window_props import WindowPrperty\n\n\nclass WindowOperator(bpy.types.Operator):\n    \"\"\"Creates windows on selected mesh faces\"\"\"\n    # Create windows inside the currently selected mesh\n    bl_idname = \"cynthia.add_window\"\n    bl_label = \"Add window\"\n    bl_options = {'REGISTER', 'UNDO'}\n\n    props = bpy.props.PointerProperty(type=WindowPrperty)\n\n    @classmethod\n    def poll(cls, context):\n        return context.object is not None and context.mode == 'EDIT_MESH'\n\n    def execute(self, context):\n        return Window.build(context, self.props)\n\n    def draw(self, context):\n        self.props.draw(context, self.layout)\n" }, { "alpha_fraction": 0.5336179137229919, "alphanum_fraction": 0.5346851944923401, "avg_line_length": 26.558822631835938, "blob_id": "7c5b78d8cb4660f1ca6e487fb928ba62cd8055f4", "content_id": "7bbaf2f0f5fdae93ca170b45883c3afd490600af", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 949, "license_type": "no_license", "max_line_length": 66, "num_lines": 34, "path": "/core/floor/floor.py", "repo_name": "yangshuangs/building-tool", "src_encoding": "UTF-8", "text": "import bmesh\nfrom .floor_types import make_floors\nfrom ...utils import (\n    get_edit_mesh,\n    kwargs_from_props\n)\n\n\nclass Floor:\n\n    @classmethod\n    def build(cls, context, props):\n        \"\"\"Generate floor geometry from the floor types & props\"\"\"\n        me = get_edit_mesh()\n        bm = bmesh.from_edit_mesh(me)\n\n        if cls.validate(bm):\n            if any([f for f in bm.faces if f.select]):\n                make_floors(bm, None, **kwargs_from_props(props))\n            else:\n                edges = [e for e in bm.edges if e.is_boundary]\n                make_floors(bm, edges, **kwargs_from_props(props))\n            bmesh.update_edit_mesh(me, True)\n            return {'FINISHED'}\n        return {'CANCELLED'}\n\n    @classmethod\n    def validate(cls, bm):\n        \"\"\"Valid if the mesh is a flat plane (single z level) or has selected faces\"\"\"\n        if len(list({v.co.z for v in bm.verts})) == 1:\n            return True\n        elif any([f for f in bm.faces if f.select]):\n            return True\n        return False\n" }, { "alpha_fraction": 0.6067478060722351, "alphanum_fraction": 0.6106194853782654, "avg_line_length": 31.872726440429688, "blob_id": "68c0c2acf5a2d4ba61e3c46768b2bdff53c12011", "content_id": "698869cd100692b219dce13b7e7c30b41ca0ef39", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2056, "license_type": "no_license", "max_line_length": 91, "num_lines": 55, "path": "/core/floor/floor_types.py", "repo_name": "yangshuangs/building-tool", "src_encoding": "UTF-8", "text": "import 
bmesh\nimport itertools as it\nfrom bmesh.types import (\n BMVert, BMFace, BMEdge\n)\nfrom ...utils import (\n select, filter_geom\n)\n\n\ndef make_floors(bm, edges, floor_count, floor_height,\n slab_thickness, slab_outset, **kwargs):\n \"\"\"\n ็”Ÿๆˆ็ฑปไผผๅปบ็ญ‘ๆฅผๅฑ‚็š„ๆŒคๅŽ‹ๆจกๅž‹\n :param floor_count: (int)ๆฅผๅฑ‚ๆ•ฐ\n :param floor_height: (float)ๆฅผๅฑ‚้ซ˜ๅบฆ\n :param slab_thickness: (float)ๆฅผๆฟๅŽšๅบฆ\n :param slab_outset: (float)ๆฅผๆฟๅค–ๅปถ้ƒจๅˆ†้•ฟๅบฆ\n :param kwargs: \n \"\"\"\n del_faces = []\n if not edges:\n # --ๆฃ€ๆต‹้€‰ไธญ็š„้ข็š„่พน็ผ˜\n del_faces = [f for f in bm.faces if f.select]\n all_edges = list({e for f in del_faces for e in f.edges})\n edges = [e for e in all_edges\n if len(list({f for f in e.link_faces if f in del_faces})) == 1]\n\n # --extrude floor\n slab_faces = []\n\n # cycle()-->ๆŠŠไผ ๅ…ฅ็š„ไธ€ไธชๅบๅˆ—ๆ— ้™้‡ๅคไธ‹ๅŽป\n offsets = it.cycle([slab_thickness, floor_height])\n\n # islice(iterable, start, stop[, step])-->่ฟ”ๅ›žๅบๅˆ—seq็š„ไปŽstartๅผ€ๅง‹ๅˆฐstop็ป“ๆŸ็š„ๆญฅ้•ฟไธบstep็š„ๅ…ƒ็ด ็š„่ฟญไปฃๅ™จ\n # ???\n for offset in it.islice(offsets, 0, floor_count*2):\n if offset == 0 and offset == slab_thickness:\n continue\n ext = bmesh.ops.extrude_edge_only(bm, edges=edges)\n # ???translate\n bmesh.ops.translate(bm, vec=(0, 0, offset), verts=filter_geom(ext['geom'], BMVert))\n edges = filter_geom(ext['geom'], BMEdge)\n if offset == slab_thickness:\n slab_faces.extend(filter_geom(ext['geom'], BMFace))\n # --ๅฐ†้ขๅตŒๅ…ฅๅŒบๅŸŸ\n bmesh.ops.inset_region(bm, faces=slab_faces, depth=-slab_outset)\n # --ไธŠไธ‹ๆ–‡ๅˆ›ๅปบ๏ผŒไปŽ้กถ็‚นๅˆ›ๅปบๆ–ฐ้ข๏ผŒไปŽ่พน็ฝ‘็”Ÿๆˆไธœ่ฅฟ๏ผŒๅˆถไฝœ็บฟ่พน็ญ‰\n bmesh.ops.contextual_create(bm, geom=edges)\n # --่ฎก็ฎ—ๆŒ‡ๅฎš่พ“ๅ…ฅ้ข็š„โ€œๅค–้ƒจโ€ๆณ•็บฟ\n bmesh.ops.recalc_face_normals(bm, faces=bm.faces)\n\n if del_faces:\n bmesh.ops.delete(bm, geom=del_faces, context=5) # ???context\n select(list(bm.edges), False)\n" }, { "alpha_fraction": 0.7323943376541138, "alphanum_fraction": 0.7323943376541138, "avg_line_length": 19.882352828979492, "blob_id": "4140ccf43c7282204c96a8f8e7db0629c29f4bdf", "content_id": "de4456bf56123919399b90af63a8e2050a164795", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 355, "license_type": "no_license", "max_line_length": 42, "num_lines": 17, "path": "/core/balcony/__init__.py", "repo_name": "yangshuangs/building-tool", "src_encoding": "UTF-8", "text": "import bpy\n\nfrom .balcony import Balcony\nfrom .balcony_ops import BalconyOperator\nfrom .balcony_props import BalconyProperty\n\nclasses = (\n BalconyProperty, BalconyOperator\n)\n\ndef register_balcony():\n for cls in classes:\n bpy.utils.register_class(cls)\n\ndef unregister_balcony():\n for cls in classes:\n bpy.utils.unregister_class(cls)\n" }, { "alpha_fraction": 0.7329843044281006, "alphanum_fraction": 0.7329843044281006, "avg_line_length": 17.190475463867188, "blob_id": "6d74f817b92b64ff624725c0a51794daeff108f3", "content_id": "3e30131ab7999690ec3015b663cf8b72b78200cd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 382, "license_type": "no_license", "max_line_length": 46, "num_lines": 21, "path": "/core/floorplan/__init__.py", "repo_name": "yangshuangs/building-tool", "src_encoding": "UTF-8", "text": "import bpy\n\nfrom .floorplan import Floorplan\nfrom .floorplan_ops import FloorplanOperator\nfrom .floorplan_props import FloorplanProperty\n\n\nclasses = (\n FloorplanOperator,\n FloorplanProperty\n)\n\n\ndef register_floorplan():\n for cls in 
classes:\n        bpy.utils.register_class(cls)\n\n\ndef unregister_floorplan():\n    for cls in classes:\n        bpy.utils.unregister_class(cls)\n" }, { "alpha_fraction": 0.5632458329200745, "alphanum_fraction": 0.5692124366760254, "avg_line_length": 21.958904266357422, "blob_id": "b9c56237aaadb6bb6e9419c05e9eb955c46cfb92", "content_id": "27f8b938a7ffbc25051b7f801bed29d81cc0455f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1756, "license_type": "no_license", "max_line_length": 92, "num_lines": 73, "path": "/utils/util_cynthia.py", "repo_name": "yangshuangs/building-tool", "src_encoding": "UTF-8", "text": "import bpy\nfrom mathutils import Vector\n\n\ndef clamp(val, _min, _max):\n    \"\"\"Clamp val to the range [_min, _max]\"\"\"\n    return max(min(val, _max), _min)\n\n\ndef condition(con, val1, val2):\n    return val1 if con else val2\n\n\ndef ifeven(num, val1, val2):\n    return condition(num % 2 == 0, val1, val2)\n\n\ndef args_from_props(props, names):\n    return tuple(getattr(props, name) for name in names)\n\n\ndef kwargs_from_props(props):\n    valid_types = (\n        int, float, str, tuple, bool, Vector,\n        bpy.types.Material,\n        bpy.types.Object\n    )\n\n    result = {}\n    for p in dir(props):\n        if p.startswith('__') or p in ['rna_type', 'bl_rna']:\n            continue\n        prop = getattr(props, p)\n\n        if isinstance(prop, valid_types):\n            result[p] = prop\n        elif isinstance(prop, bpy.types.PropertyGroup) and not isinstance(prop, type(props)):\n            result.update(kwargs_from_props(prop))\n    return result\n\n\ndef assert_test(func):\n    \"\"\"Run func immediately, reporting PASSED/FAILED instead of raising\"\"\"\n    def wrapper():\n        try:\n            func()\n            print(func.__name__.upper() + \" PASSED ..\")\n        except Exception as e:\n            print(func.__name__.upper() + \" FAILED ..\", e)\n\n    return wrapper()\n\n\ndef clean_scene():\n    \"\"\"Delete every object in the scene\"\"\"\n    scene = bpy.context.scene\n    if scene.objects:\n        active = scene.objects.active  # currently active object\n        if active and active.mode == 'EDIT':\n            bpy.ops.object.mode_set(mode='OBJECT')\n        bpy.ops.object.select_all(action='SELECT')  # select every visible object in the scene\n        bpy.ops.object.delete(use_global=False)\n    return scene\n" }, { "alpha_fraction": 0.7246376872062683, "alphanum_fraction": 0.7246376872062683, "avg_line_length": 19.294116973876953, "blob_id": "7d25cb2d84067317eea90cb1cbc3154b0079586d", "content_id": "edf3f0706b8fdc95ff5065010d3a9bb280742f8e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 345, "license_type": "no_license", "max_line_length": 40, "num_lines": 17, "path": "/core/stairs/__init__.py", "repo_name": "yangshuangs/building-tool", "src_encoding": "UTF-8", "text": "import bpy\n\nfrom .stairs import Stairs\nfrom .stairs_ops import StairsOperator\nfrom .stairs_props import StairsProperty\n\nclasses = (\n    StairsProperty, StairsOperator\n)\n\ndef register_stairs():\n    for cls in classes:\n        bpy.utils.register_class(cls)\n\ndef unregister_stairs():\n    for cls in classes:\n        bpy.utils.unregister_class(cls)\n" }, { "alpha_fraction": 0.5656711459159851, "alphanum_fraction": 0.6020346879959106, "avg_line_length": 28.810344696044922, "blob_id": "f01b30c19c31e3e880e581ff37719f037b80ba91", "content_id": "b4b8681e6228f0037618262e8075eab166698268", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1671, "license_type": "no_license", "max_line_length": 79, "num_lines": 58, 
"path": "/core/fill/fill_types.py", "repo_name": "yangshuangs/building-tool", "src_encoding": "UTF-8", "text": "import bmesh\nfrom mathutils import Vector, Matrix\nfrom bmesh.types import BMEdge, BMVert\nfrom ...utils import (\n filter_geom,\n filter_vertical_edges,\n filter_horizontal_edges,\n calc_edge_median,\n calc_face_dimensions\n)\n\n\ndef fill_panel(bm, face, panel_x, panel_y, panel_b, panel_t, panel_d, **kwargs):\n \"\"\"\n ๅนณ้ขๆ่ดจๅกซๅ……\n :param bm: \n :param face: \n :param panel_x: (int)number of horizontal panels\n :param panel_y: (int)number of vertical panels\n :param panel_b: (float)border of panels from face edges\n :param panel_t: (float)thickness of panel inset\n :param panel_d: (float)depth of panel\n \"\"\"\n # ไธปๅนณ้ข(ๅฏๆทปๅŠ ๅญๅนณ้ข)\n bmesh.ops.inset_individual(bm, faces=[face], thickness=panel_b)\n\n # ่ฎก็ฎ—่ขซๅˆ†ๅ‰ฒ็š„่พน\n n = face.normal\n v_edges = filter_vertical_edges(face.edges, n)\n h_edges = list(set(face.edges) - set(v_edges))\n\n vts = []\n # ๅฏน่พน่ฟ›่กŒๅˆ†ๅ‰ฒ\n if panel_x:\n res_one = bmesh.ops.subdivide_edges(bm, edges=v_edges, cuts=panel_x)\n vts = filter_geom(res_one['geom_inner'], BMVert)\n\n if panel_y:\n res_two = bmesh.ops.subdivide_edges(\n bm,\n edges=h_edges+filter_geom(res_one['geom_inner'], BMEdge) if panel_x else h_edges,\n cuts=panel_y\n )\n vts = filter_geom(res_two['geom_inner'], BMVert)\n\n # ๆ–ฐๅขžๅนณ้ข\n if vts:\n faces = list(filter(lambda f: len(f.verts) == 4,\n {f for v in vts for f in v.link_faces if f.normal == n})\n )\n bmesh.ops.inset_individual(bm, faces=faces, thickness=panel_t / 2)\n bmesh.ops.translate(\n bm,\n verts=list({v for f in faces for v in f.verts}),\n vec=n*panel_d\n )\n # recalc_face_normals()-->่ฎก็ฎ—้ข็š„โ€œๅค–้ƒจโ€ๆณ•็บฟ\n bmesh.ops.recalc_face_normals(bm, faces=list(bm.faces))\n\n\ndef fill_glass_panes(bm, face, panel_x, panel_y, panel_t, panel_d, **kwargs):\n \"\"\"ๆทปๅŠ ็Žป็’ƒๅนณ้ข\"\"\"\n\n v_edges = filter_vertical_edges(face.edges, face.normal)\n h_edges = filter_horizontal_edges(face.edges, face.normal)\n\n edges = []\n # ๅฏน่พน่ฟ›่กŒๅˆ†ๅ‰ฒ\n if panel_x:\n res_one = bmesh.ops.subdivide_edges(bm, edges=v_edges, cuts=panel_x).get('geom_inner')\n edges.extend(filter_geom(res_one, BMEdge))\n\n if panel_y:\n res_two = bmesh.ops.subdivide_edges(\n bm,\n edges=h_edges + filter_geom(res_one, BMEdge) if panel_x else h_edges,\n cuts=panel_y\n ).get('geom_inner')\n edges.extend(filter_geom(res_two, BMEdge))\n\n if edges:\n panel_faces = list({f for ed in edges for f in ed.link_faces})\n bmesh.ops.inset_individual(bm, faces=panel_faces, thickness=panel_t)\n\n for f in panel_faces:\n bmesh.ops.translate(bm, verts=f.verts, vec=-f.normal*panel_d)\n\n\ndef fill_bar(bm, face, bar_x, bar_y, bar_t, bar_d, **kwargs):\n \"\"\"ๆ ๆ†็ฑปๅž‹ๅกซๅ……\"\"\"\n # ่ฎก็ฎ—้ข็š„ไธญๅฟƒ๏ผŒ้•ฟๅ’Œๅฎฝ\n width, height = calc_face_dimensions(face)\n fc = face.calc_center_median()\n\n # ๆ–ฐๅปบๅ†…้ƒจๆก†ๆžถ\n # --ๆฐดๅนณๆ–นๅ‘\n offset = height / (bar_x + 1)\n for i in range(bar_x):\n # ๅคๅˆถ\n ret = bmesh.ops.duplicate(bm, geom=[face])\n verts = filter_geom(ret['geom'], BMVert)\n\n # ็ผฉๆ”พ ๅ˜ๆข\n fs = bar_t / height\n bmesh.ops.scale(bm, verts=verts, vec=(1, 1, fs), space=Matrix.Translation(-fc))\n bmesh.ops.translate(\n bm,\n verts=verts,\n vec=Vector((face.normal*bar_d/2))+Vector((0, 0, -height/2+(i+1)*offset))\n )\n\n # ๆŒคๅŽ‹\n ext = bmesh.ops.extrude_edge_only(bm, edges=filter_horizontal_edges(\n filter_geom(ret['geom'], BMEdge), face.normal))\n bmesh.ops.translate(bm, verts=filter_geom(ext['geom'], BMVert), 
vec=-face.normal*bar_d/2)\n\n    # --vertical bars\n    eps = 0.015\n    offset = width / (bar_y + 1)\n    for i in range(bar_y):\n        # duplicate the face\n        ret = bmesh.ops.duplicate(bm, geom=[face])\n        verts = filter_geom(ret['geom'], BMVert)\n        # Scale and Translate\n        fs = bar_t / width\n        bmesh.ops.scale(bm, verts=verts, vec=(fs, fs, 1), space=Matrix.Translation(-fc))\n        perp = face.normal.cross(Vector((0, 0, 1)))  # perpendicular direction\n        bmesh.ops.translate(\n            bm,\n            verts=verts,\n            vec=Vector((face.normal*((bar_d/2)-eps)))+perp*(-width/2+((i+1)*offset))\n        )\n        ext = bmesh.ops.extrude_edge_only(\n            bm,\n            edges=filter_vertical_edges(\n                filter_geom(ret['geom'], BMEdge), face.normal)\n        )\n        bmesh.ops.translate(\n            bm,\n            verts=filter_geom(ext['geom'], BMVert),\n            vec=-face.normal * ((bar_d / 2) - eps)\n        )\n\n\ndef fill_louver(bm, face, louver_m, louver_count, louver_d, louver_b, **kwargs):\n    \"\"\"\n    Louver fill\n    :param bm: \n    :param face: \n    :param louver_m: (float)margin\n    :param louver_count: (int)number\n    :param louver_d: (float)depth\n    :param louver_b: (float)border\n    \"\"\"\n    normal = face.normal\n    # inset margin\n    if louver_m:\n        bmesh.ops.inset_individual(bm, faces=[face], thickness=louver_m)\n    # subdivide the vertical edges\n    count = (2 * louver_count) - 1\n    count = count if count % 2 == 0 else count + 1\n\n    res = bmesh.ops.subdivide_edges(\n        bm,\n        edges=filter_vertical_edges(face.edges, face.normal),\n        cuts=count\n    )\n\n    # faces that will receive the louvers\n    faces = list({f for e in filter_geom(res['geom_inner'], BMEdge) for f in e.link_faces})\n    faces.sort(key=lambda f: f.calc_center_median().z)\n    louver_faces = faces[1::2]\n\n    # scale to add a border\n    for face in louver_faces:\n        bmesh.ops.scale(\n            bm,\n            vec=(1, 1, 1+louver_b),\n            verts=face.verts,\n            space=Matrix.Translation(-face.calc_center_median())\n        )\n    # extrude the louver faces\n    res = bmesh.ops.extrude_discrete_faces(bm, faces=louver_faces)\n    bmesh.ops.translate(\n        bm,\n        vec=normal * louver_d,\n        verts=list({v for face in res['faces'] for v in face.verts})\n    )\n    # tilt the louvers\n    for face in res['faces']:\n        top_edge = max(\n            filter_horizontal_edges(face.edges, face.normal),\n            key=lambda e: calc_edge_median(e).z\n        )\n        bmesh.ops.translate(bm, vec=-face.normal*louver_d, verts=top_edge.verts)\n\n    # cleanup\n    bmesh.ops.remove_doubles(bm, verts=bm.verts, dist=0.01)\n" }, { "alpha_fraction": 0.7058823704719543, "alphanum_fraction": 0.7058823704719543, "avg_line_length": 16.894737243652344, "blob_id": "3c4b62aa102f1d326bc9f7263613f11dc29604e0", "content_id": "47730eeaee1dd7323d9f282dcbe49ff4af386612", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 340, "license_type": "no_license", "max_line_length": 39, "num_lines": 19, "path": "/core/floor/__init__.py", "repo_name": "yangshuangs/building-tool", "src_encoding": "UTF-8", "text": "import bpy\nfrom .floor import Floor\nfrom .floor_ops import FloorOperator\nfrom .floor_props import FloorProperty\n\nclasses = (\n    FloorOperator,\n    FloorProperty\n)\n\n\ndef register_floor():\n    for cls in classes:\n        bpy.utils.register_class(cls)\n\n\ndef unregister_floor():\n    for cls in classes:\n        bpy.utils.unregister_class(cls)\n" }, { "alpha_fraction": 0.5323554873466492, "alphanum_fraction": 0.5536382198333740, "avg_line_length": 21.62264060974121, "blob_id": "cba614a1fd2ec088ada0af9d5d925fc37a1e4178", "content_id": "a0083d7275fe78e347c340340b2bc724d11e7b4c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1229, "license_type": "no_license", "max_line_length": 
70, "num_lines": 53, "path": "/core/generic.py", "repo_name": "yangshuangs/building-tool", "src_encoding": "UTF-8", "text": "import bpy\nfrom bpy.props import *\n\n\nclass SizeOffsetProperty(bpy.types.PropertyGroup):\n \"\"\"่ฎพ็ฝฎๅฐบๅฏธ\"\"\"\n\n size = FloatVectorProperty(\n name=\"Size\",\n min=.01,\n max=1.0,\n subtype='XYZ',\n size=2,\n default=(0.7, 0.7),\n description=\"Size of geometry\"\n )\n # ๅ็งป้‡่ฎพ็ฝฎ\n off = FloatVectorProperty(\n name=\"Offset\",\n min=-1000.0,\n max=1000.0,\n subtype='TRANSLATION',\n size=3,\n default=(0.0, 0.0, 0.0),\n description=\"How much to offset geometry\"\n )\n collapsed = BoolProperty(default=True)\n\n def draw(self, context, layout):\n \"\"\"UI้ขๆฟ็ป˜ๅˆถๅ‡ฝๆ•ฐ\"\"\"\n box = layout.box()\n box.prop(self, 'collapsed', text=\"Size & Offset\", toggle=True)\n\n if not self.collapsed:\n row = box.row(align=False)\n\n col = row.column(align=True)\n col.prop(self, 'size', slider=True)\n\n col = row.column(align=True)\n col.prop(self, 'off')\n\nclasses = (SizeOffsetProperty, )\n\n\ndef register_generic():\n for cls in classes:\n bpy.utils.register_class(cls)\n\n\ndef unregister_generic():\n for cls in classes:\n bpy.utils.unregister_class(cls)\n" }, { "alpha_fraction": 0.7188405990600586, "alphanum_fraction": 0.7188405990600586, "avg_line_length": 17.157894134521484, "blob_id": "164d8bafc6b8a8714fab3ad14d2c07f7bd874062", "content_id": "3618c69a37f7d84d4c3f80799393140bb9704a56", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 345, "license_type": "no_license", "max_line_length": 39, "num_lines": 19, "path": "/core/window/__init__.py", "repo_name": "yangshuangs/building-tool", "src_encoding": "UTF-8", "text": "import bpy\nfrom .window import Window\nfrom .window_ops import WindowOperator\nfrom .window_props import WindowPrperty\n\n\nclasses = (\n WindowPrperty, WindowOperator\n)\n\n\ndef register_window():\n for cls in classes:\n bpy.utils.register_class(cls)\n\n\ndef unregister_window():\n for cls in classes:\n bpy.utils.unregister_class(cls)\n" }, { "alpha_fraction": 0.4688152074813843, "alphanum_fraction": 0.4971017837524414, "avg_line_length": 22.56830596923828, "blob_id": "a1658f970e29027a91777741d003159cfdca72b7", "content_id": "469f77314dd76b99878da90b15530fca282d1c9f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4357, "license_type": "no_license", "max_line_length": 54, "num_lines": 183, "path": "/core/floorplan/floorplan_props.py", "repo_name": "yangshuangs/building-tool", "src_encoding": "UTF-8", "text": "import bpy\nfrom bpy.props import *\n\n\nclass FloorplanProperty(bpy.types.PropertyGroup):\n \"\"\"ไธบ้œ€่ฆๅˆ›ๅปบ็š„ๅนณ้ขๅธƒๅฑ€่ฎพ็ฝฎ(้ป˜่ฎค)ๅฑžๆ€ง \"\"\"\n\n fp_types = [\n (\"RECTANGULAR\", \"Rectangular\", \"\", 0),\n (\"CIRCULAR\", \"Circular\", \"\", 1),\n (\"COMPOSITE\", \"Composite\", \"\", 2),\n (\"H-SHAPED\", \"H-Shaped\", \"\", 3),\n (\"RANDOM\", \"Random\", \"\", 4)\n ]\n\n type = EnumProperty(\n items=fp_types,\n default='RECTANGULAR',\n description=\"Type of floorplan\"\n )\n\n seed = IntProperty(\n name=\"Seed\",\n min=0,\n max=10000,\n default=1,\n description=\"Seed for random generation\"\n )\n\n width = FloatProperty(\n name=\"width\",\n min=0.01,\n max=100.0,\n default=2,\n description=\"Base width of floorplan\"\n )\n\n length = FloatProperty(\n name=\"Length\",\n min=0.01,\n max=100.0,\n default=2,\n description=\"Base Length of floorplan\"\n )\n\n radius = FloatProperty(\n name=\"Radius\",\n 
min=.1,\n max=100.0,\n default=2,\n description=\"Radius of circle\"\n )\n\n segs = IntProperty(\n name=\"Segments\",\n min=3,\n max=100,\n default=32,\n description=\"Number of segments in the circle\"\n )\n\n tw1 = FloatProperty(\n name=\"Tail Width\",\n min=0.0,\n max=100.0,\n default=1,\n description=\"Width of floorplan segment\"\n )\n\n tl1 = FloatProperty(\n name=\"Tail Length\",\n min=0.0,\n max=100.0,\n default=1,\n description=\"Length of floorplan segment\"\n )\n\n tw2 = FloatProperty(\n name=\"Tail Width 1\",\n min=0.0, max=100.0,\n default=1,\n description=\"Width of floorplan segment\"\n )\n\n tl2 = FloatProperty(\n name=\"Tail Length 1\",\n min=0.0,\n max=100.0,\n default=1,\n description=\"Length of floorplan segment\"\n )\n\n tw3 = FloatProperty(\n name=\"Tail Width 2\",\n min=0.0, max=100.0,\n default=1,\n description=\"Width of floorplan segment\"\n )\n\n tl3 = FloatProperty(\n name=\"Tail Length 2\",\n min=0.0,\n max=100.0,\n default=1,\n description=\"Length of floorplan segment\"\n )\n\n tw4 = FloatProperty(\n name=\"Tail Width 3\",\n min=0.0,\n max=100.0,\n default=1,\n description=\"Width of floorplan segment\"\n )\n\n tl4 = FloatProperty(\n name=\"Tail Length 3\",\n min=0.0,\n max=100.0,\n default=1,\n description=\"Length of floorplan segment\"\n )\n\n cap_tris = BoolProperty(\n name='Cap Triangles',\n default=False,\n description='Set the fill type to triangles'\n )\n\n def draw(self, context, layout):\n \"\"\"ๆ“ไฝœ้ขๆฟๅธƒๅฑ€\"\"\"\n row = layout.row()\n row.prop(self, \"type\", text=\"\")\n\n box = layout.box()\n if self.type == 'RECTANGULAR':\n col = box.column(align=True)\n col.prop(self, 'width')\n col.prop(self, 'length')\n\n elif self.type == 'RANDOM':\n col = box.column(align=True)\n col.prop(self, 'seed')\n col.prop(self, 'width')\n col.prop(self, 'length')\n\n elif self.type == 'CIRCULAR':\n col = box.column(align=True)\n col.prop(self, 'radius')\n col.prop(self, 'segs')\n\n row = box.row()\n row.prop(self, 'cap_tris', toggle=True)\n\n elif self.type == 'COMPOSITE':\n row = box.row(align=True)\n row.prop(self, 'width')\n row.prop(self, 'length')\n\n col = box.column(align=True)\n col.prop(self, 'tl1')\n col.prop(self, 'tl2')\n col.prop(self, 'tl3')\n col.prop(self, 'tl4')\n\n elif self.type == 'H-SHAPED':\n row = box.row(align=True)\n row.prop(self, 'width')\n row.prop(self, 'length')\n\n row = box.row(align=True)\n\n col = row.column(align=True)\n col.prop(self, 'tw1')\n col.prop(self, 'tw2')\n col.prop(self, 'tw3')\n col.prop(self, 'tw4')\n\n col = row.column(align=True)\n col.prop(self, 'tl1')\n col.prop(self, 'tl2')\n col.prop(self, 'tl3')\n col.prop(self, 'tl4')\n" }, { "alpha_fraction": 0.7240437269210815, "alphanum_fraction": 0.7240437269210815, "avg_line_length": 19.33333396911621, "blob_id": "019cf92856fd76431c8bf2dee79827f297fbed4e", "content_id": "2e4fc00fc6233a4dd75d43ec67883d51d3095d0b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 366, "license_type": "no_license", "max_line_length": 39, "num_lines": 18, "path": "/core/rails/__init__.py", "repo_name": "yangshuangs/building-tool", "src_encoding": "UTF-8", "text": "import bpy\n\nfrom .rails import Rails\nfrom .rails_ops import RailOperator\nfrom .rails_props import RailProperty\nfrom .rails_types import MakeRailing\n\nclasses = (\n RailProperty, RailOperator\n)\n\ndef register_rail():\n for cls in classes:\n bpy.utils.register_class(cls)\n\ndef unregister_rail():\n for cls in classes:\n bpy.utils.unregister_class(cls)\n" }, { 
"alpha_fraction": 0.5661280751228333, "alphanum_fraction": 0.6020346879959106, "avg_line_length": 27.810344696044922, "blob_id": "f01b30c19c31e3e880e581ff37719f037b80ba91", "content_id": "b4b8681e6228f0037618262e8075eab166698268", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1671, "license_type": "no_license", "max_line_length": 79, "num_lines": 58, "path": "/utils/util_geometry.py", "repo_name": "yangshuangs/building-tool", "src_encoding": "UTF-8", "text": "import bmesh\nfrom mathutils import Matrix\n\n\ndef cube(bm, width=2, length=2, height=2):\n\n sc_x = Matrix.Scale(width, 4, (1, 0, 0))\n sc_y = Matrix.Scale(length, 4, (0, 1, 0))\n sc_z = Matrix.Scale(height, 4, (0, 0, 1))\n mat = sc_x * sc_y * sc_z\n ret = bmesh.ops.create_cube(bm, size=1, matrix=mat)\n return ret\n\n\ndef plane(bm, width=2, length=2):\n\n sc_x = Matrix.Scale(width, 4, (1, 0, 0))\n sc_y = Matrix.Scale(length, 4, (0, 1, 0))\n mat = sc_x * sc_y\n ret = bmesh.ops.create_grid(\n bm, x_segments=1, y_segments=1, size=1, matrix=mat\n )\n return ret\n\n\ndef circle(bm, radius=1, segs=10, cap_tris=False):\n ret = bmesh.ops.create_circle(\n bm, cap_ends=True, cap_tris=cap_tris, segment=segs, diameter=radius * 2\n )\n return ret\n\n\ndef cone(bm, r1=.5, r2=.01, height=2, segs=32):\n ret = bmesh.ops.create_cone(\n bm, diameter1=r1 * 2, diameter2=r2 * 2, depth=height,\n cap_ends=True, cap_tris=True, segments=segs\n )\n return ret\n\n\ndef cylinder(bm, radius=1, height=2, segs=10):\n ret = bmesh.ops.create_circle(\n bm, cap_ends=True, cap_tris=False, segments=segs, diameter=radius * 2\n )\n # Returns:\n # verts: output verts\n # type: list of(bmesh.types.BMVert)\n # Return type: dict with string keys\n verts = ret['verts']\n face = list(verts[0].link_faces)\n\n res = bmesh.ops.extrude_discrete_faces(bm, faces=face)\n # print('****res****', res)\n bmesh.ops.translate(bm, verts=res['faces'][-1].verts, vec=(0, 0, height))\n\n result = {'verts': verts + list(res['faces'][-1].verts)}\n bmesh.ops.translate(bm, verts=result['verts'], vec=(0, 0, -height/2))\n return result\n" }, { "alpha_fraction": 0.5732881426811218, "alphanum_fraction": 0.5886234045028687, "avg_line_length": 31.988235473632812, "blob_id": "181768cb805dbdd05b39f102a24af421c4475bb4", "content_id": "ed4fe023e57ee668d202c902cd45ccabea017f08", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6030, "license_type": "no_license", "max_line_length": 99, "num_lines": 170, "path": "/core/floorplan/floorplan_types.py", "repo_name": "yangshuangs/building-tool", "src_encoding": "UTF-8", "text": "import bmesh\nimport random\nfrom bmesh.types import BMVert\nfrom mathutils import Vector, Matrix\nfrom ...utils import (\n clamp,\n plane,\n circle,\n filter_geom,\n calc_edge_median, # ่ฎก็ฎ—่พน็š„ไธญ็‚น\n filter_vertical_edges,\n filter_horizontal_edges\n)\n\n# ๅˆ›ๅปบไธๅŒๅฝข็Šถๅœฐๆฟๅนณ้ขๅ›พ็š„ๆ“ไฝœ\n\ndef fp_rectangular(bm, width, length, **kwargs):\n \"\"\"\n :param bm: (bmesh.types.BMesh)ๅˆ›ๅปบๆ–นๅฝข็š„็ฝ‘ๆ ผ\n :param width: (float)\n :param length: (float)\n :param kwargs: \n :return: \n \"\"\"\n plane(bm, width, length)\n\n\ndef fp_circular(bm, radius, segs, cap_tris, **kwargs):\n \"\"\"\n :param bm: (bmesh.types.BMesh)ๅˆ›ๅปบๅœ†ๅฝข็š„็ฝ‘ๆ ผ\n :param radius: (float)ๅœ†ๅฝขๅŠๅพ„\n :param segs: (int)ๅœ†ๅฝขๅˆ’ๅˆ†ๅคšๅฐ‘้ƒจๅˆ†\n :param cap_tris: (bool)ๆ˜ฏๅฆ็”จไธ‰่ง’ๅฝขๅกซๅ……ๅœ†ๅฝข\n :param kwargs: \n :return: \n \"\"\"\n circle(bm, radius, segs, cap_tris)\n\n\ndef 
fp_composite(bm, width, length, tl1, tl2, tl3, tl4, **kwargs):\n    \"\"\"\n    Build a plus/cross shape from 4 rectangles extruded off an inner rectangle\n    :param bm: \n    :param width: (float)inner rectangle width\n    :param length: (float)inner rectangle length\n    :param tl1: (float)bottom tail length\n    :param tl2: (float)left tail length\n    :param tl3: (float)right tail length\n    :param tl4: (float)top tail length\n    \"\"\"\n    base = plane(bm, width, length)\n    ref = list(bm.faces)[-1].calc_center_median()\n\n    edges = list(bm.edges)\n    edges.sort(key=lambda ed: calc_edge_median(ed).x)\n    edges.sort(key=lambda ed: calc_edge_median(ed).y)\n\n    # outer tail lengths of the rectangle\n    exts = [tl1, tl2, tl3, tl4]\n    for idx, e in enumerate(edges):\n        if exts[idx] > 0:\n            res = bmesh.ops.extrude_edge_only(bm, edges=[e])\n            verts = filter_geom(res['geom'], BMVert)\n\n            v = (calc_edge_median(e) - ref)\n            v.normalize()\n            bmesh.ops.translate(bm, verts=verts, vec=v * exts[idx])\n\n\ndef fp_hshaped(bm, width, length, tl1, tl2, tl3, tl4, tw1, tw2, tw3, tw4, **kwargs):\n    \"\"\"\n    Create an H-shaped floorplan\n    :param bm: \n    :param width: (float)inner rectangle width\n    :param length: (float)inner rectangle length\n    :param tl1: (float)length bottom-left\n    :param tl2: (float)length bottom-right\n    :param tl3: (float)length top-left\n    :param tl4: (float)length top-right\n    :param tw1: (float)width bottom-left\n    :param tw2: (float)width bottom-right\n    :param tw3: (float)width top-left\n    :param tw4: (float)width top-right\n    \"\"\"\n    base = plane(bm, width, length)\n    face = list(bm.faces)[-1]\n    # calc_center_median() -- returns the midpoint of the face, return: vector\n    ref = face.calc_center_median()\n    n = face.normal\n\n    for e in filter_vertical_edges(bm.edges, n):\n        # extrude_edge_only(bm, edges) -- extrude the given edges into faces\n        res = bmesh.ops.extrude_edge_only(bm, edges=[e])\n        verts = filter_geom(res['geom'], BMVert)\n\n        v = (calc_edge_median(e) - ref)\n        v.normalize()  # unit length vector\n        # translate() -- move the verts by an offset\n        bmesh.ops.translate(bm, verts=verts, vec=v)\n\n    # pick out the middle horizontal edges at the top\n    op_edges = filter_horizontal_edges(bm.edges, n)\n    op_edges.sort(key=lambda ed: calc_edge_median(ed).x)\n    op_edges = op_edges[:2] + op_edges[4:]\n    op_edges.sort(key=lambda ed: calc_edge_median(ed).y)\n    lext = [tl1, tl2, tl3, tl4]\n    wext = [tw1, tw2, tw3, tw4]\n\n    for idx, e in enumerate(op_edges):\n        if lext[idx] > 0:\n            res = bmesh.ops.extrude_edge_only(bm, edges=[e])\n            verts = filter_geom(res['geom'], BMVert)\n\n            v = (calc_edge_median(e) - ref)\n            v.normalize()\n\n            flt_func = min if v.x > 0 else max\n            mv1 = flt_func(list(e.verts), key=lambda v: v.co.x)\n            mv2 = flt_func(verts, key=lambda v: v.co.x)\n\n            bmesh.ops.translate(bm, verts=verts, vec=Vector((0, v.y, 0)) * lext[idx])\n            bmesh.ops.translate(bm, verts=[mv1, mv2], vec=Vector((-v.x, 0, 0)) * wext[idx])\n\n\ndef fp_random(bm, seed, width, length, **kwargs):\n    \"\"\"\n    Create a randomly shaped building footprint\n    :param bm: \n    :param seed: (int)\n    :param width: \n    :param length: \n    \"\"\"\n    random.seed(seed)\n    sc_x = Matrix.Scale(width, 4, (1, 0, 0))\n    sc_y = Matrix.Scale(length, 4, (0, 1, 0))\n    mat = sc_x * sc_y\n    bmesh.ops.create_grid(bm, x_segments=1, y_segments=1, size=1, matrix=mat)\n    # random.sample(sequence, k) takes k random items without modifying the original sequence\n    sample = random.sample(list(bm.edges), random.randrange(1, len(bm.edges)))\n    ref = list(bm.faces)[-1].calc_center_median()\n    for edge in sample:\n        # midpoint and length of the edge\n        cen = calc_edge_median(edge)\n        elen = edge.calc_length()\n        # 
ๅˆ‡ๅ‰ฒ\n # subdivide_edges()-->Return type: dict with string keys:\n # -->geom_inner, geom_split, geom: contains all output geometry\n res = bmesh.ops.subdivide_edges(bm, edges=[edge], cuts=2)\n new_verts = filter_geom(res['geom_inner'], BMVert)\n # link_edges()-->Edges connected to this vertex\n new_edge = list(set(new_verts[0].link_edges) & set(new_verts[1].link_edges))[-1]\n\n # resize new edge\n axis = Vector((1, 0, 0)) if new_verts[0].co.y == new_verts[1].co.y else Vector((0, 1, 0))\n scale_factor = clamp(random.random() * elen/new_edge.calc_length(), 1, 2.95)\n bmesh.ops.scale(bm, verts=new_verts, vec=axis*scale_factor, space=Matrix.Translation(-cen))\n\n # offset\n if random.choice([0, 1]):\n max_offset = (elen - new_edge.calc_length()) / 2\n rand_offset = random.random() * max_offset\n bmesh.ops.translate(bm, verts=new_verts, vec=axis*rand_offset)\n\n # extrude\n res = bmesh.ops.extrude_edge_only(bm, edges=[new_edge])\n bmesh.ops.translate(\n bm,\n verts=filter_geom(res['geom'], BMVert),\n vec=(cen - ref).normalized() * random.randrange(1, int(elen/2))\n )\n" }, { "alpha_fraction": 0.5851197838783264, "alphanum_fraction": 0.5907944440841675, "avg_line_length": 23.796875, "blob_id": "577428b2d1beb26ec5f499c6daf870283bc3fcd0", "content_id": "29bbd2952805b7ddd373c6e609068d64824262b6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1586, "license_type": "no_license", "max_line_length": 65, "num_lines": 64, "path": "/core/roof/roof_types.py", "repo_name": "yangshuangs/building-tool", "src_encoding": "UTF-8", "text": "import bpy\nimport bmesh\n\nfrom bmesh.types import BMVert, BMEdge, BMFace\nfrom ...utils import (\n select,\n filter_geom,\n )\n\ndef make_roof(bm, faces, type, **kwargs):\n select(faces, False)\n if type == 'FLAT':\n make_flat_roof(bm, faces, **kwargs)\n elif type == 'GABLE':\n make_gable_roof(bm, faces, **kwargs)\n elif type == 'HIP':\n make_hip_roof(bm, faces, **kwargs)\n\ndef make_flat_roof(bm, faces, thick, outset, **kwargs):\n\n ret = bmesh.ops.extrude_face_region(bm, geom=faces)\n bmesh.ops.translate(bm,\n vec=(0, 0, thick),\n verts=filter_geom(ret['geom'], BMVert))\n\n top_face = filter_geom(ret['geom'], BMFace)[-1]\n link_faces = [f for e in top_face.edges for f in e.link_faces\n if f is not top_face]\n\n bmesh.ops.inset_region(bm, faces=link_faces, depth=outset)\n bmesh.ops.recalc_face_normals(bm, faces=bm.faces)\n\n bmesh.ops.delete(bm,\n geom=faces,\n context=5)\n\n\ndef make_gable_roof(bm, faces, **kwargs):\n if not rectangular_area(faces):\n return\n\n if len(faces) == 1:\n pass\n else:\n pass\n\n\ndef make_hip_roof(bm, faces, **kwargs):\n pass\n\ndef rectangular_area(faces):\n face_area = sum([f.calc_area() for f in faces])\n\n verts = [v for f in faces for v in f.verts]\n verts = sorted(verts, key=lambda v: (v.co.x, v.co.y))\n\n _min, _max = verts[0], verts[-1]\n width = abs(_min.co.x - _max.co.x)\n length = abs(_min.co.y - _max.co.y)\n area = width * length\n\n if round(face_area, 4) == round(area, 4):\n return True\n return False" }, { "alpha_fraction": 0.505717933177948, "alphanum_fraction": 0.5260483026504517, "avg_line_length": 28.69811248779297, "blob_id": "b5f0aa282bbd303bd0db7a0dd9a233760aa1aee6", "content_id": "229f8f2b989cec2c271a77b28fe5b1225ab279a4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1574, "license_type": "no_license", "max_line_length": 94, "num_lines": 53, "path": "/core/roof/roof_props.py", "repo_name": 
"yangshuangs/building-tool", "src_encoding": "UTF-8", "text": "import bpy\nfrom bpy.props import *\n\nclass RoofProperty(bpy.types.PropertyGroup):\n roof_items = [\n (\"FLAT\", \"Flat\", \"\", 0),\n (\"GABLE\", \"Gable\", \"\", 1),\n (\"HIP\", \"Hip\", \"\", 2),\n ]\n type = EnumProperty(\n name=\"Roof Type\", items=roof_items, default='FLAT',\n description=\"Type of roof to create\")\n\n thick = FloatProperty(\n name=\"Thickness\", min=0.01, max=1000.0, default=.1,\n description=\"Thickness of roof hangs\")\n\n outset = FloatProperty(\n name=\"Outset\", min=0.01, max=1000.0, default=.1,\n description=\"Outset of roof hangs\")\n\n height = FloatProperty(\n name=\"Height\", min=0.01, max=1000.0, default=1,\n description=\"Height of entire roof\")\n\n # o_types = [(\"LEFT\", \"Left\", \"\", 0), (\"RIGHT\", \"Right\", \"\", 1), ]\n # orient = EnumProperty(description=\"Orientation of gable\", items=o_types, default='LEFT')\n\n\n def draw(self, context, layout):\n layout.prop(self, 'type', text=\"\")\n\n box = layout.box()\n if self.type == 'FLAT':\n col = box.column()\n col.prop(self, 'thick')\n col.prop(self, 'outset')\n\n elif self.type == 'GABLE':\n col = box.column()\n col.prop(self, 'thick')\n col.prop(self, 'outset')\n col.prop(self, 'height')\n\n # row = box.row(align=True)\n # row.prop(self, 'orient', expand=True)\n\n else:\n col = box.column()\n col.prop(self, 'thick')\n col.prop(self, 'outset')\n\n col.prop(self, 'height')\n" }, { "alpha_fraction": 0.7076923251152039, "alphanum_fraction": 0.7076923251152039, "avg_line_length": 18.117647171020508, "blob_id": "e9c67ea225d49f924ea83b16fe4f1203fde10a97", "content_id": "e2c9e50f7d00971c9175c24510a3fb22e26d532e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 325, "license_type": "no_license", "max_line_length": 39, "num_lines": 17, "path": "/core/roof/__init__.py", "repo_name": "yangshuangs/building-tool", "src_encoding": "UTF-8", "text": "import bpy\n\nfrom .roof import Roof\nfrom .roof_ops import RoofOperator\nfrom .roof_props import RoofProperty\n\nclasses = (\n RoofProperty, RoofOperator\n)\n\ndef register_roof():\n for cls in classes:\n bpy.utils.register_class(cls)\n\ndef unregister_roof():\n for cls in classes:\n bpy.utils.unregister_class(cls)\n" }, { "alpha_fraction": 0.7236180901527405, "alphanum_fraction": 0.7236180901527405, "avg_line_length": 25.53333282470703, "blob_id": "f42ce5946bcd5f997603d87addd53a448fec81ad", "content_id": "6fb00e19441ac883d39ed8ec2f5963e2372b81ca", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 398, "license_type": "no_license", "max_line_length": 77, "num_lines": 15, "path": "/core/fill/__init__.py", "repo_name": "yangshuangs/building-tool", "src_encoding": "UTF-8", "text": "import bpy\nfrom .fill_props import (FillBars, FillPanel, FillLouver, FillGlassPanes)\nfrom .fill_types import (fill_bar, fill_panel, fill_louver, fill_glass_panes)\n\nclasses = (FillBars, FillPanel, FillGlassPanes, FillLouver)\n\n\ndef register_fill():\n for cls in classes:\n bpy.utils.register_class(cls)\n\n\ndef unregister_fill():\n for cls in classes:\n bpy.utils.unregister_class(cls)\n" }, { "alpha_fraction": 0.6356589198112488, "alphanum_fraction": 0.6369509100914001, "avg_line_length": 19.36842155456543, "blob_id": "45cc4d88105f3c7d6b5a73b29a8e86a9488cc6cb", "content_id": "fe932e78d8e331a724c060a714527dec1848fc94", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", 
"length_bytes": 890, "license_type": "no_license", "max_line_length": 56, "num_lines": 38, "path": "/utils/util_object.py", "repo_name": "yangshuangs/building-tool", "src_encoding": "UTF-8", "text": "import bpy\nimport bmesh\nfrom .util_mesh import select\n\n\ndef make_object(name, data=None):\n \"\"\"ๆ–ฐๅปบๅ…ƒ็ด \"\"\"\n return bpy.data.objects.new(name, data)\n\n\ndef bm_from_obj(obj):\n \"\"\"็”ฑ็‰ฉไฝ“ๆ•ฐๆฎๅˆ›ๅปบๅคš่พนๅฝข็ฝ‘ๆ ผ\"\"\"\n bm = bmesh.new()\n bm.from_mesh(obj.data)\n return bm\n\n\ndef bm_to_obj(bm, obj):\n \"\"\"ๅฐ†ๅคš่พนๅฝข็ฝ‘ๆ ผ่ต‹ไบˆ็‰ฉไฝ“\"\"\"\n # to_mesh()-->ๅฐ†ๆญคBMeshๆ•ฐๆฎๅ†™ๅ…ฅ็Žฐๆœ‰็š„Meshๆ•ฐๆฎๅ—\n bm.to_mesh(obj.data)\n bm.free()\n\n\ndef link_obj(obj):\n \"\"\"ๅฐ†็‰ฉไฝ“้“พๆŽฅๅˆฐๅœบๆ™ฏ\"\"\"\n bpy.context.scene.objects.link(obj)\n bpy.context.scene.objects.active = obj\n select(bpy.data.objects, False)\n obj.select = True\n obj.location = bpy.context.scene.cursor_location\n\n\ndef obj_clear_data(obj):\n \"\"\"ไปŽ็‰ฉไฝ“ไธŠๅˆ ้™ค็ฝ‘ๆ ผๅ‡ ไฝ•ๆ•ฐๆฎ\"\"\"\n bm = bm_from_obj(obj)\n bmesh.ops.delete(bm, geom=list(bm.verts), context=1)\n bm_to_obj(bm, obj)\n" }, { "alpha_fraction": 0.7647058963775635, "alphanum_fraction": 0.7647058963775635, "avg_line_length": 27.5, "blob_id": "b1179229a3064429f6c80d26c05f6754c49f993b", "content_id": "695f7085a1834d4ae10136e7ec9d3fea96a0ce8e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 170, "license_type": "no_license", "max_line_length": 32, "num_lines": 6, "path": "/utils/__init__.py", "repo_name": "yangshuangs/building-tool", "src_encoding": "UTF-8", "text": "from .util_cynthia import *\nfrom .util_geometry import *\nfrom .util_mesh import *\nfrom .util_object import *\nfrom .util_material import *\nfrom .util_logging import Logger" }, { "alpha_fraction": 0.6431717872619629, "alphanum_fraction": 0.6431717872619629, "avg_line_length": 19.636363983154297, "blob_id": "d3a952087dfb6bc8110944e2d7b595ac2255cfd9", "content_id": "7f2a285ab816d65695711412e91c078ab440e408", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 227, "license_type": "no_license", "max_line_length": 49, "num_lines": 11, "path": "/utils/util_logging.py", "repo_name": "yangshuangs/building-tool", "src_encoding": "UTF-8", "text": "import logging\n\n\nclass Logger:\n\n def __init__(self, name, level=logging.INFO):\n logging.basicConfig(level=level)\n self.logger = logging.getLogger(name)\n\n def info(self, text):\n self.logger.info(text)\n" }, { "alpha_fraction": 0.5443885922431946, "alphanum_fraction": 0.5443885922431946, "avg_line_length": 22.8799991607666, "blob_id": "9af64512c857d2b3aacb340b77e75cee3eed7ada", "content_id": "9d4d8ce5367d14a60cb90bc298c0be5173111b77", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1302, "license_type": "no_license", "max_line_length": 60, "num_lines": 50, "path": "/core/floorplan/floorplan.py", "repo_name": "yangshuangs/building-tool", "src_encoding": "UTF-8", "text": "import bpy\nimport bmesh\nfrom .floorplan_types import (\n fp_rectangular,\n fp_circular,\n fp_composite,\n fp_hshaped,\n fp_random\n)\nfrom ...utils import (\n link_obj,\n make_mesh,\n bm_to_obj,\n make_object,\n bm_from_obj,\n kwargs_from_props\n)\n\n\nclass Floorplan:\n\n @classmethod\n def build(cls, context, props):\n \"\"\"\n ๅˆฉ็”จ้ข„ๅ…ˆ่ฎพ็ฝฎๅฅฝ็š„ๅธƒๅฑ€ๅฝข็Šถ(fp_type)ๅ’Œๅฑžๆ€ง(props)ๆฅ็”Ÿๆˆๅ‡ ไฝ•\n Args:\n context:(bpy.context)blender context\n 
props:(bpy.types.PropertyGroup)FloorplanProperty\n \"\"\"\n # ๆ–ฐๅปบ็‰ฉไฝ“\n obj = make_object('floorplan', make_mesh('fp_mesh'))\n # ็”ฑ็‰ฉไฝ“ๆ•ฐๆฎๅˆ›ๅปบๅคš่พนๅฝข็ฝ‘ๆ ผ\n bm = bm_from_obj(obj)\n # ???\n kwargs = kwargs_from_props(props)\n if props.type == 'RECTANGULAR':\n fp_rectangular(bm, **kwargs)\n elif props.type == 'CIRCULAR':\n fp_circular(bm, **kwargs)\n elif props.type == 'COMPOSITE':\n fp_composite(bm, **kwargs)\n elif props.type == 'H-SHAPED':\n fp_hshaped(bm, **kwargs)\n elif props.type == 'RANDOM':\n fp_random(bm, **kwargs)\n\n # ๅฐ†ๅคš่พนๅฝข็ฝ‘ๆ ผ่ต‹ไบˆ็‰ฉไฝ“\n bm_to_obj(bm, obj)\n # ๅฐ†็‰ฉไฝ“้“พๆŽฅๅˆฐๅœบๆ™ฏ\n link_obj(obj)\n" }, { "alpha_fraction": 0.5323554873466492, "alphanum_fraction": 0.553638219833374, "avg_line_length": 30.04464340209961, "blob_id": "622f9c4680dc381969f345724b784539f4a3e715", "content_id": "d54f7c85114ab05c5f0f5a924a6818742ecf4b89", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3477, "license_type": "no_license", "max_line_length": 70, "num_lines": 112, "path": "/core/rails/rails_props.py", "repo_name": "yangshuangs/building-tool", "src_encoding": "UTF-8", "text": "import bpy\nfrom bpy.props import *\n\nclass RailProperty(bpy.types.PropertyGroup):\n ps = FloatProperty(\n name=\"Post Size\", min=0.01, max=100.0, default=0.05,\n description=\"Size of each post\")\n\n pd = FloatProperty(\n name=\"Post Density\", min=0.0, max=1.0, default=.3,\n description=\"Number of posts along each edge\")\n\n rs = FloatProperty(\n name=\"Rail Size\", min=0.01, max=100.0, default=0.05,\n description=\"Size of each rail\")\n\n rd = FloatProperty(\n name=\"Rail Density\", min=0.0, max=1.0, default=.3,\n description=\"Number of rails over each edge\")\n\n ww = FloatProperty(\n name=\"Wall Width\", min=0.0, max=100.0, default=0.075,\n description=\"Width of each wall\")\n\n cpw = FloatProperty(\n name=\"Corner Post Width\", min=0.01, max=100.0, default=0.15,\n description=\"Width of each corner post\")\n\n cph = FloatProperty(\n name=\"Corner Post Height\", min=0.01, max=100.0, default=0.7,\n description=\"Height of each corner post\")\n\n hcp = BoolProperty(\n name=\"Corner Posts\", default=True,\n description=\"Whether the railing has corner posts\")\n\n expand = BoolProperty(\n name=\"Expand\", default=False,\n description=\"Whether to expand fill type to extremes\")\n\n has_decor = BoolProperty(\n name=\"Has Decor\", default=False,\n description=\"Whether corner posts have decor\")\n\n remove_colinear = BoolProperty(\n name=\"Remove Colinear\", default=False,\n description=\"Whether to remove extra colinear posts\")\n\n fill_types = [\n (\"POSTS\", \"Posts\", \"\", 0),\n (\"RAILS\", \"Rails\", \"\", 1),\n (\"WALL\", \"Wall\", \"\", 2)\n ]\n\n fill = EnumProperty(\n name=\"Fill Type\", items=fill_types, default='POSTS',\n description=\"Type of railing\")\n\n def draw(self, context, layout):\n\n row = layout.row()\n row.prop(self, \"fill\", text=\"\")\n\n box = layout.box()\n if self.fill == 'POSTS':\n col = box.column(align=True)\n col.prop(self, 'pd')\n col.prop(self, 'ps')\n\n box1 = box.box()\n box1.label(\"Corner Posts\")\n\n col = box1.column(align=True)\n col.prop(self, 'cpw')\n col.prop(self, 'cph')\n\n row = box1.row(align=True)\n row.prop(self, 'remove_colinear', toggle=True)\n row.prop(self, 'has_decor', toggle=True)\n\n elif self.fill == 'RAILS':\n col = box.column(align=True)\n col.prop(self, 'rd')\n col.prop(self, 'rs')\n col.prop(self, 'expand', text=\"Expand Rails\", toggle=True)\n\n box1 = 
box.box()\n box1.label(\"Corner Posts\")\n\n col = box1.column(align=True)\n col.prop(self, 'cpw')\n col.prop(self, 'cph')\n\n row = box1.row(align=True)\n row.prop(self, 'remove_colinear', toggle=True)\n row.prop(self, 'has_decor', toggle=True)\n\n elif self.fill == 'WALL':\n col = box.column(align=True)\n col.prop(self, 'ww')\n col.prop(self, 'expand', text=\"Expand Walls\", toggle=True)\n\n box1 = box.box()\n box1.label(\"Corner Posts\")\n\n col = box1.column(align=True)\n col.prop(self, 'cpw')\n col.prop(self, 'cph')\n\n row = box1.row(align=True)\n row.prop(self, 'remove_colinear', toggle=True)\n row.prop(self, 'has_decor', toggle=True)\n" }, { "alpha_fraction": 0.5284737944602966, "alphanum_fraction": 0.5313211679458618, "avg_line_length": 31.518518447875977, "blob_id": "1d139e4b08cb66d67e6efcfe23b9caa09e49b8d1", "content_id": "983377c865bb09e2c7104f936197f8d6f63639e2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1756, "license_type": "no_license", "max_line_length": 86, "num_lines": 54, "path": "/core/balcony/balcony_types.py", "repo_name": "yangshuangs/building-tool", "src_encoding": "UTF-8", "text": "import bmesh\nfrom bmesh.types import BMVert, BMFace\n\nfrom ..rails import MakeRailing\nfrom ...utils import (\n split,\n filter_geom,\n get_edit_mesh,\n calc_edge_median,\n )\n\n\ndef make_balcony(bm, faces, width, railing, size, off, open_side, **kwargs):\n \"\"\"Generate balcony geometry\n\n Args:\n *args: see balcony_props.py for types and description\n **kwargs: extra kwargs from BalconyProperty\n \"\"\"\n\n for f in faces:\n f = split(bm, f, size.y, size.x, off.x, off.y, off.z)\n ret = bmesh.ops.extrude_face_region(bm, geom=[f])\n bmesh.ops.translate(bm,\n verts=filter_geom(ret['geom'], BMVert),\n vec=-f.normal * width)\n\n if railing:\n face = filter_geom(ret['geom'], bmesh.types.BMFace)[-1]\n top_verts = sorted(list(face.verts), key=lambda v:v.co.z)[2:]\n edges = list({e for v in top_verts for e in v.link_edges\n if e not in list(face.edges)})\n\n if f.normal.y:\n edges.sort(key=lambda e:calc_edge_median(e).x, reverse=f.normal.y < 0)\n elif f.normal.x:\n edges.sort(key=lambda e:calc_edge_median(e).y, reverse=f.normal.x > 0)\n left, right = edges\n\n front = bm.edges.get(top_verts)\n\n r_edges = []\n if open_side == 'NONE':\n r_edges = [left, right, front]\n elif open_side == 'FRONT':\n r_edges = [left, right]\n elif open_side == 'LEFT':\n r_edges = [front, right]\n elif open_side == 'RIGHT':\n r_edges = [front, left]\n\n MakeRailing().from_edges(bm, r_edges, **kwargs)\n\n bmesh.ops.delete(bm, geom=[f], context=3)\n" }, { "alpha_fraction": 0.5994219779968262, "alphanum_fraction": 0.6023121476173401, "avg_line_length": 28.827587127685547, "blob_id": "ec16da0e4fa2fb99f770b1c59dc289f52507e44a", "content_id": "f955e09beba842c662aa3afe04de4ab8bf0adaff", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1812, "license_type": "no_license", "max_line_length": 79, "num_lines": 58, "path": "/core/window/window_types.py", "repo_name": "yangshuangs/building-tool", "src_encoding": "UTF-8", "text": "import bmesh\nfrom ...utils import (split, get_edit_mesh)\nfrom ..fill import (fill_bar, fill_louver, fill_glass_panes)\n\n\ndef make_window(bm, faces, **kwargs):\n \"\"\"ๅˆ›ๅปบ็ช—ๆˆท\"\"\"\n for face in faces:\n if face.normal.z:\n continue\n face = make_window_split(bm, face, **kwargs)\n if not face:\n continue\n face = make_window_frame(bm, face, **kwargs)\n 
make_window_fill(bm, face, **kwargs)\n\n\ndef make_window_split(bm, face, size, off, **kwargs):\n \"\"\"\n Split the window face\n :param bm: \n :param face: \n :param size: (vector2) ratio of the new face to the old face\n :param off: (vector3) offset of the new face from the center\n \"\"\"\n return split(bm, face, size.y, size.x, off.x, off.y, off.z)\n\n\ndef make_window_frame(bm, face, ft, fd, **kwargs):\n \"\"\"\n Create the basic window frame shape\n :param fd:(float)Depth of the window frame\n :param ft:(float)thickness of the window frame\n \"\"\"\n bmesh.ops.remove_doubles(bm, verts=list(bm.verts))\n face = bmesh.ops.extrude_discrete_faces(bm, faces=[face]).get('faces')[-1]\n bmesh.ops.translate(bm, verts=face.verts, vec=face.normal*fd/2)\n if ft:\n bmesh.ops.inset_individual(bm, faces=[face], thickness=ft)\n bmesh.ops.recalc_face_normals(bm, faces=list(bm.faces))\n if fd:\n f = bmesh.ops.extrude_discrete_faces(bm, faces=[face]).get('faces')[-1]\n bmesh.ops.translate(bm, verts=f.verts, vec=-f.normal*fd)\n\n return f\n return face\n\n\ndef make_window_fill(bm, face, fill_type, **kwargs):\n \"\"\"Fill the window face\"\"\"\n if fill_type == 'NONE':\n pass\n elif fill_type == 'GLASS PANES':\n fill_glass_panes(bm, face, **kwargs)\n elif fill_type == 'BAR':\n fill_bar(bm, face, **kwargs)\n elif fill_type == 'LOUVER':\n fill_louver(bm, face, **kwargs)\n" }, { "alpha_fraction": 0.6806020140647888, "alphanum_fraction": 0.6806020140647888, "avg_line_length": 26.18181800842285, "blob_id": "60792e4b49e8ec2e3272c1cf48577c39a421a033", "content_id": "9952fb1366d173dad986337976267a6dc8ba3d8b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 622, "license_type": "no_license", "max_line_length": 73, "num_lines": 22, "path": "/core/floor/floor_ops.py", "repo_name": "yangshuangs/building-tool", "src_encoding": "UTF-8", "text": "import bpy\nfrom .floor import Floor\nfrom .floor_props import FloorProperty\n\n\nclass FloorOperator(bpy.types.Operator):\n \"\"\"Create floors from the selected floorplan\"\"\"\n bl_idname = \"cynthia.add_floors\"\n bl_label = \"Add Floors\"\n bl_options = {'REGISTER', 'UNDO'}\n\n props = bpy.props.PointerProperty(type=FloorProperty)\n\n @classmethod\n def poll(cls, context):\n return context.object is not None and context.mode == 'EDIT_MESH'\n\n def execute(self, context):\n return Floor.build(context, self.props)\n\n def draw(self, context):\n self.props.draw(context, self.layout)\n" } ]
33
woodylouis/ServiceSniffer
https://github.com/woodylouis/ServiceSniffer
f030a29241cc63839a9bd2540562257a841c593f
2d0d8f1faae71a8874f2d5615e5a98367fefa982
084e2b219aa719636ce60284fad4da7ead88220a
refs/heads/master
2020-03-29T21:06:36.818058
2018-09-26T01:29:40
2018-09-26T01:29:40
149,529,220
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7679041028022766, "alphanum_fraction": 0.7717041969299316, "avg_line_length": 55.065574645996094, "blob_id": "72c9201ebcf686d20da1acb74bb9812643218539", "content_id": "2217b4845ec6973c4ed3f15ec7812b93cf03c9ec", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 3425, "license_type": "no_license", "max_line_length": 531, "num_lines": 61, "path": "/README.md", "repo_name": "woodylouis/ServiceSniffer", "src_encoding": "UTF-8", "text": "<h1>Sniffing for Services</h1>\n\n<p>\nThe Commonwealth Scientific and Industrial Research Organisation (CSIRO) is an independent Australian Federal government agency responsible for scientific research. \n\nAs a result, CSIRO publishes a large amount of scientific data both internally and externally. Some of the data it publishes is via web services, both standard and custom. Despite sophisticated server management tools, it is very hard to know what services are installed across all CSIRO publication systems as a lot of the data services are ad hoc and not catalogued. It's even harder to know, centrally, what the published data is about. \n\nThe main goal of the project is to help CSIRO identify and catalogue data services. The outcome of the project should help users like research scientists in CSIRO to search and discover what data services are available on the CSIRO network. A NASA web service, based on the Thredds data server, is a good example for the project to start with when making queries, as it contains a catalogue of different datasets.\n\nThis project is to write code to interrogate CSIRO's internal and external web presences and hunt for particular service technologies. This involves polling for open ports known to support specific applications and testing endpoints with a range of requests standard services are known to answer. Once services are discovered and identified, a data structure such as a database is needed to query the descriptions of data exposed by the services and map them to standard dictionaries of data description terms.\n\n<strong>Objective</strong>\n\nThe goal of this project is to harvest NetCDF files from THREDDS servers across the CSIRO network. Not only are the files found, they are also categorised by data service type, such as OpenDAP, WMS, WMC, HTTP etc., for each host.\nThis program is customised to be used only within the CSIRO network but can be modified for general use.\n\n<strong>Outcome</strong>\n\nA database is created in SQLite to store all metadata. The database is easy to maintain. More than 3000 unique NetCDF files have been found and the number of files will keep growing. In the database, host descriptions, host URLs and data service types are also captured.\n\n\n\nPlease check <a href=\"https://drive.google.com/file/d/1on3AIzCGQ0RNjU-ekA0VLm2iZv2f_2Rw/view?usp=sharing\">Project ERD</a>\n\n<strong>Technology</strong>\n\nIn this project, overall, pure Python is used to perform NetCDF file scraping. \n\nFurthermore, a range of Python libraries and packages is used:\n\n<ul>\n <li>Python Request</li>\n <li>Beautiful Soup</li>\n <li>RE</li>\n <li>NMAP</li>\n <li>XML</li>\n <li>urlsplit</li>\n</ul>\n\nThese tools are useful and handy, especially Beautiful Soup. 
Not only can it dig out data, it can also be used to transform unstructured data.\n\n<strong>Project Schema</strong>\n\nmain.py<br>\n&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;|<br>\n&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;| -- scan.py ( To scan the network and return valid hosts )<br>\n&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;| -- tds_host.py ( To capture host description terms and data service types to the database ) <br>\n&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;| -- store.py ( SQL query functions to capture data to the database )<br>\n&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;| -- helper.py ( helper functions )<br>\n</p>\n\n\n<strong>Find me on</strong>\n\nwww.woodylouis.me\n\n<strong>Contact</strong>\n\nlouisli@woodylouis.me\n\nWenjin.Li@csiro.au\n\n" }, { "alpha_fraction": 0.7457627058029175, "alphanum_fraction": 0.7457627058029175, "avg_line_length": 40.20000076293945, "blob_id": "4452c46732fb00a6078de76f760086941a4971d7", "content_id": "5a3858ecec9e1473d9216e51a25e0b8d07dce51b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "SQL", "length_bytes": 413, "license_type": "no_license", "max_line_length": 91, "num_lines": 10, "path": "/database/test.sql", "repo_name": "woodylouis/ServiceSniffer", "src_encoding": "UTF-8", "text": "-- select nc_files.id, host_ip, service_type, server_url, nc_name, url_path as NC_File_Path\n-- from nc_files\n-- INNER JOIN hosts ON hosts.id = nc_files.host_id\n-- INNER JOIN services ON services.id = nc_files.service_id;\n\n\nselect hosts.id, hosts.host_ip, services.service_type\nfrom host_services\ninner join hosts on hosts.id = host_services.host_id\ninner join services on services.id = host_services.service_id;\n\n" }, { "alpha_fraction": 0.5316718816757202, "alphanum_fraction": 0.5652474761009216, "avg_line_length": 42.43608856201172, "blob_id": "331ec9ad01f87351d3e57ac52bab7d14cc992df4", "content_id": "97d5b2c1b87cba91666346efb4abea2efcb2c2cf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5778, "license_type": "no_license", "max_line_length": 153, "num_lines": 133, "path": "/scan.py", "repo_name": "woodylouis/ServiceSniffer", "src_encoding": "UTF-8", "text": "##########################################################################################\n#######Care is needed when running this program, 
as it uses the NMAP package.#######\n##################Running this might attract attention from IM&T#########################\n\nimport nmap\nimport datetime\nimport requests\n\n##########################################################################################\n#########A raw scan report will be generated once the scanning has started################\n###########The report will keep updating until the scanning process has stopped##########\n##########################################################################################\ndate = datetime.datetime.now().strftime(\"%d%m%Y_%H%M%S\")\n\n\n\ndef get_thredds_hosts(network_prefix):\n reportName = str(date) + \"_ScanReport\" + \".txt\"\n scanReport = open(reportName, \"a\")\n nm = nmap.PortScanner()\n activeHosts = []\n portDict = {}\n hostInfoDict = {}\n \"\"\"\n Perform a simple ping scan and create a list of online hosts.\n The parameters below might be aggressive;\n however, they give acceptable scanning speed and accuracy for a bunch of /16 networks after performing a few tests.\n The parameters can be modified.\n \"\"\"\n pingResult = nm.scan(hosts=network_prefix,\n arguments='--min-hostgroup=5000 --max-hostgroup=10000 --min-parallelism=100 --max-parallelism=200 --host-timeout=2s -T4 -n -sP')\n\n \"\"\"\n Get active hosts only from the raw scanning report\n \"\"\"\n for pResult in pingResult['scan'].values():\n hostStatus = pResult['status']['state']\n host = pResult['addresses']['ipv4']\n if hostStatus == 'up':\n activeHosts.append(host)\n totalActiveHosts = (\"There are \" + str(len(activeHosts)) + \" active hosts online. The hosts are: \\n\" + '\\n'.join(\n '{}: {}'.format(*k) for k in enumerate(activeHosts, start=1)) + \"\\n\")\n\n\n \"\"\"\n Get the hosts with open HTTP ports from the active hosts above\n # 1-1024 popular port\n # 1194 openVPN\n # 1433 Microsoft SQL Server\n # 1434 Microsoft SQL Monitor\n # 2483-2484 Oracle database\n # 3306 MySQL database service\n # 4333 mSQL\n # 5432 PostgreSQL database\n # 8080 HTTP Proxy such as Apache\n # 27017 Mongo database\n \"\"\"\n\n for host in activeHosts:\n aDict = nm.scan(hosts=host,\n arguments='--min-hostgroup=5000 --max-hostgroup=10000 --min-parallelism=100 --max-parallelism=200 --host-timeout=2s -T5 -n -v',\n ports=\"80, 1433-1434, 2483-2484, 800, 3306, 4333, 5432, 5000, 8080, 9000, 8433, 27017, 50000\")\n scanReport.write(str(aDict))\n\n for serviceScan in aDict['scan'].values():\n host = serviceScan['addresses']['ipv4']\n try:\n portInfo = serviceScan['tcp']\n except:\n pass\n for port, info in portInfo.items():\n hostServices = info['name']\n portState = info['state']\n \"\"\"\n Get all hosts' HTTP service and port in a dictionary\n \"\"\"\n if portState == 'open' and 'http' in hostServices:\n if port not in portDict:\n portDict[port] = hostServices\n if host not in hostInfoDict:\n hostInfoDict[host] = portDict\n # print(hostInfoDict)\n \"\"\"\n Here, it will create a host status dictionary for all hosts using the HTTP protocol,\n constructing URLs from the host IP, the ports and the main thredds catalog path.\n After that, it will generate a dictionary of potential TDS candidates by using REQUEST.\n If the status is '404', these hosts will be ignored. \n If the status is '302', it means the page redirects to a login page. 
These hosts may have TDS installed and are stored in the database.\n If the status is '200', it means these hosts are TDS servers and can be accessed.\n \"\"\"\n httpHostStatusDict = {}\n # NOTE: this hard-coded host dictionary appears to be a leftover debug override of the scan results;\n # it is commented out so the hosts discovered above are actually used\n # hostInfoDict = {'152.83.247.62': {80: 'http', 8080: 'http-proxy'}, '152.83.247.74': {80: 'http', 8080: 'http-proxy'}}\n for host, hostInfo in hostInfoDict.items():\n for port in hostInfo.keys():\n urls = f'http://{host}:{port}/thredds/catalog.html'\n try:\n r = requests.get(urls, timeout=0.5, allow_redirects=False)\n if str(r.url) not in httpHostStatusDict:\n httpHostStatusDict[str(r.url)] = str(r.status_code)\n except requests.exceptions.HTTPError:\n \"\"\"An HTTP error occurred.\"\"\"\n pass\n except requests.exceptions.ConnectionError:\n \"\"\"A Connection error occurred.\"\"\"\n pass\n except requests.exceptions.Timeout:\n \"\"\"\n The request timed out while trying to connect to the remote server.\n Requests that produced this error are safe to retry.\n \"\"\"\n pass\n except requests.exceptions.RequestException:\n \"\"\"\n There was an ambiguous exception that occurred while handling your request.\n \"\"\"\n pass\n # print(httpHostStatusDict)\n threddsCandidateHostList = []\n noThreddsInstalledHostList = []\n redirectToOtherURL = []\n unknownError = []\n for urls, status in httpHostStatusDict.items():\n if status == '404':\n noThreddsInstalledHostList.append(urls)\n elif status == '302':\n redirectToOtherURL.append(urls)\n elif status == '200':\n threddsCandidateHostList.append(urls)\n else:\n unknownError.append(urls)\n\n return threddsCandidateHostList\n # print(threddsCandidateHostList)\n\n" }, { "alpha_fraction": 0.571049153804779, "alphanum_fraction": 0.6148738265037537, "avg_line_length": 20.94444465637207, "blob_id": "ddb50c55f1e4bae77dbe7ae63b145fbdc697ef92", "content_id": "905b43cbceb72f9f3814d6c7f726dc7b659b8152", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 753, "license_type": "no_license", "max_line_length": 102, "num_lines": 36, "path": "/test.py", "repo_name": "woodylouis/ServiceSniffer", "src_encoding": "UTF-8", "text": "import scan\n\n# def getCandiateUrl():\n# candidateList = []\n# try:\n# with open(f\"C:\\\\Users\\LI252\\Desktop\\ScanResults\\ScanReport Request.txt\", \"r\") as reportFile:\n# for line in reportFile.read().splitlines():\n# if 'may have Thredds installed' in line:\n# print('T')\n#\n#\n# except:\n# print(\"Invalid file to interrogate valid thredds\")\n#\n# getCandiateUrl()\n\"\"\"\n\nLocal config file. 
\nIn the file,\nIP network addresses should look like:\n\n192.168.1.0/24\n192.168.2.0/24\n192.168.3.0/24\n\n\n\"\"\"\ntry:\n f = open(input(\"Please enter the path of the file that contains the addresses: \"), \"r\")\n network = f.read()\nexcept:\n print(\"Please enter a valid file path\")\n\n\n\n\nscan.get_thredds_hosts(network)" }, { "alpha_fraction": 0.7055984735488892, "alphanum_fraction": 0.7055984735488892, "avg_line_length": 24.899999618530273, "blob_id": "dbc353727fc89cbf8ff8af093680e0627ae7baed", "content_id": "ef98c2264b1bc053b58ae9afce4e030df16390db", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "SQL", "length_bytes": 1036, "license_type": "no_license", "max_line_length": 51, "num_lines": 40, "path": "/database/create.sql", "repo_name": "woodylouis/ServiceSniffer", "src_encoding": "UTF-8", "text": "-- DROP TABLE if exists hosts;\n-- DROP TABLE if exists services;\n-- DROP TABLE if exists host_services;\nDROP TABLE if exists dataset;\n\n\n-- CREATE TABLE hosts (\n-- id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,\n-- host_ip VARCHAR NOT NULL,\n-- port INTEGER NOT NULL,\n-- server_url VARCHAR NOT NULL,\n-- container_description VARCHAR\n--\n-- );\n--\n-- CREATE TABLE services (\n-- id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,\n-- service_type VARCHAR NOT NULL UNIQUE\n--\n-- );\n--\n-- CREATE TABLE host_services (\n-- host_id INTEGER NOT NULL,\n-- service_id INTEGER NOT NULL,\n--\n-- PRIMARY KEY (host_id, service_id),\n-- FOREIGN KEY (host_id) REFERENCES hosts(id),\n-- FOREIGN KEY (service_id) REFERENCES services(id)\n-- );\n\nCREATE TABLE dataset (\nid INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,\ndataset_name VARCHAR NOT NULL,\ndescription VARCHAR,\nurl_path VARCHAR NOT NULL,\nservice_id INTEGER NOT NULL,\nhost_id INTEGER NOT NULL,\nFOREIGN KEY (host_id) REFERENCES hosts(id),\nFOREIGN KEY (service_id) REFERENCES services(id)\n);\n" } ]
5
yuvalturg/slides
https://github.com/yuvalturg/slides
409bad84f864a6ca9d814b7fa992340e56a8688e
fb3f03e66734ab8b4e212b8e9587685589970f5f
2746e80ffc36128098f6ec628060379d8e093f79
refs/heads/master
2020-05-30T15:46:55.557444
2020-01-30T07:12:50
2020-01-30T07:12:50
189,828,778
0
1
null
null
null
null
null
[ { "alpha_fraction": 0.48201438784599304, "alphanum_fraction": 0.4940047860145569, "avg_line_length": 27.43181800842285, "blob_id": "6fb35268fd120e445bbf2c97fd96ff726544e9f7", "content_id": "839ff39c6bbc4e6d335ea0591cefc13409882af1", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1251, "license_type": "permissive", "max_line_length": 70, "num_lines": 44, "path": "/talks/python-fuse/2_hellofs.py", "repo_name": "yuvalturg/slides", "src_encoding": "UTF-8", "text": "import fuse\nimport stat\nimport errno\n\nfuse.fuse_python_api = (0, 2)\n\nclass PyConFS(fuse.Fuse):\n _PATH = \"/hello_pycon19\"\n _DATA = \"PyCon rocks \\m/\\n\"\n\n def getattr(self, path):\n print(\"getattr({})\".format(path))\n if path == \"/\":\n return fuse.Stat(st_mode=stat.S_IFDIR | 0o755, st_nlink=1)\n if path == self._PATH:\n return fuse.Stat(st_mode=stat.S_IFREG | 0o600,\n st_size=len(self._DATA),\n st_nlink=1)\n return -errno.ENOENT\n\n def readdir(self, path, offset):\n print(\"readdir({}, {})\".format(path, offset))\n if path == \"/\":\n for r in (\".\", \"..\", self._PATH[1:]):\n yield fuse.Direntry(r)\n\n def read(self, path, size, offset):\n print(\"read({}, {}, {})\".format(path, size, offset))\n if path != self._PATH:\n return -errno.ENOENT\n data_size = len(self._DATA)\n if offset < data_size:\n if offset + size > data_size:\n size = data_size - offset\n buf = self._DATA[offset:offset+size]\n else:\n buf = \"\"\n return buf\n\n\nif __name__ == '__main__':\n server = PyConFS()\n server.parse()\n server.main()\n" }, { "alpha_fraction": 0.5828220844268799, "alphanum_fraction": 0.5950919985771179, "avg_line_length": 15.300000190734863, "blob_id": "a23593d757d83c5ccee8f057ef8a52e1db3494f0", "content_id": "ea0138df54dc4838c91da35054c3ed8ea6552229", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 163, "license_type": "permissive", "max_line_length": 30, "num_lines": 10, "path": "/talks/python-fuse/0_emptyfs.py", "repo_name": "yuvalturg/slides", "src_encoding": "UTF-8", "text": "import fuse\n\nfuse.fuse_python_api = (0, 2)\n\nclass PyConFS(fuse.Fuse): pass\n\nif __name__ == '__main__':\n server = PyConFS()\n server.parse()\n server.main()\n" }, { "alpha_fraction": 0.7264673113822937, "alphanum_fraction": 0.7320044040679932, "avg_line_length": 31.244047164916992, "blob_id": "d64aa19bb27e9d53f0d7819742f512107e6e7a53", "content_id": "abcb175f6a6c31842b32b8c3669b7b22fbd58d26", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 5418, "license_type": "permissive", "max_line_length": 155, "num_lines": 168, "path": "/talks/ovirt-node-coreos/content.md", "repo_name": "yuvalturg/slides", "src_encoding": "UTF-8", "text": "# Reigniting oVirt Node\n\nYuval Turgeman, Red Hat <!-- .element: style=\"position: absolute; left: 0; top: 100%; font-size: 0.6em\" -->\nyuvalt@redhat.com <!-- .element: style=\"position: absolute; left: 0; top: 120%; font-size: 0.6em\" -->\n\nnote:\n- The reincarnation of ovirt node as a layered product of fedora-coreos\n\n---\n\n# Agenda\n- Background\n- Evolution\n- Build/Deploy CoreOS Node\n- Challenges\n\n\nnote:\n- Background - what's ovirt, and more specifically ovirt node and why we need it\n- Evolution - where we were a few years ago, what we're doing today and what we have in mind for the future\n- Explain how we built our own coreos-based node using the standard tools from coreos, and how we can deploy it\n- 
and then discuss the challenges we hit\n\n---\n\n\n# Background\n- What's an oVirt Node\n- Legacy node\n- NG node\n- CoreOS node\n\nnote:\n- What's ovirt? the leading open source virtualization platform\n- An ovirt host (or hypervisor) is where vms actually run so for that we need the ovirt bits (vdsm, gluster) and virt stack (libvirt, qemu)\n- Turning a host into an ovirt host is just enabling the ovirt repos (ovirt-release) and installing ovirt-host\n- Ovirt node is a minimal installation that already includes the required ovirt bits\n- Different layouts, legacy 3x, ng 4x, coreos\n\n---\n\n\n# Legacy Node\n\n- LiveCD - mounted R/O ISO, with live-rw and persistent state\n- Image based, immutable OS, based on Fedora/CentOS/RHEL <!-- .element: class=\"fragment\" data-fragment-index=\"1\" -->\n- Maintenance, custom installer, whitelisting files, live-rw <!-- .element: class=\"fragment\" data-fragment-index=\"2\" -->\n\nnote:\n- Drawbacks - live-rw would obviously be deleted which is something you don't really expect for an installed system.\n\n\n--\n\n![copyfile](legacy-lsblk.png)\n\n\n---\n\n\n# NG Node\n- LVM - R/O volume with mounted R/W snapshot on top\n- Image based, A/B updates, based on Fedora/CentOS/RHEL <!-- .element: class=\"fragment\" data-fragment-index=\"1\" -->\n- Upgrade flow maintenance (imgbased), delivery with yum/dnf <!-- .element: class=\"fragment\" data-fragment-index=\"2\" -->\n\nnote:\n- Based on LVM and snapshots, we compose a squashfs and deliver it inside an rpm using yum/dnf.\n- The squashfs is extracted into an LV which is then marked as R/O, and then we create a snapshot which is mounted r/w as / while /var is common.\n- A new boot entry is added, and selected by default, so if something didn't work you can just boot to the previous working version\n- Benefits - image based, A/B updates, it just works...\n- Drawbacks - making it just work is hard\n- zstream updates are very challenging\n- we can't simply run the rpm scriptlets and expect them to work because rpm will not detect this as an upgrade\n- Since we're using yum/dnf we need to mark the system as updated by using some hacks on the rpmdb\n\n\n--\n\n\n![copyfile](node-ng.png)\n\n\nnote:\n- Minimal installation (no extra NIST partitioning), requires ~15-20G\n\n---\n\n\n# CoreOS Node\n- Motivation\n- rpm-ostree vs imgbased\n- Large community (CoreOS, Silverblue, Atomic)\n\n\nnote:\n- Motivation, first of all, it's cool, so why not?\n- CoreOS/Atomic/rpm-ostree try to solve the same problems as what we're trying to solve:\n- image-based deployment with the ability to do rollbacks to the entire image.\n- rpm-ostree is more modern and tries to be a generic solution while imgbased became over the years a little more tightly coupled with ovirt\n- example (vdsm service)\n- Large community\n\n\n---\n\n\n# Build/Install\n- Coreos-assembler + configuration repository\n- Basic config with fedora-coreos-config.git\n- Extract ovirt-release rpm\n- Artifacts: rpm-ostree commit and raw metal installer\n- Installing just like a normal Fedora-CoreOS\n\nnote:\n- In order to compose our own coreos based node, we wanted to use the standard tools from coreos.\n- For this purpose we used the coreos-assembler (cosa) container.\n- Basically, we run the container with a git repository that holds several configuration files, and we call the commands we need (build, buildextend-metal)\n- clone the fedora-coreos-config git repository\n- update our manifests (set commit refs, package list)\n- grab ovirt-release rpm, install and enable ovirt 
repos\n- run coreos-container with a raw metal installer\n- artifacts are a thin iso, a raw image, and an ostree commit\n- Installing...\n\n--\n\n![copyfile](install.png)\n\n--\n\n![copyfile](coreos-node-1.png)\n\n--\n\n![copyfile](coreos-node-cockpit.png)\n\n\n---\n\n\n# Challenges\n- Kdump support\n- Managing users and groups (systemd-sysusers) <!-- .element: class=\"fragment\" data-fragment-index=\"1\" -->\n- Podman/docker <!-- .element: class=\"fragment\" data-fragment-index=\"2\" -->\n- Ignition versioning <!-- .element: class=\"fragment\" data-fragment-index=\"3\" -->\n- rpm-ostree (selinux scriptlets, permissions) <!-- .element: class=\"fragment\" data-fragment-index=\"4\" -->\n\nnote:\n- LASTLY - Moving fast\n\n---\n\n\n# Open Issues\n- Non-standard paths <!-- .element: class=\"fragment\" data-fragment-index=\"1\" -->\n- Controlling kernel arguments <!-- .element: class=\"fragment\" data-fragment-index=\"2\" -->\n- Ansible module for rpm-ostree <!-- .element: class=\"fragment\" data-fragment-index=\"3\" -->\n\nnote: \n- Rebuilding vdsm with a custom path /var/run, but we need a hook for qemu\n- kernel arguments - ovirt uses grubby, but with an ostree-based system we need to use rpm-ostree\n- Ansible module requires a reboot after installing a new package - collaborating with the coreos team, trying to solve this\n\n\n---\n\n\n# Questions ?\n\n" }, { "alpha_fraction": 0.7919074892997742, "alphanum_fraction": 0.7919074892997742, "avg_line_length": 33.599998474121094, "blob_id": "f1e7c2a8818a5aa781d92c554ab9e4480e9eb40c", "content_id": "6c8afc385a345fb0f1af6d5ba3c98305e1246ec6", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 173, "license_type": "permissive", "max_line_length": 82, "num_lines": 5, "path": "/README.md", "repo_name": "yuvalturg/slides", "src_encoding": "UTF-8", "text": "# Talks\n\n* Filesystems in userspace https://yuvalturg.github.io/slides/talks/python-fuse\n\n* Reigniting oVirt Node https://yuvalturg.github.io/slides/talks/ovirt-node-coreos\n" }, { "alpha_fraction": 0.505464494228363, "alphanum_fraction": 0.5245901346206665, "avg_line_length": 21.875, "blob_id": "0de5f080a7cef3a067b27641f6b0328a8ddaec43", "content_id": "dac775e049ec2fcb3da6eb4ac1880f3bd0350cf0", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 366, "license_type": "permissive", "max_line_length": 58, "num_lines": 16, "path": "/talks/python-fuse/1_getattr.py", "repo_name": "yuvalturg/slides", "src_encoding": "UTF-8", "text": "import fuse\nimport stat\n\nfuse.fuse_python_api = (0, 2)\n\nclass PyConFS(fuse.Fuse):\n def getattr(self, path):\n print(\"Received path [%s]\" % path)\n if path == \"/\":\n return fuse.Stat(st_mode=stat.S_IFDIR | 0o755,\n st_nlink=1)\n\nif __name__ == '__main__':\n server = PyConFS()\n server.parse()\n server.main()\n" }, { "alpha_fraction": 0.5970364212989807, "alphanum_fraction": 0.609590470790863, "avg_line_length": 25.407608032226562, "blob_id": "251722de28ca2de1e4cf3bbfb8231f63af2ca06f", "content_id": "cd4b7148a7f41f9fe98d1aa55b6e33075468f477", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 4859, "license_type": "permissive", "max_line_length": 127, "num_lines": 184, "path": "/talks/python-fuse/content.md", "repo_name": "yuvalturg/slides", "src_encoding": "UTF-8", "text": "# Filesystems in userspace\n\nYuval Turgeman, Red Hat <!-- .element: style=\"position: absolute; left: 0; 
top: 100%; font-size: 0.6em\" -->\nyuvalt@gmail.com <!-- .element: style=\"position: absolute; left: 0; top: 120%; font-size: 0.6em\" -->\n\n---\n\n# Agenda\n* Filesystems overview\n* Hello world\n* oVirt filesystem\n\nnote: how filesystems work, the differences between traditional filesystem\n implemented in the kernel and filesystems in userspace (fuse) - yes\n it's possible and quite easy with python\n\n---\n\n# Filesystems\n- What ? Why ?\n\n--\n\n![copyfile](longcopy.jpg)\n\n--\n\n# Filesystems\n- Interface to physical media\n- Virtual filesystems <!-- .element: class=\"fragment\" data-fragment-index=\"1\" -->\n- Represent and control state <!-- .element: class=\"fragment\" data-fragment-index=\"2\" -->\n- Implement a common interface (open, read, write) <!-- .element: class=\"fragment\" data-fragment-index=\"3\" -->\n- Use standard tools (ls, cat, shell redirection) <!-- .element: class=\"fragment\" data-fragment-index=\"4\" -->\n\nnote: Developer uses an API, but any user can use a filesystem\n\n---\n\n# How does it work ?\n- Virtual File System (VFS) - an abstract layer for all filesystems <!-- .element: class=\"fragment\" data-fragment-index=\"1\" -->\n- Kernel module registers the FS in the VFS <!-- .element: class=\"fragment\" data-fragment-index=\"2\" -->\n\n--\n\n* Traditional in-kernel filesystem\n\n![traditional](traditional-fs.png)\n\n---\n\n# FUSE\n- A kernel module that registers fuse in VFS <!-- .element: class=\"fragment\" data-fragment-index=\"1\" -->\n- A userspace library and daemon <!-- .element: class=\"fragment\" data-fragment-index=\"2\" -->\n- No kernel knowledge, super simple, highly adopted <!-- .element: class=\"fragment\" data-fragment-index=\"3\" -->\n\nnote: A daemon that runs in the background listening to filesystem requests\n (open, read, write) on relative paths, over /dev/fuse\n\n--\n\n![fuse](fuse-structure.png)\n\n---\n\n# Python\n\n---\n\n# Hello World\n```python\nimport fuse\n\nfuse.fuse_python_api = (0, 2)\n\nclass PyConFS(fuse.Fuse): pass \n\nif __name__ == '__main__':\n server = PyConFS()\n server.parse()\n server.main()\n```\n\nnote: [1] run python2 0_emptyfs.py\n [2] show mount point and process\n [3] discuss client-server architecture\n\n--\n\n* getattr() - get file attributes, similar to stat\n\n```python\nimport stat\n\nclass PyConFS(fuse.Fuse):\n def getattr(self, path):\n print(\"Received path [%s]\" % path)\n if path == \"/\":\n return fuse.Stat(st_mode=stat.S_IFDIR | 0o755,\n st_nlink=1)\n```\n\n--\n\n```python\nclass PyConFS(fuse.Fuse):\n _PATH = \"/hello_pycon19\"\n _DATA = \"PyCon rocks \\m/\\n\"\n\n def getattr(self, path):\n print(\"getattr({})\".format(path))\n if path == \"/\":\n return fuse.Stat(st_mode=stat.S_IFDIR | 0o755,\n st_nlink=1)\n if path == self._PATH:\n return fuse.Stat(st_mode=stat.S_IFREG | 0o600,\n st_size=len(self._DATA),\n st_nlink=1)\n return -errno.ENOENT\n```\n\nnote: play with file permissions\n\n--\n\n* readdir() - list directory content\n\n```python\nclass PyConFS(fuse.Fuse):\n _PATH = \"/hello_pycon19\"\n\n def readdir(self, path, offset):\n print(\"readdir({}, {})\".format(path, offset))\n if path == \"/\":\n for r in (\".\", \"..\", self._PATH[1:]):\n yield fuse.Direntry(r)\n```\n\n--\n\n* read() - read file content \n\n```python\nclass PyConFS(fuse.Fuse):\n _PATH = \"/hello_pycon19\"\n _DATA = \"PyCon rocks \\m/\\n\"\n\n def read(self, path, size, offset):\n if path != self._PATH:\n return -errno.ENOENT\n data_size = len(self._DATA)\n if offset < data_size:\n if offset + size > data_size:\n size = 
data_size - offset\n return self._DATA[offset:offset+size]\n return \"\"\n```\n\n---\n\n# Operations\n\n- open <!-- .element: class=\"fragment\" data-fragment-index=\"1\" -->\n- write <!-- .element: class=\"fragment\" data-fragment-index=\"2\" -->\n- truncate <!-- .element: class=\"fragment\" data-fragment-index=\"3\" -->\n- flush <!-- .element: class=\"fragment\" data-fragment-index=\"4\" -->\n- ... and more <!-- .element: class=\"fragment\" data-fragment-index=\"5\" -->\n\n---\n\n# Mounting\n- Modular mount <!-- .element: class=\"fragment\" data-fragment-index=\"1\" -->\n- Mount options <!-- .element: class=\"fragment\" data-fragment-index=\"2\" -->\n\n---\n\n# oVirt + FUSE\n* oVirt is the open virtualization software <!-- .element: class=\"fragment\" data-fragment-index=\"1\" -->\n* Provides a python API to access the oVirt Manager <!-- .element: class=\"fragment\" data-fragment-index=\"2\" -->\n* oVirtFS uses FUSE to provide a filesystem access <!-- .element: class=\"fragment\" data-fragment-index=\"3\" -->\n* Demo <!-- .element: class=\"fragment\" data-fragment-index=\"4\" -->\n\n---\n\n# Questions ?\n" } ]
6
dengxiaohu520/DataXWeb
https://github.com/dengxiaohu520/DataXWeb
0d7ca1e8ffafab89637584a608c4f1075908a089
070304cd278c9f9054060736326ec3ecce9b64c0
4306d80327ecbe597b8cec66f045dd04c46a34c0
refs/heads/master
2021-02-09T07:54:19.571056
2019-12-06T15:13:15
2019-12-06T15:13:15
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5536525845527649, "alphanum_fraction": 0.5774204730987549, "avg_line_length": 38.19178009033203, "blob_id": "3f85a1086b2b9198e65b301fd451d98ac5f8f530", "content_id": "b4ef1dec8fda74e18cc8a5b01d7e28c41f1a7636", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2979, "license_type": "no_license", "max_line_length": 112, "num_lines": 73, "path": "/dataxweb/backend/migrations/0003_auto_20191128_1428.py", "repo_name": "dengxiaohu520/DataXWeb", "src_encoding": "UTF-8", "text": "# Generated by Django 2.2.7 on 2019-11-28 06:28\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('backend', '0002_auto_20191128_1136'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='dataxtask',\n name='from_character',\n field=models.CharField(blank=True, default='utf8', max_length=10, null=True, verbose_name='ๆฅๆบ็ผ–็ '),\n ),\n migrations.AlterField(\n model_name='dataxtask',\n name='from_columns',\n field=models.CharField(blank=True, default='*', max_length=1000, null=True, verbose_name='ๆฅๆบๅˆ—'),\n ),\n migrations.AlterField(\n model_name='dataxtask',\n name='from_where',\n field=models.CharField(blank=True, default='', max_length=1000, null=True, verbose_name='ๆฅๆบๆกไปถ'),\n ),\n migrations.AlterField(\n model_name='dataxtask',\n name='name',\n field=models.CharField(help_text='ๆญคๅ็งฐ็”จไบŽไฟๅญ˜ไธบjsonๆจกๆฟๅ็งฐ', max_length=256, verbose_name='ไปปๅŠกๅ็งฐ'),\n ),\n migrations.AlterField(\n model_name='dataxtask',\n name='task_error_limit_percentage',\n field=models.FloatField(blank=True, default=0.02, null=True, verbose_name='้”™่ฏฏ่ฎฐๅฝ•็™พๅˆ†ๆฏ”'),\n ),\n migrations.AlterField(\n model_name='dataxtask',\n name='task_error_limit_record',\n field=models.SmallIntegerField(blank=True, default=5, null=True, verbose_name='้”™่ฏฏ่ฎฐๅฝ•ๆกๆ•ฐ'),\n ),\n migrations.AlterField(\n model_name='dataxtask',\n name='task_speed_channel',\n field=models.SmallIntegerField(blank=True, default=5, null=True, verbose_name='้€Ÿๅบฆ'),\n ),\n migrations.AlterField(\n model_name='dataxtask',\n name='to_character',\n field=models.CharField(blank=True, default='utf8', max_length=10, null=True, verbose_name='็›ฎๆ ‡็ผ–็ '),\n ),\n migrations.AlterField(\n model_name='dataxtask',\n name='to_columns',\n field=models.CharField(blank=True, default='*', max_length=1000, null=True, verbose_name='็›ฎๆ ‡ๅˆ—'),\n ),\n migrations.AlterField(\n model_name='dataxtask',\n name='to_post_sql',\n field=models.CharField(blank=True, default='', max_length=1000, null=True, verbose_name='ๅŽ็ฝฎๆกไปถ'),\n ),\n migrations.AlterField(\n model_name='dataxtask',\n name='to_pre_sql',\n field=models.CharField(blank=True, default='', max_length=1000, null=True, verbose_name='ๅ‰็ฝฎๆกไปถ'),\n ),\n migrations.AlterField(\n model_name='dataxtask',\n name='to_session',\n field=models.CharField(blank=True, default='', max_length=256, null=True, verbose_name='็›ฎๆ ‡session'),\n ),\n ]\n" }, { "alpha_fraction": 0.6021978259086609, "alphanum_fraction": 0.6208791136741638, "avg_line_length": 40.671756744384766, "blob_id": "17d85fe8b88ef25544d4ba37a7345389a2b82d58", "content_id": "c15136d23899723b2fd29d5aa0244c637ee474df", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 11896, "license_type": "no_license", "max_line_length": 109, "num_lines": 262, "path": "/dataxweb/backend/models.py", "repo_name": "dengxiaohu520/DataXWeb", "src_encoding": "UTF-8", 
"text": "from django.db import models\nfrom django.db.models import BooleanField as _BooleanField\nfrom django.contrib.auth.models import AbstractUser, PermissionsMixin, BaseUserManager, AbstractBaseUser\n# from django.urls import reverse\nimport django.utils.timezone as timezone\n# from django.core.validators import RegexValidator\n# from django.template.defaultfilters import slugify\n# from datetime import datetime\n# import pytz\nfrom django.utils.translation import ugettext_lazy as _\n\n# Create your models here.\n\n\nclass BooleanField(_BooleanField):\n def get_prep_value(self, value):\n if value in (0, '0', 'false', 'False'):\n return False\n elif value in (1, '1', 'true', 'True'):\n return True\n else:\n return super(BooleanField, self).get_prep_value(value)\n\n\n# AbstractBaseUserไธญๅชๅซๆœ‰3ไธชfield: password, last_loginๅ’Œis_active.\n# ๅฆ‚ๆžœไฝ ๅฏนdjango user model้ป˜่ฎค็š„first_name, last_nameไธๆปกๆ„,\n# ๆˆ–่€…ๅชๆƒณไฟ็•™้ป˜่ฎค็š„ๅฏ†็ ๅ‚จๅญ˜ๆ–นๅผ, ๅˆ™ๅฏไปฅ้€‰ๆ‹ฉ่ฟ™ไธ€ๆ–นๅผ.\nclass DataXUserProfile(AbstractUser):\n avatar = models.ImageField(upload_to='avatar/%Y/%m', default='avatar/default.png', max_length=200,\n verbose_name=_('็”จๆˆทๅคดๅƒ'))\n qq = models.CharField(max_length=20, blank=True, null=True, verbose_name='QQ')\n phone = models.CharField(max_length=11, blank=True, null=True, unique=True, verbose_name=_('ๆ‰‹ๆœบๅท'))\n nick_name = models.CharField(max_length=30, verbose_name=_('ๆ˜ต็งฐ'))\n\n # is_lock = models.BooleanField(default=False, verbose_name='ๆ˜ฏๅฆ้”ๅฎš', choices=((0, 'ๅฆ'), (1, 'ๆ˜ฏ')))\n # is_enable = models.BooleanField(default=True, verbose_name='ๆ˜ฏๅฆๅฏ็”จ', choices=((0, 'ๅฆ'), (1, 'ๆ˜ฏ')))\n\n class Meta(AbstractUser.Meta):\n db_table = 'dx_userprofile'\n swappable = 'AUTH_USER_MODEL'\n verbose_name = _('็”จๆˆท')\n verbose_name_plural = verbose_name\n ordering = ['-id']\n\n # class Meta:\n # db_table = 'datax_userprofile'\n # verbose_name = '็”จๆˆท'\n # verbose_name_plural = verbose_name\n # ordering = ['-id']\n\n def __str__(self):\n return self.username\n\n # def create_user(self, username, nickname, password=None):\n # # create user here\n # pass\n #\n # def create_superuser(self, username, password):\n # # create superuser here\n # pass\n\n\nclass BaseModel(models.Model):\n create_time = models.DateTimeField(_('ๅˆ›ๅปบๆ—ถ้—ด'), default=timezone.now)\n create_uid = models.IntegerField(_('ๅˆ›ๅปบไบบID'), default=123456789, auto_created=True)\n create_username = models.CharField(_('ๅˆ›ๅปบไบบๅ็งฐ'), max_length=30, default='admin', auto_created=True)\n operate_time = models.DateTimeField(_('ๆ“ไฝœๆ—ถ้—ด'), auto_now=True)\n operate_uid = models.IntegerField(_('ๆ“ไฝœไบบID'), default=123456789, auto_created=True)\n operate_username = models.CharField(_('ๆ“ไฝœไบบๅ็งฐ'), max_length=30, default='admin', auto_created=True)\n\n class Meta:\n abstract = True\n\n\n# ็ณป็ปŸ|็ซ™็‚น้…็ฝฎ\nclass DataXConfig(BaseModel):\n site_name = models.CharField(_('็ซ™็‚นๅ็งฐ'), max_length=50, null=True, blank=True)\n site_desc = models.CharField(_('็ซ™็‚นๆ่ฟฐ'), max_length=150, null=True, blank=True)\n site_author = models.CharField(_('ไฝœ่€…'), max_length=100, null=True, blank=True)\n site_company = models.CharField(_('ๅ…ฌๅธ'), max_length=100, default=None, blank=True, null=True)\n address = models.CharField(_('ๆ˜พ็คบๅœฐๅ€'), max_length=150, default=None, blank=True, null=True)\n telephone = models.CharField(_('็”ต่ฏ'), max_length=15)\n email = models.EmailField(_('้‚ฎ็ฎฑ'), max_length=50, null=True, blank=True)\n icp = models.CharField(_('ๅค‡ๆกˆๅท'), 
max_length=256, null=True, blank=True)\n remark = models.CharField(_('ๅค‡ๆณจ'), max_length=200, null=True, blank=True)\n qrcode = models.ImageField(_('ไบŒ็ปด็ '), null=True, blank=True, upload_to='sys/%Y/%m')\n is_enable = models.BooleanField(_('ๆ˜ฏๅฆๅฏ็”จ'), default=True)\n\n class Meta:\n db_table = \"dx_config\"\n verbose_name = _('็ซ™็‚น้…็ฝฎ')\n verbose_name_plural = verbose_name\n\n def __str__(self):\n return self.site_name\n\n\n# ๅฏผ่ˆช่œๅ•็ฎก็†\nclass DataXNav(BaseModel):\n code = models.CharField(_('ๆ ‡่ฏ†'), max_length=20)\n name = models.CharField(_('ๅ็งฐ'), max_length=50, blank=True, null=True,)\n url = models.CharField(_('้“พๆŽฅ'), max_length=200)\n remark = models.CharField(_('ๆ่ฟฐ'), max_length=300, blank=True)\n parent = models.ForeignKey(to='self', default=0, null=True, blank=True, related_name='children',\n verbose_name=_('็ˆถ็บง'), limit_choices_to={'is_delete': False, 'is_root': True},\n on_delete=models.CASCADE)\n is_root = models.BooleanField(_('ๆ˜ฏๅฆไธ€็บง่œๅ•'), default=True)\n is_delete = models.BooleanField(_('ๆ˜ฏๅฆๅˆ ้™ค'), default=False)\n sort = models.IntegerField(_('ๆŽ’ๅบ'), default=0)\n is_enable = models.BooleanField(_('ๆ˜ฏๅฆๅฏ็”จ'), default=True)\n\n class Meta:\n db_table = \"dx_nav\"\n ordering = ['sort', '-create_time']\n verbose_name = _('ๅฏผ่ˆช่œๅ•็ฎก็†')\n verbose_name_plural = verbose_name\n\n def __str__(self):\n return self.name\n\n\n# ไฝœไธš่ฐƒๅบฆ\nclass DataXJobScheduler(BaseModel):\n \"\"\"\n \"\"\"\n name = models.CharField(_('ไฝœไธšๅ็งฐ'), max_length=200)\n hostname = models.CharField(_('ไฝœไธšๆœบๅ™จๅ็งฐ'), max_length=100, default='', null=True, blank=True)\n ip = models.CharField(_('ไฝœไธšๆœบๅ™จip'), max_length=100, default='127.0.0.1', )\n port = models.PositiveSmallIntegerField(_('ไฝœไธšๆœบๅ™จ็ซฏๅฃ'), default=9999, )\n deploy_state = models.PositiveSmallIntegerField(\n _('ไฝœไธš้ƒจ็ฝฒ็Šถๆ€'), default=0,\n choices=(\n (0, _('ๆœช้ƒจ็ฝฒ')),\n (1, _('ๅทฒ้ƒจ็ฝฒ')),\n )\n )\n state = models.PositiveSmallIntegerField(\n _('ไฝœไธš่ฟ่กŒ็Šถๆ€'), default=0,\n choices=(\n (0, _('ๆœช่ฟ่กŒ')),\n (1, _('่ฟ่กŒไธญ')),\n (2, _('็ป“ๆŸ')),\n (3, _('ๅผ‚ๅธธ็ปˆๆญข')),\n )\n )\n start_time = models.DateTimeField(_('ไฝœไธšๅผ€ๅง‹ๆ—ถ้—ด'), default='', null=True, blank=True, )\n end_time = models.DateTimeField(_('ไฝœไธš็ป“ๆŸๆ—ถ้—ด'), default='', null=True, blank=True, )\n duration = models.IntegerField(_('่ฟ่กŒๆ—ถ้•ฟ'), default=0)\n\n # image_url = models.ImageField(_('ๅ›พ็‰‡'), null=True, blank=True, upload_to='company/%Y/%m')\n # about = models.ForeignKey(to='ChfAbout', null=True, blank=True, related_name='about_resource',\n # related_query_name='about', on_delete=models.CASCADE, verbose_name=_('ๅ“็‰Œไป‹็ป'))\n\n # job_speed_channel = models.SmallIntegerField(_('้€Ÿๅบฆ'), default=5)\n # job_error_limit_record = models.SmallIntegerField(_('้”™่ฏฏ่ฎฐๅฝ•ๆกๆ•ฐ'), default=5)\n # job_error_limit_percentage = models.FloatField(_('้”™่ฏฏ่ฎฐๅฝ•็™พๅˆ†ๆฏ”'), default=0.02)\n\n sort = models.IntegerField(_('ๆŽ’ๅบ'), default=10000)\n is_enable = models.BooleanField(_('ๆ˜ฏๅฆๅฏ็”จ'), default=True)\n\n class Meta:\n db_table = 'dx_jobscheduler'\n ordering = ['sort', '-create_time']\n verbose_name = _('ไฝœไธš่ฐƒๅบฆ')\n verbose_name_plural = verbose_name\n\n def __str__(self):\n return self.name\n\n\n# ไฝœไธšไปปๅŠก\nclass DataXTask(BaseModel):\n scheduler = models.OneToOneField(\n DataXJobScheduler, null=False,\n related_name='Scheduler', # ๅๅ‘ๆŸฅ่ฏข็”จ(ๅฐฑไธ_setไบ†๏ผšd1.DataXTask.all() -ใ€‹d1.Scheduler.all())\n on_delete=models.CASCADE, # 
ๅค–้”ฎ,่‡ชๅŠจๅ…ณ่”่กจ็š„ไธป้”ฎ ็บง่”ๅˆ ้™ค\n to_field='id', verbose_name=_('ไฝœไธšไปปๅŠก')\n )\n name = models.CharField(_('ไปปๅŠกๅ็งฐ'), max_length=256, help_text='ๆญคๅ็งฐ็”จไบŽไฟๅญ˜ไธบjsonๆจกๆฟๅ็งฐ')\n from_dbtype = models.CharField(_('ๆฅๆบๅบ“็ฑปๅž‹'), max_length=50, )\n from_hostname = models.CharField(_('ๆฅๆบIP'), max_length=16, )\n from_port = models.SmallIntegerField(_('ๆฅๆบ็ซฏๅฃ'), default=3306, )\n from_username = models.CharField(_('ๆฅๆบ็”จๆˆทๅ'), max_length=50, )\n from_password = models.CharField(_('ๆฅๆบๅฏ†็ '), max_length=50, )\n from_db_name = models.CharField(_('ๆฅๆบๅบ“ๅ'), max_length=80, )\n from_table_name = models.CharField(_('ๆฅๆบ่กจๅ'), max_length=80, )\n from_columns = models.CharField(_('ๆฅๆบๅˆ—'), default='*', max_length=1000, null=True, blank=True, )\n from_where = models.CharField(_('ๆฅๆบๆกไปถ'), default='', max_length=1000, null=True, blank=True, )\n from_character = models.CharField(_('ๆฅๆบ็ผ–็ '), default='utf8', max_length=10, null=True, blank=True, )\n\n to_dbtype = models.CharField(_('็›ฎๆ ‡ๅบ“็ฑปๅž‹'), max_length=50, )\n to_hostname = models.CharField(_('็›ฎๆ ‡IP'), max_length=16, )\n to_port = models.SmallIntegerField(_('็›ฎๆ ‡็ซฏๅฃ'), default=3306, )\n to_username = models.CharField(_('็›ฎๆ ‡็”จๆˆทๅ'), max_length=50, )\n to_password = models.CharField(_('็›ฎๆ ‡ๅฏ†็ '), max_length=50, )\n to_db_name = models.CharField(_('็›ฎๆ ‡ๅบ“ๅ'), max_length=80, )\n to_table_name = models.CharField(_('็›ฎๆ ‡่กจๅ'), max_length=80, )\n to_columns = models.CharField(_('็›ฎๆ ‡ๅˆ—'), default='*', max_length=1000, null=True, blank=True, )\n to_pre_sql = models.CharField(_('ๅ‰็ฝฎๆกไปถ'), default='', max_length=1000, null=True, blank=True, )\n to_post_sql = models.CharField(_('ๅŽ็ฝฎๆกไปถ'), default='', max_length=1000, null=True, blank=True, )\n to_character = models.CharField(_('็›ฎๆ ‡็ผ–็ '), default='utf8', max_length=10, null=True, blank=True, )\n to_session = models.CharField(_('็›ฎๆ ‡session'), default='', max_length=256, null=True, blank=True, )\n to_write_mode = models.CharField(_('็›ฎๆ ‡ๅ†™ๅ…ฅๆจกๅผ'), default='insert', max_length=15)\n\n task_speed_channel = models.SmallIntegerField(_('้€Ÿๅบฆ'), default=5, null=True, blank=True, )\n task_error_limit_record = models.SmallIntegerField(_('้”™่ฏฏ่ฎฐๅฝ•ๆกๆ•ฐ'), default=5, null=True, blank=True, )\n task_error_limit_percentage = models.FloatField(_('้”™่ฏฏ่ฎฐๅฝ•็™พๅˆ†ๆฏ”'), default=0.02, null=True, blank=True, )\n\n sort = models.IntegerField(_('ๆŽ’ๅบ'), default=0)\n is_enable = models.BooleanField(_('ๆ˜ฏๅฆๅฏ็”จ'), default=True)\n\n class Meta:\n db_table = 'dx_task'\n ordering = ['-create_time']\n verbose_name = _('ไฝœไธšไปปๅŠก')\n verbose_name_plural = verbose_name\n\n def __str__(self):\n return self.name\n\n # def long_profile(self):\n # if len(str(self.long_content)) > 50:\n # return '{}...'.format(str(self.long_content)[0:50])\n # else:\n # return str(self.long_content)\n #\n # long_profile.allow_tags = True\n # long_profile.short_description = _('')\n\n\n# ไฝœไธšไปปๅŠก็Šถๆ€\nclass DataXTaskStatus(BaseModel):\n \"\"\"\n \"\"\"\n task = models.ManyToManyField(\n DataXTask, null=True,\n related_name='Task', # ๅๅ‘ๆŸฅ่ฏข็”จ(ๅฐฑไธ_setไบ†๏ผšd1.DataXTaskStatus_set.all() -ใ€‹d1.Task.all())\n # through=''\n verbose_name=_('ไปปๅŠก็Šถๆ€')\n )\n name = models.CharField(_('ไปปๅŠกๅ็งฐ'), max_length=200)\n state = models.PositiveSmallIntegerField(\n _('ไปปๅŠก็Šถๆ€'), default=0,\n choices=(\n (0, _('ๆœช่ฟ่กŒ')),\n (1, _('่ฟ่กŒไธญ')),\n (2, _('ๅทฒๅฎŒๆˆ')),\n (3, 
_('็ปˆๆญข')),\n )\n )\n start_time = models.DateTimeField(_('ๅผ€ๅง‹ๆ—ถ้—ด'), default='', null=True, blank=True, )\n end_time = models.DateTimeField(_('็ป“ๆŸๆ—ถ้—ด'), default='', null=True, blank=True, )\n duration = models.IntegerField(_('่ฟ่กŒๆ—ถ้•ฟ'), default=0)\n\n class Meta:\n db_table = 'dx_taskstatus'\n ordering = ['-create_time']\n verbose_name = _('ไปปๅŠก็Šถๆ€')\n verbose_name_plural = verbose_name\n\n def __str__(self):\n return self.name\n\n\n" }, { "alpha_fraction": 0.5949689745903015, "alphanum_fraction": 0.59908527135849, "avg_line_length": 32.12770462036133, "blob_id": "ab30bf1e4d2c524cd6b0cdaeb53071317a70317e", "content_id": "22e6981f5b5b1b2b37fa0aedbaa8bc7884079bce", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 16529, "license_type": "no_license", "max_line_length": 115, "num_lines": 462, "path": "/dataxweb/backend/admin.py", "repo_name": "dengxiaohu520/DataXWeb", "src_encoding": "UTF-8", "text": "from backend import models\nfrom django.contrib import admin\nfrom django.contrib.auth.admin import UserAdmin\nfrom django.contrib.admin import SimpleListFilter\nfrom django.contrib.admin.models import LogEntry\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.http import HttpResponse, HttpResponseRedirect\nfrom django.core import serializers\nfrom django.contrib.contenttypes.models import ContentType\n\n# Register your models here.\nadmin.site.index_title = _('ๆฌข่ฟŽไฝฟ็”จDataX็ฎก็†็ณป็ปŸ')\nadmin.site.site_title = _('DataX็ฎก็†็ณป็ปŸ')\nadmin.site.site_header = _('DataX็ฎก็†็ณป็ปŸ')\n\n\n# ็ผ–ๅ†™ๅฏ็”จไบŽๆ•ดไธชadmin็ซ™็‚น็š„action\n# admin.site.add_action(export_selected_objects)\n\n# ็ฆ็”จๅ…จ็ซ™็บงๅˆซ็š„ acitons\n# ็ฆ็”จๅ†…็ฝฎ็š„ๅˆ ้™คๆ–นๆณ•\n# admin.site.disable_action('delete_selected')\n\n# ๆ˜ฏๅฆๅฏ็”จ่ฟ‡ๆปค\nclass IsEnableFilter(SimpleListFilter):\n title = 'ๆ˜ฏๅฆๅฏ็”จ'\n parameter_name = 'is_enable'\n\n def lookups(self, request, model_admin):\n return [(1, 'ๆ˜ฏ'), (0, 'ๅฆ')]\n\n def queryset(self, request, queryset):\n # pdb.set_trace()\n if self.value() == '1':\n return queryset.filter(is_enable=True)\n elif self.value() == '0':\n return queryset.filter(is_enable=False)\n else:\n return queryset.filter()\n\n\n# ๆ˜ฏๅฆ้ข†ๅ–\nclass IsGetFilter(SimpleListFilter):\n title = 'ๆ˜ฏๅฆ้ข†ๅ–'\n parameter_name = 'is_get'\n\n def lookups(self, request, model_admin):\n return [(1, 'ๅทฒ้ข†ๅ–'), (0, 'ๆœช้ข†ๅ–')]\n\n def queryset(self, request, queryset):\n # pdb.set_trace()\n if self.value() == '1':\n return queryset.filter(is_get=True)\n elif self.value() == '0':\n return queryset.filter(is_get=False)\n else:\n return queryset.filter()\n\n\n# ๆ˜ฏๅฆๅทฒ้€š็Ÿฅ\nclass IsInformFilter(SimpleListFilter):\n title = 'ๆ˜ฏๅฆ้€š็Ÿฅ'\n parameter_name = 'is_inform'\n\n def lookups(self, request, model_admin):\n return [(1, 'ๅทฒ้€š็Ÿฅ'), (0, 'ๆœช้€š็Ÿฅ')]\n\n def queryset(self, request, queryset):\n # pdb.set_trace()\n if self.value() == '1':\n return queryset.filter(is_inform=True)\n elif self.value() == '0':\n return queryset.filter(is_inform=False)\n else:\n return queryset.filter()\n\n\n# ๆ˜ฏๅฆๆŽจ่\nclass IsRecommandFilter(SimpleListFilter):\n title = 'ๆ˜ฏๅฆๆŽจ่'\n parameter_name = 'is_recommand'\n\n def lookups(self, request, model_admin):\n return [(1, 'ๆ˜ฏ'), (0, 'ๅฆ')]\n\n def queryset(self, request, queryset):\n # pdb.set_trace()\n if self.value() == '1':\n return queryset.filter(is_recommand=True)\n elif self.value() == '0':\n return queryset.filter(is_recommand=False)\n else:\n 
return queryset.filter()\n\n\n# Superuser status filter\nclass IsSuperUserFilter(SimpleListFilter):\n title = '่ถ…็บง็”จๆˆท็Šถๆ€'\n parameter_name = 'is_superuser'\n\n def lookups(self, request, model_admin):\n return [(1, 'ๆ˜ฏ'), (0, 'ๅฆ')]\n\n def queryset(self, request, queryset):\n # pdb.set_trace()\n if self.value() == '1':\n return queryset.filter(is_superuser=True)\n elif self.value() == '0':\n return queryset.filter(is_superuser=False)\n else:\n return queryset.filter()\n\n\n# Active status filter\nclass IsActiveFilter(SimpleListFilter):\n title = 'ๆ˜ฏๅฆๆœ‰ๆ•ˆ'\n parameter_name = 'is_active'\n\n def lookups(self, request, model_admin):\n return [(1, 'ๆ˜ฏ'), (0, 'ๅฆ')]\n\n def queryset(self, request, queryset):\n # pdb.set_trace()\n if self.value() == '1':\n return queryset.filter(is_active=True)\n elif self.value() == '0':\n return queryset.filter(is_active=False)\n else:\n return queryset.filter()\n\n\n# admin here\n# Administrators\n@admin.register(models.DataXUserProfile)\nclass DataXUserProfileAdmin(UserAdmin):\n list_display = ('username', 'email', 'nick_name', 'first_name', 'last_name', 'qq', 'phone',\n 'is_superuser', 'is_active', )\n list_display_links = ('username', )\n list_editable = ('nick_name', 'qq', 'phone', 'is_superuser', 'is_active', )\n list_per_page = 30\n list_filter = (IsSuperUserFilter, IsActiveFilter, 'groups')\n search_fields = ('username', 'nick_name', 'first_name', 'last_name', 'email')\n ordering = ('username',)\n filter_horizontal = ('groups', 'user_permissions',)\n exclude = ('create_uid', 'create_username', 'create_time', 'operate_uid', 'operate_username', )\n\n\n# System configuration\n@admin.register(models.DataXConfig)\nclass DataXConfigAdmin(admin.ModelAdmin):\n # def get_queryset(self, request):\n # \"\"\"Let the currently logged-in user see only the records they are responsible for\"\"\"\n # qs = super(SysConfigAdmin, self).get_queryset(request)\n # if request.user.is_superuser:\n # return qs\n #\n # return qs.filter(user=models.ChfUserProfile.objects.filter(username=request.user))\n\n # def get_readonly_fields(self, request, obj=None):\n # \"\"\" Redefine this function to restrict the fields that ordinary users can modify \"\"\"\n # if request.user.is_superuser:\n # self.readonly_fields = []\n #\n # return self.readonly_fields\n #\n # readonly_fields = ('id', 'code', 'name', 'url', 'parent', 'sort', 'is_root', 'is_enable', 'is_delete')\n\n list_display = ('id', 'site_name', 'site_desc', 'site_author', 'site_company', 'address', 'telephone', 'email',\n 'icp', 'is_enable', 'qrcode')\n list_display_links = ('id', 'site_name', )\n # list_editable = ('telephone', 'is_enable', 'icp')\n list_filter = (IsEnableFilter, )\n list_per_page = 10\n exclude = ('create_uid', 'create_username', 'create_time', 'operate_uid', 'operate_username', )\n search_fields = ('site_name', 'site_author', 'site_company', 'address', 'telephone', 'email', 'icp', )\n\n\n# Navigation menu management\n@admin.register(models.DataXNav)\nclass DataXNavAdmin(admin.ModelAdmin):\n list_display = ('id', 'code', 'name', 'url', 'parent', 'sort', 'is_root', 'is_enable', 'is_delete')\n list_display_links = ('id', 'name', 'url', )\n list_editable = ('code', 'sort', 'is_enable', )\n list_filter = (IsEnableFilter, )\n list_per_page = 30\n exclude = ('create_uid', 'create_username', 'create_time', 'operate_uid', 'operate_username', )\n search_fields = ('name', 'en_name', 'url', )\n\n\n# User logs\n@admin.register(LogEntry)\nclass LogEntryAdmin(admin.ModelAdmin):\n list_display = ('object_id', 'object_repr', 'action_flag', 'user', 
'change_message', )\n\n # ๅฑ่”ฝๅขžๅŠ ๅŠŸ่ƒฝๆŒ‰้’ฎ\n def has_add_permission(self, request):\n return False\n\n # ๅฑ่”ฝๅˆ ้™คๅŠŸ่ƒฝๆŒ‰้’ฎ\n def has_delete_permission(self, request, obj=None):\n return False\n\n # ๅฑ่”ฝไฟฎๆ”นๅŠŸ่ƒฝๆŒ‰้’ฎ\n def has_change_permission(self, request, obj=None):\n return False\n\n\n# ไฝœไธš่ฐƒๅบฆ\n@admin.register(models.DataXJobScheduler)\nclass DataXJobSchedulerAdmin(admin.ModelAdmin):\n list_display = ('id', 'name', 'hostname', 'ip', 'port', 'deploy_state', 'state', 'start_time',\n 'end_time', 'duration', 'sort', 'is_enable')\n list_display_links = ('id', 'name', )\n list_editable = ('is_enable', 'hostname', 'ip', 'port', )\n list_filter = (IsEnableFilter, )\n list_per_page = 10\n search_fields = ('name', 'hostname', 'ip', 'port', )\n exclude = ('create_uid', 'create_username', 'create_time', 'operate_uid', 'operate_username',)\n\n # list_max_show_all =\n # list_per_page =\n # list_select_related =\n # change_list_template =\n # sortable_by =\n # '''ๆฏ้กตๆ˜พ็คบๆก็›ฎๆ•ฐ'''\n # list_per_page = 10\n # ๆŒ‰ๆ—ฅๆœŸ็ญ›้€‰\n # date_hierarchy = 'create_time'\n # ๆŒ‰ๅˆ›ๅปบๆ—ฅๆœŸๆŽ’ๅบ\n # ordering = ('-create_time',)\n # prepopulated_fields = {'slug': ('name',)}\n\n # class Media:\n # js = (\n # # '/static/plugins/kindeditor-4.1.10/kindeditor-all-min.js',\n # '/static/plugins/kindeditor-4.1.10/kindeditor.js',\n # '/static/plugins/kindeditor-4.1.10/lang/zh_CN.js',\n # '/static/plugins/kindeditor-4.1.10/config.js',\n # )\n\n actions = ['view_job_json', 'view_job_task', 'deploy_job', 'start_job', 'stop_job', 'view_job_log', ]\n\n def view_job_json(self, request, queryset):\n pass\n\n view_job_json.short_description = 'ๆŸฅ็œ‹ไฝœไธšjson'\n\n def deploy_job(self, request, queryset):\n # ๆ“ไฝœๅฎŒๆˆๅŽ็š„ๆ็คบไฟกๆฏ\n self.message_user(request, 'ไฝœไธš้ƒจ็ฝฒๆˆๅŠŸ๏ผ')\n\n # response = HttpResponse(content_type=\"application/json\")\n # serializers.serialize(\"json\", queryset, stream=response)\n # return response\n\n # ่Žทๅพ—่ขซๆ‰“้’ฉ็š„checkboxๅฏนๅบ”็š„ๅฏน่ฑก\n selected = request.POST.getlist(admin.ACTION_CHECKBOX_NAME)\n # ่Žทๅ–ๅฏนๅบ”็š„ๆจกๅž‹\n ct = ContentType.objects.get_for_model(queryset.model)\n # ๆž„้€ ่ฎฟ้—ฎ็š„url๏ผŒไฝฟ็”จGETๆ–นๆณ•๏ผŒ่ทณ่ฝฌๅˆฐ็›ธๅบ”็š„้กต้ข\n return HttpResponseRedirect(\"/export/?ct=%s&ids=%s\" % (ct.pk, \",\".join(selected)))\n\n deploy_job.short_description = '้ƒจ็ฝฒไฝœไธš'\n\n def view_job_task(self, request, queryset):\n pass\n\n view_job_task.short_description = 'ๆŸฅ็œ‹ไฝœไธšไปปๅŠก'\n\n def start_job(self, request, queryset):\n # ๆ“ไฝœๅฎŒๆˆๅŽ็š„ๆ็คบไฟกๆฏ\n self.message_user(request, 'ไฝœไธšๅทฒๅฏๅŠจ๏ผ')\n\n start_job.short_description = 'ๅฏๅŠจไฝœไธš'\n\n def stop_job(self, request, queryset):\n # ๆ“ไฝœๅฎŒๆˆๅŽ็š„ๆ็คบไฟกๆฏ\n self.message_user(request, 'ไฝœไธšๅทฒๅœๆญข๏ผ')\n\n stop_job.short_description = 'ๅœๆญขไฝœไธš'\n\n def view_job_log(self, request, queryset):\n pass\n\n view_job_log.short_description = 'ๆŸฅ็œ‹ไฝœไธšๆ—ฅๅฟ—'\n\n\n# ไฝœไธšไปปๅŠก\n@admin.register(models.DataXTask)\nclass DataXTaskAdmin(admin.ModelAdmin):\n list_display = ('id', 'name', 'from_dbtype', 'from_hostname', 'from_port', 'from_username', 'from_password',\n 'from_db_name', 'from_table_name', 'from_columns', 'from_where', 'from_character',\n 'to_dbtype', 'to_hostname', 'to_port', 'to_username', 'to_password',\n 'to_db_name', 'to_table_name', 'to_columns', 'to_pre_sql', 'to_post_sql', 'to_character',\n 'to_session', 'to_write_mode', 'task_speed_channel', 'task_error_limit_record',\n 'task_error_limit_percentage', 'sort', 'is_enable')\n 
list_display_links = ('id', 'name',)\n list_editable = ('is_enable',)\n list_filter = (IsEnableFilter, )\n list_per_page = 10\n # list_max_show_all = 200\n # paginator = Paginator\n preserve_filters = True\n # Action้€‰้กน้ƒฝๆ˜ฏๅœจ้กต้ขไธŠๆ–นๆ˜พ็คบ\n # actions_on_top = True\n # Action้€‰้กน้ƒฝๆ˜ฏๅœจ้กต้ขไธ‹ๆ–นๆ˜พ็คบ\n # actions_on_bottom = False\n # ๆ˜ฏๅฆๆ˜พ็คบ้€‰ๆ‹ฉไธชๆ•ฐ\n # actions_selection_counter = True\n save_on_top = True\n # save_as = False\n # save_as_continue = False\n search_fields = (\n 'name', 'from_dbtype', 'from_hostname', 'from_port', 'from_username',\n 'from_db_name', 'from_table_name', 'from_columns', 'from_where',\n 'to_dbtype', 'to_hostname', 'to_port', 'to_username',\n 'to_db_name', 'to_table_name', 'to_columns', 'to_pre_sql', 'to_post_sql',\n )\n fieldsets = (\n ('ๆฅๆบ', {\n # 'classes': ('collapse', ),\n 'fields': [\n 'from_dbtype', 'from_hostname', 'from_port', 'from_username', 'from_password',\n 'from_db_name', 'from_table_name', 'from_columns', 'from_where', 'from_character',\n ],\n }),\n ('็›ฎๆ ‡', {\n # 'classes': ('collapse', 'wide', 'extrapretty'),\n 'fields': [\n 'to_dbtype', 'to_hostname', 'to_port', 'to_username', 'to_password',\n 'to_db_name', 'to_table_name', 'to_columns', 'to_pre_sql', 'to_post_sql', 'to_character',\n 'to_session', 'to_write_mode',\n ],\n }),\n ('ๅ…ถไป–', {\n 'fields': [\n 'task_speed_channel', 'task_error_limit_record',\n 'task_error_limit_percentage', 'sort', 'is_enable'\n ],\n }),\n )\n # date_hierarchy = 'create_time' # ่ฏฆ็ป†ๆ—ถ้—ดๅˆ†ๅฑ‚็ญ›้€‰\n\n # ๅช่ฏปๅญ—ๆฎต\n # readonly_fields = (, )\n\n # # ๆทปๅŠ ๆ•ฐๆฎๆจกๆฟ้กต\n # add_form_template = None\n # # ไฟฎๆ”นๆ•ฐๆฎ็š„ๆจกๆฟ้กต\n # change_form_template = None\n # # ไฟฎๆ”นๅคšๆกๆ•ฐๆฎ็š„ๆจกๆฟ้กต\n # change_list_template = None\n # # ๅˆ ้™ค็กฎ่ฎคไฟกๆฏๆจกๆฟ้กต\n # delete_confirmation_template = None\n # # ๅˆ ้™คๅ…ณ่”ๆ•ฐๆฎ็š„็กฎ่ฎค้กต\n # delete_selected_confirmation_template = None\n # # ไฟฎๆ”นๅŽ†ๅฒ็š„ๆจกๆฟ้กต\n # object_history_template = None\n\n # ๅผนๅ‡บๆก†ๆจกๆฟ้กต\n popup_response_template = None\n\n exclude = ('create_uid', 'create_username', 'create_time', 'operate_uid', 'operate_username',)\n actions = ['view_task_json', 'start_task', 'stop_task', 'view_task_log', 'export_as_json', ]\n # ๆŽ’ๅบ\n # ordering = ('-id',)\n # def get_ordering(self, request):\n # return ['-id', ]\n\n def save_model(self, request, obj, form, change):\n print('*' * 100)\n print(obj.name, obj.from_dbtype, obj.from_hostname, obj.from_port, obj.from_username, obj.from_password)\n print(obj.name, obj.to_dbtype, obj.to_hostname, obj.to_port, obj.to_username, obj.to_password)\n print('*' * 100)\n\n if change:\n pass\n # ไฟฎๆ”น\n # ไฟฎๆ”นๅฏนๅบ”็š„jsonๆจกๆฟๆ–‡ไปถ\n else:\n pass\n # ๆ–ฐๅขž\n # ๆ–ฐๅขžๅฏนๅบ”็š„jsonๆจกๆฟๆ–‡ไปถ\n\n super(DataXTaskAdmin, self).save_model(request, obj, form, change)\n\n def delete_model(self, request, obj):\n # ๅœๆญขไปปๅŠก\n super(DataXTaskAdmin, self).delete_model(request, obj)\n\n def view_task_json(self, request, queryset):\n pass\n\n view_task_json.short_description = 'ๆŸฅ็œ‹ไปปๅŠกjson'\n\n # ๅฎšๅˆถAction่กŒไธบๅ…ทไฝ“ๆ–นๆณ•\n def start_task(self, request, queryset):\n print(self, request, queryset)\n print(request.POST.getlist('_selected_action'))\n print(request, queryset)\n # queryset.update(status='published')\n # ๆ“ไฝœๅฎŒๆˆๅŽ็š„ๆ็คบไฟกๆฏ\n self.message_user(request, 'ไปปๅŠกๅทฒๅฏๅŠจ๏ผ')\n\n start_task.short_description = \"ๅฏๅŠจไปปๅŠก\"\n\n def stop_task(self, request, queryset):\n print(self, request, queryset)\n 
print(request.POST.getlist('_selected_action'))\n print(request, queryset)\n # queryset.update(status='published')\n # ๆ“ไฝœๅฎŒๆˆๅŽ็š„ๆ็คบไฟกๆฏ\n self.message_user(request, 'ไปปๅŠกๅทฒๅœๆญข๏ผ')\n\n stop_task.short_description = \"ๅœๆญขไปปๅŠก\"\n\n def view_task_log(self, request, queryset):\n print(self, request, queryset)\n print(request.POST.getlist('_selected_action'))\n print(request, queryset)\n # queryset.update(status='published')\n # ๆ“ไฝœๅฎŒๆˆๅŽ็š„ๆ็คบไฟกๆฏ\n # self.message_user(request, 'ไปปๅŠกๅทฒๅœๆญข๏ผ')\n\n view_task_log.short_description = \"ๆŸฅ็œ‹ไปปๅŠกๆ—ฅๅฟ—\"\n\n def export_as_json(self, request, queryset):\n response = HttpResponse(content_type=\"application/json\")\n serializers.serialize(\"json\", queryset, stream=response)\n return response\n\n export_as_json.short_description = 'ๅฏผๅ‡บjson'\n\n # def export_selected_objects(self, request, queryset):\n # # ่Žทๅพ—่ขซๆ‰“้’ฉ็š„checkboxๅฏนๅบ”็š„ๅฏน่ฑก\n # selected = request.POST.getlist(admin.ACTION_CHECKBOX_NAME)\n # # ่Žทๅ–ๅฏนๅบ”็š„ๆจกๅž‹\n # ct = ContentType.objects.get_for_model(queryset.model)\n # # ๆž„้€ ่ฎฟ้—ฎ็š„url๏ผŒไฝฟ็”จGETๆ–นๆณ•๏ผŒ่ทณ่ฝฌๅˆฐ็›ธๅบ”็š„้กต้ข\n # return HttpResponseRedirect(\"/export/?ct=%s&ids=%s\" % (ct.pk, \",\".join(selected)))\n\n\n@admin.register(models.DataXTaskStatus)\nclass DataXTaskStatusAdmin(admin.ModelAdmin):\n list_display = ('id', 'name', 'state', 'start_time', 'end_time', 'duration')\n list_display_links = ('id', 'name')\n # list_filter = ()\n list_per_page = 10\n search_fields = ('task', 'name', )\n exclude = ('create_uid', 'create_username', 'create_time', 'operate_uid', 'operate_username')\n\n # ๅฑ่”ฝๅขžๅŠ ๅŠŸ่ƒฝๆŒ‰้’ฎ\n def has_add_permission(self, request):\n return False\n\n # ๅฑ่”ฝๅˆ ้™คๅŠŸ่ƒฝๆŒ‰้’ฎ\n def has_delete_permission(self, request, obj=None):\n return False\n\n # ๅฑ่”ฝไฟฎๆ”นๅŠŸ่ƒฝๆŒ‰้’ฎ\n def has_change_permission(self, request, obj=None):\n return False\n" }, { "alpha_fraction": 0.5488069653511047, "alphanum_fraction": 0.6160520315170288, "avg_line_length": 24.61111068725586, "blob_id": "b8205c4a1e9e7fcdd7ffe643be69033ff74735db", "content_id": "582261caf78578ceb9b7fd4f3ed758a4da7bdded", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 469, "license_type": "no_license", "max_line_length": 118, "num_lines": 18, "path": "/dataxweb/backend/migrations/0004_auto_20191128_1613.py", "repo_name": "dengxiaohu520/DataXWeb", "src_encoding": "UTF-8", "text": "# Generated by Django 2.2.7 on 2019-11-28 08:13\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('backend', '0003_auto_20191128_1428'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='dataxtaskstatus',\n name='task',\n field=models.ManyToManyField(null=True, related_name='Task', to='backend.DataXTask', verbose_name='ไปปๅŠก็Šถๆ€'),\n ),\n ]\n" }, { "alpha_fraction": 0.5565042495727539, "alphanum_fraction": 0.5846308469772339, "avg_line_length": 35.87036895751953, "blob_id": "497501b931876a3a15ad38f5d7732184de9ad813", "content_id": "361ba38ee5f4d55ace321b632199fa7440b81eaf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2127, "license_type": "no_license", "max_line_length": 143, "num_lines": 54, "path": "/dataxweb/backend/migrations/0005_auto_20191129_2120.py", "repo_name": "dengxiaohu520/DataXWeb", "src_encoding": "UTF-8", "text": "# Generated by Django 2.2.7 on 2019-11-29 13:20\n\nfrom django.db import 
migrations, models\nimport django.utils.timezone\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('backend', '0004_auto_20191128_1613'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='dataxjobscheduler',\n name='deploy_state',\n field=models.PositiveSmallIntegerField(choices=[(0, 'ๆœช้ƒจ็ฝฒ'), (1, 'ๅทฒ้ƒจ็ฝฒ')], default=0, verbose_name='ไฝœไธš้ƒจ็ฝฒ็Šถๆ€'),\n ),\n migrations.AddField(\n model_name='dataxjobscheduler',\n name='duration',\n field=models.IntegerField(default=0, verbose_name='่ฟ่กŒๆ—ถ้•ฟ'),\n ),\n migrations.AddField(\n model_name='dataxjobscheduler',\n name='end_time',\n field=models.DateTimeField(default=django.utils.timezone.now, verbose_name='ไฝœไธš็ป“ๆŸๆ—ถ้—ด'),\n ),\n migrations.AddField(\n model_name='dataxjobscheduler',\n name='hostname',\n field=models.CharField(blank=True, default='', max_length=100, null=True, verbose_name='ๆ‰ง่กŒไฝœไธšๆœบๅ™จๅ็งฐ'),\n ),\n migrations.AddField(\n model_name='dataxjobscheduler',\n name='ip',\n field=models.CharField(default='127.0.0.1', max_length=100, verbose_name='ๆ‰ง่กŒไฝœไธšๆœบๅ™จip'),\n ),\n migrations.AddField(\n model_name='dataxjobscheduler',\n name='port',\n field=models.PositiveSmallIntegerField(default=9999, verbose_name='ๆ‰ง่กŒไฝœไธšๆœบๅ™จ็ซฏๅฃ'),\n ),\n migrations.AddField(\n model_name='dataxjobscheduler',\n name='start_time',\n field=models.DateTimeField(default=django.utils.timezone.now, verbose_name='ไฝœไธšๅผ€ๅง‹ๆ—ถ้—ด'),\n ),\n migrations.AddField(\n model_name='dataxjobscheduler',\n name='state',\n field=models.PositiveSmallIntegerField(choices=[(0, 'ๆœช่ฟ่กŒ'), (1, '่ฟ่กŒไธญ'), (2, '็ป“ๆŸ'), (3, 'ๅผ‚ๅธธ็ปˆๆญข')], default=0, verbose_name='ไฝœไธš่ฟ่กŒ็Šถๆ€'),\n ),\n ]\n" }, { "alpha_fraction": 0.5778430700302124, "alphanum_fraction": 0.5992446541786194, "avg_line_length": 51.9555549621582, "blob_id": "9c8ef0339f9a10f4e93f91c522a4e8d6f0e7d316", "content_id": "806b4666f97a482ae36751bddb6de4e5c685dce9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2525, "license_type": "no_license", "max_line_length": 176, "num_lines": 45, "path": "/dataxweb/backend/migrations/0002_auto_20191128_1136.py", "repo_name": "dengxiaohu520/DataXWeb", "src_encoding": "UTF-8", "text": "# Generated by Django 2.2.7 on 2019-11-28 03:36\n\nfrom django.db import migrations, models\nimport django.db.models.deletion\nimport django.utils.timezone\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('backend', '0001_initial'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='dataxtask',\n name='scheduler',\n field=models.OneToOneField(default='1', on_delete=django.db.models.deletion.CASCADE, related_name='Scheduler', to='backend.DataXJobScheduler', verbose_name='ไฝœไธšไปปๅŠก'),\n preserve_default=False,\n ),\n migrations.CreateModel(\n name='DataXTaskStatus',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('operate_username', models.CharField(auto_created=True, default='admin', max_length=30, verbose_name='ๆ“ไฝœไบบๅ็งฐ')),\n ('operate_uid', models.IntegerField(auto_created=True, default=123456789, verbose_name='ๆ“ไฝœไบบID')),\n ('create_username', models.CharField(auto_created=True, default='admin', max_length=30, verbose_name='ๅˆ›ๅปบไบบๅ็งฐ')),\n ('create_uid', models.IntegerField(auto_created=True, default=123456789, verbose_name='ๅˆ›ๅปบไบบID')),\n ('create_time', models.DateTimeField(default=django.utils.timezone.now, verbose_name='ๅˆ›ๅปบๆ—ถ้—ด')),\n 
('operate_time', models.DateTimeField(auto_now=True, verbose_name='ๆ“ไฝœๆ—ถ้—ด')),\n ('name', models.CharField(max_length=200, verbose_name='ไปปๅŠกๅ็งฐ')),\n ('state', models.PositiveSmallIntegerField(choices=[(0, 'ๆœช่ฟ่กŒ'), (1, '่ฟ่กŒไธญ'), (2, 'ๅทฒๅฎŒๆˆ'), (3, '็ปˆๆญข')], default=0, verbose_name='ไปปๅŠก็Šถๆ€')),\n ('start_time', models.DateTimeField(default=django.utils.timezone.now, verbose_name='ๅผ€ๅง‹ๆ—ถ้—ด')),\n ('end_time', models.DateTimeField(default=django.utils.timezone.now, verbose_name='็ป“ๆŸๆ—ถ้—ด')),\n ('duration', models.IntegerField(default=0, verbose_name='่ฟ่กŒๆ—ถ้•ฟ')),\n ('task', models.ManyToManyField(related_name='Task', to='backend.DataXTask', verbose_name='ไปปๅŠก็Šถๆ€')),\n ],\n options={\n 'verbose_name': 'ไปปๅŠก็Šถๆ€',\n 'verbose_name_plural': 'ไปปๅŠก็Šถๆ€',\n 'db_table': 'dx_taskstatus',\n 'ordering': ['-create_time'],\n },\n ),\n ]\n" }, { "alpha_fraction": 0.5539728403091431, "alphanum_fraction": 0.5824615955352783, "avg_line_length": 26.90062141418457, "blob_id": "c10c8bc1d5e279dbf2e145d9f9f93c7b074ce89e", "content_id": "654685f1a20517dbdf5a1cdae55b5d742a954550", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 6335, "license_type": "no_license", "max_line_length": 161, "num_lines": 161, "path": "/README.md", "repo_name": "dengxiaohu520/DataXWeb", "src_encoding": "UTF-8", "text": "# DataXWeb\n\nDataXWebๅŸบไบŽDataX,DataX ๆ˜ฏ้˜ฟ้‡Œๅทดๅทด้›†ๅ›ขๅ†…่ขซๅนฟๆณ›ไฝฟ็”จ็š„็ฆป็บฟๆ•ฐๆฎๅŒๆญฅๅทฅๅ…ท/ๅนณๅฐ๏ผŒๅฎž็ŽฐๅŒ…ๆ‹ฌ MySQLใ€Oracleใ€SqlServerใ€Postgreใ€HDFSใ€Hiveใ€ADSใ€HBaseใ€TableStore(OTS)ใ€MaxCompute(ODPS)ใ€DRDS ็ญ‰ๅ„็งๅผ‚ๆž„ๆ•ฐๆฎๆบไน‹้—ด้ซ˜ๆ•ˆ็š„ๆ•ฐๆฎๅŒๆญฅๅŠŸ่ƒฝ\n\n\n### ่ฎกๅˆ’\n1. ไปปๅŠก่ฐƒๅบฆ\n2. ่ฟ่กŒ็›‘ๆŽง\n3. ๆจกๆฟๅผ•ๆ“Ž\n4. ๅฎžๆ—ถๆ—ฅๅฟ—\n5. ๅคš็ฑปๅž‹ๆ•ฐๆฎๅบ“ๆ”ฏๆŒ\n6. ๅฎšๆ—ถไปปๅŠก(ๅ…จ้‡|ๅขž้‡)\n7. ๆŠฅ่ญฆ่ฎพ็ฝฎ\n8. ้‚ฎไปถ้…็ฝฎ\n9. ็”จๆˆท็ฎก็†\n10. ๆƒ้™็ฎก็†\n11. ไฝฟ็”จๆ–‡ๆกฃ่ฏดๆ˜Ž\n12. ๆ•ฐๆฎๅบ“sqlๆ‰ง่กŒ\n13. ็ปŸ่ฎกๅˆ†ๆžๅŠๆŠฅ่กจ\n14. ๆ™บ่ƒฝAI\n15. Http Server\n16. ๅ•ๆœบๅคš็บฟ็จ‹่ฟ่กŒ\n17. ๅ•ๆœบๅคš่ฟ›็จ‹่ฟ่กŒ\n18. ๅˆ†ๅธƒๅผ่ฟ่กŒ\n19. ๆททๅˆๆจกๅผ่ฟ่กŒ๏ผˆYarn+ๅคš่ฟ›็จ‹ๆจกๅผ่ฟ่กŒ๏ผ‰\n20. ่‡ชๅŠจไผธ็ผฉ่ฟ่กŒ\n21. ่ดŸ่ฝฝๅ‡่กกๅŠไปปๅŠก้”ๆœบๅˆถ\n22. Mysqlๆ•ฐๆฎๅบ“ๅญ˜ๆ”พๅบ”็”จๆ•ฐๆฎ\n23. ็ฝ‘้กต็ซฏไฟฎๆ”นๅนถๆŒไน…ๅŒ–job้…็ฝฎ็š„jsonๅˆฐๆ•ฐๆฎๅบ“\n24. ็ฝ‘้กต็ซฏๅฎžๆ—ถๆŸฅ็œ‹ๆŠฝๅ–ๆ—ฅๅฟ—๏ผŒ็ฑปไผผJenkins็š„ๆ—ฅๅฟ—ๆŽงๅˆถๅฐ่พ“ๅ‡บๅŠŸ่ƒฝ\n25. job่ฟ่กŒ่ฎฐๅฝ•ๅฑ•็คบ๏ผŒ้กต้ขๆ“ไฝœๅœๆญขdataxไฝœไธš๏ผˆๅผ€ๅ‘ไธญ๏ผ‰\n26. ็ฝ‘้กต็ซฏๅ„็ง่ฏปๅ†™ๆ’ไปถๆจกๆฟ็”Ÿๆˆ๏ผŒๅฏไปฅๅœจ้กต้ข็ป„่ฃ…ไฝฟ็”จ\n27. 
ๅฎž็Žฐ้ƒจๅˆ†ๅ†™ๆ’ไปถๆ”ฏๆŒ่‡ชๅŠจๅปบ่กจๅŠŸ่ƒฝ\n\n### ็Žฏๅขƒ\n\n\n### ๅฎ‰่ฃ…\n\n\n### ๆ“ไฝœๆญฅ้ชค\n\n+ ๅฏๅŠจ\ngunicorn -b 0.0.0.0:5001 -D run:app\n\n### ไธพไธชๆ —ๅญ\n\n```\nadmin/ [name='index']\nadmin/ login/ [name='login']\nadmin/ logout/ [name='logout']\nadmin/ password_change/ [name='password_change']\nadmin/ password_change/done/ [name='password_change_done']\nadmin/ jsi18n/ [name='jsi18n']\nadmin/ r/<int:content_type_id>/<path:object_id>/ [name='view_on_site']\nadmin/ auth/group/\nadmin/ backend/dataxuserprofile/\nadmin/ backend/dataxconfig/\nadmin/ backend/dataxnav/\nadmin/ admin/logentry/\nadmin/ backend/dataxjobscheduler/\nadmin/ backend/dataxtask/\nadmin/ ^(?P<app_label>auth|backend|admin)/$ [name='app_list']\n```\n\n```\nๅขž้‡ๅŒๆญฅๅฎž็Žฐ\nๅฎž็Žฐๅขž้‡ๅŒๆญฅ้œ€่ฆๅœจ่กจไธญๅขžๅŠ ไธ€ไธชๆ—ถ้—ดๆˆณๅญ—ๆฎต๏ผŒๅฆ‚update_time๏ผŒๅœจๅŒๆญฅ้…็ฝฎๆ–‡ไปถไธญ๏ผŒ้€š่ฟ‡whereๆกไปถ๏ผŒๆ นๆฎๆ—ถ้—ดๆˆณๅญ—ๆฎต็ญ›้€‰ๅฝ“ๅ‰ๆ—ถ้—ดๅ‘ๅ‰ไธ€ๆฎตๆ—ถ้—ดๅ†…็š„ๅขž้‡ๆ•ฐๆฎใ€‚\n\njsonๆ–‡ไปถไธญ๏ผŒ${start_time}ๅ’Œ${end_time}ไธบ่ฐƒ็”จdatax.pyๆ—ถไผ ๅ…ฅ็š„ๅ‚ๆ•ฐ\ndatax/bin/datax.py ../../mysql2mysql.json -p \"-Dstart_time=1546337137 -Dend_time=1546337237\"\n\n{\n \"job\": {\n \"content\": [\n {\n \"reader\": {\n \"name\": \"mysqlreader\", \n \"parameter\": {\n \"column\": [\n \"doc_id\",\"title\",\"file_path\",\"approval_id\",\"page_count\",\"version\"\n ], \n \"connection\": [\n {\n \"jdbcUrl\": [\"jdbc:mysql://192.168.81.1:3306/bootdo?useUnicode=true&characterEncoding=utf8\"], \n \"table\": [\"es_approval_doc\"]\n }\n ], \n \"password\": \"123456\", \n \"username\": \"root\",\n \"where\": \"version > FROM_UNIXTIME(${start_time}) and version < FROM_UNIXTIME(${end_time})\",\n }\n }, \n \"writer\": {\n \"name\": \"mysqlwriter\", \n \"parameter\": {\n \"column\": [\n \"doc_id\",\"title\",\"file_path\",\"approval_id\",\"page_count\",\"version\"\n ], \n \"writeMode\":\"update\",\n \"connection\": [\n {\n \"jdbcUrl\": \"jdbc:mysql://192.168.81.1:3306/bootdo?useUnicode=true&characterEncoding=utf8\", \n \"table\": [\"es_approval_doc_copy\"]\n }\n ], \n \"password\": \"123456\", \n \"username\": \"root\"\n }\n }\n }\n ], \n \"setting\": {\n \"speed\": {\n \"channel\": \"1\"\n }\n }\n }\n}\n\nๅฎšๆ—ถๅŒๆญฅๅฎž็Žฐ\nๅฎšๆ—ถๅŒๆญฅๅฏไปฅ้‡‡็”จๆ“ไฝœ็ณป็ปŸ็š„ๅฎšๆ—ถไปปๅŠก+shell่„šๆœฌๅฎž็Žฐใ€‚ไปฅไธ‹ไธบๅœจlinux็ณป็ปŸไธญ็š„ๆ–นๆกˆ๏ผš\n\n1ใ€็ผ–ๅ†™shell่„šๆœฌ๏ผŒๅ‘ฝๅไธบsyntask.sh๏ผš\n#!/bin/bash\n# source /etc/profile\n# ๆˆช่‡ณๆ—ถ้—ด่ฎพ็ฝฎไธบๅฝ“ๅ‰ๆ—ถ้—ดๆˆณ\nend_time=$(date +%s)\n# ๅผ€ๅง‹ๆ—ถ้—ด่ฎพ็ฝฎไธบ120sๅ‰ๆ—ถ้—ดๆˆณ\nstart_time=$(($end_time - 120))\n# datax/bin/datax.py ../../mysql2mysql.json -p \"-Dstart_time=$start_time -Dend_time=$end_time\"\n่ฟ™้‡Œ้€š่ฟ‡่„šๆœฌ่Žทๅ–็”จไบŽ็ญ›้€‰ๆกไปถไธญ็š„ๅผ€ๅง‹ๆ—ถ้—ดstart_timeๅ’Œ็ป“ๆŸๆ—ถ้—ดend_time๏ผŒๅฐ†ไธคไธชๆ—ถ้—ดไฝœไธบๅ‚ๆ•ฐไผ ็ป™datax.pyใ€‚\n\n2ใ€ๅœจcrontabไธญ๏ผŒๆทปๅŠ ไปปๅŠก่ฎกๅˆ’๏ผš\n$crontab -e\n* */1 * * * /syntask.sh\nDataXไธ้€‚ๅˆๅฎžๆ—ถๆ•ฐๆฎๅŒๆญฅๆˆ–ๅคช้ข‘็น็š„ๅฎšๆ—ถๅŒๆญฅ๏ผŒๅ› ไธบๅŒๆญฅ้ƒฝ้œ€่ฆๅŽป่ฏปๅ–ๆบ่กจ๏ผŒ้ข‘็Ž‡่ฟ‡ๅคงๅฏนๆบ่กจไผš้€ ๆˆๅŽ‹ๅŠ›ใ€‚\nๆญคๅค–๏ผŒๆœ€ๅฅฝๆฏๆฌกๅขž้‡ๅŒๆญฅ็š„ๆ—ถ้—ดๆฎตๆฏ”ๅฎšๆ—ถไปปๅŠกๆ—ถ้—ด้—ด้š”ๅคงไธ€ไบ›๏ผŒไปฅไฟ่ฏๆ‰€ๆœ‰ๆ—ถ้—ดไบง็”Ÿ็š„ๆ•ฐๆฎ้ƒฝ่ขซ่ฆ†็›–ๅˆฐใ€‚\n\nๅผ‚ๅธธๆƒ…ๅ†ตไธ‹็š„่กฅๆ•‘ๆŽชๆ–ฝ๏ผš\nๅฆ‚ๆžœๆŸๆฎตๆ—ถ้—ดๅ†…็”ฑไบŽๆœๅŠกๅ™จใ€ๆ“ไฝœ็ณป็ปŸใ€็ฝ‘็ปœ็ญ‰ๅŽŸๅ› ้€ ๆˆๆŸไธชๆ—ถ้—ดๆฎตๅ†…ๆ•ฐๆฎๆฒกๆœ‰ๆญฃๅธธๅŒๆญฅ๏ผŒ้‚ฃไนˆๅฏไปฅ้€š่ฟ‡ๆ‰‹ๅŠจๆ‰ง่กŒๅŒๆญฅ็š„ๆ–นๅผ่ฟ›่กŒ่กฅๆ•‘๏ผŒๆ‰ง่กŒๅŒๆญฅๆ—ถ๏ผŒๅฐ†็ญ›้€‰็š„ๆ—ถ้—ดๆฎตๅŠ 
ๅคงๅคง่ฆ†็›–ๅผ‚ๅธธๅ‘็”Ÿ็š„ๆ•ดไธชๆ—ถ้—ดๆฎตใ€‚\n\n\nๅคš่กจๅŒๆญฅๅฎž็Žฐ\n้€šๅธธๆˆ‘ไปฌ็š„ไธšๅŠก็ณป็ปŸๅญ˜ๅœจๆœ‰ๅคšไธช่กจ๏ผŒ่กจไน‹้—ดๆœ‰ๅค–้”ฎๅ…ณ็ณปใ€‚ไธบๅฎž็Žฐๅคš่กจ็š„ๆ•ฐๆฎๅŒๆญฅ๏ผŒๆˆ‘ไปฌ้œ€่ฆ็†ๆธ…ๅค–้”ฎไพ่ต–ๅ…ณ็ณป๏ผŒไธบๆฏไธช่กจๅˆ†ๅˆซ็ผ–ๅ†™jsonๅŒๆญฅ้…็ฝฎๆ–‡ไปถ๏ผŒๅนถๆŒ‰ๅค–้”ฎไพ่ต–ๅ…ณ็ณป้€ไธช่ฐƒ็”จdatax.pyใ€‚\nๅฆ‚ๅฏนไบŽไธป่กจes_approvalๅ’Œๅญ่กจes_approval_doc๏ผŒๅฏไปฅๅฏนๅบ”ๅ†™ไธคไธชjson้…็ฝฎๆ–‡ไปถ๏ผšmysql2mysql-approval.jsonๅ’Œmysql2mysql-approval-doc.json๏ผŒๅœจsyntask.shไธญๅ…ˆ่ฐƒ็”จไธป่กจ้…็ฝฎๆ–‡ไปถ๏ผŒๅ†่ฐƒ็”จๅญ่กจ้…็ฝฎๆ–‡ไปถใ€‚\n\n#!/bin/bash\nsource /etc/profile\n# ๆˆช่‡ณๆ—ถ้—ด่ฎพ็ฝฎไธบๅฝ“ๅ‰ๆ—ถ้—ดๆˆณ\nend_time=$(date +%s)\n# ๅผ€ๅง‹ๆ—ถ้—ด่ฎพ็ฝฎไธบ120sๅ‰ๆ—ถ้—ดๆˆณ\nstart_time=$(($end_time - 3600))\n/datax/bin/datax.py /mysql2mysql-approval.json -p \"-Dstart_time=$start_time -Dend_time=$end_time\" \n/datax/bin/datax.py /mysql2mysql-approval-doc.json -p \"-Dstart_time=$start_time -Dend_time=$end_time\"\n\n\nๅคš็บงๅคš่ทฏๅŒๆญฅ\n่ฆๅฎž็Žฐๅคš็บงๅŒๆญฅ๏ผŒๅฏไปฅๅœจๆฏไธค็บงไน‹้—ดๆญๅปบไธ€ไธชdataxๅฎžไพ‹ๅฎž็Žฐ่ฟ™ไธค็บงไน‹้—ด็š„ๆ•ฐๆฎๅŒๆญฅใ€‚\n่ฆๅฎž็Žฐๅคš่ทฏๅŒๆญฅ๏ผŒๅฏไปฅไธบๅŒไธ€ไธช่กจ็ผ–ๅ†™ๅคšไธช้…็ฝฎๆ–‡ไปถ๏ผŒๅ‘ๅคšไธช็›ฎๆ ‡ๅบ“ๅŒๆญฅใ€‚\n```\n\n" }, { "alpha_fraction": 0.5578168630599976, "alphanum_fraction": 0.586493968963623, "avg_line_length": 31.75757598876953, "blob_id": "173d31bb6fcc082d267ae5810ba2d35ab925ccf2", "content_id": "b8e74f34756ec0ec412514b2de1b54c452fad3e0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1121, "license_type": "no_license", "max_line_length": 97, "num_lines": 33, "path": "/dataxweb/backend/migrations/0006_auto_20191129_2123.py", "repo_name": "dengxiaohu520/DataXWeb", "src_encoding": "UTF-8", "text": "# Generated by Django 2.2.7 on 2019-11-29 13:23\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('backend', '0005_auto_20191129_2120'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='dataxjobscheduler',\n name='end_time',\n field=models.DateTimeField(blank=True, default='', null=True, verbose_name='ไฝœไธš็ป“ๆŸๆ—ถ้—ด'),\n ),\n migrations.AlterField(\n model_name='dataxjobscheduler',\n name='start_time',\n field=models.DateTimeField(blank=True, default='', null=True, verbose_name='ไฝœไธšๅผ€ๅง‹ๆ—ถ้—ด'),\n ),\n migrations.AlterField(\n model_name='dataxtaskstatus',\n name='end_time',\n field=models.DateTimeField(blank=True, default='', null=True, verbose_name='็ป“ๆŸๆ—ถ้—ด'),\n ),\n migrations.AlterField(\n model_name='dataxtaskstatus',\n name='start_time',\n field=models.DateTimeField(blank=True, default='', null=True, verbose_name='ๅผ€ๅง‹ๆ—ถ้—ด'),\n ),\n ]\n" }, { "alpha_fraction": 0.5791698694229126, "alphanum_fraction": 0.5983089804649353, "avg_line_length": 73.34285736083984, "blob_id": "16af20b54d35aaa81b19d1cf20752a193259402d", "content_id": "3b0fcff922d05b64f0b30bda113de627b6ca4e8a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 13666, "license_type": "no_license", "max_line_length": 329, "num_lines": 175, "path": "/dataxweb/backend/migrations/0001_initial.py", "repo_name": "dengxiaohu520/DataXWeb", "src_encoding": "UTF-8", "text": "# Generated by Django 2.2.7 on 2019-11-27 13:28\n\nimport django.contrib.auth.models\nimport django.contrib.auth.validators\nfrom django.db import migrations, models\nimport django.db.models.deletion\nimport django.utils.timezone\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n ('auth', 
'0011_update_proxy_permissions'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='DataXConfig',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('operate_username', models.CharField(auto_created=True, default='admin', max_length=30, verbose_name='ๆ“ไฝœไบบๅ็งฐ')),\n ('operate_uid', models.IntegerField(auto_created=True, default=123456789, verbose_name='ๆ“ไฝœไบบID')),\n ('create_username', models.CharField(auto_created=True, default='admin', max_length=30, verbose_name='ๅˆ›ๅปบไบบๅ็งฐ')),\n ('create_uid', models.IntegerField(auto_created=True, default=123456789, verbose_name='ๅˆ›ๅปบไบบID')),\n ('create_time', models.DateTimeField(default=django.utils.timezone.now, verbose_name='ๅˆ›ๅปบๆ—ถ้—ด')),\n ('operate_time', models.DateTimeField(auto_now=True, verbose_name='ๆ“ไฝœๆ—ถ้—ด')),\n ('site_name', models.CharField(blank=True, max_length=50, null=True, verbose_name='็ซ™็‚นๅ็งฐ')),\n ('site_desc', models.CharField(blank=True, max_length=150, null=True, verbose_name='็ซ™็‚นๆ่ฟฐ')),\n ('site_author', models.CharField(blank=True, max_length=100, null=True, verbose_name='ไฝœ่€…')),\n ('site_company', models.CharField(blank=True, default=None, max_length=100, null=True, verbose_name='ๅ…ฌๅธ')),\n ('address', models.CharField(blank=True, default=None, max_length=150, null=True, verbose_name='ๆ˜พ็คบๅœฐๅ€')),\n ('telephone', models.CharField(max_length=15, verbose_name='็”ต่ฏ')),\n ('email', models.EmailField(blank=True, max_length=50, null=True, verbose_name='้‚ฎ็ฎฑ')),\n ('icp', models.CharField(blank=True, max_length=256, null=True, verbose_name='ๅค‡ๆกˆๅท')),\n ('remark', models.CharField(blank=True, max_length=200, null=True, verbose_name='ๅค‡ๆณจ')),\n ('qrcode', models.ImageField(blank=True, null=True, upload_to='sys/%Y/%m', verbose_name='ไบŒ็ปด็ ')),\n ('is_enable', models.BooleanField(default=True, verbose_name='ๆ˜ฏๅฆๅฏ็”จ')),\n ],\n options={\n 'verbose_name': '็ซ™็‚น้…็ฝฎ',\n 'verbose_name_plural': '็ซ™็‚น้…็ฝฎ',\n 'db_table': 'dx_config',\n },\n ),\n migrations.CreateModel(\n name='DataXJobScheduler',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('operate_username', models.CharField(auto_created=True, default='admin', max_length=30, verbose_name='ๆ“ไฝœไบบๅ็งฐ')),\n ('operate_uid', models.IntegerField(auto_created=True, default=123456789, verbose_name='ๆ“ไฝœไบบID')),\n ('create_username', models.CharField(auto_created=True, default='admin', max_length=30, verbose_name='ๅˆ›ๅปบไบบๅ็งฐ')),\n ('create_uid', models.IntegerField(auto_created=True, default=123456789, verbose_name='ๅˆ›ๅปบไบบID')),\n ('create_time', models.DateTimeField(default=django.utils.timezone.now, verbose_name='ๅˆ›ๅปบๆ—ถ้—ด')),\n ('operate_time', models.DateTimeField(auto_now=True, verbose_name='ๆ“ไฝœๆ—ถ้—ด')),\n ('name', models.CharField(max_length=200, verbose_name='ไฝœไธšๅ็งฐ')),\n ('sort', models.IntegerField(default=10000, verbose_name='ๆŽ’ๅบ')),\n ('is_enable', models.BooleanField(default=True, verbose_name='ๆ˜ฏๅฆๅฏ็”จ')),\n ],\n options={\n 'verbose_name': 'ไฝœไธš่ฐƒๅบฆ',\n 'verbose_name_plural': 'ไฝœไธš่ฐƒๅบฆ',\n 'db_table': 'dx_jobscheduler',\n 'ordering': ['sort', '-create_time'],\n },\n ),\n migrations.CreateModel(\n name='DataXTask',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('operate_username', models.CharField(auto_created=True, default='admin', max_length=30, 
verbose_name='ๆ“ไฝœไบบๅ็งฐ')),\n ('operate_uid', models.IntegerField(auto_created=True, default=123456789, verbose_name='ๆ“ไฝœไบบID')),\n ('create_username', models.CharField(auto_created=True, default='admin', max_length=30, verbose_name='ๅˆ›ๅปบไบบๅ็งฐ')),\n ('create_uid', models.IntegerField(auto_created=True, default=123456789, verbose_name='ๅˆ›ๅปบไบบID')),\n ('create_time', models.DateTimeField(default=django.utils.timezone.now, verbose_name='ๅˆ›ๅปบๆ—ถ้—ด')),\n ('operate_time', models.DateTimeField(auto_now=True, verbose_name='ๆ“ไฝœๆ—ถ้—ด')),\n ('name', models.CharField(max_length=256, verbose_name='ไปปๅŠกๅ็งฐ')),\n ('from_dbtype', models.CharField(max_length=50, verbose_name='ๆฅๆบๅบ“็ฑปๅž‹')),\n ('from_hostname', models.CharField(max_length=16, verbose_name='ๆฅๆบIP')),\n ('from_port', models.SmallIntegerField(default=3306, verbose_name='ๆฅๆบ็ซฏๅฃ')),\n ('from_username', models.CharField(max_length=50, verbose_name='ๆฅๆบ็”จๆˆทๅ')),\n ('from_password', models.CharField(max_length=50, verbose_name='ๆฅๆบๅฏ†็ ')),\n ('from_db_name', models.CharField(max_length=80, verbose_name='ๆฅๆบๅบ“ๅ')),\n ('from_table_name', models.CharField(max_length=80, verbose_name='ๆฅๆบ่กจๅ')),\n ('from_columns', models.CharField(default='*', max_length=1000, verbose_name='ๆฅๆบๅˆ—')),\n ('from_where', models.CharField(default='', max_length=1000, verbose_name='ๆฅๆบๆกไปถ')),\n ('from_character', models.CharField(default='utf8', max_length=10, verbose_name='ๆฅๆบ็ผ–็ ')),\n ('to_dbtype', models.CharField(max_length=50, verbose_name='็›ฎๆ ‡ๅบ“็ฑปๅž‹')),\n ('to_hostname', models.CharField(max_length=16, verbose_name='็›ฎๆ ‡IP')),\n ('to_port', models.SmallIntegerField(default=3306, verbose_name='็›ฎๆ ‡็ซฏๅฃ')),\n ('to_username', models.CharField(max_length=50, verbose_name='็›ฎๆ ‡็”จๆˆทๅ')),\n ('to_password', models.CharField(max_length=50, verbose_name='็›ฎๆ ‡ๅฏ†็ ')),\n ('to_db_name', models.CharField(max_length=80, verbose_name='็›ฎๆ ‡ๅบ“ๅ')),\n ('to_table_name', models.CharField(max_length=80, verbose_name='็›ฎๆ ‡่กจๅ')),\n ('to_columns', models.CharField(default='*', max_length=1000, verbose_name='็›ฎๆ ‡ๅˆ—')),\n ('to_pre_sql', models.CharField(default='', max_length=1000, verbose_name='ๅ‰็ฝฎๆกไปถ')),\n ('to_post_sql', models.CharField(default='', max_length=1000, verbose_name='ๅŽ็ฝฎๆกไปถ')),\n ('to_character', models.CharField(default='utf8', max_length=10, verbose_name='็›ฎๆ ‡็ผ–็ ')),\n ('to_session', models.CharField(default='', max_length=256, verbose_name='็›ฎๆ ‡session')),\n ('to_write_mode', models.CharField(default='insert', max_length=15, verbose_name='็›ฎๆ ‡ๅ†™ๅ…ฅๆจกๅผ')),\n ('task_speed_channel', models.SmallIntegerField(default=5, verbose_name='้€Ÿๅบฆ')),\n ('task_error_limit_record', models.SmallIntegerField(default=5, verbose_name='้”™่ฏฏ่ฎฐๅฝ•ๆกๆ•ฐ')),\n ('task_error_limit_percentage', models.FloatField(default=0.02, verbose_name='้”™่ฏฏ่ฎฐๅฝ•็™พๅˆ†ๆฏ”')),\n ('sort', models.IntegerField(default=0, verbose_name='ๆŽ’ๅบ')),\n ('is_enable', models.BooleanField(default=True, verbose_name='ๆ˜ฏๅฆๅฏ็”จ')),\n ],\n options={\n 'verbose_name': 'ไฝœไธšไปปๅŠก',\n 'verbose_name_plural': 'ไฝœไธšไปปๅŠก',\n 'db_table': 'dx_task',\n 'ordering': ['-create_time'],\n },\n ),\n migrations.CreateModel(\n name='DataXNav',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('operate_username', models.CharField(auto_created=True, default='admin', max_length=30, verbose_name='ๆ“ไฝœไบบๅ็งฐ')),\n 
('operate_uid', models.IntegerField(auto_created=True, default=123456789, verbose_name='ๆ“ไฝœไบบID')),\n ('create_username', models.CharField(auto_created=True, default='admin', max_length=30, verbose_name='ๅˆ›ๅปบไบบๅ็งฐ')),\n ('create_uid', models.IntegerField(auto_created=True, default=123456789, verbose_name='ๅˆ›ๅปบไบบID')),\n ('create_time', models.DateTimeField(default=django.utils.timezone.now, verbose_name='ๅˆ›ๅปบๆ—ถ้—ด')),\n ('operate_time', models.DateTimeField(auto_now=True, verbose_name='ๆ“ไฝœๆ—ถ้—ด')),\n ('code', models.CharField(max_length=20, verbose_name='ๆ ‡่ฏ†')),\n ('name', models.CharField(blank=True, max_length=50, null=True, verbose_name='ๅ็งฐ')),\n ('url', models.CharField(max_length=200, verbose_name='้“พๆŽฅ')),\n ('remark', models.CharField(blank=True, max_length=300, verbose_name='ๆ่ฟฐ')),\n ('is_root', models.BooleanField(default=True, verbose_name='ๆ˜ฏๅฆไธ€็บง่œๅ•')),\n ('is_delete', models.BooleanField(default=False, verbose_name='ๆ˜ฏๅฆๅˆ ้™ค')),\n ('sort', models.IntegerField(default=0, verbose_name='ๆŽ’ๅบ')),\n ('is_enable', models.BooleanField(default=True, verbose_name='ๆ˜ฏๅฆๅฏ็”จ')),\n ('parent', models.ForeignKey(blank=True, default=0, limit_choices_to={'is_delete': False, 'is_root': True}, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='children', to='backend.DataXNav', verbose_name='็ˆถ็บง')),\n ],\n options={\n 'verbose_name': 'ๅฏผ่ˆช่œๅ•็ฎก็†',\n 'verbose_name_plural': 'ๅฏผ่ˆช่œๅ•็ฎก็†',\n 'db_table': 'dx_nav',\n 'ordering': ['sort', '-create_time'],\n },\n ),\n migrations.CreateModel(\n name='DataXUserProfile',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('password', models.CharField(max_length=128, verbose_name='password')),\n ('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),\n ('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),\n ('username', models.CharField(error_messages={'unique': 'A user with that username already exists.'}, help_text='Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only.', max_length=150, unique=True, validators=[django.contrib.auth.validators.UnicodeUsernameValidator()], verbose_name='username')),\n ('first_name', models.CharField(blank=True, max_length=30, verbose_name='first name')),\n ('last_name', models.CharField(blank=True, max_length=150, verbose_name='last name')),\n ('email', models.EmailField(blank=True, max_length=254, verbose_name='email address')),\n ('is_staff', models.BooleanField(default=False, help_text='Designates whether the user can log into this admin site.', verbose_name='staff status')),\n ('is_active', models.BooleanField(default=True, help_text='Designates whether this user should be treated as active. 
Unselect this instead of deleting accounts.', verbose_name='active')),\n ('date_joined', models.DateTimeField(default=django.utils.timezone.now, verbose_name='date joined')),\n ('avatar', models.ImageField(default='avatar/default.png', max_length=200, upload_to='avatar/%Y/%m', verbose_name='็”จๆˆทๅคดๅƒ')),\n ('qq', models.CharField(blank=True, max_length=20, null=True, verbose_name='QQ')),\n ('phone', models.CharField(blank=True, max_length=11, null=True, unique=True, verbose_name='ๆ‰‹ๆœบๅท')),\n ('nick_name', models.CharField(max_length=30, verbose_name='ๆ˜ต็งฐ')),\n ('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),\n ('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),\n ],\n options={\n 'verbose_name': '็”จๆˆท',\n 'verbose_name_plural': '็”จๆˆท',\n 'db_table': 'dx_userprofile',\n 'ordering': ['-id'],\n 'abstract': False,\n 'swappable': 'AUTH_USER_MODEL',\n },\n managers=[\n ('objects', django.contrib.auth.models.UserManager()),\n ],\n ),\n ]\n" }, { "alpha_fraction": 0.4592755436897278, "alphanum_fraction": 0.4660806655883789, "avg_line_length": 27.26653289794922, "blob_id": "01fdc672fe77b4956f34a1ca23ffc485bb4460c7", "content_id": "ec350bd6e197954536cb5db0d4de957596f25a99", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 15395, "license_type": "no_license", "max_line_length": 112, "num_lines": 499, "path": "/dataxweb/dataxweb/settings.py", "repo_name": "dengxiaohu520/DataXWeb", "src_encoding": "UTF-8", "text": "\"\"\"\nDjango settings for dataxweb project.\n\nGenerated by 'django-admin startproject' using Django 2.2.7.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/2.2/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/en/2.2/ref/settings/\n\"\"\"\n\nimport os\nfrom django.utils.translation import ugettext_lazy as _\nimport django.utils.log\nimport logging\nimport logging.handlers\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n\n\n# Quick-start development settings - unsuitable for production\n# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = '4q@%gatxy8hi)rn6!qt6+59n(^eki-ght5*6tzqtpbvaukpemy'\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = True\n\nALLOWED_HOSTS = ['*']\n\n\n# Application definition\n\nINSTALLED_APPS = [\n 'simpleui',\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n 'backend',\n]\n\nMIDDLEWARE = [\n 'django.middleware.security.SecurityMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n]\n\nROOT_URLCONF = 
'dataxweb.urls'\n\nTEMPLATES = [\n {\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'DIRS': [\n os.path.join(BASE_DIR, 'backend/templates'),\n os.path.join(BASE_DIR, 'frontend/dist'),\n ],\n 'APP_DIRS': True,\n 'OPTIONS': {\n 'context_processors': [\n 'django.template.context_processors.debug',\n 'django.template.context_processors.request',\n 'django.contrib.auth.context_processors.auth',\n 'django.contrib.messages.context_processors.messages',\n # 'backend.views.global_setting',\n ],\n },\n },\n]\n\nWSGI_APPLICATION = 'dataxweb.wsgi.application'\n\n\n# Database\n# https://docs.djangoproject.com/en/2.2/ref/settings/#databases\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.sqlite3',\n 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),\n },\n # 'default': {\n # 'ENGINE': 'django.db.backends.mysql',\n # 'HOST': 'localhost',\n # 'PORT': '3306',\n # 'NAME': 'chf',\n # 'USER': 'root',\n # 'PASSWORD': '123456',\n # 'OPTIONS': {\n # \"init_command\": \"SET sql_mode='STRICT_TRANS_TABLES'\",\n # }\n # }\n}\n\n\n# Password validation\n# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators\n\nAUTH_PASSWORD_VALIDATORS = [\n {\n 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',\n },\n]\n\n\n# Internationalization\n# https://docs.djangoproject.com/en/2.2/topics/i18n/\n\n# LANGUAGE_CODE = 'en-us'\nLANGUAGE_CODE = 'zh-hans'\n\n# TIME_ZONE = 'UTC'\nTIME_ZONE = 'Asia/Shanghai'\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = True\n\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/2.2/howto/static-files/\n\nSTATIC_URL = '/static/'\nSTATICFILES_DIRS = [\n os.path.join(BASE_DIR, 'frontend/dist/static'),\n]\n\n\nSESSION_SAVE_EVERY_REQUEST = True\nSESSION_COOKIE_AGE = 60 * 30 # 30ๅˆ†้’Ÿ\nSESSION_EXPIRE_AT_BROWSER_CLOSE = False # ไผš่ฏcookieๅฏไปฅๅœจ็”จๆˆทๆต่งˆๅ™จไธญไฟๆŒๆœ‰ๆ•ˆๆœŸใ€‚True๏ผšๅ…ณ้—ญๆต่งˆๅ™จ๏ผŒๅˆ™Cookieๅคฑๆ•ˆ\n\n# ่‡ชๅฎšไน‰็”จๆˆทmodel ๅฆๅˆ™ไผšๆŠฅ๏ผšHINT: Add or change a related_name argument to the definition\n# for โ€˜User.user_permissionsโ€™ or โ€˜User.user_permissionsโ€™.\nAUTH_USER_MODEL = 'backend.DataXUserProfile'\n\n\nAPPEND_SLASH = True\n\nSITE_NAME = _('DataXWeb')\nSITE_DESC = _('DataXWebๅฎ˜็ฝ‘')\nSITE_AUTHOR = 'flack'\n\nMEDIA_URL = '/uploads/'\nMEDIA_ROOT = os.path.join(BASE_DIR, 'uploads').replace('\\\\', '/') # ่ฎพ็ฝฎ้™ๆ€ๆ–‡ไปถ่ทฏๅพ„ไธบไธป็›ฎๅฝ•ไธ‹็š„uploadsๆ–‡ไปถๅคน\n\n\n# ้ฆ–้กต้…็ฝฎ\n# SIMPLEUI_HOME_PAGE = 'https://www.baidu.com'\n# ้ฆ–้กตๆ ‡้ข˜\n# SIMPLEUI_HOME_TITLE = '็™พๅบฆไธ€ไธ‹ไฝ ๅฐฑ็Ÿฅ้“'\n# ้ฆ–้กตๅ›พๆ ‡\n# SIMPLEUI_HOME_ICON = 'fa fa-user'\n# ่ฎพ็ฝฎsimpleui ็‚นๅ‡ป้ฆ–้กตๅ›พๆ ‡่ทณ่ฝฌ็š„ๅœฐๅ€\n# SIMPLEUI_INDEX = 'http://www.dataxweb.cn/index'\nSIMPLEUI_INDEX = 'http://127.0.0.1:9000'\n# ่‡ชๅฎšไน‰SIMPLEUI็š„Logo ไฟฎๆ”นLOGO\nSIMPLEUI_LOGO = STATIC_URL + 'images/login_logo.jpg'\n\n# ๆœๅŠกๅ™จไฟกๆฏ\nSIMPLEUI_HOME_INFO = False\n# ๅฟซ้€Ÿๆ“ไฝœ\nSIMPLEUI_HOME_QUICK = True\n# ๆœ€่ฟ‘ๅŠจไฝœ\nSIMPLEUI_HOME_ACTION = True\n# ็ปŸ่ฎกๅˆ†ๆžไฟกๆฏ\nSIMPLEUI_ANALYSIS = False\n\n# ้ป˜่ฎคๅ›พๆ ‡\n# SIMPLEUI_DEFAULT_ICON = True\n# SIMPLEUI_ICON = {\n# '็ณป็ปŸ': 'fab fa-apple',\n# 'ไฟกๆฏ็ฎก็†': 'fas fa-user-tie'\n# }\n\nSIMPLEUI_CONFIG = {\n 'system_keep': False,\n # ๅผ€ๅฏๆŽ’ๅบๅ’Œ่ฟ‡ๆปคๅŠŸ่ƒฝ, 
ไธๅกซๆญคๅญ—ๆฎตไธบ้ป˜่ฎคๆŽ’ๅบๅ’Œๅ…จ้ƒจๆ˜พ็คบ, ็ฉบๅˆ—่กจ[] ไธบๅ…จ้ƒจไธๆ˜พ็คบ.\n 'menu_display': [\n _('็ณป็ปŸ'),\n _('ๅฎขๆˆท็ซฏ็ฎก็†'),\n _('ไฝœไธš่ฐƒๅบฆ'),\n _('่ฟ่กŒ็›‘ๆŽง'),\n _('ๅฎžๆ—ถๆ—ฅๅฟ—'),\n _('้€š็Ÿฅ็ฎก็†'),\n _('็”จๆˆท็ฎก็†'),\n _('ๆƒ้™็ฎก็†'),\n _('ๆ•ฐๆฎๅบ“็ฎก็†'),\n _('็ปŸ่ฎกๅˆ†ๆž'),\n ],\n 'menus': [\n {\n 'name': _('็ณป็ปŸ'),\n 'icon': 'fas fa-cog',\n 'models': [\n {\n 'name': _('ๅฏผ่ˆช่œๅ•'),\n 'icon': 'fa fa-book-open',\n # 'url': 'home/sysnav/'\n },\n {\n 'name': _('็ซ™็‚น้…็ฝฎ'),\n 'icon': 'fa fa-book-open',\n # 'url': 'home/sysconfig/'\n },\n {\n 'name': _('้—ฎ้ข˜ๅˆ—่กจ'),\n 'icon': 'fa fa-book-open',\n # 'url': 'home/chfquestion/'\n },\n {\n 'name': _('ๆธ…้™ค็ณป็ปŸ็ผ“ๅญ˜'),\n 'icon': 'fa fa-broom',\n },\n {\n 'name': _('็ณป็ปŸๆ—ฅๅฟ—'),\n 'icon': 'fa fa-cat',\n 'url': 'logentry/',\n },\n {\n 'name': _('ๆ•ฐๆฎๅบ“ๅค‡ไปฝ'),\n 'icon': 'fa fa-coins',\n },\n {\n 'name': _('ๅ…ณ้”ฎ่ฏ็ฎก็†'),\n 'icon': 'fa fa-book-open',\n }\n ]\n },\n # {\n # 'name': _('ๅฎขๆˆท็ซฏ็ฎก็†'),\n # 'icon': 'fas fa-sitemap',\n # 'models': [\n # {\n # 'name': _('ๅฎขๆˆท็ซฏๅˆ—่กจ'),\n # 'icon': 'fa fa-info',\n # # 'url': 'home/chfindexplate/'\n # },\n # ]\n # },\n {\n 'name': _('ไฝœไธš่ฐƒๅบฆ'),\n 'icon': 'fas fa-pepper-hot',\n 'models': [\n {\n 'name': _('ไฝœไธš่ฐƒๅบฆๅˆ—่กจ'),\n 'icon': 'fa fa-project-diagram',\n 'url': 'backend/dataxjobscheduler/'\n },\n {\n 'name': _('ไฝœไธšไปปๅŠกๅˆ—่กจ'),\n 'icon': 'fa fa-project-diagram',\n 'url': 'backend/dataxtask/'\n },\n ]\n },\n {\n 'name': _('่ฟ่กŒ็›‘ๆŽง'),\n 'icon': 'fas fa-pencil-alt',\n 'url': 'backend/dataxtaskstatus/'\n },\n {\n 'name': _('ๅฎžๆ—ถๆ—ฅๅฟ—'),\n 'icon': 'fas fa-comments',\n 'models': [\n {\n 'name': _('ๅฎžๆ—ถๆ—ฅๅฟ—ๅˆ—่กจ'),\n 'icon': 'fa fa-comment-dots',\n 'url': 'admin/logentry/'\n }\n ]\n },\n {\n 'name': _('้€š็Ÿฅ็ฎก็†'),\n 'icon': 'fas fa-users-cog',\n 'models': [\n {\n 'name': _('ๆŠฅ่ญฆ่ฎพ็ฝฎ'),\n 'icon': 'fa fa-user-friends',\n 'url': ''\n },\n {\n 'name': _('้‚ฎไปถ่ฎพ็ฝฎ'),\n 'icon': 'fa fa-user-friends',\n 'url': ''\n },\n {\n 'name': _('็Ÿญไฟก่ฎพ็ฝฎ'),\n 'icon': 'fa fa-user-friends',\n 'url': ''\n },\n {\n 'name': _('ๅพฎไฟก่ฎพ็ฝฎ'),\n 'icon': 'fa fa-user-friends',\n 'url': ''\n }\n ]\n },\n {\n 'name': _('็”จๆˆท็ฎก็†'),\n 'icon': 'fas fa-users',\n 'models': [\n {\n 'name': _('็”จๆˆท'),\n 'icon': 'fas fa-user',\n # 'url': 'home/chfuserprofile/'\n },\n {\n 'app': 'auth',\n 'name': _('็”จๆˆท็ป„'),\n 'icon': 'fa fa-user-tag',\n 'url': 'auth/group/'\n }\n ]\n },\n {\n 'app': 'auth',\n 'name': 'ๆƒ้™็ฎก็†',\n 'icon': 'fas fa-user-shield',\n 'models': [{\n 'name': '็”จๆˆท',\n 'icon': 'fa fa-user',\n 'url': 'home/chfuserprofile/'\n }]\n },\n {\n 'name': 'ๆ•ฐๆฎๅบ“็ฎก็†',\n 'icon': 'fas fa-compact-disc',\n 'models': [\n {\n 'name': _('ๆ•ฐๆฎๅบ“ๅˆ—่กจ'),\n 'icon': 'fa fa-book-open',\n # 'url': 'home/sysnav/'\n },\n ]\n },\n {\n 'name': '็ปŸ่ฎกๅˆ†ๆž',\n 'icon': 'fas fa-compact-disc',\n 'url': ''\n },\n ]\n}\n\n\n# DataX ไปปๅŠก่„šๆœฌๅญ˜ๆ”พ็›ฎๅฝ•\nDATAX_JOB_JSON_FILE_PATH = r'D:\\Flack\\Project\\Github\\DataXPython3\\datax\\bin\\json'\n\n# DataX ่ฟ่กŒๆ–‡ไปถ็›ฎๅฝ•\nDATAX_PY_PATH = r'D:\\Flack\\Project\\Github\\DataXPython3\\datax\\bin\\datax.py'\n\n# ๆ—ฅๅฟ—็บงๅˆซ\nLOG_LEVEL = logging.DEBUG\n\n# ๆ—ฅๅฟ—็›ฎๅฝ•\nLOG_PATH = os.path.join(BASE_DIR, 'log')\nif not os.path.join(LOG_PATH):\n os.mkdir(LOG_PATH)\n\n# ๆ—ฅๅฟ—็ณป็ปŸ็š„่ฎฐๅฝ•ๅ™จ๏ผŒๅค„็†ๅ™จ๏ผŒ่ฟ‡ๆปคๅ™จๅ’Œๆ ผๅผๅ™จ\nLOGGING = {\n # ๆŒ‡ๆ˜ŽdictConfig็š„็‰ˆๆœฌ\n 'version': 1,\n # ่ฎพ็ฝฎTrueๅฐ†็ฆ็”จๆ‰€ๆœ‰็š„ๅทฒ็ปๅญ˜ๅœจ็š„ๆ—ฅๅฟ—้…็ฝฎ\n 'disable_existing_loggers': False,\n # ๆ 
ผๅผๅ™จ\n 'formatters': {\n 'standard': {\n 'fotmat': '%(asctime)s [%(threadName)s:%(thread)d] [%(name)s:%(lineno)d] [%(module)s:%(funcName)s] '\n '[%(levelname)s]- %(message)s' # ๆ—ฅๅฟ—ๆ ผๅผ\n },\n 'verbose': {\n 'format': '{levelname} {asctime} {module} {process:d} {thread:d} {message}',\n 'style': '{',\n },\n 'simple': {\n 'format': '{levelname} {asctime} {message}',\n 'style': '{',\n },\n },\n # ่ฟ‡ๆปคๅ™จ\n 'filters': {\n # 'special': {\n # '()': 'home.logging.SpecialFilter',\n # 'foo': 'bar',\n # },\n 'require_debug_true': {\n '()': 'django.utils.log.RequireDebugTrue'\n },\n 'require_debug_false': {\n '()': 'django.utils.log.RequireDebugFalse'\n }\n },\n # ๅค„็†ๅ™จ ๅฎšไน‰ไบ†4ไธชๅค„็†ๅ™จ\n 'handlers': {\n # 'default': {\n # 'level': 'DEBUG',\n # 'class': 'logging.handlers.RotatingFileHandler',\n # 'filename': 'log/all.log', # ๆ—ฅๅฟ—่พ“ๅ‡บๆ–‡ไปถ\n # 'maxBytes': 1024*1024*5, # ๆ–‡ไปถๅคงๅฐ\n # 'backupCount': 2, # ๅค‡ไปฝไปฝๆ•ฐ\n # 'formatter': 'standard', # ไฝฟ็”จๅ“ช็งformattersๆ—ฅๅฟ—ๆ ผๅผ\n # },\n # 'error': {\n # 'level': 'ERROR',\n # 'class': 'logging.handlers.RotatingFileHandler',\n # 'filename': 'log/error.log',\n # 'maxBytes': 1024*1024*5,\n # 'backupCount': 2,\n # 'formatter': 'standard',\n # },\n # # ๆ–‡ไปถๅค„็†ๅ™จ๏ผŒๆ‰€ๆœ‰้ซ˜ไบŽ๏ผˆๅŒ…ๆ‹ฌ๏ผ‰่€Œerror็š„ๆถˆๆฏไผš่ขซๅ‘้€็ป™็ซ™็‚น็ฎก็†ๅ‘˜๏ผŒไฝฟ็”จ็š„ๆ˜ฏspecialๆ ผๅผๅ™จ\n # 'file_handler': {\n # 'level': 'NOTSET',\n # # 'class': 'logging.FileHandler',\n # 'class': 'logging.handlers.TimedRotatingFileHandler',\n # 'when': 'W0', # ๆ—ฅๅฟ—ๆ–‡ไปถๆฏๅ‘จ็ฌฌไธ€ๅคฉ็ฟป่ฝฌ\n # 'filename': 'log/error.log', # ๆ—ฅๅฟ—ๆ–‡ไปถ็š„ๅญ˜ๅ‚จๅœฐๅ€\n # 'formatter': 'verbose',\n # 'backupCount': 500, # ๆœ€ๅคšๅฏไปฅไฟๅญ˜500ไธชๆ–‡ไปถ\n # # 'filters': ['require_debug_true'],\n # },\n # ้‚ฎไปถๅค„็†ๅ™จ๏ผŒๆ‰€ๆœ‰้ซ˜ไบŽ๏ผˆๅŒ…ๆ‹ฌ๏ผ‰่€Œerror็š„ๆถˆๆฏไผš่ขซๅ‘้€็ป™็ซ™็‚น็ฎก็†ๅ‘˜๏ผŒไฝฟ็”จ็š„ๆ˜ฏspecialๆ ผๅผๅ™จ\n 'mail_admins': {\n 'level': 'ERROR',\n 'class': 'django.utils.log.AdminEmailHandler',\n 'filters': ['require_debug_false']\n },\n # ๆตๅค„็†ๅ™จ๏ผŒๆ‰€ๆœ‰็š„้ซ˜ไบŽ๏ผˆๅŒ…ๆ‹ฌ๏ผ‰debug็š„ๆถˆๆฏไผš่ขซไผ ๅˆฐstderr๏ผŒไฝฟ็”จ็š„ๆ˜ฏsimpleๆ ผๅผๅ™จ\n 'console': {\n 'level': 'INFO',\n 'class': 'logging.StreamHandler',\n 'formatter': 'simple',\n # 'filters': ['require_debug_true'],\n },\n # Nullๅค„็†ๅ™จ๏ผŒๆ‰€ๆœ‰้ซ˜ไบŽ๏ผˆๅŒ…ๆ‹ฌ๏ผ‰debug็š„ๆถˆๆฏไผš่ขซไผ ๅˆฐ/dev/null\n # 'null': {\n # 'level': 'DEBUG',\n # # 'class': 'logging.NullHandler',\n # 'class': 'django.utils.log.NullHandler',\n # },\n # 'request_handler': {\n # 'level': 'DEBUG',\n # 'class': 'logging.handlers.RotatingFileHandler',\n # 'filename': '/sourceDns/log/script.log',\n # 'maxBytes': 1024*1024*5,\n # 'backupCount': 5,\n # 'formatter': 'standard',\n # },\n # 'scripts_handler': {\n # 'level': 'DEBUG',\n # 'class': 'logging.handlers.RotatingFileHandler',\n # 'filename': '/sourceDns/log/script.log',\n # 'maxBytes': 1024*1024*5,\n # 'backupCount': 5,\n # 'formatter': 'standard',\n # }\n },\n # ๅฎšไน‰ไบ†ไธ‰ไธช่ฎฐๅฝ•ๅ™จ\n 'loggers': {\n # ๆ‰€ๆœ‰้ซ˜ไบŽ๏ผˆๅŒ…ๆ‹ฌ๏ผ‰error็š„ๆถˆๆฏไผš่ขซๅ‘ๅพ€mail_adminsๅค„็†ๅ™จ๏ผŒๆถˆๆฏไธๅ‘็ˆถๅฑ‚ๆฌกๅ‘้€\n 'django.request': {\n 'handlers': ['mail_admins'],\n 'level': 'ERROR',\n 'propagate': True,\n },\n # ๆ‰€ๆœ‰้ซ˜ไบŽ๏ผˆๅŒ…ๆ‹ฌ๏ผ‰info็š„ๆถˆๆฏๅŒๆ—ถไผš่ขซๅ‘ๅพ€consoleๅ’Œmail_adminsๅค„็†ๅ™จ๏ผŒไฝฟ็”จspecial่ฟ‡ๆปคๅ™จ\n # 'my.custom': {\n # 'handlers': ['console', 'mail_admins'],\n # 'level': 'INFO',\n # 'filters': ['special'],\n # }\n 'django.db.backends': {\n 'handlers': ['console'],\n 'propagate': True,\n 'level': 'DEBUG',\n },\n },\n}\n\n\n" } ]
10
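The DataXWeb README above drives incremental sync from cron through `syntask.sh`, which computes a `start_time`/`end_time` window and hands it to `datax.py` via `-D` parameters. A Python equivalent of that shell script is sketched below; the paths and the window length are illustrative assumptions rather than project code (compare `DATAX_PY_PATH` in the settings above):

```python
# Illustrative sketch mirroring syntask.sh from the README; paths are assumptions.
import subprocess
import time

DATAX_PY = 'datax/bin/datax.py'    # see DATAX_PY_PATH in settings.py
JOB_JSON = 'mysql2mysql.json'      # job template using ${start_time}/${end_time}


def run_incremental_sync(window_seconds=3600):
    """Sync rows whose timestamp falls within the last window_seconds."""
    end_time = int(time.time())
    start_time = end_time - window_seconds
    subprocess.run(
        ['python', DATAX_PY, JOB_JSON,
         '-p', '-Dstart_time=%d -Dend_time=%d' % (start_time, end_time)],
        check=True,  # datax.py exits non-zero on failure, surface that as an error
    )


if __name__ == '__main__':
    run_incremental_sync()
```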
rodchenk/time-expenses
https://github.com/rodchenk/time-expenses
a22a5b0ff21feaca65e2c17bc9ed4e23bae93174
b0b37f00514c9f54300097a01f53f8923937caa9
daf3c52a9a26e00e1502b4c991b5d7777d444469
refs/heads/master
2022-10-28T19:59:13.703717
2020-06-06T14:20:21
2020-06-06T14:20:21
267,668,472
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6709905862808228, "alphanum_fraction": 0.6757075190544128, "avg_line_length": 27.299999237060547, "blob_id": "292b4320c0bd51b9f6336d2cdd17f3fc87e491a6", "content_id": "97d3d22f0a8e69e3f48faf5e3ee8da34a8793be4", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 848, "license_type": "permissive", "max_line_length": 120, "num_lines": 30, "path": "/src/analyzer/excel_reader.py", "repo_name": "rodchenk/time-expenses", "src_encoding": "UTF-8", "text": "from analyzer.abstract_reader import AbstractReader\nimport xlrd\n\nclass ExcelReader(AbstractReader):\n\n\tdef __init__(self, filename):\n\t\tAbstractReader.__init__(self, self.__count, filename)\n\n\tdef get_stats(self):\n\t\treturn AbstractReader.get_stats(self)\n\n\tdef __count(self):\n\t\tworkbook = xlrd.open_workbook(self.filename)\n\t\tcount = 0\n\n\t\tfor sheet in workbook.sheet_names():\n\t\t\tworksheet = workbook.sheet_by_name(sheet)\n\t\t\tfor index, row in enumerate(worksheet.get_rows()):\n\t\t\t\tvals = filter(lambda x: len(str(x).strip()) > 0, worksheet.row_values(index))\n\t\t\t\tfor _x in list(vals):\n\t\t\t\t\tself.total_words += len(list(filter(lambda x: not x.replace('.', '').replace(',', '').isdigit() ,str(_x).split())))\n\nif __name__ == '__main__':\n\tfilename = './../test/file3.xlsx'\n\n\timport xlrd\n\tworkbook = xlrd.open_workbook(filename)\n\n\tprint(dir(workbook))\n\texit(0)" }, { "alpha_fraction": 0.599365770816803, "alphanum_fraction": 0.6448202729225159, "avg_line_length": 16.18181800842285, "blob_id": "bab6ab898d2deea85f3c69b9948882e8a3ea02bf", "content_id": "84b00e68e4ffe5fdcc3b2ebaa7194ba6d6eb73c1", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 946, "license_type": "permissive", "max_line_length": 112, "num_lines": 55, "path": "/README.md", "repo_name": "rodchenk/time-expenses", "src_encoding": "UTF-8", "text": "## Time Expenses\n\nPython based tool for calculating the time expenses for translating docx, ppt and xlsx\n\n### Usage\n\n1. Clone the repo\n\n```sh\ngit clone https://github.com/rodchenk/time-expenses.git\n```\n\n2. Go inside the folder\n\n```sh\ncd time-expenses\n```\n\n3. Define your custom config\n\n```py\n# src/config.py\n\nDARK_THEME = True\nUNITS = 'minutes'\nCHART = 45\n...\n```\n\n4. Install dependencies\n\n```sh\npip install -r requirements.txt\n```\n\n5. 
Execute the script\n\n```sh\npy src/main.py --source .\\..\\folder --output .\\..\\file.csv\n```\n\nOr use the GUI-Wrapper:\n\n```sh\npy src/gui/main.py\n```\n\nAs a result, a .csv file will be generated with full statistics of all found files (images, charts, tables and so on)\n\nFile | Characters | Words | Images | Tables | Charts | Time in min\n--- | --- | --- | --- |--- |--- | ---\n\\time-expenses\\test\\file.docx | 1511 | 247 | 0 | 1 | 0 | 87\n\\time-expenses\\test\\file2.docx | 460 | 80 | 1 | 2 | 1 | 52\n | | | | | |\nTotal | 1971 | 327 | 1 | 3 | 1 | 139\n\n" }, { "alpha_fraction": 0.560804009437561, "alphanum_fraction": 0.568844199180603, "avg_line_length": 30.125, "blob_id": "06b4f40e8c531953a59841aee409705f7e681daf", "content_id": "3ed4a4b389e4ba6d4285c50de3e74759c2590ba0", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 995, "license_type": "permissive", "max_line_length": 87, "num_lines": 32, "path": "/src/docs/config.py", "repo_name": "rodchenk/time-expenses", "src_encoding": "UTF-8", "text": "#################################### User part start ##################################\n\n\"\"\" GUI Color Theme. Set True for dark mode. Valid values True, False \"\"\"\nDARK_THEME = False\n\n\"\"\" Units for time expenses. Valid values are 'seconds', 'minutes', 'hours' \"\"\"\nUNITS = 'minutes'\n\n\"\"\" Time to process a single word \"\"\"\nWORD = 0.1\n\n\"\"\" Time to process a single table \"\"\"\nTABLE = 25\n\n\"\"\" Time to process a single image/screenshot \"\"\"\nIMAGE = 35\n\n\"\"\" Time to process a single chart \"\"\"\nCHART = 45\n\n#################################### User part end ##################################\n\ndef __validate__():\n\tif not UNITS in ['seconds', 'minutes', 'hours']:\n\t\traise IOError('Invalid config value %s for time units' % UNITS)\n\tif type(DARK_THEME) is not bool:\n\t\traise IOError('Invalid config value %s for color mode' % DARK_THEME)\n\tfor _ in (WORD, TABLE, IMAGE, CHART):\n\t\tif type(_) is not int and type(_) is not float:\n\t\t\traise IOError('Invalid config value %s for time variables' % _)\n\n__validate__()" }, { "alpha_fraction": 0.6687116622924805, "alphanum_fraction": 0.6789366006851196, "avg_line_length": 27.764705657958984, "blob_id": "2fff8b0b4c27cbdf63118589f635f7007234d0ac", "content_id": "78dc16fe2e225b1aab61a74e3c9c5851afe06c07", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 489, "license_type": "permissive", "max_line_length": 109, "num_lines": 17, "path": "/src/analyzer/abstract_reader.py", "repo_name": "rodchenk/time-expenses", "src_encoding": "UTF-8", "text": "\nclass AbstractReader(object):\n\n\tdef __init__(self, counter_callback, filename):\n\t\tself._counter = counter_callback\n\t\tself.filename = filename\n\t\tself.total_chars, self.total_words, self.total_charts, self.total_images, self.total_tables = 0, 0, 0, 0, 0\n\n\tdef get_stats(self):\n\t\tself._counter()\n\t\treturn {\n\t\t\t'file': self.filename,\n\t\t\t'chars': self.total_chars,\n\t\t\t'words': self.total_words,\n\t\t\t'charts': self.total_charts,\n\t\t\t'images': self.total_images,\n\t\t\t'tables': self.total_tables\n\t\t}" }, { "alpha_fraction": 0.6107460260391235, "alphanum_fraction": 0.6265944838523865, "avg_line_length": 30.93827247619629, "blob_id": "3c19269f5f73b791de08c0659ad3a62631c3743e", "content_id": "33063e9bd02bad58f5d3e6fa5906898c79e43e75", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", 
"length_bytes": 2587, "license_type": "permissive", "max_line_length": 188, "num_lines": 81, "path": "/src/main.py", "repo_name": "rodchenk/time-expenses", "src_encoding": "UTF-8", "text": "import argparse\n\nimport os\nimport json\n\nfrom analyzer.word_reader import WordReader\nfrom analyzer.excel_reader import ExcelReader\nfrom docs import config\n\nparser = argparse.ArgumentParser(description='Python based tool for calculating time expenses')\n\nparser.add_argument('-s', '--source', dest='source', help='Source folder in which all files are allocated', required=True, metavar='')\nparser.add_argument('-o', '--output', dest='output', help='Output file to write the result time and stats', required=True, metavar='')\n\nargs = parser.parse_args()\n\nCSV_SEPARATOR = ';'\nCSV_HEADER = 'File{0}Characters{0}Words{0}Images{0}Tables{0}Charts{0}Time in {1}\\n'.format(CSV_SEPARATOR, config.UNITS)\n\n\ndef __write_output(stats) -> None:\n\tabs_source_path = os.path.abspath(args.output)\n\ttotal = {\n\t\t'chars': 0,\n\t\t'words': 0,\n\t\t'images': 0,\n\t\t'tables': 0,\n\t\t'charts': 0,\n\t\t'time': 0\n\t}\n\twith open(abs_source_path, 'w') as output:\n\t\toutput.write(CSV_HEADER)\n\n\t\tfor _stat in stats:\n\t\t\ttotal['chars'] += _stat['chars']\n\t\t\ttotal['words'] += _stat['words']\n\t\t\ttotal['images'] += _stat['images']\n\t\t\ttotal['tables'] += _stat['tables']\n\t\t\ttotal['charts'] += _stat['charts']\n\n\t\t\ttime = float(_stat['words']) * config.WORD + float(_stat['images']) * config.IMAGE + float(_stat['tables']) * config.TABLE + float(_stat['charts']) * config.CHART\n\t\t\ttotal['time'] += time\n\n\t\t\toutput.write('{1}{0}{2}{0}{3}{0}{4}{0}{5}{0}{6}{0}{7}\\n'.format(CSV_SEPARATOR, _stat['file'], _stat['chars'], _stat['words'], _stat['images'], _stat['tables'], _stat['charts'], time))\n\n\t\toutput.write('\\n')\n\t\toutput.write('{1}{0}{2}{0}{3}{0}{4}{0}{5}{0}{6}{0}{7}\\n'.format(CSV_SEPARATOR, 'Total', total['chars'], total['words'], total['images'], total['tables'], total['charts'], total['time']))\n\n\ndef __main() -> None:\n\n\tabs_source_path = os.path.abspath(args.source)\n\tstats = []\n\n\tfor r, d, f in os.walk(abs_source_path):\n\t for file in f:\n\t if file.endswith('.docx'):\n\t wr = WordReader(os.path.join(r, file))\n\t stats.append(wr.get_stats())\n\t if file.endswith('.xlsx'):\n\t \twr = ExcelReader(os.path.join(r, file))\n\t \tstats.append(wr.get_stats())\n\t\n\t__write_output(stats)\n\n\nif __name__ == '__main__':\n\tfrom datetime import datetime\n\tstart = datetime.now()\n\n\ttry:\n\t\t__main()\n\texcept Exception as e:\n\t\tend = datetime.now() - start\n\t\tprint('---Calculation caceled---\\n')\n\t\tprint('Total time:\\t%s' % round(end, 2))\n\t\tprint('Error:\\t%s' % str(e))\n\telse:\n\t\tend = datetime.now() - start\n\t\tprint('---Calculation successful---\\n')\n\t\tprint('Total time:\\t%s sec' % round(end.total_seconds(), 2))\n" }, { "alpha_fraction": 0.6621694564819336, "alphanum_fraction": 0.6866343021392822, "avg_line_length": 33.96226501464844, "blob_id": "0cd30972398bab0066bcf26b44ddb839675d8f7c", "content_id": "afeb24f79ffdb67b045799f3d53cfe21abbcbc01", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5559, "license_type": "permissive", "max_line_length": 290, "num_lines": 159, "path": "/src/gui/main.py", "repo_name": "rodchenk/time-expenses", "src_encoding": "UTF-8", "text": "from tkinter.filedialog import askopenfilename, askdirectory, asksaveasfilename\nimport tkinter\nfrom tkinter import 
messagebox\nfrom tkinter.ttk import Progressbar, Style\nfrom PIL import ImageTk, Image\nimport time\nimport os\nimport sys\nimport subprocess\nimport threading\n\ntry:\n\tfrom src.docs import config\nexcept Exception as e:\n\tprint(str(e))\n\tsys.exit(0)\n\n\nclass Gui:\n\n\tDARK_THEME = {\n\t\t'PRIMARY': '#222',\n\t\t'SECONDARY': '#333',\n\t\t'TEXT': '#fff',\n\t\t'SEC_TEXT': '#4d5c69'\n\t}\n\n\tLIGHT_THEME = {\n\t\t'PRIMARY': '#fff',\n\t\t'SECONDARY': '#eee',\n\t\t'TEXT': '#222',\n\t\t'SEC_TEXT': '#a9b5c0'\n\t}\n\n\tAPP_NAME = 'Timefy'\n\n\tdef __init__(self):\n\t\tself.THEME = self.DARK_THEME if config.DARK_THEME else self.LIGHT_THEME\n\t\tgui_dir = os.path.dirname(__file__)\n\t\tassets_dir = gui_dir + './../assets/'\n\n\t\tself.top = tkinter.Tk()\n\t\tself.top.title(self.APP_NAME)\n\t\tself.top.geometry(\"500x175\")\n\t\tself.top.resizable(False, False)\n\t\tself.top.iconbitmap(default=assets_dir + 'favicon.ico')\n\n\t\tframe = tkinter.Frame(self.top, padx=10, pady=10, bg=self.THEME['PRIMARY'])\n\t\tframe.pack(fill=tkinter.BOTH, expand=True)\n\t\t\n\t\tsearchImg = ImageTk.PhotoImage(Image.open(assets_dir + 'search.png').resize((20, 20), Image.ANTIALIAS))\n\t\tsourceButton = tkinter.Button(frame, image=searchImg, padx=0, relief=tkinter.FLAT, command=self.__load_source, bg=self.THEME['PRIMARY'])\n\t\tsourceButton.image = searchImg\n\t\tsourceButton.grid(column=2, row=0, padx=(5, 5), pady=(5, 0))\n\n\t\toutputButton = tkinter.Button(frame, image=searchImg, padx=0, relief=tkinter.FLAT, command=self.__load_output, bg=self.THEME['PRIMARY'])\n\t\toutputButton.grid(column=2, row=1, padx=(5, 5), pady=(5, 0))\n\n\t\tsourceLabel = tkinter.Label(frame, text='Source', bg=self.THEME['PRIMARY'], fg=self.THEME['TEXT'], width=8)\n\t\tsourceLabel.grid(row=0, column=0)\n\t\tself.sourceValue = tkinter.StringVar()\n\t\tsource = tkinter.Entry(frame, bg=self.THEME['SECONDARY'], textvariable=self.sourceValue, fg=self.THEME['TEXT'], width=60, borderwidth=5, relief=tkinter.FLAT, state='disabled', disabledbackground=self.THEME['SECONDARY'], disabledforeground=self.THEME['TEXT'])\n\t\tsource.grid(row=0, column=1, pady=(6, 0))\n\n\t\toutputLabel = tkinter.Label(frame, text='Output', bg=self.THEME['PRIMARY'], fg=self.THEME['TEXT'], width=8)\n\t\toutputLabel.grid(column=0, row=1)\n\t\tself.outputValue = tkinter.StringVar()\n\t\toutput = tkinter.Entry(frame, bg=self.THEME['SECONDARY'], textvariable=self.outputValue, fg=self.THEME['TEXT'], width=60, borderwidth=5, relief=tkinter.FLAT, state='disabled', disabledbackground=self.THEME['SECONDARY'], disabledforeground=self.THEME['TEXT'])\n\t\toutput.grid(row=1, column=1, pady=(6, 0))\n\n\t\tgenerate = tkinter.Button(frame, text='GENERATE', bg='#3742fa', fg='#fff', bd=0, padx=15, pady=5, command=self.__gen)\n\t\tgenerate.grid(row=2, column=1, columnspan=2, sticky=tkinter.E, pady=(20, 0))\n\n\t\tself.should_append = tkinter.IntVar()\n\t\t# append = tkinter.Checkbutton(frame, text=\"Append\", selectcolor=self.THEME['SECONDARY'], relief=tkinter.FLAT, onvalue=1, offvalue=0, variable=self.should_append, bg=self.THEME['PRIMARY'], activebackground=self.THEME['PRIMARY'], activeforeground=self.THEME['TEXT'], fg=self.THEME['TEXT'])\n\t\t# append.grid(row=2, column=1, pady=(20, 0), padx=(175, 0))\n\n\t\treset = tkinter.Button(frame, text='RESET', bg=self.THEME['SECONDARY'], fg=self.THEME['TEXT'], padx=15, pady=5, bd=0, command=self.reset)\n\t\treset.grid(row=2, column=1, pady=(20, 0), padx=(175, 0))\n\n\t\tgithub = tkinter.Label(frame, text='github.com/rodchenk', 
bg=self.THEME['PRIMARY'], fg=self.THEME['SEC_TEXT'], pady=5)\n\t\tgithub.grid(row=2, column=0, columnspan=2, sticky=tkinter.W, pady=(20, 0), padx=10)\n\n\t\ts = Style()\n\n\t\ts.theme_use(\"default\")\n\t\ts.configure(\"TProgressbar\", thickness=5, background='#26A65B', troughrelief='flat')\n\t\tself.progress = Progressbar(frame, orient=tkinter.HORIZONTAL, length=465, mode='determinate', style=\"TProgressbar\") \n\n\n\tdef run(self):\n\t\tself.top.mainloop()\n\n\tdef reset(self):\n\t\tself.outputValue.set('')\n\t\tself.sourceValue.set('')\n\n\tdef __show_progress(self):\n\t\tself.progress.grid(row=3, column=0, columnspan=3, pady=(25, 0))\n\n\t\tfor x in range(51):\n\n\t\t\tself.progress['value'] = 2 * x\n\t\t\tself.top.update_idletasks() \n\t\t\ttime.sleep(0.01) \n\n\n\tdef __hide_progress(self):\n\t\tself.progress.grid_forget()\n\n\tdef __gen(self):\n\t\tsource, output, append = self.sourceValue.get(), self.outputValue.get(), self.should_append.get() == 1\n\n\t\tif not source or not output:\n\t\t\treturn\n\n\t\t# self.__show_progress()\n\t\tthreading.Thread(target=self.__show_progress).start()\n\n\t\tresult = self.__call_script(source, output)\n\n\t\tif result == 0:\n\t\t\t_open = messagebox.askyesno('Success', 'Report has been generated. Do you want to open it?')\n\t\t\tif _open:\n\t\t\t\tsubprocess.Popen(output, shell=True, stdout = subprocess.PIPE)\n\t\telse:\n\t\t\tmessagebox.showerror('Error', 'An error has occured')\n\n\t\tself.__hide_progress()\n\n\tdef __call_script(self, source, output):\n\t\tcli_path = os.path.dirname(__file__) + './../main.py'\n\t\tcommand = 'python %s -s %s -o %s' % (cli_path, source, output)\n\t\tp = subprocess.Popen(command, shell=True, stdout = subprocess.PIPE)\n\t\tstdout, stderr = p.communicate()\n\n\t\treturn p.returncode\n\n\n\tdef __load_source(self):\n\t\tdname = askdirectory()\n\t\tself.sourceValue.set(dname)\n\n\tdef __load_output(self):\n\t\tfname = asksaveasfilename(filetypes=((\"CSV Files\", \"*.csv;\"), (\"All files\", \"*.*\") ))\n\t\tif not fname:\n\t\t\treturn\n\t\tif not fname.endswith('.csv'):\n\t\t\tif fname[-1:] == '.':\n\t\t\t\tfname = fname[:-1]\n\t\t\tfname += '.csv'\n\t\tself.outputValue.set(fname)\n\n\nif __name__ == '__main__':\n\ttry:\n\t\tGui().run()\n\texcept IOError as e:\n\t\tprint(str(e))\n" }, { "alpha_fraction": 0.707317054271698, "alphanum_fraction": 0.8048780560493469, "avg_line_length": 9.5, "blob_id": "5820f507c18cba26e6056eecf7d830ae3fa8d37a", "content_id": "25b30a81fe0be93449d7d4f7701cbd8e8c875f6f", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 41, "license_type": "permissive", "max_line_length": 15, "num_lines": 4, "path": "/src/docs/requirements.txt", "repo_name": "rodchenk/time-expenses", "src_encoding": "UTF-8", "text": "docx2txt\nargparse==1.4.0\npython-docx\nxlrd" }, { "alpha_fraction": 0.6857688426971436, "alphanum_fraction": 0.6895893216133118, "avg_line_length": 27.29729652404785, "blob_id": "d3b36a84568054d90ee90b4e99fa27bb1b84a25f", "content_id": "c685c07523be4de55df092b31896b3952d5fd5b6", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1047, "license_type": "permissive", "max_line_length": 73, "num_lines": 37, "path": "/src/analyzer/word_reader.py", "repo_name": "rodchenk/time-expenses", "src_encoding": "UTF-8", "text": "import docx2txt\nimport zipfile\nfrom docx.api import Document\nfrom analyzer.abstract_reader import AbstractReader\n\nclass WordReader(AbstractReader):\n\n\tdef 
__init__(self, filename):\n\t\tAbstractReader.__init__(self, self.__count, filename)\n\t\t\n\tdef get_stats(self):\n\t\treturn AbstractReader.get_stats(self)\n\n\tdef __count(self):\n\t\ttry:\n\t\t\tcontent = docx2txt.process(self.filename)\n\t\texcept Exception as e:\n\t\t\traise IOError('File %s not found' % self.filename)\n\n\t\tdocument = Document(self.filename)\n\n\t\tdocx_zip = zipfile.ZipFile(self.filename)\n\t\tall_files = docx_zip.namelist()\n\n\t\timages = filter(lambda x: x.startswith('word/media/'), all_files)\n\t\tcharts = filter(lambda x: x.startswith('word/charts/'), all_files)\n\n\t\t_tw = 0\n\t\tfor word in content.split():\n\t\t\tif not word.replace('.', '').replace(',', '').isdigit():\n\t\t\t\t_tw += 1 \n\n\t\tself.total_chars = len(list(filter(lambda x: x != ' ', list(content))))\n\t\tself.total_words = _tw\n\t\tself.total_charts = len(list(charts))\n\t\tself.total_images = len(list(images))\n\t\tself.total_tables = len(document.tables)\n" } ]
8
shufantj/FTeikPy
https://github.com/shufantj/FTeikPy
bf222bcd592d56a5f74e443b44eaba48c3d05239
cc187d3beb658d02a7dfc1a78c7c54f507d525a2
2a700da2f6df5af01275dde00791cd0dba417782
refs/heads/master
2020-08-26T15:39:02.836775
2018-02-25T11:42:17
2018-02-25T11:42:17
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6777296662330627, "alphanum_fraction": 0.6996151804924011, "avg_line_length": 27.094594955444336, "blob_id": "e82db2d33aef823b1a7f1b627211bce48c6313da", "content_id": "fe5086927028889c7ae82aa602e54120d466c0b9", "detected_licenses": [ "MIT", "LicenseRef-scancode-public-domain" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 4158, "license_type": "permissive", "max_line_length": 97, "num_lines": 148, "path": "/README.rst", "repo_name": "shufantj/FTeikPy", "src_encoding": "UTF-8", "text": "*******\nFTeikPy\n*******\n\n.. figure:: examples/marmousi.png\n\nFTeikPy is a Python package that computes accurate first arrival traveltimes in\n2-D and 3-D heterogeneous isotropic velocity model, with the possibility to use\na different grid spacing in Z, X and Y directions. The algorithm handles\nproperly the curvature of wavefronts close to the source. The source can be\nplaced without any problem between grid points.\n\n:Version: 1.5.0\n:Author: Mark Noble\n:Maintainer: Keurfon Luu\n:Web site: https://github.com/keurfonluu/fteikpy\n:Copyright: This document has been placed in the public domain.\n:License: FTeikPy is released under the MIT License.\n\n**NOTE**: the 2-D and 3-D Eikonal solvers included in FTeikPy are written in\nFortran. The original source codes can be found `here <https://github.com/Mark-Noble/FTEIK2D>`__.\nDetailed implementation of local operators and global propagation scheme\nimplemented in this package are inspired from [1]_. If you find this algorithm\nand/or package useful, citing this paper would be appreciated.\n\n\nInstallation\n============\n\nThe recommended way to install FTeikPy is through pip (internet required):\n\n.. code-block:: bash\n\n pip install fteikpy\n\nOtherwise, download and extract the package, then run:\n\n.. code-block:: bash\n\n python setup.py install\n\n\nUsage\n=====\n\n**New in 1.4.0**: added a posteriori ray tracer for 2-D velocity models.\n\nFirst, import FTeikPy and define (or import) your velocity model (here in 2-D):\n\n.. code-block:: python\n\n import numpy as np\n from fteikpy import Eikonal\n\n nz, nx = 351, 1000\n dz, dx = 10., 10.\n vel2d = np.full((nz, nx), 1500.)\n\nThen, initialize the Eikonal solver:\n\n.. code-block:: python\n\n eik = Eikonal(vel2d, grid_size = (dz, dx), n_sweep = 2)\n\nFinally, for a given source point with coordinate (z,x), run the method *solve*:\n\n.. code-block:: python\n\n source = (0., 5000.)\n tt = eik.solve(source)\n\nThe same can be done on a 3-D velocity model (just a bit slower...).\n\n\nTroubleshooting on Windows\n==========================\n\nA Fortran compiler is required to install this package. While it is\nstraightforward on Unix systems, it can be quite a pain on Windows. We recommend\ninstalling `Anaconda <https://www.continuum.io/downloads>`__ that contains all\nthe required packages to install FTeikPy on Windows systems.\n\n1. Download `MinGW 64 bits <https://sourceforge.net/projects/mingw-w64/files/>`__\n (choose *x86_64-posix-sjlj*) and extract the archive in your drive root.\n\n2. Add MinGW to your system path:\n\n C:\\\\<Your MinGW directory>\\\\bin\n\n3. Create the file *distutils.cfg* in *<Your Python directory path>\\\\Lib\\\\distutils*\n with the following content to use MinGW compiler:\n\n.. code-block::\n\n [build]\n compiler=mingw32\n\n4. Open a terminal and install *libpython*:\n\n.. 
code-block:: batch\n\n conda install libpython\n\n\nIf you got the error:\n\n Error: ValueError: Unknown MS Compiler version 1900\n\nYou may need to manually patch the file *cygwinccompiler.py* located in:\n\n <Your Python directory path>\\\\Lib\\\\distutils\n\nby replacing:\n\n.. code-block:: python\n\n self.dll_libraries = get_msvcr()\n\nin lines 157 and 318 by (be careful with indentation):\n\n.. code-block:: python\n\n pass\n\nYou should also patch the file *mingw32compiler.py* located in:\n\n <Your Python directory path>\\\\Lib\\\\site-packages\\\\numpy\\\\distutils\n\nby commenting out from lines 96 to 104:\n\n.. code-block:: python\n\n # msvcr_success = build_msvcr_library()\n # msvcr_dbg_success = build_msvcr_library(debug=True)\n # if msvcr_success or msvcr_dbg_success:\n # # add preprocessor statement for using customized msvcr lib\n # self.define_macro('NPY_MINGW_USE_CUSTOM_MSVCR')\n #\n # # Define the MSVC version as hint for MinGW\n # msvcr_version = '0x%03i0' % int(msvc_runtime_library().lstrip('msvcr'))\n # self.define_macro('__MSVCRT_VERSION__', msvcr_version)\n\n\nReferences\n==========\n.. [1] M. Noble, A. Gesret and N. Belayouni, *Accurate 3-D finite difference\n computation of traveltimes in strongly heterogeneous media*, Geophysical\n Journal International, 2014, 199(3): 1572-1585\n" }, { "alpha_fraction": 0.5920074582099915, "alphanum_fraction": 0.6180297136306763, "avg_line_length": 30.202898025512695, "blob_id": "af0006725d4af81295b321ccf06e8282342f0cdd", "content_id": "3f5759a5e4a7a97d8a3cdcd195c72629a295f93e", "detected_licenses": [ "MIT", "LicenseRef-scancode-public-domain" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2152, "license_type": "permissive", "max_line_length": 99, "num_lines": 69, "path": "/examples/example_interpolation.py", "repo_name": "shufantj/FTeikPy", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\n\"\"\"\nThis example show that velocity interpolation estimate more accurately\ntraveltimes. In this example, the eikonal equation is solved on a 6-by-6 grid.\nTraveltimes obtained with velocity interpolation are compared to time\ninterpolation and the analytical solution.\n\nAuthor: Keurfon Luu <keurfon.luu@mines-paristech.fr>\nLicense: MIT\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy.interpolate import RegularGridInterpolator\ntry:\n from fteikpy import Eikonal\nexcept ImportError:\n import sys\n sys.path.append(\"../\")\n from fteikpy import Eikonal\n\n\ndef traveltime(vel, src, rcv):\n return np.linalg.norm(np.array(src) - np.array(rcv)) / vel\n\n\nif __name__ == \"__main__\":\n # Parameters\n nz, nx = 100, 100\n dz, dx = 1., 1.\n source = ( 50., 50. )\n velocity = 1500.\n \n # Analytical solution\n az = dz * np.arange(nz)\n ax = dx * np.arange(nx)\n Z, X = np.meshgrid(az, ax, indexing = \"ij\")\n tt_true = np.array([ traveltime(velocity, source, (z, x))\n for z, x in zip(Z.ravel(), X.ravel()) ]).reshape((nz, nx))\n \n # Eikonal solver\n eik = Eikonal(np.full((6, 6), velocity), (20., 20.))\n ttgrid = eik.solve(source)\n \n # Time interpolation\n fn = RegularGridInterpolator((20. * np.arange(6), 20. 
* np.arange(6)), ttgrid.grid)\n tt_time = np.array([ fn([ z, x ]) for z, x in zip(Z.ravel(), X.ravel()) ]).reshape((nz, nx))\n \n # Velocity interpolation (using ttgrid's get method)\n tt_vel = np.array([ ttgrid.get(z, x) for z, x in zip(Z.ravel(), X.ravel()) ]).reshape((nz, nx))\n \n # Plot traveltime grids\n fig = plt.figure(figsize = (12, 4), facecolor = \"white\")\n fig.patch.set_alpha(0.)\n ax1 = fig.add_subplot(1, 3, 1)\n ax2 = fig.add_subplot(1, 3, 2)\n ax3 = fig.add_subplot(1, 3, 3)\n \n ax1.imshow(tt_true); ax1.set_title(\"Analytical solution\")\n ax2.imshow(tt_time); ax2.set_title(\"Time interpolation\")\n ax3.imshow(tt_vel); ax3.set_title(\"Velocity interpolation\")\n \n ax1.grid(True, linestyle = \":\")\n ax2.grid(True, linestyle = \":\")\n ax3.grid(True, linestyle = \":\")\n \n fig.tight_layout()\n fig.show()" }, { "alpha_fraction": 0.6212121248245239, "alphanum_fraction": 0.6515151262283325, "avg_line_length": 19.482759475708008, "blob_id": "2573a5bc9a9ac40c9f9164a6f1d624623942c53e", "content_id": "279581e222119cba20d166ba440c59f0794dadc1", "detected_licenses": [ "MIT", "LicenseRef-scancode-public-domain" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 594, "license_type": "permissive", "max_line_length": 78, "num_lines": 29, "path": "/fteikpy/__init__.py", "repo_name": "shufantj/FTeikPy", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\n\"\"\"\nFTeikPy is a Python module that computes accurate first arrival traveltimes in\n2-D and 3-D heterogeneous isotropic velocity model.\n\nAuthor: Keurfon Luu <keurfon.luu@mines-paristech.fr>\nLicense: MIT\n\"\"\"\n\nfrom .ttgrid import TTGrid\nfrom .eikonal import Eikonal\nfrom .ray import Ray, ray_coverage\nfrom .layered_model import lay2vel, lay2tt\nfrom .bspline_model import bspline1, bspline2, vel2spl, spl2vel\n\n__version__ = \"1.5.0\"\n__all__ = [\n \"TTGrid\",\n \"Eikonal\",\n \"Ray\",\n \"ray_coverage\",\n \"lay2vel\",\n \"lay2tt\",\n \"bspline1\",\n \"bspline2\",\n \"vel2spl\",\n \"spl2vel\",\n ]\n" }, { "alpha_fraction": 0.3906705677509308, "alphanum_fraction": 0.46501457691192627, "avg_line_length": 19.81818199157715, "blob_id": "303d49cdb2c56018403594e2f9fa7f621ec68d43", "content_id": "09973582f5c381256c934de87de839d26851f26f", "detected_licenses": [ "MIT", "LicenseRef-scancode-public-domain" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 686, "license_type": "permissive", "max_line_length": 65, "num_lines": 33, "path": "/examples/raytracer/_fminbnd.py", "repo_name": "shufantj/FTeikPy", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\n\"\"\"\nAuthor: Keurfon Luu <keurfon.luu@mines-paristech.fr>\nLicense: MIT\n\"\"\"\n\nimport numpy as np\n\n__all__ = [ \"fminbnd\" ]\n\n\ndef fminbnd(f, lower, upper, eps = 1e-4, args = (), kwargs = {}):\n # Define func\n func = lambda x: f(x, *args, **kwargs)\n \n # Golden ratio\n gr = 0.61803398874989479\n \n # Golden section search\n x1 = np.array([ lower ])\n x2 = np.array([ upper ])\n x3 = x2 - gr * (x2 - x1)\n x4 = x1 + gr * (x2 - x1)\n while np.abs(x3 - x4) > eps:\n if func(x3) < func(x4):\n x2 = x4\n else:\n x1 = x3\n x3 = x2 - gr * (x2 - x1)\n x4 = x1 + gr * (x2 - x1)\n xmin = 0.5 * (x1 + x2)\n return xmin, func(xmin)" }, { "alpha_fraction": 0.5219255089759827, "alphanum_fraction": 0.531882643699646, "avg_line_length": 37.88718032836914, "blob_id": "1c91ed2923b2396a8ea5be288046ccb536d70f27", "content_id": "0e62a703bd420d4c72231ca0a63281797b3fabc1", "detected_licenses": [ "MIT", 
"LicenseRef-scancode-public-domain" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 15165, "license_type": "permissive", "max_line_length": 125, "num_lines": 390, "path": "/fteikpy/eikonal.py", "repo_name": "shufantj/FTeikPy", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\n\"\"\"\nEikonal interfaces eikonal solver routines written in Fortran with Python.\n\nAuthor: Keurfon Luu <keurfon.luu@mines-paristech.fr>\nLicense: MIT\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib.axes import Axes\nfrom ._fteik2d import fteik2d\nfrom ._fteik3d import fteik3d\nfrom scipy.interpolate import RegularGridInterpolator\nfrom scipy.ndimage import gaussian_filter\nfrom .ttgrid import TTGrid\n\n__all__ = [ \"Eikonal\" ]\n\n\nclass Eikonal:\n \"\"\"\n Compute first arrival traveltime solving the eikonal equation using a\n finite difference scheme in 2-D and 3-D in isotropic velocity medium.\n \n Parameters\n ----------\n velocity_model : ndarray of shape (nz, nx[, ny])\n Velocity model grid in m/s.\n grid_size : tuple (dz, dx[, dy])\n Grid size in meters.\n n_sweep : int, default 1\n Number of sweeps.\n zmin : int or float, default 0.\n Z axis first coordinate.\n xmin : int or float, default 0.\n X axis first coordinate.\n ymin : int or float, default 0.\n Y axis first coordinate. Only used if 3-D velocity model.\n \"\"\"\n \n def __init__(self, velocity_model, grid_size, n_sweep = 1,\n zmin = 0., xmin = 0., ymin = 0.):\n if not isinstance(velocity_model, np.ndarray) \\\n and not velocity_model.ndim in [ 2, 3 ]:\n raise ValueError(\"velocity_model must be a 2-D or 3-D ndarray\")\n if np.any(velocity_model <= 0.):\n raise ValueError(\"velocity_model must be positive\")\n else:\n self._velocity_model = np.array(velocity_model)\n self._n_dim = velocity_model.ndim\n if np.any(np.array(velocity_model.shape) < 4):\n raise ValueError(\"velocity_model grid shape must be at least 4\")\n else:\n self._grid_shape = velocity_model.shape\n if not isinstance(grid_size, (list, tuple, np.ndarray)):\n raise ValueError(\"grid_size must be a list, tuple or ndarray\")\n if len(grid_size) != self._n_dim:\n raise ValueError(\"grid_size should be of length %d, got %d\" \\\n % (self._n_dim, len(grid_size)))\n if np.any(np.array(grid_size) <= 0.):\n raise ValueError(\"elements in grid_size must be positive\")\n else:\n self._grid_size = grid_size\n if not isinstance(n_sweep, int) or n_sweep <= 0:\n raise ValueError(\"n_sweep must be a positive integer, got %s\" % n_sweep)\n else:\n self._n_sweep = n_sweep\n if not isinstance(zmin, (int, float)):\n raise ValueError(\"zmin must be an integer or float\")\n else:\n self._zmin = zmin\n self._zaxis = zmin + grid_size[0] * np.arange(self._grid_shape[0])\n if not isinstance(xmin, (int, float)):\n raise ValueError(\"xmin must be an integer or float\")\n else:\n self._xmin = xmin\n self._xaxis = xmin + grid_size[1] * np.arange(self._grid_shape[1])\n if self._n_dim == 3:\n if not isinstance(ymin, (int, float)):\n raise ValueError(\"ymin must be an integer or float\")\n else:\n self._ymin = ymin\n self._yaxis = ymin + grid_size[2] * np.arange(self._grid_shape[2])\n \n def rescale(self, new_shape):\n \"\"\"\n Upscale or downscale velocity model.\n \n Parameters\n ----------\n new_shape : list or ndarray\n New shape.\n \"\"\"\n if not isinstance(new_shape, (list, tuple, np.ndarray)) or len(new_shape) != self._n_dim \\\n or not np.all([ isinstance(n, int) for n in new_shape ]):\n raise ValueError(\"new_shape must be a 
tuple with %d integers\" % self._n_dim)\n if np.any([ n < 4 for n in new_shape ]):\n raise ValueError(\"elements in new_shape must be at least 4\")\n \n if new_shape == self._grid_shape:\n pass\n elif len(new_shape) == 2:\n fn = RegularGridInterpolator((self._zaxis, self._xaxis), self._velocity_model)\n zaxis = np.linspace(self._zmin, self._zaxis[-1], new_shape[0])\n xaxis = np.linspace(self._xmin, self._xaxis[-1], new_shape[1])\n Z, X = np.meshgrid(zaxis, xaxis, indexing = \"ij\")\n cz, cx = [ new / old for new, old in zip(new_shape, self._grid_shape) ]\n self._velocity_model = fn([ [ z, x ] for z, x in zip(Z.ravel(), X.ravel()) ]).reshape(new_shape)\n self._grid_shape = new_shape\n self._grid_size = (self._grid_size[0] / cz, self._grid_size[1] / cx)\n self._zaxis = self._zmin + self._grid_size[0] * np.arange(self._grid_shape[0])\n self._xaxis = self._xmin + self._grid_size[1] * np.arange(self._grid_shape[1])\n elif len(new_shape) == 3:\n fn = RegularGridInterpolator((self._zaxis, self._xaxis, self._yaxis), self._velocity_model)\n zaxis = np.linspace(self._zmin, self._zaxis[-1], new_shape[0])\n xaxis = np.linspace(self._xmin, self._xaxis[-1], new_shape[1])\n yaxis = np.linspace(self._ymin, self._yaxis[-1], new_shape[2])\n Z, X, Y = np.meshgrid(zaxis, xaxis, yaxis, indexing = \"ij\")\n cz, cx, cy = [ new / old for new, old in zip(new_shape, self._grid_shape) ]\n self._velocity_model = fn([ [ z, x, y ] for z, x, y in zip(Z.ravel(), X.ravel(), Y.ravel()) ]).reshape(new_shape)\n self._grid_shape = new_shape\n self._grid_size = (self._grid_size[0] / cz, self._grid_size[1] / cx, self._grid_size[2] / cy)\n self._zaxis = self._zmin + self._grid_size[0] * np.arange(self._grid_shape[0])\n self._xaxis = self._xmin + self._grid_size[1] * np.arange(self._grid_shape[1])\n self._yaxis = self._ymin + self._grid_size[2] * np.arange(self._grid_shape[2])\n \n def smooth(self, sigma):\n \"\"\"\n Smooth velocity model. This method uses SciPy's gaussian_filter\n function.\n \n Parameters\n ----------\n sigma : int, float or tuple\n Standard deviation in meters for Gaussian kernel. 
The standard\n deviations of the Gaussian filter are given for each axis as a\n sequence, or as a single number, in which case it is equal for all\n axes.\n \"\"\"\n # Check inputs\n if not isinstance(sigma, (int, float, list, tuple)):\n raise ValueError(\"sigma must be a scalar or a tuple\")\n if isinstance(sigma, (int, float)) and sigma < 0.:\n raise ValueError(\"sigma must be positive\")\n elif isinstance(sigma, (list, tuple)):\n if len(sigma) != self._n_dim:\n raise ValueError(\"sigma must be a scalar or a tuple of length %d\" % self._n_dim)\n if np.any(np.array(sigma) < 0.):\n raise ValueError(\"elements in sigma must be positive\")\n \n # Gaussian filtering\n if isinstance(sigma, (int, float)):\n npts = np.full(self._n_dim, sigma) / self._grid_size\n else:\n npts = np.array(sigma) / self._grid_size\n self._velocity_model = gaussian_filter(self._velocity_model, npts)\n \n def solve(self, sources, dtype = \"float32\", n_threads = 1):\n \"\"\"\n Compute the traveltime grid associated to a source point.\n \n Parameters\n ----------\n sources : list or ndarray\n Sources coordinates (Z, X[, Y]).\n dtype : {'float32', 'float64'}, default 'float32'\n Traveltime grid data type.\n n_threads : int, default 1\n Number of threads to pass to OpenMP.\n \n Returns\n -------\n tt : TTGrid\n Traveltime grid.\n \"\"\"\n # Check inputs\n if not isinstance(sources, (list, tuple, np.ndarray)):\n raise ValueError(\"sources must be a list, tuple or ndarray\")\n if isinstance(sources, np.ndarray) and sources.ndim not in [ 1, 2 ]:\n raise ValueError(\"sources must be 1-D or 2-D ndarray\")\n if isinstance(sources, (list, tuple)) and len(sources) != self._n_dim:\n raise ValueError(\"sources should have %d coordinates, got %d\" \\\n % (self._n_dim, len(sources)))\n elif isinstance(sources, np.ndarray) and sources.ndim == 1 and len(sources) != self._n_dim:\n raise ValueError(\"sources should have %d coordinates, got %d\" \\\n % (self._n_dim, len(sources)))\n elif isinstance(sources, np.ndarray) and sources.ndim == 2 and sources.shape[1] != self._n_dim:\n raise ValueError(\"sources should have %d coordinates, got %d\" \\\n % (self._n_dim, sources.shape[1]))\n if dtype not in [ \"float32\", \"float64\" ]:\n raise ValueError(\"dtype must be 'float32' or 'float64'\")\n if not isinstance(n_threads, int) or n_threads < 1:\n raise ValueError(\"n_threads must be atleast 1, got %s\" % n_threads)\n \n # Define src array\n if isinstance(sources, (list, tuple)) or sources.ndim == 1:\n src = np.array(sources)[None,:]\n else:\n src = np.array(sources)\n src_shift = np.array([ self._shift(s) for s in src ])\n nsrc = src.shape[0]\n \n # Call Eikonal solver\n if self._n_dim == 2:\n dz, dx = self._grid_size\n nz, nx = self._grid_shape\n for i in range(nsrc):\n self._check_2d(src_shift[i,0], src_shift[i,1])\n grid = fteik2d.solve(1./self._velocity_model, src_shift[:,0], src_shift[:,1],\n dz, dx, self._n_sweep, n_threads = n_threads)\n tt = [ TTGrid(grid = np.array(g, dtype = dtype),\n source = s,\n grid_size = self._grid_size,\n zmin = self._zmin,\n xmin = self._xmin) for g, s in zip(grid, src) ]\n elif self._n_dim == 3:\n dz, dx, dy = self._grid_size\n nz, nx, ny = self._grid_shape\n for i in range(nsrc):\n self._check_3d(src[i,0], src[i,1], src[i,2])\n grid = fteik3d.solve(1./self._velocity_model, src_shift[:,0], src_shift[:,1], src_shift[:,2],\n dz, dx, dy, self._n_sweep, n_threads = n_threads)\n tt = [ TTGrid(grid = np.array(g, dtype = dtype),\n source = s,\n grid_size = self._grid_size,\n zmin = self._zmin,\n xmin = 
self._xmin,\n ymin = self._ymin) for g, s in zip(grid, src) ]\n if isinstance(sources, (list, tuple)) or sources.ndim == 1:\n return tt[0]\n else:\n return tt\n \n def plot(self, n_levels = 200, axes = None, figsize = (10, 4), cont_kws = {}):\n \"\"\"\n Plot the velocity model.\n\n Parameters\n ----------\n n_levels : int, default 200\n Number of levels for contour.\n axes : matplotlib axes or None, default None\n Axes used for plot.\n figsize : tuple, default (8, 8)\n Figure width and height if axes is None.\n cont_kws : dict\n Keyworded arguments passed to contour plot.\n\n Returns\n -------\n cax : matplotlib contour\n Contour plot.\n \"\"\"\n if not isinstance(n_levels, int) or n_levels < 1:\n raise ValueError(\"n_levels must be a positive integer\")\n if axes is not None and not isinstance(axes, Axes):\n raise ValueError(\"axes must be Axes\")\n if not isinstance(figsize, (list, tuple)) or len(figsize) != 2:\n raise ValueError(\"figsize must be a tuple with 2 elements\")\n if not isinstance(cont_kws, dict):\n raise ValueError(\"cont_kws must be a dictionary\")\n\n if self._n_dim == 2:\n if axes is None:\n fig = plt.figure(figsize = figsize, facecolor = \"white\")\n ax1 = fig.add_subplot(1, 1, 1)\n else:\n ax1 = axes\n cax = ax1.contourf(self._xaxis, self._zaxis, self._velocity_model, n_levels, **cont_kws)\n ax1.set_xlabel(\"X (m)\")\n ax1.set_ylabel(\"Depth (m)\")\n ax1.invert_yaxis()\n return cax\n else:\n raise ValueError(\"plot unavailable for 3-D grid\")\n \n def _shift(self, coord):\n if self._n_dim == 2:\n return np.array(coord) - np.array([ self._zmin, self._xmin ])\n elif self._n_dim == 3:\n return np.array(coord) - np.array([ self._zmin, self._xmin, self._ymin ])\n \n def _check_2d(self, z, x):\n if np.logical_or(np.any(z < self._zaxis[0]), np.any(z > self._zaxis[-1])):\n raise ValueError(\"z out of bounds\")\n if np.logical_or(np.any(x < self._xaxis[0]), np.any(x > self._xaxis[-1])):\n raise ValueError(\"x out of bounds\")\n \n def _check_3d(self, z, x, y):\n self._check_2d(z, x)\n if np.logical_or(np.any(y < self._yaxis[0]), np.any(y > self._yaxis[-1])):\n raise ValueError(\"y out of bounds\")\n \n @property\n def velocity_model(self):\n \"\"\"\n ndarray of shape (nz, nx[, ny])\n Velocity model grid in m/s.\n \"\"\"\n return self._velocity_model\n \n @velocity_model.setter\n def velocity_model(self, value):\n self._velocity_model = value\n \n @property\n def grid_shape(self):\n \"\"\"\n tuple (nz, nx[, ny])\n Velocity grid's shape.\n \"\"\"\n return self._grid_shape\n \n @grid_shape.setter\n def grid_shape(self, value):\n self._grid_shape = value\n \n @property\n def n_dim(self):\n \"\"\"\n int\n Number of dimensions (2 or 3).\n \"\"\"\n return self._n_dim\n \n @n_dim.setter\n def n_dim(self, value):\n self._n_dim = value\n \n @property\n def grid_size(self):\n \"\"\"\n tuple (dz, dx[, dy])\n Grid size in meters.\n \"\"\"\n return self._grid_size\n \n @grid_size.setter\n def grid_size(self, value):\n self._grid_size = value\n \n @property\n def n_sweep(self):\n \"\"\"\n int\n Number of sweeps.\n \"\"\"\n return self._n_sweep\n \n @n_sweep.setter\n def n_sweep(self, value):\n self._n_sweep = value\n \n @property\n def zaxis(self):\n \"\"\"\n ndarray of size nz\n Z coordinates of the grid.\n \"\"\"\n return self._zaxis\n \n @zaxis.setter\n def zaxis(self, value):\n self._zaxis = value\n \n @property\n def xaxis(self):\n \"\"\"\n ndarray of size nx\n X coordinates of the grid.\n \"\"\"\n return self._xaxis\n \n @xaxis.setter\n def xaxis(self, value):\n self._xaxis = value\n \n 
@property\n def yaxis(self):\n \"\"\"\n ndarray of size ny\n Y coordinates of the grid.\n \"\"\"\n return self._yaxis\n \n @yaxis.setter\n def yaxis(self, value):\n self._yaxis = value" }, { "alpha_fraction": 0.5560463666915894, "alphanum_fraction": 0.5952512621879578, "avg_line_length": 31.945453643798828, "blob_id": "a7bdcc035dc53cf66a7856c47ffcc0801f1d257f", "content_id": "625cba21ce585f25cdc8939e587ae4073a0ee4fb", "detected_licenses": [ "MIT", "LicenseRef-scancode-public-domain" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1811, "license_type": "permissive", "max_line_length": 97, "num_lines": 55, "path": "/examples/example_eikonal.py", "repo_name": "shufantj/FTeikPy", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\n\"\"\"\nThis example benchmarks the performances of a ray tracer with the 2D and 3D\nEikonal solvers on a stratified medium.\n\nAuthor: Keurfon Luu <keurfon.luu@mines-paristech.fr>\nLicense: MIT\n\"\"\"\n\nimport numpy as np\nimport time\nfrom raytracer.raytracer import Ray3D\ntry:\n from fteikpy import Eikonal, lay2vel, lay2tt\nexcept ImportError:\n import sys\n sys.path.append(\"../\")\n from fteikpy import Eikonal, lay2vel, lay2tt\n\n\nif __name__ == \"__main__\":\n # Parameters\n sources = np.loadtxt(\"shots.txt\")\n receivers = np.loadtxt(\"stations.txt\")\n dz, dx, dy = 2.5, 2.5, 2.5\n nz, nx, ny = 400, 280, 4\n n_threads = 8\n \n # Make a layered velocity model\n lay = 1500. + 250. * np.arange(10)\n zint = 100. + 100. * np.arange(10)\n vel2d = lay2vel(np.hstack((lay[:,None], zint[:,None])), dz, (nz, nx))\n vel3d = np.tile(vel2d[:,:,None], ny)\n \n # Ray tracer\n start_time = time.time()\n ray = Ray3D()\n tcalc_ray = ray.lay2tt(sources[:,[1,2,0]], receivers[:,[1,2,0]], lay, zint)\n print(\"Ray tracer: %.3f seconds\" % (time.time() - start_time))\n \n # Eikonal 2D\n start_time = time.time()\n tcalc_eik2d = lay2tt(vel2d, (dz, dx), sources, receivers, n_sweep = 2, n_threads = n_threads)\n print(\"\\nEikonal 2D: %.3f seconds\" % (time.time() - start_time))\n print(\"Mean residual (2D): \", (tcalc_eik2d - tcalc_ray).mean())\n \n # Eikonal 3D\n start_time = time.time()\n eik3d = Eikonal(vel3d, (dz, dx, dy), n_sweep = 2)\n tt = eik3d.solve(sources, n_threads = n_threads)\n tcalc_eik3d = np.array([ [ grid.get(z, x, y, check = False) for z, x, y in receivers ]\n for grid in tt ]).transpose()\n print(\"\\nEikonal 3D: %.3f seconds\" % (time.time() - start_time))\n print(\"Mean residual (3D): \", (tcalc_eik3d - tcalc_ray).mean())" }, { "alpha_fraction": 0.5768985152244568, "alphanum_fraction": 0.6081684827804565, "avg_line_length": 26.508771896362305, "blob_id": "a46a5598696a6a06bf847104c4af59567721136d", "content_id": "86732ea62d87e8fe42821394dc1c9e170cb353c6", "detected_licenses": [ "MIT", "LicenseRef-scancode-public-domain" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1567, "license_type": "permissive", "max_line_length": 92, "num_lines": 57, "path": "/examples/example_plot.py", "repo_name": "shufantj/FTeikPy", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\n\"\"\"\nThis example shows how to compute a traveltime grid using an Eikonal solver\nand to plot it.\n\nAuthor: Keurfon Luu <keurfon.luu@mines-paristech.fr>\nLicense: MIT\n\"\"\"\n\nimport numpy as np\nimport sys\nsys.path.append(\"../\")\nimport matplotlib.pyplot as plt\nimport pickle\ntry:\n from fteikpy import Eikonal\nexcept ImportError:\n import sys\n sys.path.append(\"../\")\n from fteikpy import Eikonal\n\n\nif 
__name__ == \"__main__\":\n # Parameters\n source = ( 0., 0. )\n marmousi = pickle.load(open(\"marmousi.pickle\", \"rb\"))[51:,:]\n nz, nx = marmousi.shape\n dz, dx = 10., 10.\n \n # Compute traveltimes using a 2D Eikonal solver\n eik = Eikonal(marmousi, grid_size = (dz, dx), n_sweep = 3)\n eik.smooth(50)\n tt = eik.solve(source)\n \n # Trace ray from receivers to source\n nrcv = 200\n receivers = np.zeros((nrcv, 2))\n receivers[:,1] = np.linspace(4400., eik.xaxis[-1], nrcv)\n rays = tt.raytracer(receivers, ray_step = 1., max_ray = 1000, n_threads = 8)\n \n # Plot velocity model and isochrones\n fig = plt.figure(figsize = (10, 3.5), facecolor = \"white\")\n fig.patch.set_alpha(0.)\n ax1 = fig.add_subplot(1, 1, 1)\n \n cax = eik.plot(axes = ax1, cont_kws = dict(cmap = \"viridis\"))\n tt.plot(n_levels = 100, axes = ax1, cont_kws = dict(colors = \"black\", linewidths = 0.5))\n for ray in rays:\n ray.plot(axes = ax1, plt_kws = dict(color = \"black\", linewidth = 0.5))\n \n ax1.set_title(\"Marmousi\")\n cb = fig.colorbar(cax)\n cb.set_label(\"P-wave velocity (m/s)\")\n \n fig.tight_layout()\n fig.show()" }, { "alpha_fraction": 0.6074380278587341, "alphanum_fraction": 0.692148745059967, "avg_line_length": 36.230770111083984, "blob_id": "b015c9ea8e823caa637f94ba6b3009c27630c98c", "content_id": "59ebf9d0c02ac68964a8638af7a2b4a4cac9e712", "detected_licenses": [ "MIT", "LicenseRef-scancode-public-domain" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 484, "license_type": "permissive", "max_line_length": 85, "num_lines": 13, "path": "/fteikpy/compile.sh", "repo_name": "shufantj/FTeikPy", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\n# PARAMETERS\nF2PY=\"f2py\"\nFC=\"gfortran\"\nFFLAGS=\"-O3 -ffast-math -funroll-loops -fno-protect-parens -fopenmp\"\nF90DIR=\"f90/\"\n\n# COMMANDS\n$F2PY -c -m _fteik2d --fcompiler=$FC --f90flags=\"$FFLAGS\" -lgomp \"$F90DIR\"fteik2d.f90\n$F2PY -c -m _fteik3d --fcompiler=$FC --f90flags=\"$FFLAGS\" -lgomp \"$F90DIR\"fteik3d.f90\n$F2PY -c -m _lay2vel --fcompiler=$FC --f90flags=\"$FFLAGS\" \"$F90DIR\"lay2vel.f90\n$F2PY -c -m _bspline --fcompiler=$FC --f90flags=\"$FFLAGS\" -lgomp \"$F90DIR\"bspline.f90\n" }, { "alpha_fraction": 0.4591943919658661, "alphanum_fraction": 0.48021015524864197, "avg_line_length": 34.25308609008789, "blob_id": "613e737124e49cd40eec3b6cbbcc233822362902", "content_id": "728ec40a0def6d77f83e4c82634103165c6173e2", "detected_licenses": [ "MIT", "LicenseRef-scancode-public-domain" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5710, "license_type": "permissive", "max_line_length": 114, "num_lines": 162, "path": "/examples/raytracer/raytracer.py", "repo_name": "shufantj/FTeikPy", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\n\"\"\"\nRay Tracer computes the first arrival traveltimes in a stratified medium. 
This\ncode is not optimized and is only provided as a comparison for Eikonal solvers.\n\nAuthor: Keurfon Luu <keurfon.luu@mines-paristech.fr>\nLicense: MIT\n\"\"\"\n\nimport numpy as np\nfrom ._fminbnd import fminbnd\n\n__all__ = [ \"Ray3D\" ]\n\n\nclass Ray3D:\n \n def __init__(self, arrival = 0, positions = None, traveltime = None):\n self.arrival = arrival\n self.positions = positions\n self.traveltime = traveltime\n \n def trace(self, src, rcv, vel, zint):\n # Check src and rcv\n assert src[2] < zint[-1]\n assert rcv[2] < zint[-1]\n \n self.source = src.copy()\n self.receiver = rcv.copy()\n\n # Determine source and receiver layers\n ns = 0\n nr = 0\n for i in range(len(zint)-1):\n if src[2] >= zint[i]:\n ns += 1\n if rcv[2] >= zint[i]:\n nr += 1\n \n # Number of layers between source and receiver\n nlayer = np.abs(ns - nr) + 1\n self.positions = np.zeros((nlayer + 1, 3))\n \n if ns == nr:\n self.positions[0,:] = src.copy()\n self.positions[-1,:] = rcv.copy()\n self.traveltime = np.linalg.norm(src - rcv) / vel[ns]\n return\n else:\n self.dhorz = np.linalg.norm(src[:2] - rcv[:2]) \n \n # Initialize positions\n self.positions[0,2] = src[2]\n if nlayer > 1:\n if ns > nr:\n if nr == 0:\n self.positions[1:nlayer,2] = zint[ns-1::-1]\n else:\n self.positions[1:nlayer,2] = zint[ns-1:nr-1:-1]\n else:\n self.positions[1:nlayer,2] = zint[ns:nr]\n self.positions[-1,2] = rcv[2]\n \n # Layers of interest\n V = np.zeros(nlayer)\n V[0], V[-1] = vel[ns], vel[nr]\n H = np.zeros(nlayer)\n if ns < nr:\n H[0] = np.abs(self.source[2] - zint[ns])\n for i in range(1, nlayer-1):\n V[i] = vel[ns+i]\n H[i] = zint[ns+i] - zint[ns+i-1]\n H[-1] = np.abs(self.receiver[2] - zint[nr-1])\n elif ns > nr:\n H[0] = np.abs(self.source[2] - zint[ns-1])\n for i in range(1, nlayer-1):\n V[i] = vel[ns-i]\n H[i] = zint[ns-i] - zint[ns-i-1]\n H[-1] = np.abs(self.receiver[2] - zint[nr])\n \n # Shift so that xsrc, ysrc = 0, 0\n self._shift()\n \n # Rotate to remove y axis\n self._rotate()\n \n # Invert for the take-off angle\n iopt, gfit = fminbnd(self._costfunc, 0., 180., eps = 1e-16, args = (V, H))\n \n # Shoot with optimal take-off angle...\n self.positions[:,0] = self._shoot(iopt, V, H)\n \n # ...and compute traveltime\n self.traveltime = 0.\n for i in range(nlayer):\n self.traveltime = self.traveltime + np.linalg.norm(self.positions[i,:] - self.positions[i+1,:]) / V[i]\n \n # Unrotate\n self._unrotate()\n \n # Unshift\n self._unshift()\n \n def lay2tt(self, src, rcv, vel, zint):\n # Parameters\n nsrc = src.shape[0]\n nrcv = rcv.shape[0]\n \n # Compute traveltimes using a ray tracer\n tcalc = np.zeros((nrcv, nsrc)) \n for j in range(nsrc):\n for k in range(nrcv):\n self.trace(src[j,:], rcv[k,:], vel, zint)\n tcalc[k,j] = self.traveltime\n return tcalc\n \n def _shoot(self, i, V, H):\n p = np.sin(np.deg2rad(i)) / V[0]\n nlayer = len(V)\n X = np.zeros(nlayer + 1) \n for i in range(1, nlayer + 1):\n X[i] = X[i-1] + H[i-1] * np.tan(np.arcsin(V[i-1]*p))\n return X\n \n def _costfunc(self, i, *args):\n V, H = args\n X = self._shoot(i, V, H)\n return np.abs(self.dhorz - X[-1])\n \n def _shift(self, pos = False):\n self.receiver[:2] -= self.source[:2]\n if pos is True:\n for i in range(self.positions.shape[0]):\n self.positions[i,:2] -= self.source[:2]\n \n def _unshift(self):\n self.receiver[:2] += self.source[:2]\n for i in range(self.positions.shape[0]):\n self.positions[i,:2] += self.source[:2] \n \n def _rotate(self, pos = False):\n self.theta = -np.arctan2(self.receiver[1], self.receiver[0])\n x = self.receiver[0] * 
np.cos(self.theta) - self.receiver[1] * np.sin(self.theta)\n y = self.receiver[0] * np.sin(self.theta) + self.receiver[1] * np.cos(self.theta)\n self.receiver[0] = x\n self.receiver[1] = y\n if pos is True:\n x = self.positions[:,0] * np.cos(self.theta) - self.positions[:,1] * np.sin(self.theta)\n y = self.positions[:,0] * np.sin(self.theta) + self.positions[:,1] * np.cos(self.theta)\n self.positions[:,0] = x\n self.positions[:,1] = y\n \n def _unrotate(self):\n x = self.receiver[0] * np.cos(-self.theta) - self.receiver[1] * np.sin(-self.theta)\n y = self.receiver[0] * np.sin(-self.theta) + self.receiver[1] * np.cos(-self.theta)\n self.receiver[0] = x\n self.receiver[1] = y\n x = self.positions[:,0] * np.cos(-self.theta) - self.positions[:,1] * np.sin(-self.theta)\n y = self.positions[:,0] * np.sin(-self.theta) + self.positions[:,1] * np.cos(-self.theta)\n self.positions[:,0] = x\n self.positions[:,1] = y" } ]
9
A2Karren/python
https://github.com/A2Karren/python
df6ce9169ee9427cbb6c05ec2b425a7a11d82df9
e3e4987902648a421036a8fdce619164ff1bdb23
c3f9823b30746e9897e70fe21f74c137fe86ee7b
refs/heads/master
2020-03-25T06:25:45.025106
2018-08-10T21:51:01
2018-08-10T21:51:01
143,500,719
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6569608449935913, "alphanum_fraction": 0.6760691404342651, "avg_line_length": 23.422222137451172, "blob_id": "029749b7aa03ed1d5a541846b55b7419599506be", "content_id": "97b19a0fd74c8aaf7eaeb97a5e23c1f0e63c0d3d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2198, "license_type": "no_license", "max_line_length": 130, "num_lines": 90, "path": "/Hello.py", "repo_name": "A2Karren/python", "src_encoding": "UTF-8", "text": "print(\"Hello world\")\nprint(\"I'm going to build a chatbot one day!\")\n\n#For Loops\n\ntexasCities = [\"Houston\",\"Austin\",\"Dallas\",\"San Antonio\"]\nfor texas in texasCities:\n print(texas)\n\n\n#List of Lists\ntexasRanking = [\"Houston,1\",\"Austin,2\",\"Dallas,3\",\"San Antonio,4\"]\nfinalRanking = []\n\nfor rows in texasRanking:\n interimRanking = rows.split(',')\n finalRanking.append(interimRanking)\nprint(finalRanking)\n\n\n#Unique Mechanisms in List of Lists\nfirstCityPair = finalRanking[0]\nfirstCity = firstCityPair[0]\nprint(firstCityPair)\nprint(firstCity)\n\nalternativeCity = finalRanking [1][0]\nprint(alternativeCity)\n\n#Comparison Operators\nprint(8 == 8) # True\nprint(8 != 8) # False\nprint(8 == 10) # False\nprint(8 != 10) # True\n\n\n#List Operations\nanimals = [\"cat\", \"dog\", \"rabbit\", \"horse\", \"giant_horrible_monster\"]\ncat_found = \"cat\" in animals\nspace_monster_found = \"space_monster\" in animals\nprint(space_monster_found)\n\nif \"cat\" in animals:\n print(\"Cat found\")\n\n#Disadvantages of using a list vs. Leveraging Indexes over Dictionaries is silly\nstudents = [\"Tom\", \"Jim\", \"Sue\", \"Ann\"]\nscores = [70, 80, 85, 75]\n\nindexes = [0,1,2,3]\nname = \"Sue\"\nscore = 0\nfor i in indexes:\n if students[i] == name:\n score = scores[i]\nprint(score)\n\n#Count the number of times that each element occurs in the list named pantry that appears in the code block below. You'll need to:\npantry = [\"apple\", \"orange\", \"grape\", \"apple\", \"orange\", \"apple\", \"tomato\", \"potato\", \"grape\"]\npantry_count = {}\n\nfor item in pantry:\n if item in pantry_count:\n pantry_count[item] = pantry_count[item] + 1\n else:\n pantry_count[item] = 1\nprint(pantry_count)\n\n\n\n#To prepare new list of list you have to split it by lines, then commas, and then append to empty list\n\n#Three arguments to a function\nwonder_woman = ['Wonder Woman','Patty Jenkins','Color',141,'Gal Gadot','English','USA',2017]\n\ndef is_usa(input_lst):\n if input_lst[6] == \"USA\":\n return True\n else:\n return False\n\n\ndef index_equals_str(input_lst,index,input_str):\n if input_lst[index] == input_str:\n return True\n else:\n return False\n\nwonder_woman_in_color = index_equals_str(wonder_woman,2,\"Color\")\nprint(wonder_woman_in_color)\n" } ]
1
rensbaardman/ASE-to-CSS
https://github.com/rensbaardman/ASE-to-CSS
f36ab7c70993cea872d19c5b60c8eb2eedcb57d5
3cda90b006d3ea3cef1043ba6c25c88d843911de
758dcf942e5ba80dce4c43541b51fd6918c89147
refs/heads/master
2021-07-06T07:33:58.668296
2020-11-12T10:19:21
2020-11-12T10:19:21
205,889,125
4
1
MIT
2019-09-02T15:46:33
2020-01-31T13:28:05
2020-11-12T10:19:22
Python
[ { "alpha_fraction": 0.56098872423172, "alphanum_fraction": 0.6550241708755493, "avg_line_length": 21.69512176513672, "blob_id": "99ca03844eeefe0541a4e6c671a0add37b2685d8", "content_id": "aaf8708be2581f083842e465da6ca6428c1540f7", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1861, "license_type": "permissive", "max_line_length": 112, "num_lines": 82, "path": "/README.MD", "repo_name": "rensbaardman/ASE-to-CSS", "src_encoding": "UTF-8", "text": "# ASE-to-CSS\n\nConvert Adobe Swatch Exchange files to CSS or SCSS variables!\n\n### Usage\n\nCSS:\n\n```\n$ python ase-to-css.py swatches.ase\n```\n\ngenerates `swatches.css` with all swatch colors defined as CSS variables on the `:root` variables.\n\nSCSS/SASS:\n\nAn option '--style' is given to set the output stylesheet format.\n\n```\n$ python ase-to-css.py swatches.ase --style scss\n```\n\ngenerates `swatches.scss` with all swatch colors defined as SASS variables.\n\n### Example\n\nWhen converting the standard swatches in Illustrator using the CSS argument, this is the output:\n\n```css\n/* converted from swatches.ase */\n:root {\n\t--White: RGB(255, 255, 255);\n\t--Black: RGB(0, 0, 0);\n\t--RGB_Red: RGB(255, 0, 0);\n...\n\t--R_66_G_33_B_11: RGB(66, 33, 11);\n\n\t/* Grays */\n\t--R_0_G_0_B_0: RGB(0, 0, 0);\n...\n\t--R_242_G_242_B_242: RGB(242, 242, 242);\n\n\t/* Web Color Group */\n\t--R_63_G_169_B_245: RGB(63, 169, 245);\n...\n\t--R_189_G_204_B_212: RGB(189, 204, 212);\n}\n\n```\n\nWhen converting the standard swatches in Illustrator using the SCSS style option, this is the output:\n\n```scss\n/* converted from swatches.ase */\n\t$White: RGB(255, 255, 255);\n\t$Black: RGB(0, 0, 0);\n\t$RGB_Red: RGB(255, 0, 0);\n...\n\t$R_66_G_33_B_11: RGB(66, 33, 11);\n\n\t/* Grays */\n\t$R_0_G_0_B_0: RGB(0, 0, 0);\n...\n\t$R_242_G_242_B_242: RGB(242, 242, 242);\n\n\t/* Web Color Group */\n\t$R_63_G_169_B_245: RGB(63, 169, 245);\n...\n\t$R_189_G_204_B_212: RGB(189, 204, 212);\n\n```\n\n### todo\n\n- also output LESS\n- consider (giving an option) to prefix group names to variable names\n- check for clashing variable names (note: variable names are case sensitive)\n- add tests\n- check for different swatch options (what if the colors haven't been specified as RGB, or as spot colors, etc.)\n- option to change indentation (currently: 1 tab)\n- bundle as module (+ seperate CLI?)\n- add options export as RGBA (or as hexadecimal) instead of RGB\n" }, { "alpha_fraction": 0.6501849889755249, "alphanum_fraction": 0.6612849235534668, "avg_line_length": 28.147058486938477, "blob_id": "bdc375e4b9a95d2236799baef7e886261a1202c5", "content_id": "366b0d9421b156a8e1ac5ec9923bd51001786539", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2973, "license_type": "permissive", "max_line_length": 210, "num_lines": 102, "path": "/ase-to-css.py", "repo_name": "rensbaardman/ASE-to-CSS", "src_encoding": "UTF-8", "text": "import swatch as swatch_parser\nimport sys\nimport re\nimport argparse\n\ndef parse_swatches(filepath):\n\treturn swatch_parser.parse(filepath)\n\ndef swatches_to_css(swatches, style, filepath='', declarations_only=False):\n\n\t# this might not be Windows-compatible\n\tfilename = filepath.split('/')[-1]\n\tcss = ''\n\n\t# root variable for using css vars\n\tif(style == 'scss'):\n\t\tprefix = '/* converted from {0} */\\n'.format(filename)\n\telse:\n\t\tprefix = '/* converted from {0} */\\n:root {{\\n'.format(filename)\n\n\tif(style == 
'scss'):\n\t\tpostfix = '\\n'\n\telse:\n\t\tpostfix = '}\\n'\n\n\t\t#set prefix to -- for css and $ for sass\n\tif(style == 'scss'):\n\t\tvarprefx = '$'\n\telse:\n\t\tvarprefx = '--'\n\n\t# check for valid CSS identifiers (custom property names - a.k.a. variables - are identifiers)\n\t# In CSS, identifiers (including element names, classes, and IDs in selectors) can contain only the characters [a-zA-Z0-9] and ISO 10646 characters U+0080 and higher, plus the hyphen (-) and the underscore (_)\n\tpattern = re.compile(r\"[^a-zA-Z0-9-_]\")\n\n\tfor swatch in swatches:\n\n\t\t# this means it is a (palette) group\n\t\tif 'swatches' in swatch:\n\t\t\t\n\t\t\t# only output extra newline if not the first entry\n\t\t\tif css == '':\n\t\t\t\tnewline = ''\n\t\t\telse:\n\t\t\t\tnewline = '\\n'\n\n\t\t\tgroup_prefix = '{0}\\t/* {1} */\\n'.format(newline, swatch['name'])\n\t\t\tgroup_postfix = '\\n'\n\t\t\tgroup_declarations = swatches_to_css(swatch['swatches'], style, declarations_only=True)\n\t\t\tcss += group_prefix + group_declarations + group_postfix\n\n\t\telse:\n\t\t\tvalues = swatch['data']['values']\n\t\t\tcolor = [round(255 * value) for value in values]\n\t\t\tcss_color = 'RGB({0}, {1}, {2})'.format(*color)\n\n\t\t\t# replace all invalid characters with '_'\n\t\t\tname = swatch['name']\n\t\t\tcss_name = re.sub(pattern, '_', name)\n\n\t\t\tcss += '\\t{0}{1}: {2};\\n'.format(varprefx, css_name, css_color)\n\n\tif declarations_only:\n\t\treturn css\n\telse:\n\t\t# suppress extra newline after group if last entry\n\t\tif css[-2:] == '\\n\\n':\n\t\t\tcss = css[:-1]\n\t\treturn prefix + css + postfix\n\ndef save_css(swatch_filepath, style, css):\n\t#set file extention based on desired output\n\tif(style == 'scss'):\n\t\text = '.scss'\n\telse:\n\t\text = '.css'\n\n\tif swatch_filepath.split('.')[-1] == 'ase':\n\t\tcss_filepath = '.'.join(swatch_filepath.split('.')[:-1]) + ext\n\telse:\n\t\tcss_filepath = swatch_filepath + ext\n\n\twith open(css_filepath, 'w') as file:\n\t\tfile.write(css)\n\ndef main():\n\ttext = 'This program transforms swatches in a .ase file into css variables.' #program discription\n\t#swatch_filepath = sys.argv[1]\n\t#style = sys.argv[2]\n\n\tparser = argparse.ArgumentParser(description=text) #parser init\n\tparser.add_argument(\"input\", help=\"Input file path.\") #argument for the input file\n\tparser.add_argument(\"-s\", \"--style\", help=\"Set output style sheet format.\", default=\"css\") #argument for style\n\t\n\targs = parser.parse_args() #put the arguements here\n\n\tswatches = parse_swatches(args.input)\n\n\tcss = swatches_to_css(swatches, args.style, filepath=args.input)\n\tsave_css(args.input, args.style, css)\n\nmain()\n" } ]
2
TurnrDev/discord-ext-alternatives
https://github.com/TurnrDev/discord-ext-alternatives
9a1d706a1dd1d014c8e09ebcef6f41070dfcf67b
e81237047ab3189b46e8ba226ea3988b0ad59ea2
e6aad3c35556d66664f303ecf6ae6a00d5a92ed4
refs/heads/master
2022-12-25T21:47:05.614025
2020-08-29T07:40:20
2020-08-29T07:40:20
295,233,939
0
0
Apache-2.0
2020-09-13T20:41:05
2020-08-29T07:40:24
2020-09-04T13:04:07
null
[ { "alpha_fraction": 0.7142857313156128, "alphanum_fraction": 0.7142857313156128, "avg_line_length": 27, "blob_id": "b735f952b4d07c93386e52964ee3d6e99e957564", "content_id": "469624d3fa75bd884a61b4d6f1c1b31be1bffa26", "detected_licenses": [ "Apache-2.0", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 84, "license_type": "permissive", "max_line_length": 72, "num_lines": 3, "path": "/discord/ext/alternatives/_alternative_converters.py", "repo_name": "TurnrDev/discord-ext-alternatives", "src_encoding": "UTF-8", "text": "_ALL = {\n # This will be populated by loaded alternative converters at runtime\n}\n" } ]
1
sinhlt58/vienuet_nlp
https://github.com/sinhlt58/vienuet_nlp
0acb591930ed4d67e7a89bb13b68505b8f00ecea
b8feab9aaad97cc799d35a0bd79eafaae3b8ff04
a72ac372ab8f9afd82d956848fa1bba32c868bbf
refs/heads/master
2020-03-22T17:59:58.030218
2018-07-20T10:36:56
2018-07-20T10:36:56
140,429,882
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.710073709487915, "alphanum_fraction": 0.710073709487915, "avg_line_length": 21.66666603088379, "blob_id": "c8763453ffe9608282f195a4712ae172a31d0ec7", "content_id": "5ab7fd64579f58968540d38f0e04e52d2bcb5d5a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 407, "license_type": "no_license", "max_line_length": 73, "num_lines": 18, "path": "/vienuet_nlp/chatterbot/preprocessors.py", "repo_name": "sinhlt58/vienuet_nlp", "src_encoding": "UTF-8", "text": "\"\"\"\nStatement pre-processors.\n\"\"\"\n\n\ndef preprocess_vi(chatbot, statement):\n \"\"\"\n Remove any consecutive whitespace characters from the statement text.\n \"\"\"\n import re\n import pyvi.ViTokenizer as tokenizer\n\n tokenized_text = tokenizer.tokenize(statement.text)\n statement.add_extra_data('tokenized_text', tokenized_text)\n\n #statement.text = statement.text.lower()\n\n return statement" }, { "alpha_fraction": 0.5694521069526672, "alphanum_fraction": 0.571112334728241, "avg_line_length": 37.46808624267578, "blob_id": "f15a19e35db6b462adad1ed4a55cb4b2372895cd", "content_id": "b7fc0aeb46f78d158c196eda5ce7e64ee13a9bd8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1813, "license_type": "no_license", "max_line_length": 75, "num_lines": 47, "path": "/vienuet_nlp/chatterbot/bots.py", "repo_name": "sinhlt58/vienuet_nlp", "src_encoding": "UTF-8", "text": "from chatterbot import ChatBot\nfrom chatterbot.trainers import ChatterBotCorpusTrainer\nfrom vienuet_nlp.chatterbot.trainers import ViFQAListTrainer\n\nclass EnglishSimpleBot(ChatBot):\n \n def __init__(self, name):\n super().__init__(name,\n storage_adapter='chatterbot.storage.SQLStorageAdapter',\n input_adapter='chatterbot.input.TerminalAdapter',\n output_adapter='chatterbot.output.TerminalAdapter',\n logic_adapters=[\n # 'chatterbot.logic.MathematicalEvaluation',\n # 'chatterbot.logic.TimeLogicAdapter',\n \"chatterbot.logic.BestMatch\"\n ],\n database='./database.sqlite3',\n read_only=False \n )\n super().set_trainer(ChatterBotCorpusTrainer)\n\nclass VietnameseFQABot(ChatBot):\n\n def __init__(self, name, database_name):\n super().__init__(name,\n input_adapter='chatterbot.input.TerminalAdapter',\n output_adapter='chatterbot.output.TerminalAdapter',\n preprocessors=[\n 'vienuet_nlp.chatterbot.preprocessors.preprocess_vi'\n ],\n logic_adapters=[\n # 'chatterbot.logic.MathematicalEvaluation',\n # 'chatterbot.logic.TimeLogicAdapter',\n {\n 'import_path': 'chatterbot.logic.BestMatch'\n },\n {\n 'import_path': 'chatterbot.logic.LowConfidenceAdapter',\n 'threshold': 0.4,\n 'default_response': 'Xin lแป—i mรฌnh khรดng hiแปƒu.'\n }\n ],\n storage_adapter='chatterbot.storage.MongoDatabaseAdapter',\n database=database_name,\n read_only=True\n )\n super().set_trainer(ViFQAListTrainer)" }, { "alpha_fraction": 0.6475995779037476, "alphanum_fraction": 0.6537283062934875, "avg_line_length": 27, "blob_id": "5df37b5d27dc8ef1f6a929c66824ee7592e2538e", "content_id": "42a32ba93bbeea5b55274a2e491678be3ccece8c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 979, "license_type": "no_license", "max_line_length": 50, "num_lines": 35, "path": "/setup.py", "repo_name": "sinhlt58/vienuet_nlp", "src_encoding": "UTF-8", "text": "import setuptools\n\nwith open(\"README.md\", \"r\") as fh:\n LONG_DESCRIPTION = fh.read()\n\nwith open('requirements.txt') as requirements:\n REQUIREMENTS = 
requirements.readlines()\n\nVIENUET_NLP = __import__('vienuet_nlp')\nVERSION = VIENUET_NLP.__version__\nAUTHOR = VIENUET_NLP.__author__\nAUTHOR_EMAIL = VIENUET_NLP.__email__\nURL = VIENUET_NLP.__url__\nDESCRIPTION = VIENUET_NLP.__doc__\n\nsetuptools.setup(\n    name='vienuet_nlp',\n    version=VERSION,\n    url=URL,\n    description=DESCRIPTION,\n    long_description=LONG_DESCRIPTION,\n    long_description_content_type='text/markdown',\n    author=AUTHOR,\n    author_email=AUTHOR_EMAIL,\n    packages=setuptools.find_packages(),\n    package_dir={'vienuet_nlp': 'vienuet_nlp'},\n    include_package_data=True,\n    install_requires=REQUIREMENTS,\n    python_requires='>=3.6.5, <4',\n    platforms=['ubuntu'],\n    classifiers=[\n        'Programming Language :: Python :: 3.6',\n        'Operating System :: Ubuntu',\n    ]\n)" }, { "alpha_fraction": 0.7032967209815979, "alphanum_fraction": 0.7032967209815979, "avg_line_length": 9.625, "blob_id": "dabfd8f303180425b9e04dae2d7ed0823478e18e", "content_id": "7b43dce1d588d636442d90d2629ebba9ea6e6257", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 91, "license_type": "no_license", "max_line_length": 16, "num_lines": 8, "path": "/requirements.txt", "repo_name": "sinhlt58/vienuet_nlp", "src_encoding": "UTF-8", "text": "# numpy\r\n# spacy\r\n# nltk\r\n# tensorflow\r\n# tensorflow-hub\r\n# sentencepiece\r\nchatterbot\r\npyvi" }, { "alpha_fraction": 0.782608687877655, "alphanum_fraction": 0.782608687877655, "avg_line_length": 43.5, "blob_id": "2dd0f95932cf37908a85341e1a21b03a2587947e", "content_id": "afd8516da676e7ae108d27bafc541c528ea54a5c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 92, "license_type": "no_license", "max_line_length": 74, "num_lines": 2, "path": "/README.md", "repo_name": "sinhlt58/vienuet_nlp", "src_encoding": "UTF-8", "text": "# vienuet_nlp\r\nAn NLP library for English and Vietnamese which includes many other libraries. 
\r\n" }, { "alpha_fraction": 0.6150234937667847, "alphanum_fraction": 0.6384976506233215, "avg_line_length": 25.625, "blob_id": "bdd853598d3630629ae5da69acd2c68fa9a81906", "content_id": "882f795f996c43347e92952c65833f08d880ee4b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 213, "license_type": "no_license", "max_line_length": 85, "num_lines": 8, "path": "/vienuet_nlp/__init__.py", "repo_name": "sinhlt58/vienuet_nlp", "src_encoding": "UTF-8", "text": "\"\"\"\nThis is a nlp library for English and Vietnamese which includes many other libraries.\n\"\"\"\n\n__version__ = '1.0.0'\n__author__ = 'Luu Truong Sinh'\n__email__ = 'sinhlt58@gmail.com'\n__url__ = 'https://github.com/sinhlt58/vienuet_nlp'\n" }, { "alpha_fraction": 0.7089337110519409, "alphanum_fraction": 0.7118155360221863, "avg_line_length": 22.200000762939453, "blob_id": "6a2c5942a19ffee9c6a537debf6bbd4a4ab395d2", "content_id": "595b055c1b13ce19f2ec901751f1d6a11448c392", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 347, "license_type": "no_license", "max_line_length": 62, "num_lines": 15, "path": "/vienuet_nlp/chatterbot/examples/bot.py", "repo_name": "sinhlt58/vienuet_nlp", "src_encoding": "UTF-8", "text": "from vienuet_nlp.chatterbot.bots import VietnameseFQABot\nimport json\n\nbot = VietnameseFQABot(\"fqa\", database_name='fqa-test')\n\nfqa_data = json.load(open('fqna_data.json', encoding='utf-8'))\n\nbot.train(fqa_data)\n\nwhile True:\n try:\n response_text = bot.get_response(None)\n\n except(KeyboardInterrupt, EOFError, SystemExit):\n break" }, { "alpha_fraction": 0.7195122241973877, "alphanum_fraction": 0.7195122241973877, "avg_line_length": 29.25, "blob_id": "69ed471f540c59a4a4f106c2ac48286e61576c0a", "content_id": "9f110bf931d70253344f49dac0b7db587b1ce332", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 246, "license_type": "no_license", "max_line_length": 84, "num_lines": 8, "path": "/vienuet_nlp/data_logging/mongo_sheet.py", "repo_name": "sinhlt58/vienuet_nlp", "src_encoding": "UTF-8", "text": "import pygsheets\nfrom mongoengine import connect\n\nclass MongoSheet:\n\n def __init__(self, credential_file):\n self.credential_file = credential_file\n self.gc = pygsheets.authorize(outh_file=credential_file, outh_nonlocal=True)\n " } ]
9
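A note on the vienuet_nlp record above: judging from the ViFQAListTrainer.train() loop and the example bot script, the fqna_data.json it loads appears to be a list of answer/questions pairs. A minimal Python sketch of that assumed shape (the entries themselves are hypothetical; only the file name and field names come from the code above):

import json

# Hypothetical FQA training data in the shape ViFQAListTrainer.train() iterates over:
# each entry pairs one canonical answer with the question variants that map to it.
fqa_data = [
    {
        "answer": "Our office opens at 8am.",
        "questions": [
            "What time do you open?",
            "When does the office open?",
        ],
    },
]

# Written out the way the example bot script reads it back.
with open("fqna_data.json", "w", encoding="utf-8") as f:
    json.dump(fqa_data, f, ensure_ascii=False, indent=2)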
DMClimbo/docker
https://github.com/DMClimbo/docker
ea1b3acd66d93aab62f0d89265cd83747a31ce05
d27ff08cb070b7afbecb304f51117a08cf0b0b2d
05fa5fa0057a666784ec061958ff808015a410f5
refs/heads/master
2023-03-30T18:26:39.004061
2021-04-07T06:48:26
2021-04-07T06:48:26
354,297,497
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5511727333068848, "alphanum_fraction": 0.5746268630027771, "avg_line_length": 19.18181800842285, "blob_id": "7352f8314edb602c8d8f36fccbdf1009cca54760", "content_id": "e1e5f96effda7beb19b853652d56db294efca9c0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1086, "license_type": "no_license", "max_line_length": 53, "num_lines": 44, "path": "/server/webserver.py", "repo_name": "DMClimbo/docker", "src_encoding": "UTF-8", "text": "from flask import Flask,request, jsonify\r\nimport subprocess\r\nimport os \r\nimport yaml, json\r\n\r\nfrom functions import handle_json \r\nfrom flask_cors import CORS\r\nfrom redis import StrictRedis\r\n\r\n#่ฟžๆŽฅredisๆ•ฐๆฎๅบ“\r\nr = StrictRedis(host='192.168.1.36', port=7002, db=1)\r\n\r\n#่งฃๅ†ณ่ทจๅŸŸ้—ฎ้ข˜\r\napp = Flask(__name__)\r\nCORS(app, resources=r'/*')\r\n\r\n\r\n@app.route('/', methods=['POST','GET'])\r\ndef get_json():\r\n #ๆŽฅๆ”ถๅˆฐpost่ฏทๆฑ‚ๅŽๆ–ฐๅปบๆ–‡ไปถๅคนไฟๅญ˜่ฎญ็ปƒ็ป“ๆžœ\r\n if request.method =='POST':\r\n\r\n json_get = request.get_data(as_text=True)\r\n json_dict = json.loads(json_get)\r\n \r\n #ๅˆ›ๅปบๆ–‡ไปถๅคนๅนถ็”Ÿๆˆyaml้…็ฝฎๆ–‡ไปถ่ฟ”ๅ›ž่ฎญ็ปƒๅ‘ฝไปค\r\n cmd = handle_json(json_dict)\r\n print(cmd)\r\n #ๅฐ†ๅ‘ฝไปคๆ’ๅ…ฅ้˜Ÿๅˆ—\r\n try:\r\n r.lpush('queue', cmd)\r\n except Exception as e:\r\n print(e)\r\n\r\n return \"่ฏทๆฑ‚ๅทฒๆ’ๅ…ฅ้˜Ÿๅˆ—\"\r\n\r\n if request.method == 'GET':\r\n return \"ๆœๅŠกๅ™จๆญฃๅธธ๏ผŒๅฏไปฅๅ‘้€่ฎญ็ปƒ่ฏทๆฑ‚\"\r\n\r\n\r\n\r\n\r\nif __name__ == \"__main__\":\r\n app.run(host=\"0.0.0.0\", port=9000)\r\n\r\n\r\n\r\n" }, { "alpha_fraction": 0.585669755935669, "alphanum_fraction": 0.5887850522994995, "avg_line_length": 29.75342559814453, "blob_id": "862e84f073ad24678799e6673eaa708cf8b8d7e7", "content_id": "74a14655feaa6c1f142df8526258e47778f35652", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2465, "license_type": "no_license", "max_line_length": 110, "num_lines": 73, "path": "/server/functions.py", "repo_name": "DMClimbo/docker", "src_encoding": "UTF-8", "text": "from multiprocessing import Process\nimport os \nimport yaml, json\nimport requests\nimport pymysql\nimport zipfile\n\n#ๆ นๆฎjsonๆ–‡ไปถ้…็ฝฎ่ฎญ็ปƒ็Žฏๅขƒ\ndef handle_json(json_dict):\n #ๅˆ›ๅปบๆ–‡ไปถๅคนไฟๅญ˜่ฎญ็ปƒๅ‚ๆ•ฐๅ’Œ็ป“ๆžœ\n outputdir = r'/cv/output/' + json_dict[\"USER\"] + '_' + json_dict[\"MODEL_NAME\"]\n os.mkdir(outputdir)\n \n # ไฟฎๆ”นไฟๅญ˜ๆจกๅž‹ไฝ็ฝฎๅ’Œๆ•ฐๆฎ้›†ๆ–‡ไปถๅคนไธบ็ปๅฏน่ทฏๅพ„\n json_dict[\"MODEL\"][\"MODEL_SAVE_DIR\"] = outputdir\n dataset_dir = get_dataset(json_dict)\n json_dict[\"DATASET\"][\"IMAGE_ROOT\"] = dataset_dir\n\n #็”Ÿๆˆyaml้…็ฝฎๆ–‡ไปถ\n yaml_file = open(outputdir + '/config.yaml','w')\n yaml.safe_dump(json_dict, stream=yaml_file, default_flow_style=False)\n\n print(\"ๅทฒ็”Ÿๆˆ้…็ฝฎๆ–‡ไปถ\")\n return \"python /cv/project/train.py --config \" + outputdir + \"/config.yaml\"\n # return True\n\n\n#ๆฃ€ๆŸฅๆ•ฐๆฎ้›†ๆ˜ฏๅฆๅญ˜ๅœจ๏ผŒ่‹ฅไธๅญ˜ๅœจๅˆ™ไธ‹่ฝฝๅนถๅˆ›ๅปบ\ndef get_dataset(json_dict):\n dataset_dir = '/cv/data/' + json_dict['DATASET']['TRAINSET'] + '_' + json_dict['DATASET']['VERSION'] + '/'\n if(os.path.exists(dataset_dir)):\n print('ๆ•ฐๆฎ้›†ๅทฒๅญ˜ๅœจ')\n else:\n os.mkdir(dataset_dir)\n url = json_dict['URL']\n file_path = dataset_dir + json_dict['DATASET']['VERSION'] + '.zip'\n get_file(url, file_path)\n unzip(dataset_dir)\n # download_images(url,dataset_dir)\n return dataset_dir\n\n\n#ไธ‹่ฝฝๆ–‡ไปถ\ndef get_file(url, file_path): \n try:\n r = requests.get(url, stream=True) 
## Modified\n        content = requests.get(url).content # .content must be used here, not .text\n        print('Starting download')\n        with open(file_path, \"wb\") as f:\n            f.write(content)\n\n        if r.status_code == 200:\n            return r.content\n        elif r.status_code == 404:\n            print (\"File not found\" )\n            return None\n    except Exception as e:\n        print(\"Could not get file. Exception is: %s\" % e)\n\n    return \"download succeeded\"\n\n\n# Unzip the zip files and merge their contents\ndef unzip(storage_path):\n    file_list = os.listdir(storage_path)\n    for file_name in file_list:\n        if os.path.splitext(file_name)[1] == '.zip':\n            file_name = storage_path + file_name\n            file_zip = zipfile.ZipFile(file_name, 'r')\n            for file in file_zip.namelist():\n                file_zip.extract(file, storage_path)\n            file_zip.close()\n            os.remove(file_name)\n\n\n" }, { "alpha_fraction": 0.6462522745132446, "alphanum_fraction": 0.6617915630340576, "avg_line_length": 30.285715103149414, "blob_id": "98a8e8c702cb01f3e15eafdf875c1598918d045e", "content_id": "ccd8ef866e897699476a852b450195094f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 1394, "license_type": "no_license", "max_line_length": 155, "num_lines": 35, "path": "/readme.txt", "repo_name": "DMClimbo/docker", "src_encoding": "UTF-8", "text": "Build the image: docker build -t REPOSITORY:TAG .\n\nCreate the container: sudo docker run -itd --runtime=nvidia --shm-size=\"8g\" --name classfication -p9001:9000 -v /etc/localtime:/etc/localtime:ro REPOSITORY:TAG /bin/bash\n\nModify container properties: 1. docker ps -a to look up the container ID\n             2. su to root and enter the directory: cd /var/lib/docker/containers/<container ID>\n             3. edit the hostconfig.json file\n\nCopy a file into the container: stop the container, then run docker cp <target file> <container ID>:<destination path inside the container>\n\nPackage the container into an image: docker commit <container ID> REPOSITORY:TAG\n\n\nChange docker's default container storage path (left unchanged, it uses up system-disk space by default):\n    (1) Create the path where docker containers will be stored\n    # mkdir -p /home/data/docker/lib\n\n    (2) Stop the Docker service and migrate the data to the new directory\n    # systemctl stop docker.service\n    # rsync -avz /var/lib/docker/ /home/data/docker/lib/\n\n    (3) Create the Docker config file\n    # mkdir -p /etc/systemd/system/docker.service.d/ \n    # vim /etc/systemd/system/docker.service.d/devicemapper.conf\n    [Service]\n    ExecStart=\n    ExecStart=/usr/bin/dockerd --graph=/home/data/docker/lib/\n\n    (4) Restart the Docker service\n    # systemctl daemon-reload \n    # systemctl restart docker\n    \n    (5) Check the directory where containers are now stored\n    # docker info | grep \"Dir\"\n    Docker Root Dir: /home/data/docker/lib" }, { "alpha_fraction": 0.6756198406219482, "alphanum_fraction": 0.6756198406219482, "avg_line_length": 21.045454025268555, "blob_id": "7d785d346c1e288ba4b42f1d2cd1b283032d69eb", "content_id": "b14e1661bdec7a628390077eefffda7b39cf7d48", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 560, "license_type": "no_license", "max_line_length": 74, "num_lines": 22, "path": "/server/main.py", "repo_name": "DMClimbo/docker", "src_encoding": "UTF-8", "text": "from multiprocessing import Process\nimport os \n\nStart_server = 'python ./webserver.py'\nStart_check_queue = 'python ./check_queue.py'\n\n\ndef start_process(cmd):\n    os.system(cmd)\n\n\n# Start the flask server and the queue-watcher process\nif __name__ == \"__main__\":\n    # Start the flask backend\n    Server = Process(target=start_process, args=(Start_server,))\n    Server.start()\n    print('Web backend started successfully')\n\n    # Start the queue-watcher program\n    Check_queue = 
Process(target=start_process, args=(Start_check_queue,))\n    Check_queue.start()\n    print('Request queue started successfully')" }, { "alpha_fraction": 0.5773420333862305, "alphanum_fraction": 0.6143791079521179, "avg_line_length": 19.727272033691406, "blob_id": "7b94d9d4eaed36cdcccfdbb59e2fdb7e60f97065", "content_id": "4c140926d6fbecab7ec3e2c5632916c5e2aeba9d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 525, "license_type": "no_license", "max_line_length": 53, "num_lines": 22, "path": "/server/check_queue.py", "repo_name": "DMClimbo/docker", "src_encoding": "UTF-8", "text": "from multiprocessing import Process\nimport os\nfrom redis import StrictRedis\nfrom time import sleep\n\n# Initialize the redis connection\nr = StrictRedis(host='192.168.1.36', port=7002, db=1)\n\n# Run a queued command in the terminal\ndef start_train(cmd):\n    os.system(cmd)\n\n# Poll the redis queue in a loop\nwhile 1:\n    while r.llen('queue') != 0:\n        # Fetch the element at the head of the queue\n        cmd = (r.rpop('queue'))\n        #print(cmd)\n        p = Process(target=start_train, args=(cmd,))\n        p.start()\n        print('Training started')\n    sleep(5)\n\n\n\n" } ]
5
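For context on the DMClimbo/docker record above: webserver.py and check_queue.py together form a small Redis-backed work queue, with LPUSH on the producer side and RPOP on the consumer side so the list behaves as FIFO. A standalone sketch of that pattern; the host, port and queue name are taken from the repo's code, while the command string is illustrative:

from redis import StrictRedis

r = StrictRedis(host='192.168.1.36', port=7002, db=1)

# Producer side (what webserver.py does on a POST request).
r.lpush('queue', 'python /cv/project/train.py --config /cv/output/demo/config.yaml')

# Consumer side (what check_queue.py does in its polling loop).
cmd = r.rpop('queue')
if cmd is not None:
    print('would run:', cmd.decode('utf-8'))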
AlySpoly/gammapy
https://github.com/AlySpoly/gammapy
3c5bf3c05a8caf9c3324af1882025f64778dc487
42416f5c8bd904a27d70ba8607bfe3db582fb6b6
466fda10552bff80e88994fc76d4ffe1e6668b56
refs/heads/master
2020-03-14T02:30:36.381492
2018-04-27T08:54:23
2018-04-27T08:54:23
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5840787291526794, "alphanum_fraction": 0.6185151934623718, "avg_line_length": 28.81333351135254, "blob_id": "53480f52acaff14f0015cd4ddc0a2c43e9997bfd", "content_id": "e6651d3615c0cebbf4b9d1ade3dfd5d3c74a8743", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2236, "license_type": "permissive", "max_line_length": 82, "num_lines": 75, "path": "/gammapy/utils/root/tests/test_convert.py", "repo_name": "AlySpoly/gammapy", "src_encoding": "UTF-8", "text": "# Licensed under a 3-clause BSD style license - see LICENSE.rst\nfrom __future__ import absolute_import, division, print_function, unicode_literals\nimport numpy as np\nfrom numpy.testing import assert_allclose\nimport pytest\n\nHAS_ROOT = False\n\n# TODO: re-enable this\n# I'm disabling ROOT because it makes the pytest collector crash:\n#try:\n# import ROOT\n# from ... import root\n# HAS_ROOT = True\n#except:\n# HAS_ROOT = False\n\n\n@pytest.mark.skipif('not HAS_ROOT')\ndef test_graph1d_to_table():\n x = np.array([-0.22, 0.05, 0.25, 0.35])\n y = np.array([1, 2.9, 5.6, 7.4])\n ex = np.array([.05, .1, .07, .07])\n ey = np.array([.8, .7, .6, .5])\n n = len(x)\n\n graph = ROOT.TGraphErrors(n, x, y, ex, ey)\n table = root.graph1d_to_table(graph)\n assert_allclose(table['x'], x)\n\n graph = ROOT.TGraph(n, x, y)\n table = root.graph1d_to_table(graph)\n assert_allclose(table['x'], x)\n\n\n@pytest.mark.skipif('not HAS_ROOT')\ndef test_hist1d_to_table():\n hist = ROOT.TH1F('name', 'title', 4, -10, 10)\n hist.Fill(3)\n\n table = root.hist1d_to_table(hist)\n assert_allclose(table['x'], [-7.5, -2.5, 2.5, 7.5])\n assert_allclose(table['y'], [0., 0., 1., 0.])\n\n\n@pytest.mark.skipif('not HAS_ROOT')\ndef make_test_TH2():\n \"\"\"Generate an example TH2 we use to test TH2_to_FITS(),\n corresponding approximately to the HESS survey region.\"\"\"\n name, title = 'test_image', 'My Test Image'\n nbinsx, xlow, xup = 1400, 60, -80\n nbinsy, ylow, yup = 100, -5, 5\n h = ROOT.TH2F(name, title, nbinsx, xlow, xup, nbinsy, ylow, yup)\n # Just for fun:\n # Fill with distance to Galactic center, to have something to look at\n for ix in range(nbinsx):\n for iy in range(nbinsy):\n x = h.GetXaxis().GetBinCenter(ix)\n y = h.GetYaxis().GetBinCenter(iy)\n value = np.sqrt(x * x + y * y)\n h.SetBinContent(ix, iy, value)\n return h\n\n\n@pytest.mark.xfail\n@pytest.mark.skipif('not HAS_ROOT')\ndef test_TH2_to_FITS():\n h = make_test_TH2()\n h.Print('base')\n f = root.TH2_to_FITS(h)\n from pprint import pprint\n pprint(f.header2classic())\n filename = 'TH2_to_FITS.fits'\n print('Writing {}'.format(filename))\n f.writetofits(filename, overwrite=True)\n" }, { "alpha_fraction": 0.6652994155883789, "alphanum_fraction": 0.698113203048706, "avg_line_length": 37.0625, "blob_id": "1629702b0da259b3c96034c4935606404f51778f", "content_id": "e637585f47b7f9e8cc6fa64abaca08b61ac90ec6", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1219, "license_type": "permissive", "max_line_length": 82, "num_lines": 32, "path": "/gammapy/utils/serialization/tests/test_xml.py", "repo_name": "AlySpoly/gammapy", "src_encoding": "UTF-8", "text": "# Licensed under a 3-clause BSD style license - see LICENSE.rst\nfrom __future__ import absolute_import, division, print_function, unicode_literals\nfrom numpy.testing import assert_allclose\nfrom ...scripts import make_path\nfrom ....cube import SourceLibrary\nfrom ....spectrum import models as 
spectral\nfrom ....image import models as spatial\n\ndef test_xml_to_source_library():\n filename = '$GAMMAPY_EXTRA/test_datasets/models/fermi_model.xml'\n sourcelib = SourceLibrary.from_xml(filename)\n\n assert len(sourcelib.skymodels) == 4\n\n model1 = sourcelib.skymodels[0]\n assert isinstance(model1.spectral_model, spectral.PowerLaw)\n assert isinstance(model1.spatial_model, spatial.SkyPointSource)\n\n pars1 = model1.parameters\n assert pars1['index'].value == -2.1\n assert pars1['index'].unit == ''\n assert pars1['index'].parmax == -1.0\n assert pars1['index'].parmin == -5.0\n assert pars1['index'].frozen == False\n\n assert pars1['lon_0'].value == 187.25\n assert pars1['lon_0'].unit == 'deg'\n assert pars1['lon_0'].parmax == 360\n assert pars1['lon_0'].parmin == -360\n assert pars1['lon_0'].frozen == True\n\n # TODO: Test Evaluate combined model as soon as '+' works for SkyModel\n\n" }, { "alpha_fraction": 0.707317054271698, "alphanum_fraction": 0.712195098400116, "avg_line_length": 28.285715103149414, "blob_id": "223df24762eaf2cf8e2e1437326ba12cf4399c5a", "content_id": "49681ea82e7be45942510d564b42204a3f9118cf", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 205, "license_type": "permissive", "max_line_length": 63, "num_lines": 7, "path": "/gammapy/utils/root/__init__.py", "repo_name": "AlySpoly/gammapy", "src_encoding": "UTF-8", "text": "# Licensed under a 3-clause BSD style license - see LICENSE.rst\n\"\"\"Utility functions to work with ROOT and rootpy.\n\n* ROOT: https://root.cern.ch\n* rootpy: http://www.rootpy.org/\n\"\"\"\nfrom .convert import *\n" }, { "alpha_fraction": 0.5456700325012207, "alphanum_fraction": 0.5522603988647461, "avg_line_length": 32.265384674072266, "blob_id": "e27f3d072e27bf96a96e7c1d792070d9e73bc28f", "content_id": "30abd4e4df9fd1af67e946370cabab65423b669f", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 17298, "license_type": "permissive", "max_line_length": 99, "num_lines": 520, "path": "/gammapy/time/lightcurve.py", "repo_name": "AlySpoly/gammapy", "src_encoding": "UTF-8", "text": "# Licensed under a 3-clause BSD style license - see LICENSE.rst\nfrom __future__ import absolute_import, division, print_function, unicode_literals\nfrom collections import OrderedDict\nimport numpy as np\nimport astropy.units as u\nfrom astropy.table import Table\nfrom astropy.time import Time\nfrom ..spectrum.utils import CountsPredictor\nfrom ..stats.poisson import excess_error\nfrom ..utils.scripts import make_path\n\n__all__ = [\n 'LightCurve',\n 'LightCurveEstimator',\n]\n\n\nclass LightCurve(object):\n \"\"\"Lightcurve container.\n\n The lightcurve data is stored in ``table``.\n\n For now we only support times stored in MJD format!\n\n TODO: specification of format is work in progress\n See https://github.com/open-gamma-ray-astro/gamma-astro-data-formats/pull/61\n\n Usage: :ref:`time-lc`\n\n Parameters\n ----------\n table : `~astropy.table.Table`\n Table with lightcurve data\n \"\"\"\n\n def __init__(self, table):\n self.table = table\n\n def __repr__(self):\n return '{}(len={})'.format(self.__class__.__name__, len(self.table))\n\n @property\n def time_scale(self):\n \"\"\"Time scale (str).\n\n Taken from table \"TIMESYS\" header.\n Common values: \"TT\" or \"UTC\".\n Assumed default is \"UTC\".\n \"\"\"\n return self.table.meta.get('TIMESYS', 'utc')\n\n @property\n def time_format(self):\n \"\"\"Time format 
(str).\"\"\"\n return 'mjd'\n\n # @property\n # def time_ref(self):\n # \"\"\"Time reference (`~astropy.time.Time`).\"\"\"\n # return time_ref_from_dict(self.table.meta)\n\n def _make_time(self, colname):\n val = self.table[colname].data\n scale = self.time_scale\n format = self.time_format\n return Time(val, scale=scale, format=format)\n\n @property\n def time(self):\n \"\"\"Time (`~astropy.time.Time`).\"\"\"\n return self._make_time('time')\n\n @property\n def time_min(self):\n \"\"\"Time bin start (`~astropy.time.Time`).\"\"\"\n return self._make_time('time_min')\n\n @property\n def time_max(self):\n \"\"\"Time bin end (`~astropy.time.Time`).\"\"\"\n return self._make_time('time_max')\n\n @property\n def time_mid(self):\n \"\"\"Time bin center (`~astropy.time.Time`).\n\n ::\n time_mid = time_min + 0.5 * time_delta\n \"\"\"\n return self.time_min + 0.5 * self.time_delta\n\n @property\n def time_delta(self):\n \"\"\"Time bin width (`~astropy.time.TimeDelta`).\n\n ::\n time_delta = time_max - time_min\n \"\"\"\n return self.time_max - self.time_min\n\n @classmethod\n def read(cls, filename, **kwargs):\n \"\"\"Read from file.\n\n Parameters\n ----------\n filename : str\n Filename\n kwargs : dict\n Keyword arguments passed to `astropy.table.Table.read`.\n \"\"\"\n filename = make_path(filename)\n table = Table.read(str(filename), **kwargs)\n return cls(table=table)\n\n def write(self, filename, **kwargs):\n \"\"\"Write to file.\n\n Parameters\n ----------\n filename : str\n Filename\n kwargs : dict\n Keyword arguments passed to `astropy.table.Table.write`.\n \"\"\"\n filename = make_path(filename)\n self.table.write(str(filename), **kwargs)\n\n def compute_fvar(self):\n r\"\"\"Calculate the fractional excess variance.\n\n This method accesses the the ``FLUX`` and ``FLUX_ERR`` columns\n from the lightcurve data.\n\n The fractional excess variance :math:`F_{var}`, an intrinsic\n variability estimator, is given by\n\n .. math::\n F_{var} = \\sqrt{\\frac{S^{2} - \\bar{\\sigma^{2}}}{\\bar{x}^{2}}}.\n\n It is the excess variance after accounting for the measurement errors\n on the light curve :math:`\\sigma`. :math:`S` is the variance.\n\n Returns\n -------\n fvar, fvar_err : `~numpy.ndarray`\n Fractional excess variance.\n\n References\n ----------\n .. [Vaughan2003] \"On characterizing the variability properties of X-ray light\n curves from active galaxies\", Vaughan et al. (2003)\n http://adsabs.harvard.edu/abs/2003MNRAS.345.1271V\n \"\"\"\n flux = self.table['flux'].data.astype('float64')\n flux_err = self.table['flux_err'].data.astype('float64')\n\n flux_mean = np.mean(flux)\n n_points = len(flux)\n\n s_square = np.sum((flux - flux_mean) ** 2) / (n_points - 1)\n sig_square = np.nansum(flux_err ** 2) / n_points\n fvar = np.sqrt(np.abs(s_square - sig_square)) / flux_mean\n\n sigxserr_a = np.sqrt(2 / n_points) * (sig_square / flux_mean) ** 2\n sigxserr_b = np.sqrt(sig_square / n_points) * (2 * fvar / flux_mean)\n sigxserr = np.sqrt(sigxserr_a ** 2 + sigxserr_b ** 2)\n fvar_err = sigxserr / (2 * fvar)\n\n return fvar, fvar_err\n\n def compute_chisq(self):\n \"\"\"Calculate the chi-square test for `LightCurve`.\n\n Chisquare test is a variability estimator. 
It computes\n        deviations from the expected value, here the mean value.\n\n        Returns\n        -------\n        ChiSq, P-value : tuple of float or `~numpy.ndarray`\n            Tuple of Chi-square and P-value\n        \"\"\"\n        import scipy.stats as stats\n        flux = self.table['flux']\n        yexp = np.mean(flux)\n        yobs = flux.data\n        chi2, pval = stats.chisquare(yobs, yexp)\n        return chi2, pval\n\n    def plot(self, ax=None):\n        \"\"\"Plot flux versus time.\n\n        Parameters\n        ----------\n        ax : `~matplotlib.axes.Axes` or None, optional.\n            The `~matplotlib.axes.Axes` object to be drawn on.\n            If None, uses the current `~matplotlib.axes.Axes`.\n\n        Returns\n        -------\n        ax : `~matplotlib.axes.Axes` or None, optional.\n            The `~matplotlib.axes.Axes` object to be drawn on.\n            If None, uses the current `~matplotlib.axes.Axes`.\n        \"\"\"\n        import matplotlib.pyplot as plt\n        ax = plt.gca() if ax is None else ax\n\n        # TODO: Should we plot with normal time axis labels (ISO, not MJD)?\n\n        x, xerr = self._get_plot_x()\n        y, yerr = self._get_plot_y()\n\n        ax.errorbar(x=x, y=y, xerr=xerr, yerr=yerr, linestyle=\"None\")\n        ax.scatter(x=x, y=y)\n        ax.set_xlabel(\"Time (MJD)\")\n        ax.set_ylabel(\"Flux (cm-2 s-1)\")\n\n        return ax\n\n    def _get_plot_x(self):\n        try:\n            x = self.time.mjd\n        except KeyError:\n            x = self.time_mid.mjd\n\n        try:\n            xerr = x - self.time_min.mjd, self.time_max.mjd - x\n        except KeyError:\n            xerr = None\n\n        return x, xerr\n\n    def _get_plot_y(self):\n        y = self.table['flux'].quantity.to('cm-2 s-1').value\n\n        if 'flux_errp' in self.table.colnames:\n            yp = self.table['flux_errp'].quantity.to('cm-2 s-1').value\n            yn = self.table['flux_errn'].quantity.to('cm-2 s-1').value\n            yerr = yn, yp\n        elif 'flux_err' in self.table.colnames:\n            yerr = self.table['flux_err'].quantity.to('cm-2 s-1').value\n        else:\n            yerr = None\n\n        return y, yerr\n\n\nclass LightCurveEstimator(object):\n    \"\"\"Light curve estimator.\n\n    For a usage example see :gp-extra-notebook:`light_curve`.\n\n    Parameters\n    ----------\n    spec_extract : `~gammapy.spectrum.SpectrumExtraction`\n        Contains statistics, IRF and event lists\n    \"\"\"\n\n    def __init__(self, spec_extract):\n        self.obs_list = spec_extract.obs_list\n        self.obs_spec = spec_extract.observations\n        self.off_evt_list = self._get_off_evt_list(spec_extract)\n        self.on_evt_list = self._get_on_evt_list(spec_extract)\n\n    @staticmethod\n    def _get_off_evt_list(spec_extract):\n        \"\"\"\n        Returns the list of OFF events for each observation\n        \"\"\"\n        off_evt_list = []\n        for bg in spec_extract.bkg_estimate:\n            off_evt_list.append(bg.off_events)\n        return off_evt_list\n\n    @staticmethod\n    def _get_on_evt_list(spec_extract):\n        \"\"\"\n        Returns the list of ON events for each observation\n        \"\"\"\n        on_evt_list = []\n        for obs in spec_extract.bkg_estimate:\n            on_evt_list.append(obs.on_events)\n\n        return on_evt_list\n\n    @staticmethod\n    def create_fixed_time_bin(time_step, spectrum_extraction):\n        \"\"\"Create time intervals of fixed size.\n\n        Parameters\n        ----------\n        time_step : float\n            Size of the light curve bins in seconds\n        spectrum_extraction : `~gammapy.spectrum.SpectrumExtraction`\n            Contains statistics, IRF and event lists\n\n        Returns\n        -------\n        intervals : list of `~astropy.time.Time`\n            List of time intervals\n        \"\"\"\n        intervals = []\n        time_start = Time(100000, format=\"mjd\")\n        time_end = Time(0, format=\"mjd\")\n        time_step = time_step / (24 * 3600)\n\n        for obs in spectrum_extraction.obs_list:\n            time_events = obs.events.time\n            if time_start > time_events.min():\n                time_start = time_events.min()\n            if time_end < time_events.max():\n                time_end = time_events.max()\n\n        time = 
time_start.value\n        while time < time_end.value:\n            time += time_step\n            intervals.append([\n                Time(time - time_step, format=\"mjd\"),\n                Time(time, format=\"mjd\"),\n            ])\n        return intervals\n\n    def light_curve(self, time_intervals, spectral_model, energy_range):\n        \"\"\"Compute light curve.\n\n        Implementation follows what is done in:\n        http://adsabs.harvard.edu/abs/2010A%26A...520A..83H.\n\n        To be discussed: assumption that threshold energy is the\n        same in reco and true energy.\n\n        Parameters\n        ----------\n        time_intervals : `list` of `~astropy.time.Time`\n            List of time intervals\n        spectral_model : `~gammapy.spectrum.models.SpectralModel`\n            Spectral model\n        energy_range : `~astropy.units.Quantity`\n            True energy range to evaluate integrated flux (true energy)\n\n        Returns\n        -------\n        lc : `~gammapy.time.LightCurve`\n            Light curve\n        \"\"\"\n        rows = []\n        for time_interval in time_intervals:\n            useinterval, row = self.compute_flux_point(time_interval, spectral_model, energy_range)\n            if useinterval:\n                rows.append(row)\n\n        return self._make_lc_from_row_data(rows)\n\n    @staticmethod\n    def _make_lc_from_row_data(rows):\n        table = Table()\n        table['time_min'] = [_['time_min'].value for _ in rows]\n        table['time_max'] = [_['time_max'].value for _ in rows]\n\n        table['flux'] = [_['flux'].value for _ in rows] * u.Unit('1 / (s cm2)')\n        table['flux_err'] = [_['flux_err'].value for _ in rows] * u.Unit('1 / (s cm2)')\n\n        table['livetime'] = [_['livetime'].value for _ in rows] * u.s\n        table['n_on'] = [_['n_on'] for _ in rows]\n        table['n_off'] = [_['n_off'] for _ in rows]\n        table['alpha'] = [_['alpha'] for _ in rows]\n        table['measured_excess'] = [_['measured_excess'] for _ in rows]\n        table['expected_excess'] = [_['expected_excess'].value for _ in rows]\n\n        return LightCurve(table)\n\n    def compute_flux_point(self, time_interval, spectral_model, energy_range):\n        \"\"\"Compute one flux point for one time interval.\n\n        Parameters\n        ----------\n        time_interval : `~astropy.time.Time`\n            Time interval (2-element array, or a tuple of Time objects)\n        spectral_model : `~gammapy.spectrum.models.SpectralModel`\n            Spectral model\n        energy_range : `~astropy.units.Quantity`\n            True energy range to evaluate integrated flux (true energy)\n\n        Returns\n        -------\n        useinterval : bool\n            Is True if the time_interval produces a valid flux point\n        measurements : dict\n            Dictionary with flux point measurement in the time interval\n        \"\"\"\n        tmin, tmax = time_interval[0], time_interval[1]\n        livetime = 0\n        alpha_mean = 0.\n        alpha_mean_backup = 0.\n        measured_excess = 0\n        predicted_excess = 0\n        n_on = 0\n        n_off = 0\n        useinterval = False\n\n        # Loop on observations\n        for t_index, obs in enumerate(self.obs_list):\n\n            spec = self.obs_spec[t_index]\n\n            # discard observations not matching the time interval\n            obs_start = obs.events.time[0]\n            obs_stop = obs.events.time[-1]\n            if (tmin < obs_start and tmax < obs_start) or (tmin > obs_stop):\n                continue\n\n            useinterval = True\n            # get ON and OFF evt list\n            off_evt = self.off_evt_list[t_index]\n            on_evt = self.on_evt_list[t_index]\n\n            # introduce the e_reco binning here, since it's also used\n            # in the calculation of predicted counts\n            e_reco = spec.e_reco\n            emin = e_reco[e_reco.searchsorted(max(spec.lo_threshold, energy_range[0]))]\n            emax = e_reco[e_reco.searchsorted(min(spec.hi_threshold, energy_range[1])) - 1]\n\n            # compute ON events\n            on = on_evt.select_energy([emin, emax])\n            on = on.select_energy(energy_range)\n            on = on.select_time([tmin, tmax])\n            n_on_obs = len(on.table)\n\n            # compute OFF events\n            off = 
off_evt.select_energy([emin, emax])\n off = off.select_energy(energy_range)\n off = off.select_time([tmin, tmax])\n n_off_obs = len(off.table)\n\n # compute effective livetime (for the interval)\n if tmin >= obs_start and tmax <= obs_stop:\n # interval included in obs\n livetime_to_add = (tmax - tmin).to('s')\n elif tmin >= obs_start and tmax >= obs_stop:\n # interval min above tstart from obs\n livetime_to_add = (obs_stop - tmin).to('s')\n elif tmin <= obs_start and tmax <= obs_stop:\n # interval min below tstart from obs\n livetime_to_add = (tmax - obs_start).to('s')\n elif tmin <= obs_start and tmax >= obs_stop:\n # obs included in interval\n livetime_to_add = (obs_stop - obs_start).to('s')\n else:\n livetime_to_add = 0 * u.sec\n\n # Take into account dead time\n livetime_to_add *= (1. - obs.observation_dead_time_fraction)\n\n # Compute excess\n obs_measured_excess = n_on_obs - spec.alpha * n_off_obs\n\n # Compute the expected excess in the range given by the user\n # but must respect the energy threshold of the observation\n # (to match the energy range of the measured excess)\n # We use the effective livetime and the right energy threshold\n e_idx = np.where(np.logical_and.reduce(\n (e_reco >= spec.lo_threshold, # threshold\n e_reco <= spec.hi_threshold, # threshold\n e_reco >= energy_range[0], # user\n e_reco <= energy_range[-1]) # user\n ))[0]\n counts_predictor = CountsPredictor(\n livetime=livetime_to_add,\n aeff=spec.aeff,\n edisp=spec.edisp,\n model=spectral_model\n )\n counts_predictor.run()\n counts_predicted_excess = counts_predictor.npred.data.data[e_idx[:-1]]\n\n obs_predicted_excess = np.sum(counts_predicted_excess)\n\n # compute effective normalisation between ON/OFF (for the interval)\n livetime += livetime_to_add\n alpha_mean += spec.alpha * n_off_obs\n alpha_mean_backup += spec.alpha * livetime_to_add\n measured_excess += obs_measured_excess\n predicted_excess += obs_predicted_excess\n n_on += n_on_obs\n n_off += n_off_obs\n\n # Fill time interval information\n if useinterval:\n int_flux = spectral_model.integral(energy_range[0], energy_range[1])\n\n if n_off > 0.:\n alpha_mean /= n_off\n if livetime > 0.:\n alpha_mean_backup /= livetime\n if alpha_mean == 0.: # use backup if necessary\n alpha_mean = alpha_mean_backup\n\n flux = measured_excess / predicted_excess.value\n flux *= int_flux\n flux_err = int_flux / predicted_excess.value\n # Gaussian errors, TODO: should be improved\n flux_err *= excess_error(n_on=n_on, n_off=n_off, alpha=alpha_mean)\n else:\n flux = 0\n flux_err = 0\n\n # Store measurements in a dict and return that\n return useinterval, OrderedDict([\n ('time_min', Time(tmin, format='mjd')),\n ('time_max', Time(tmax, format='mjd')),\n ('flux', flux * u.Unit('1 / (s cm2)')),\n ('flux_err', flux_err * u.Unit('1 / (s cm2)')),\n\n ('livetime', livetime * u.s),\n ('alpha', alpha_mean),\n ('n_on', n_on),\n ('n_off', n_off),\n ('measured_excess', measured_excess),\n ('expected_excess', predicted_excess),\n ])\n" }, { "alpha_fraction": 0.6514957547187805, "alphanum_fraction": 0.6544871926307678, "avg_line_length": 32.191490173339844, "blob_id": "411b145ea2a189810af1229da931d177a2b7533e", "content_id": "6e0828215744ef6c345d425f665a305fe058cb5b", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4680, "license_type": "permissive", "max_line_length": 97, "num_lines": 141, "path": "/gammapy/utils/serialization/xml.py", "repo_name": "AlySpoly/gammapy", "src_encoding": "UTF-8", "text": "# Licensed 
under a 3-clause BSD style license - see LICENSE.rst\n\"\"\"\nModel classes to generate XML.\n\nFor XML model format definitions, see here:\n\n* http://cta.irap.omp.eu/ctools/user_manual/getting_started/models.html#spectral-model-components\n* http://fermi.gsfc.nasa.gov/ssc/data/analysis/scitools/source_models.html\n\"\"\"\nfrom __future__ import absolute_import, division, print_function, unicode_literals\nfrom ...extern import xmltodict\nfrom ...cube.models import SourceLibrary, SkyModel\nfrom ..modeling import Parameter, ParameterList\nfrom ...maps import Map\nimport numpy as np\nimport astropy.units as u\nimport gammapy.image.models as spatial\nimport gammapy.spectrum.models as spectral\n\n__all__ = [\n    'UnknownModelError',\n    'source_library_to_xml',\n    'xml_to_source_library',\n]\n\n\nclass UnknownModelError(ValueError):\n    \"\"\"\n    Error when encountering unknown model types.\n    \"\"\"\n\nclass UnknownParameterError(ValueError):\n    \"\"\"\n    Error when encountering unknown parameter names.\n    \"\"\"\n\ndef source_library_to_xml(skymodels):\n    \"\"\"\n    Convert `~gammapy.cube.models.SkyModelList` to XML\n    \"\"\"\n\ndef xml_to_source_library(xml):\n    \"\"\"\n    Convert XML to `~gammapy.cube.models.SkyModelList`\n    \"\"\"\n    full_dict = xmltodict.parse(xml)\n    skymodels = list()\n    for xml_skymodel in full_dict['source_library']['source']:\n        skymodels.append(xml_to_skymodel(xml_skymodel))\n    return SourceLibrary(skymodels)\n\n\ndef xml_to_skymodel(xml):\n    \"\"\"\n    Convert XML to `~gammapy.cube.models.SkyModel`\n    \"\"\"\n    # TODO: type_ is not used anywhere\n    type_ = xml['@type']\n\n    name = xml['@name'] \n    spatial = xml_to_model(xml['spatialModel'], 'spatial')\n    spectral = xml_to_model(xml['spectrum'], 'spectral')\n    return SkyModel(spatial_model=spatial, spectral_model=spectral, name=name)\n\n\ndef xml_to_model(xml, which):\n    \"\"\"\n    Convert XML to `~gammapy.image.models.SkySpatialModel` or\n    `~gammapy.spectrum.models.SpectralModel`\n    \"\"\"\n    type_ = xml['@type']\n    parameters = xml_to_parameter_list(xml['parameter'], which)\n\n    try:\n        model = model_registry[which][type_]\n    except KeyError:\n        msg = \"{} model '{}' not registered\"\n        raise UnknownModelError(msg.format(which, type_))\n\n    if type_ == 'MapCubeFunction':\n        filename = xml['@file']\n        map_ = Map.read(filename)\n        model = model(map=map_, norm=parameters['norm'].value)\n    elif type_ == 'FileFunction':\n        filename = xml['@file']\n        model = model.read_fermi_isotropic_model(filename)\n    else:\n        # TODO: The new model API should support this, see issue #1398\n        # >>> return model(parameters)\n        # The following is a dirty workaround\n        kwargs = dict()\n        for par in parameters.parameters:\n            kwargs[par.name] = -1 * u.Unit(par.unit)\n        model = model(**kwargs)\n    model.parameters = parameters\n    return model\n\n\n# TODO: MapCubeFunction does not have a good equivalent yet\nmodel_registry = dict(spatial=dict(), spectral=dict())\nmodel_registry['spatial']['SkyDirFunction'] = spatial.SkyPointSource\nmodel_registry['spatial']['MapCubeFunction'] = spatial.SkyDiffuseMap\nmodel_registry['spatial']['ConstantValue'] = spatial.SkyDiffuseConstant\nmodel_registry['spectral']['PowerLaw'] = spectral.PowerLaw\nmodel_registry['spectral']['FileFunction'] = spectral.TableModel\n\n\ndef xml_to_parameter_list(xml, which):\n    \"\"\"\n    Convert XML to `~gammapy.utils.modeling.ParameterList`\n\n    TODO: Introduce scale argument to `~gammapy.utils.modeling.Parameter`.\n    \"\"\"\n    parameters = list()\n    for par in np.atleast_1d(xml):\n        try:\n            name, unit = parname_registry[which][par['@name']]\n        except 
KeyError:\n msg = \"{} parameter '{}' not registered\"\n raise UnknownParameterError(msg.format(which, par['@name']))\n\n parameters.append(Parameter(\n name = name,\n value = float(par['@value']) * float(par['@scale']),\n unit = unit,\n parmin = float(par['@min']),\n parmax = float(par['@max']),\n frozen = bool(1 - int(par['@free']))\n ))\n return ParameterList(parameters)\n\n\nparname_registry = dict(spatial=dict(), spectral=dict())\nparname_registry['spatial']['RA'] = 'lon_0', 'deg'\nparname_registry['spatial']['DEC'] = 'lat_0', 'deg'\nparname_registry['spatial']['Normalization'] = 'norm', ''\nparname_registry['spatial']['Value'] = 'value', 'MeV cm-2 s-1'\nparname_registry['spectral']['Prefactor'] = 'amplitude', 'MeV cm-2 s-1'\nparname_registry['spectral']['Index'] = 'index', ''\nparname_registry['spectral']['Scale'] = 'reference', 'MeV'\nparname_registry['spectral']['Normalization'] = 'scale', ''\n" } ]
5
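A note on the gammapy serialization record above: xml_to_model() resolves XML type strings through per-kind registries and raises a dedicated error for unregistered names. A self-contained sketch of that dispatch pattern (the factories below are placeholders, not gammapy classes):

class UnknownModelError(ValueError):
    """Raised when an XML model type has no registered constructor."""

# Type string -> factory; the real code maps names like 'PowerLaw' to model classes.
model_registry = {
    'PowerLaw': lambda: 'power-law model',
    'ConstantValue': lambda: 'constant model',
}

def build_model(type_):
    try:
        factory = model_registry[type_]
    except KeyError:
        raise UnknownModelError("model '{}' not registered".format(type_))
    return factory()

print(build_model('PowerLaw'))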
rakeshsukla53/geopy
https://github.com/rakeshsukla53/geopy
0991452c0560ef1dc65ffe8586f156a98d964b71
cf38cfcbeb8061ac2b931900fea6db1b33c17b8a
d2f7748eac3545d5aa8b19925209d2a32f435a02
refs/heads/master
2021-01-17T23:18:23.859005
2015-04-10T17:16:20
2015-04-10T17:16:20
33,730,127
0
0
null
2015-04-10T13:32:51
2015-04-06T16:26:37
2015-04-08T22:22:34
null
[ { "alpha_fraction": 0.7666666507720947, "alphanum_fraction": 0.7666666507720947, "avg_line_length": 28, "blob_id": "4b877bb10e4587ae5b5c739bc15fbba641974a3b", "content_id": "e30ae61e65621f46a637448810243e02aa70667e", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 30, "license_type": "permissive", "max_line_length": 28, "num_lines": 1, "path": "/test/__init__.py", "repo_name": "rakeshsukla53/geopy", "src_encoding": "UTF-8", "text": "\nfrom test.geocoders import *\n" }, { "alpha_fraction": 0.8837675452232361, "alphanum_fraction": 0.8877755403518677, "avg_line_length": 54.33333206176758, "blob_id": "7566687fabbe00681227b0d97f817177ec4b95f3", "content_id": "42b5b67bc811657276c1a23e746ee57a1f6141a8", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 998, "license_type": "permissive", "max_line_length": 77, "num_lines": 18, "path": "/test/geocoders/__init__.py", "repo_name": "rakeshsukla53/geopy", "src_encoding": "UTF-8", "text": "\nfrom test.geocoders.arcgis import ArcGISTestCase, ArcGISAuthenticatedTestCase\nfrom test.geocoders.baidu import BaiduTestCase\nfrom test.geocoders.base import GeocoderTestCase\nfrom test.geocoders.bing import BingTestCase\nfrom test.geocoders.databc import DataBCTestCase\nfrom test.geocoders.dotus import GeocoderDotUSTestCase\nfrom test.geocoders.geocodefarm import GeocodeFarmTestCase\nfrom test.geocoders.geonames import GeoNamesTestCase\nfrom test.geocoders.googlev3 import GoogleV3TestCase\nfrom test.geocoders.nominatim import NominatimTestCase\nfrom test.geocoders.opencage import OpenCageTestCase\nfrom test.geocoders.openmapquest import OpenMapQuestTestCase\nfrom test.geocoders.placefinder import YahooPlaceFinderTestCase\nfrom test.geocoders.smartystreets import LiveAddressTestCase\nfrom test.geocoders.what3words import What3WordsTestCase\nfrom test.geocoders.yandex import YandexTestCase\nfrom test.geocoders.ignfrance import IGNFranceTestCase\nfrom test.geocoders.navidata import NaviDataTestCase\n\n" }, { "alpha_fraction": 0.5820420980453491, "alphanum_fraction": 0.6154151558876038, "avg_line_length": 27.191011428833008, "blob_id": "c2339ec4e3e28fcdd7149a1936ca79b6fbf287ac", "content_id": "8b5942f39587e1f3a18767a0e932ea3fcf78e7cb", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2517, "license_type": "permissive", "max_line_length": 161, "num_lines": 89, "path": "/geopy/geocoders/geoclient_geopy.py~", "repo_name": "rakeshsukla53/geopy", "src_encoding": "UTF-8", "text": "__author__ = 'rakesh'\n\n\nimport urllib2\nimport json, time\n\n'''\nclass NYCGeoclient is the NYC API gecoder.\n'''\n\nclass NYGeoclient():\n '''\n the documentation for NYC geocoder is here https://developer.cityofnewyork.us/api/geoclient-api\n '''\n\n def __init__(self, api_key, api_id):\n '''\n api_key and api_id are mandatory field here\n API Authentication is required for the NYC geoclient\n '''\n\n self.api_key = '&app_key=' + api_key\n self.domain_name = 'https://api.cityofnewyork.us/geoclient/v1/search.json?input='\n self.api_id = '&app_id=' + api_id\n\n\n\n def geocodeNYC(self, address, throttle=0.05):\n\n '''\n geolocator = NYGeoclient('bb80a97381a0fa916bb860768fa71b8f', 'e7df1765')\n\n address, latitude, longitude = geolocator.geocode('175 5th Avenue NYC')\n\n print address, latitude, longitude\n\n Throttle default value is 50ms\n '''\n if throttle != 0.05:\n throttle = (throttle * 0.001)\n\n 
time.sleep(throttle)\n\n address = address.split()\n\n urlAddress = \"%20\".join(address)\n\n urlAddress = \"%20\" + urlAddress\n\n full_address = self.domain_name + str(urlAddress) + self.api_id + self.api_key\n\n a = urllib2.urlopen(full_address)\n\n location = json.load(a)['results'][0]['response']\n\n normalizedAddress = location['houseNumberIn'] + \" \" + location['boePreferredStreetName'] + \" \" + location['firstBoroughName'] + \" \" + location['zipCode']\n\n latitude = location['latitude']\n\n longitude = location['longitude']\n\n return normalizedAddress, latitude, longitude\n\n def parameters(self, address, *args):\n '''\n if you need particular information then you should call this function with those parameters\n\n geolocator = NYGeoclient('bb80a97381a0fa916bb860768fa71b8f', 'e7df1765')\n\n print geolocator.parameters('175 5th Avenue NYC', 'houseNumberIn', 'boePreferredStreetName')\n '''\n address = address.split()\n\n urlAddress = \"%20\".join(address)\n\n urlAddress = \"%20\" + urlAddress\n\n full_address = self.domain_name + str(urlAddress) + self.api_id + self.api_key\n\n a = urllib2.urlopen(full_address)\n\n location = json.load(a)['results'][0]['response']\n\n information = {}\n if args:\n for i in range(len(args)):\n information[args[i]] = location[args[i]]\n\n return information #it will return a dictionary of information\n\n\n\n\n\n\n\n\n" } ]
3
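A side note on the geoclient file above: it URL-encodes addresses by hand, joining tokens with %20. A sketch of the same request URL built with urllib's own quoting instead; the endpoint and parameter names mirror the file, while the credentials are placeholders:

try:
    from urllib.parse import quote  # Python 3
except ImportError:
    from urllib import quote  # Python 2, which the file above targets

def build_search_url(address, app_id='YOUR_APP_ID', app_key='YOUR_APP_KEY'):
    base = 'https://api.cityofnewyork.us/geoclient/v1/search.json'
    return '{}?input={}&app_id={}&app_key={}'.format(base, quote(address), app_id, app_key)

print(build_search_url('175 5th Avenue NYC'))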
teamtako/takoRunOnline
https://github.com/teamtako/takoRunOnline
056b63d4069c165a50bdafdaba172c2b68ce44ce
3a21ecdccbf4beb68a0d3f2cda93227e0c6c292e
2d84b8c95eb270f7c42d9a44b12502f1953b461d
refs/heads/master
2020-09-22T13:50:03.580381
2020-04-04T23:26:11
2020-04-04T23:26:11
225,226,311
0
1
null
2019-12-01T20:30:14
2020-03-29T18:58:35
2020-03-29T19:00:16
JavaScript
[ { "alpha_fraction": 0.547250509262085, "alphanum_fraction": 0.6001012921333313, "avg_line_length": 31.24500846862793, "blob_id": "e18de57eb90401a31ea44bfd3ea028cb448986cd", "content_id": "fd0c70024af05c8e1784233d9764c5ebc7b14168", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 17767, "license_type": "no_license", "max_line_length": 449, "num_lines": 551, "path": "/webgl_demo.js", "repo_name": "teamtako/takoRunOnline", "src_encoding": "UTF-8", "text": "//type stuff\nvar lvl1 = \"beluga whale populations are exposed to a variety of stressors and threats including pollution heavy metals chemicals trash shipping energy exploration commercial fishing extreme weather events strandings subsistence harvesting and other types of human disturbance such as underwater noise The Cook Inlet population faces additional threats because of its proximity to the most densely populated area of Alaska during the summer season\";\nvar spacesInList = 1;\nvar activeWords = []; //all words(objects with words accosited to them) that are on the screen at any given time\nvar activeWord=\"\"; //The word that you are currently typing\nvar wordDone=true; //if the current word is typed and a new one needs to be chosen\nvar words=[];\nvar wordAt=0;\nvar color=\"green\";\n//type stuff\n\n\nvar startShaking;\nvar isShaking;\nvar canvas;\nvar textCanvas;\nvar textCtx;\nvar gl;\n\nvar light;\nvar camera;\n\nvar states = {\n TITLE: 0,\n GAME: 1, \n GAME_OVER: 2,\n PAUSE: 3\n}\n\nvar currentState = states.TITLE;\n\nvar playerMesh;\nvar meshes = [];\nvar asteroids = [];\nvar speeds = [];\nvar rocketMesh;\nvar rocketMeshes = [];\nvar trashMeshes = [];\n\nvar asteroid1;\nvar asteroid2;\nvar asteroid3;\nvar asteroid4;\nvar asteroid5;\nvar asteroid6;\n\nvar stopvar;\nvar verticalVelocity = 0;\nvar gravity = 1;\nvar jumping = false;\nvar cubeSpeed = 10;\n\nvar startTime = 0;\nvar endTime = 0;\nvar deltaTime = 0;\n\nvar mouseX;\nvar mouseY;\n\nvar mvmtSpeed = 0.01;\n\nvar score = 5;\n\nvar speed = 0.1;\nvar destZ = 0;\nvar destY = 0;\n\nvar octopus;\nvar animatedMeshes = [];\nvar bubbleEmitter;\nvar particleEmitters = [];\n\nconst KEY_0 = 48;\nconst KEY_1 = 49;\nconst KEY_2 = 50;\nconst KEY_3 = 51;\nconst KEY_4 = 52;\nconst KEY_5 = 53;\nconst KEY_6 = 54;\nconst KEY_7 = 55;\nconst KEY_8 = 56;\nconst KEY_9 = 57;\nconst KEY_A = 65;\nconst KEY_B = 66;\nconst KEY_C = 67;\nconst KEY_D = 68;\nconst KEY_E = 69;\nconst KEY_F = 70;\nconst KEY_G = 71;\nconst KEY_H = 72;\nconst KEY_I = 73;\nconst KEY_J = 74;\nconst KEY_K = 75;\nconst KEY_L = 76;\nconst KEY_M = 77;\nconst KEY_N = 78;\nconst KEY_O = 79;\nconst KEY_P = 80;\nconst KEY_Q = 81;\nconst KEY_R = 82;\nconst KEY_S = 83;\nconst KEY_T = 84;\nconst KEY_U = 85;\nconst KEY_V = 86;\nconst KEY_W = 87;\nconst KEY_X = 88;\nconst KEY_Y = 89;\nconst KEY_Z = 90;\nconst KEY_UP = 38;\nconst KEY_DOWN = 40;\nconst KEY_LEFT = 37;\nconst KEY_RIGHT = 39;\nconst KEY_SPACE = 32;\nvar logo;\n\nwindow.onload = function () {\n startShaking = false;\n isShaking = false;\n logo = document.getElementById(\"logoImageID\");\n\n window.addEventListener(\"keyup\", keyUp); \n window.addEventListener(\"keydown\", keyDown);\n window.addEventListener(\"mousemove\", mouseMove);\n window.addEventListener(\"mousedown\", mouseDown);\n window.addEventListener(\"mouseup\", mouseUp);\n\n canvas = document.getElementById(\"canvasID\");\n gl = canvas.getContext(\"webgl2\");\n textCanvas = document.getElementById(\"textCanvasID\");\n textCtx = 
textCanvas.getContext(\"2d\");\n\n textCanvas.style.position = \"absolute\";\n textCanvas.style.left = \"0px\";\n textCanvas.style.top = \"0px\";\n textCanvas.width = window.innerWidth * 0.95;\n textCanvas.height = window.innerHeight * 0.95;\n\n canvas.width = window.innerWidth * 0.95;\n canvas.height = window.innerHeight * 0.95;\n gl.viewport(0, 0, canvas.width, canvas.height);\n gl.enable(gl.CULL_FACE);\n gl.clearColor(0.5, 0.7, 1.0, 1.0);\n gl.enable(gl.DEPTH_TEST);\n gl.enable(gl.BLEND);\n gl.blendFunc(gl.SRC_ALPHA, gl.ONE_MINUS_SRC_ALPHA);\n handleType();\n let vertices = [\n -0.5, -0.5, 0.5, 0, 0, 1, 0.0, 1.0,\n -0.5, 0.5, 0.5, 0, 0, 1, 0.0, 0.0,\n 0.5, 0.5, 0.5, 0, 0, 1, 1.0, 0.0,\n 0.5, -0.5, 0.5, 0, 0, 1, 1.0, 1.0,\n\n 0.5, -0.5, -0.5, 0, 0, -1, 0.0, 1.0,\n 0.5, 0.5, -0.5, 0, 0, -1, 0.0, 0.0,\n -0.5, 0.5, -0.5, 0, 0, -1, 1.0, 0.0,\n -0.5, -0.5, -0.5, 0, 0, -1, 1.0, 1.0,\n\n 0.5, -0.5, 0.5, 1, 0, 0, 0.0, 1.0,\n 0.5, 0.5, 0.5, 1, 0, 0, 0.0, 0.0,\n 0.5, 0.5, -0.5, 1, 0, 0, 1.0, 0.0,\n 0.5, -0.5, -0.5, 1, 0, 0, 1.0, 1.0,\n\n -0.5, -0.5, -0.5, -1, 0, 0, 0.0, 1.0,\n -0.5, 0.5, -0.5, -1, 0, 0, 0.0, 0.0,\n -0.5, 0.5, 0.5, -1, 0, 0, 1.0, 0.0,\n -0.5, -0.5, 0.5, -1, 0, 0, 1.0, 1.0,\n\n -0.5, 0.5, 0.5, 0, 1, 0, 0.0, 1.0,\n -0.5, 0.5, -0.5, 0, 1, 0, 0.0, 0.0,\n 0.5, 0.5, -0.5, 0, 1, 0, 1.0, 0.0,\n 0.5, 0.5, 0.5, 0, 1, 0, 1.0, 1.0,\n\n -0.5, -0.5, -0.5, 0, -1, 0, 0.0, 1.0,\n -0.5, -0.5, 0.5, 0, -1, 0, 0.0, 0.0,\n 0.5, -0.5, 0.5, 0, -1, 0, 1.0, 0.0,\n 0.5, -0.5, -0.5, 0, -1, 0, 1.0, 1.0,\n ];\n indices = [\n 0, 1, 2, 2, 3, 0,\n 4, 5, 6, 6, 7, 4,\n 8, 9, 10, 10, 11, 8,\n 12, 13, 14, 14, 15, 12,\n 16, 17, 18, 18, 19, 16,\n 20, 21, 22, 22, 23, 20\n ]; \n \n camera = new Camera(); \n camera.setPerspectiveProjection(70.0, canvas.width / canvas.height, 0.001, 1000.0); \n camera.position = new Vector3(-5, 2, 0); \n camera.orientation = new Quaternion(0, 1, 0, 1); \n camera.updateView(0);\n \n initAnimatedTexturedMeshRenderer();\n initParticleRenderer();\n initTexturedMeshRenderer();\n initSkyboxRenderer();\n\n\n loadSkyboxFaceImage(SeaSkybox[0], 256, 256, \"+x\");\n loadSkyboxFaceImage(SeaSkybox[3], 256, 256, \"+z\");\n loadSkyboxFaceImage(SeaSkybox[2], 256, 256, \"-x\");\n loadSkyboxFaceImage(SeaSkybox[3], 256, 256, \"-z\");\n loadSkyboxFaceImage(SeaSkybox[4], 256, 256, \"-y\");\n loadSkyboxFaceImage(SeaSkybox[5], 256, 256, \"+y\");\n\n trashMeshes = [createTexturedMesh(bottleData2[0],bottleData2[1]), createTexturedMesh(straw[0],straw[1]), createTexturedMesh(bag[0],bag[1])];\n\n asteroids = [trashMeshes[Math.floor(Math.random()*trashMeshes.length)]];\n\n speeds = [Math.random()*0.01];\n \n rocketMesh = createTexturedMesh(rocketData[0], rocketData[1]);\n rocketMesh.scale.scale(1);\n rocketMesh.orientation.rotate(new Vector3(-1 ,0,0), -Math.PI);\n \n\n for(i = 0; i < asteroids.length; i++){\n var fishyMesh = asteroids[i];\n // fishyMesh.textureID = generateGLTexture2D(monkeyPixels, 1024, 1024);\n fishyMesh.scale = new Vector3(0.7,0.7,0.7);\n fishyMesh.orientation.rotate(new Vector3(0, 1, 0), -Math.PI);\n fishyMesh.position.y = 2;\n fishyMesh.position.x -= (.1);\n } \n\n //Get # of words in word list\n var i = 0, strLength = lvl1.length;\n for(i; i < strLength; i++) {\n if(lvl1.charAt(i) == ' ')\n {\n spacesInList++;\n }\n }\n console.log(spacesInList);\n\n\n\n \n // let verts = [];\n // let inds = [];\n // generateUnitCubeVerticesIndexedWithNormalsTexCoords(verts, inds); // generates unit cube vertices that is indexed with normal texture coordinates\n //this.playerMesh = createTexturedMesh(verts, 
inds);\n    playerMesh = createTexturedMesh(missileData[0], missileData[1]);\n    playerMesh.orientation.rotate(new Vector3(0, 1, 0), -Math.PI);\n    //meshes = [playerMesh];\n\n    /*********************ANIMATED OCTOPUS***********************************/\n    octopus = createAnimatedTexturedMesh(octopusMeshData[0], octopusMeshData[1]);\n    octopus.textureID = generateGLTexture2D(octopusFlesh, 1024, 1024, \"linear\");\n    octopus.animations[\"idle\"] = buildAnimation(octopusAnimation[\"idle\"]);\n    octopus.currentAnimation = octopus.animations[\"idle\"];\n    octopus.orientation.rotate(new Vector3(0, 0, 1), Math.PI);\n    octopus.orientation.rotate(new Vector3(0, 1, 0), Math.PI);\n    octopus.position = new Vector3(14, -5, 0);\n    animatedMeshes = [octopus];\n    /*********************----ANIMATED OCTOPUS----***********************************/\n\n    /*********************PARTICLES***********************************/\n    bubbleEmitter = new ParticleEmitter();\n    bubbleEmitter.repeat = true;\n    for(let i = 0; i < 8; i++){\n        bubbleEmitter.positions.push(new Vector3(octopus.position.x, octopus.position.y, octopus.position.z));\n        let sc = Math.random() * 0.5;\n        bubbleEmitter.scales.push(new Vector3(sc, sc, sc));\n        bubbleEmitter.orientations.push(new Quaternion());\n        bubbleEmitter.velocities.push(new Vector3(0, 1, 0));\n        bubbleEmitter.durations.push(20);\n        bubbleEmitter.startDelays.push(Math.random() + (i * 3));\n    }\n    bubbleEmitter.updateFunction = function(pEmtr, deltaTime){\n        for(let i = 0; i < pEmtr.positions.length; i++){\n            if(pEmtr.totalTime > pEmtr.startDelays[i]){\n                pEmtr.positions[i].add(Vector3.scale(pEmtr.velocities[i], deltaTime));\n                pEmtr.positions[i].z += Math.sin(pEmtr.totalTime + pEmtr.startDelays[i]) * deltaTime;\n                pEmtr.durations[i] -= deltaTime;\n                pEmtr.scales[i].add(Vector3.scale(new Vector3(deltaTime, deltaTime, deltaTime), 0.05));\n                if(pEmtr.durations[i] <= 0){\n                    pEmtr.positions[i].x = octopus.position.x;\n                    pEmtr.positions[i].y = octopus.position.y;\n                    pEmtr.positions[i].z = octopus.position.z;\n                    let sc = Math.random() * 0.5;\n                    bubbleEmitter.scales[i].x = sc;\n                    bubbleEmitter.scales[i].y = sc;\n                    bubbleEmitter.scales[i].z = sc;\n                    pEmtr.durations[i] = 20;\n                }\n            }\n        }\n    };\n\n    bubbleEmitter.textureID = generateGLTexture2D(bubbleTexture, 64, 64, \"linear\");\n    particleEmitters.push(bubbleEmitter);\n    /*********************----PARTICLES----***********************************/\n    startTime = new Date().getTime();\n\n    setInterval(updateFrame, 1);\n    stopvar = setInterval(updateFrame, 1);\n\n}\n\n//start type\n\n//initiates texts and the words in them into arrays\nfunction handleType(){\n    \n    words = lvl1.split(\" \");\n\n}\nfunction removeChar(){\n    if(words[wordAt].length==1){\n        \n        wordAt = Math.floor(Math.random() * spacesInList);\n        \n        score++;\n        for(var i = 0; i < asteroids.length; i++){\n            asteroids[i] = trashMeshes[Math.floor(Math.random()*trashMeshes.length)];\n            asteroids[i].position.x = 120;\n        }\n        speeds[0] = 0.01;\n        fishyMesh.position.z = (Math.random() - .5) * 16;\n        fishyMesh.position.y = (Math.random() * 16)-10;\n    }else{\n        words[wordAt]= words[wordAt].substring(1, words[wordAt].length);\n    }\n}\n//when a key is typed, checks if it is the correct next letter and, if there is no word selected, picks a word with the correct first char\nfunction validType(code){\n    if(code==getKeyCode(words[wordAt].charAt(0))){\n        color=\"white\";\n        removeChar();\n        activeWords[i]=activeWord;\n        wordDone=false;\n    }else{\n        color=\"red\";\n    }\n}\n\n\n//get the next word for the asteroid when it spawns\nfunction getWord(){\n    var word=words[wordAt];\n    
return word.toString();\n}\n\nfunction getKeyCode(char) {\n var keyCode = char.charCodeAt(0);\n if(keyCode > 90) { // 90 is keyCode for 'z'\n return keyCode - 32;\n }\n return keyCode;\n }\n\n //end type\n\nfunction mouseMove(evt) {\n mouseX = evt.x;\n mouseY = evt.y;\n destZ = (((mouseX / canvas.width) * 8) - 4);\n destY = (((mouseY / canvas.height) * -8) + 6);\n}\nfunction mouseDown(evt) {\n /* rocketMeshes.push(new TexturedMesh(rocketMesh));\n rocketMeshes[rocketMeshes.length - 1].position = new Vector3(playerMesh.position.x,playerMesh.position.y,playerMesh.position.z);\n rocketMeshes[rocketMeshes.length - 1].orientation = Quaternion.rotationToQuaternion(new Vector3(1,0,-.1),1);\n\n rocketMeshes.push(new TexturedMesh(rocketMesh));\n rocketMeshes[rocketMeshes.length - 1].position = new Vector3(playerMesh.position.x,playerMesh.position.y,playerMesh.position.z);\n rocketMeshes[rocketMeshes.length - 1].orientation = Quaternion.rotationToQuaternion(new Vector3(1,0,.1),1);\n\n rocketMeshes.push(new TexturedMesh(rocketMesh));\n rocketMeshes[rocketMeshes.length - 1].position = new Vector3(playerMesh.position.x,playerMesh.position.y,playerMesh.position.z);\n rocketMeshes[rocketMeshes.length - 1].orientation = Quaternion.rotationToQuaternion(new Vector3(1,0,0),1); */\n}\nfunction mouseUp(evt) {\n speed = 0.1;\n}\nvar an = true;\nfunction keyDown(event) {\n switch (event.keyCode) {\n case KEY_SPACE:\n if(currentState == states.TITLE){\n currentState = states.GAME;\n }\n for(i = 0; i < asteroids.length; i++){\n asteroids[i].position.x = 120;\n \n }\n break;\n\n default:\n // removeChar();\n validType(event.keyCode);\n \n }\n}\n\nfunction gameState(){\n if (startShaking == true) {\n if (shakeAmount > 0) {\n isShaking = true;\n shakeAmount--;\n } else {\n isShaking = false;\n startShaking = false;\n }\n } else {\n isShaking = false;\n }\n distIntoArray = 0;\n rocketMeshes.forEach(element => {\n element.position.add(new Vector3(20 * deltaTime * ((element.orientation.x) / Math.PI),20 * deltaTime * ((element.orientation.y) / Math.PI),20 * deltaTime * ((element.orientation.z) / Math.PI)));\n \n if (element.position.x > 60)\n {\n rocketMeshes.splice(distIntoArray,1);\n element = null;\n }\n distIntoArray++;\n });\n\n for(i = 0; i < asteroids.length; i++){\n var fishyMesh = asteroids[i];\n if (fishyMesh.position.x <= -7) {\n score--;\n // fishyMesh.scale = new Vector3(Math.floor((Math.random()*2)+1),Math.floor((Math.random()*2)+1),Math.floor((Math.random()*2)+1));\n asteroids[i] = trashMeshes[Math.floor(Math.random()*trashMeshes.length)];\n fishyMesh.position.x = 120;\n fishyMesh.orientation.rotate(new Vector3(Math.random() * 360, Math.random() * 360, Math.random() * 360), 1 * deltaTime);\n fishyMesh.position.z = (Math.random() - .5) * 16;\n fishyMesh.position.y = (Math.random() * 16)-10;\n } else {\n fishyMesh.position.x -= 0.1 + ((score-5)/100);\n }\n\n fishyMesh.orientation.rotate(new Vector3(0, 0, 1), 1 * deltaTime);\n }\n\n if (isShaking == false) {\n camera.updateView(deltaTime);\n camera.position = new Vector3(-5, 2, 0); camera.orientation = new Quaternion(0, 1, 0, 1); camera.updateView(0);\n } else {\n camera.lookAt(Vector3.add(playerMesh.position, new Vector3(-10, Math.random() * (playerMesh.position.z * 2 - fishyMesh.position.z), Math.random() * (playerMesh.position.y * 2 - fishyMesh.position.y))), playerMesh.position, new Vector3(0, 1, 0));\n }\n \n let lightPos = new Vector3(4, 4, 4);\n renderSkybox(camera.projectionMatrix, camera.orientation);\n renderTexturedMeshes(meshes, camera, 
lightPos);\n renderTexturedMeshes(asteroids, camera, lightPos);\n renderTexturedMeshes(rocketMeshes, camera, lightPos);\n renderAnimatedTexturedMeshes(animatedMeshes, camera, lightPos, deltaTime);\n updateParticles(particleEmitters, deltaTime);\n renderParticles(particleEmitters, camera, deltaTime);\n \n\n textCtx.font = \"30px Arial\";\n \n textCtx.clearRect(0, 0, textCanvas.width, textCanvas.height);\n\n textCtx.fillStyle = color;\n \n var closestTrash = asteroids[0];\n\n for(j = 0; j < asteroids.length; j++){\n if(closestTrash.position.x > asteroids[j].position.x){\n closestTrash = asteroids[j];\n } \n }\n\n textCtx.fillStyle = \"#000000\";\n textCtx.fillRect(window.innerWidth/2-10-(getWord().length*10), 30, getWord().length*21, 50);\n textCtx.strokeStyle = \"#ffffff\";\n textCtx.beginPath();\n textCtx.lineWidth = \"5\";\n textCtx.rect(window.innerWidth/2-10-(getWord().length*10), 30, getWord().length*21, 50);\n textCtx.stroke();\n textCtx.fillStyle = \"#ffffff\";\n if(getWord().length<=1){\n textCtx.fillText(getWord()+\"\", window.innerWidth/2-(getWord().length*10)-7, 65);\n }else{\n textCtx.fillText(getWord()+\"\", window.innerWidth/2-(getWord().length*10), 65);\n }\n textCtx.font = \"30px Arial\";\n textCtx.fillText(\"Score: \" + score, 100, 100);\n \n //checkIntersection(fishyMesh, playerMesh);\n\n endTime = new Date().getTime();\n deltaTime = (endTime - startTime) / 1000.0;\n startTime = endTime;\n\n}\n\nfunction gameOverState(){\n gl.clearColor(1.0,0.0,0.0,1.0);\n textCtx.fillStyle = \"white\";\n textCtx.clearRect(0, 0, textCanvas.width, textCanvas.height);\n textCtx.font = \"100px Arial\";\n textCtx.fillText(\"You're Dead! Press S to restart\", 170, 200);\n clearInterval(stopvar);\n menuItems=[\"play\",\"donate\",\"credits\"];\n score = 5;\n}\nfunction shake(shakeAmount1) {\n startShaking = true;\n shakeAmount = shakeAmount1;\n}\nfunction titleState(){\n gl.clearColor(0.5, 0.7, 1.0, 1.0);\n textCtx.fillStyle = \"white\";\n textCtx.clearRect(0, 0, textCanvas.width, textCanvas.height);\n textCtx.drawImage(logo, (canvas.width-(canvas.width*0.5))/2, 0, canvas.width*0.5, (canvas.width*0.5)/2.2623771);\n textCtx.font = \"100px Arial\";\n textCtx.fillText(\"Press Space to Start Epic Game\", 150, 200);\n}\n\nfunction pauseState(){\n\n}\n\nfunction updateFrame() {\n gl.clear(gl.COLOR_BUFFER_BIT);\n gl.clear(gl.DEPTH_BUFFER_BIT);\n\n if(score <= 0){\n currentState = states.GAME_OVER;\n }\n\n if(currentState == states.GAME){\n gameState();\n } else if(currentState == states.GAME_OVER){\n gameOverState();\n } else if(currentState == states.TITLE){\n titleState();\n } else if(currentState == states.PAUSE){\n pauseState();\n }\n\n}\n\nfunction keyUp(event) {\n\n switch (event.keyCode) {\n case KEY_S: {\n if (currentState == states.GAME_OVER) {\n score = 5;\n currentState = states.TITLE;\n }\n }\n\n }\n}\n" }, { "alpha_fraction": 0.6017650365829468, "alphanum_fraction": 0.6072807312011719, "avg_line_length": 27.761905670166016, "blob_id": "bd73a7d0051a7dd8cf0c0106ed56561258a623f5", "content_id": "6fab01d61669a31d026c85596715b0a7a727d87a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1813, "license_type": "no_license", "max_line_length": 99, "num_lines": 63, "path": "/javascript_texture_exporter.py", "repo_name": "teamtako/takoRunOnline", "src_encoding": "UTF-8", "text": "\nimport bpy\nfrom os import system, name\n\nbl_info = {\n \"name\": \"JS Texture Export\", \n \"blender\": (2, 80, 0),\n \"category\": \"Object\",\n}\n\ndef clear(): 
\n    if name == 'nt': \n        system('cls') \n    else: \n        system('clear')\nclear()\n\ndef addVertex(list, vert):\n    for i, tv in enumerate(list):\n        if vert == tv:\n            return i\n    list.append(vert)\n    return len(list) - 1\n    \nclass JSTextureExporter(bpy.types.Operator):\n    bl_idname = \"object.js_texture_export\" \n    bl_label = \"JS TEXTURE EXPORT\" \n    bl_options = {'REGISTER', 'UNDO'}\n\n    imgName: bpy.props.StringProperty(name=\"Image Name\")\n    varName: bpy.props.StringProperty(name=\"Javascript Variable Name\")\n    outFile: bpy.props.StringProperty(name=\"Output File Name\")\n    outFileLoc: bpy.props.StringProperty(name=\"Output File Location\", subtype=\"FILE_PATH\")\n\n    def execute(self, context): #CODE TO RUN WHEN EXECUTED\n        if self.varName == '' or self.outFile == '' or self.imgName == '' or self.outFileLoc == '':\n            return {'FINISHED'}\n        \n        # flatten the image's RGBA floats (0..1) into 0..255 ints\n        texturePixels = []\n        for p in bpy.data.images[self.imgName].pixels:\n            texturePixels.append(int(p * 255))\n        \n\n        textureData = \"var \" + self.varName + \" = \" + str(texturePixels) + \";\"; \n        \n        file = open(self.outFileLoc + self.outFile + '.js', 'w')\n        file.write(textureData);\n        file.close()\n        \n        return {'FINISHED'}\n\ndef menu_func(self, context): \n    self.layout.operator(JSTextureExporter.bl_idname)\n\ndef register():\n    bpy.utils.register_class(JSTextureExporter) \n    bpy.types.VIEW3D_MT_object.append(menu_func)\n    \ndef unregister():\n    bpy.utils.unregister_class(JSTextureExporter) \n    bpy.types.VIEW3D_MT_object.remove(menu_func)\n    \nif __name__ == \"__main__\":\n    register()\n" }, { "alpha_fraction": 0.6503496766090393, "alphanum_fraction": 0.6608391404151917, "avg_line_length": 23.83333396911621, "blob_id": "d950a0211de105ad503fb8dfd60f5db364775f90", "content_id": "0e9b3825ee18689d4f59617fe36c451f455061e1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 286, "license_type": "no_license", "max_line_length": 68, "num_lines": 12, "path": "/texture_exporter.py", "repo_name": "teamtako/takoRunOnline", "src_encoding": "UTF-8", "text": "import bpy\n\ntexturePixels = []\nfor p in bpy.data.images['Untitled'].pixels:\n    texturePixels.append(int(p * 255))\n    \n\ntextureData = \"var monkeyTexture = \" + str(texturePixels) + \";\"; \n    \nfile = open('/Users/dave/Desktop/monkeyPic.js', 'w')\nfile.write(textureData);\nfile.close()\n" }, { "alpha_fraction": 0.5143312215805054, "alphanum_fraction": 0.5210191011428833, "avg_line_length": 30.089109420776367, "blob_id": "69c690d22dab16bfcd628fee13a5c0db09783668", "content_id": "5957fef599ba8c6ecc01a61acedb2dfbbc8ea384", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3140, "license_type": "no_license", "max_line_length": 90, "num_lines": 101, "path": "/javascript_mesh_exporter.py", "repo_name": "teamtako/takoRunOnline", "src_encoding": "UTF-8", "text": "import bpy\nfrom os import system, name\n\nbl_info = {\n    \"name\": \"JS Mesh Export\", \n    \"blender\": (2, 80, 0),\n    \"category\": \"Object\",\n}\n\ndef clear(): \n    if name == 'nt': \n        system('cls') \n    else: \n        system('clear')\nclear()\n\ndef addVertex(list, vert):\n    for i, tv in enumerate(list):\n        if vert == tv:\n            return i\n    list.append(vert)\n    return len(list) - 1\n    \nclass JSMeshExporter(bpy.types.Operator):\n    bl_idname = \"object.js_mesh_export\" \n    bl_label = \"JS MESH EXPORT\" \n    bl_options = {'REGISTER', 'UNDO'}\n\n    varName: bpy.props.StringProperty(name=\"Javascript Variable Name\")\n    outFileLoc: bpy.props.StringProperty(name=\"Output File Location\", subtype=\"FILE_PATH\")\n    outFile: bpy.props.StringProperty(name=\"Output File Name\")\n\n    def execute(self, context): #CODE TO RUN WHEN EXECUTED\n        if self.varName == '' or self.outFile == '' or self.outFileLoc == '':\n            return {'FINISHED'}\n        \n        vertexList = []\n        indexList = []\n        obj = bpy.context.active_object\n        polys = obj.data.polygons\n        vertices = obj.data.vertices\n        uvs = obj.data.uv_layers[0].data\n\n        for p in polys:\n            indCt = len(indexList)\n            for i, v in enumerate(p.vertices):\n                pos = []\n                norm = []\n                uv = []\n                \n                pos.append(round(vertices[v].co.x, 4))\n                pos.append(round(vertices[v].co.y, 4))\n                pos.append(round(vertices[v].co.z, 4))\n                \n                norm.append(round(vertices[v].normal.x, 4))\n                norm.append(round(vertices[v].normal.y, 4))\n                norm.append(round(vertices[v].normal.z, 4))\n                \n                uv.append(round(uvs[p.loop_indices[i]].uv.x, 4))\n                uv.append(round(uvs[p.loop_indices[i]].uv.y, 4))\n                \n                vertex = [pos, norm, uv]\n                \n                if i < 3:\n                    indexList.append(addVertex(vertexList, vertex))\n                else:\n                    indexList.append(indexList[len(indexList) - 1])\n                    indexList.append(addVertex(vertexList, vertex))\n                    indexList.append(indexList[indCt])\n\n        nvList = []\n        for v in vertexList:\n            for p in v[0]:\n                nvList.append(p)\n            for n in v[1]:\n                nvList.append(n)\n            for u in v[2]:\n                nvList.append(u)\n\n        fileText = \"var \" + self.varName + \" = [\"\n        fileText += str(nvList) + \",\"\n        fileText += str(indexList) + \"];\\n\"\n        \n        file = open(self.outFileLoc + self.outFile + '.js', 'w')\n        file.write(fileText);\n        file.close()\n        return {'FINISHED'}\n\ndef menu_func(self, context): \n    self.layout.operator(JSMeshExporter.bl_idname)\n\ndef register():\n    bpy.utils.register_class(JSMeshExporter) \n    bpy.types.VIEW3D_MT_object.append(menu_func)\n    \ndef unregister():\n    bpy.utils.unregister_class(JSMeshExporter) \n    bpy.types.VIEW3D_MT_object.remove(menu_func)\n    \nif __name__ == \"__main__\":\n    register()\n" }, { "alpha_fraction": 0.5521514415740967, "alphanum_fraction": 0.5604130625724792, "avg_line_length": 29.79787254333496, "blob_id": "5600a3365ea6131b26581e4bc3e02755ef7f0e43", "content_id": "ad22f9420fa0ba89beee03e4b18c6a63b87f206a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2905, "license_type": "no_license", "max_line_length": 90, "num_lines": 94, "path": "/javascript_cubemap_exporter.py", "repo_name": "teamtako/takoRunOnline", "src_encoding": "UTF-8", "text": "import bpy\nfrom os import system, name\n\nbl_info = {\n    \"name\": \"JS Cube Map Export\", \n    \"blender\": (2, 80, 0),\n    \"category\": \"Object\",\n}\n\ndef clear(): \n    if name == 'nt': \n        system('cls') \n    else: \n        system('clear')\nclear()\n \nclass JSCubeMapExporter(bpy.types.Operator):\n    bl_idname = \"object.js_cubemap_export\" \n    bl_label = \"JS CUBE MAP EXPORT\" \n    bl_options = {'REGISTER', 'UNDO'}\n\n    nxImg: bpy.props.StringProperty(name=\"-X Image\")\n    pxImg: bpy.props.StringProperty(name=\"+X Image\")\n    nzImg: bpy.props.StringProperty(name=\"-Z Image\")\n    pzImg: bpy.props.StringProperty(name=\"+Z Image\")\n    nyImg: bpy.props.StringProperty(name=\"-Y Image\")\n    pyImg: bpy.props.StringProperty(name=\"+Y Image\")\n    varName: bpy.props.StringProperty(name=\"Javascript Variable Name\")\n    outFile: bpy.props.StringProperty(name=\"Output File Name\")\n    outFileLoc: bpy.props.StringProperty(name=\"Output File Location\", subtype=\"FILE_PATH\")\n\n    def execute(self, context): #CODE TO RUN WHEN EXECUTED\n        if self.varName == '' or self.outFile == '' or self.outFileLoc == '':\n            return {'FINISHED'}\n        if self.nxImg == '' or self.pxImg == '' or self.nzImg == '':\n            return {'FINISHED'}\n        if self.pzImg == '' or self.nyImg == '' or self.pyImg == '':\n            return {'FINISHED'}\n        \n        fileText = \"var \" + self.varName + \" = [\"\n\n        # pz/nz intentionally read the opposite Z image here (appears to be a Blender/WebGL Z-axis flip)\n        nx = bpy.data.images[self.nxImg]\n        pz = bpy.data.images[self.nzImg]\n        px = bpy.data.images[self.pxImg]\n        nz = bpy.data.images[self.pzImg]\n        py = bpy.data.images[self.pyImg]\n        ny = bpy.data.images[self.nyImg]\n\n        pix = []\n        for p in nx.pixels:\n            pix.append(int(p * 255))\n        fileText += str(pix) + \",\";\n        pix = []\n        for p in pz.pixels:\n            pix.append(int(p * 255))\n        fileText += str(pix) + \",\";\n        pix = []\n        for p in px.pixels:\n            pix.append(int(p * 255))\n        fileText += str(pix) + \",\";\n        pix = []\n        for p in nz.pixels:\n            pix.append(int(p * 255))\n        fileText += str(pix) + \",\";\n        pix = []\n        for p in py.pixels:\n            pix.append(int(p * 255))\n        fileText += str(pix) + \",\";\n        pix = []\n        for p in ny.pixels:\n            pix.append(int(p * 255))\n        fileText += str(pix) + \",\";\n\n        fileText += \"];\"\n\n        file = open(self.outFileLoc + self.outFile +'.js', 'w');\n        file.write(fileText);\n        file.close();\n        \n        return {'FINISHED'}\n\ndef menu_func(self, context): \n    self.layout.operator(JSCubeMapExporter.bl_idname)\n\ndef register():\n    bpy.utils.register_class(JSCubeMapExporter) \n    bpy.types.VIEW3D_MT_object.append(menu_func)\n    \ndef unregister():\n    bpy.utils.unregister_class(JSCubeMapExporter) \n    bpy.types.VIEW3D_MT_object.remove(menu_func)\n    \nif __name__ == \"__main__\":\n    register()\n    \n    \n" }, { "alpha_fraction": 0.5505882501602173, "alphanum_fraction": 0.5594117641448975, "avg_line_length": 25.388059616088867, "blob_id": "13c9d473773167926322eedd90cafc014712e88f", "content_id": "799a2b2af5ca7cd310c625932a29cb15f44b0b3d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1700, "license_type": "no_license", "max_line_length": 59, "num_lines": 67, "path": "/mesh_exporter.py", "repo_name": "teamtako/takoRunOnline", "src_encoding": "UTF-8", "text": "import bpy\nfrom os import system, name\n\nvertexList = []\nindexList = []\nobj = bpy.context.active_object\npolys = obj.data.polygons\nvertices = obj.data.vertices\nuvs = obj.data.uv_layers[0].data\n\ndef clear(): \n    if name == 'nt': \n        system('cls') \n    else: \n        system('clear')\nclear()\n\ndef addVertex(list, vert):\n    for i, tv in enumerate(list):\n        if vert == tv:\n            return i\n    list.append(vert)\n    return len(list) - 1\n\nfor p in polys:\n    indCt = len(indexList)\n    for i, v in enumerate(p.vertices):\n        pos = []\n        norm = []\n        uv = []\n        \n        pos.append(round(vertices[v].co.x, 4))\n        pos.append(round(vertices[v].co.y, 4))\n        pos.append(round(vertices[v].co.z, 4))\n        \n        norm.append(round(vertices[v].normal.x, 4))\n        norm.append(round(vertices[v].normal.y, 4))\n        norm.append(round(vertices[v].normal.z, 4))\n        \n        uv.append(round(uvs[p.loop_indices[i]].uv.x, 4))\n        uv.append(round(uvs[p.loop_indices[i]].uv.y, 4))\n        \n        vertex = [pos, norm, uv]\n        \n        if i < 3:\n            indexList.append(addVertex(vertexList, vertex))\n        else:\n            indexList.append(indexList[len(indexList) - 1])\n            indexList.append(addVertex(vertexList, vertex))\n            indexList.append(indexList[indCt])\n\nnvList = []\nfor v in vertexList:\n    for p in v[0]:\n        nvList.append(p)\n    for n in v[1]:\n        nvList.append(n)\n    for u in v[2]:\n        nvList.append(u)\n\nfileText = \"var monkeyData = [\"\nfileText += str(nvList) + \",\"\nfileText += str(indexList) + \"];\\n\"\n    \nfile = open('/Users/dave/Desktop/monkey_mesh.js', 'w')\nfile.write(fileText);\nfile.close()" } ]
6
liu67224657/joyme-channel
https://github.com/liu67224657/joyme-channel
97fdd67efed0795587b0dd8680d02be60827a2d8
ac4c23858663deb5f5c48e0340b8c7e9f148744d
8cca6223477bff9e8e9abee10fdf88f170ff338a
refs/heads/master
2021-08-06T13:42:02.963567
2017-11-06T03:41:47
2017-11-06T03:41:47
109,643,668
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.40211331844329834, "alphanum_fraction": 0.4108221232891083, "avg_line_length": 36.125, "blob_id": "a4aeb85f8b53de537dade527fe3d2a5359253c5c", "content_id": "d7b29bf89c67e929815f45d2f6709029c70fad92", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "PHP", "length_bytes": 8774, "license_type": "no_license", "max_line_length": 137, "num_lines": 232, "path": "/controller/sourceBaidugl.class.php", "repo_name": "liu67224657/joyme-channel", "src_encoding": "UTF-8", "text": "<?php\n\nif (!defined('IN')) die('bad request');\n/**\n * Created by PhpStorm.\n * User: pengzhang\n * Date: 2017/4/12\n * Time: ไธ‹ๅˆ4:05\n */\ninclude_once(AROOT . 'controller' . DS . 'sourceAbstract.class.php');\n\nuse Joyme\\core\\Request;\nuse Joyme\\net\\RedisHelper;\n\nclass sourceBaiduglController extends sourceAbstractController\n{\n //ๆธ ้“ๅญ—ๆฎต\n public $fields = array('indexData', 'title', 'image', 'url', 'pubtime', 'category');\n\n\n /**\n * ๆŽฅๅฃๆŸฅ่ฏข\n */\n public function query($cid)\n {\n //่Žทๅ–ๅ‚ๆ•ฐ\n $uptime = Request::getParam('update_time', time() - 24 * 3600);\n $type = Request::getParam('type', 1); // 1ๆธธๆˆ 2ๆ•ฐๆฎ\n $page = Request::getParam('page', 1);\n $size = Request::getParam('page_size', 20);\n\n $skip = ($page - 1) * $size;\n\n $count = 0;\n\n $data = array(\n 'org_name' => '็€่ฟท็ฝ‘',\n 'org_url' => 'http://www.joyme.com',\n 'update_time' => date('Y-m-d H:i:s', $uptime),\n 'update_count' => $count,\n 'list' => array()\n );\n\n if ($type == 1) {\n //ๆŸฅ่ฏขๆธธๆˆไฟกๆฏ\n $jChannelGameModel = new jChannelGameModel();\n $where = array('cid' => $cid, 'gamestatus' => 1);\n $channelgamelist = $jChannelGameModel->getData($where, $size, $skip);\n $datacount = $jChannelGameModel->getCount($where);\n\n $gids = '0';\n foreach ($channelgamelist as $val) {\n $gids .= ',' . $val['gid'];\n }\n\n //ๆŸฅ่ฏขๆธธๆˆ้™„ๅŠ ๆ•ฐๆฎ\n $jGameModel = new jGameModel();\n $where2 = array('gid' => array('in', $gids));\n $gamelist = $jGameModel->getData($where2, $size);\n\n foreach ($channelgamelist as $o => $p) {\n foreach ($gamelist as $k => $v) {\n if ($p['gid'] == $v['gid']) {\n $temparr = json_decode($v['extra'], true);\n $temparr = empty($temparr) ? array() : $temparr;\n $channelgamelist[$o] = array_merge($p, $v, $temparr);\n }\n }\n }\n\n\n foreach ($channelgamelist as $v) {\n $categoryTypeSet = '';//ๅˆ†็ฑป\n foreach ($v['categoryTypeSet'] as $v2) {\n $categoryTypeSet .= ',' . $v2['value'];\n }\n $categoryTypeSet = substr($categoryTypeSet, 1);\n $platformMap = ''; //ๅนณๅฐ\n foreach ($v['platformMap'] as $v2) {\n foreach ($v2 as $v3) {\n $platformMap .= ',' . $v3['desc'];\n }\n }\n $platformMap = substr($platformMap, 1);\n\n $levelGame = $v['levelGame'] == false ? 'ๅฆ' : 'ๆ˜ฏ';\n\n $gameurl = 'http://www.joyme.com/collection/' . 
$v['gid'];\n\n if (strtotime($v['createDate']) < $uptime) {\n continue;\n }\n\n $data['list'][] = array(\n 'url' => $gameurl,\n 'name' => $v['gameName'],\n 'game_alias' => $v['anotherName'],\n 'category' => $categoryTypeSet,\n 'developers' => self::null2str($v['gameDeveloper']),\n 'game_platform' => $platformMap,\n 'ios_url' => self::null2str($v['iosDownload']),\n 'android_url' => self::null2str($v['androidDownload']),\n 'web_url' => self::null2str($v['webpageDownload']),\n 'pc_url' => self::null2str($v['pcDownload']),\n 'xboxone_url' => self::null2str($v['xboxoneDownload']),\n 'ps4_url' => self::null2str($v['ps4Download']),\n 'has_tollgate' => $levelGame,\n 'topic_names' => '',\n 'icon' => $v['gameIcon'],\n 'publish_time' => date('Y-m-d', strtotime($v['gamePublicTime'])),\n 'update_time' => date('Y-m-d', strtotime($v['createDate'])),\n );\n }\n $data['update_count'] = $datacount;\n\n } else {\n //ๆŸฅ่ฏขๆธธๆˆไฟกๆฏ\n $jChannelGameModel = new jChannelGameModel();\n $where = array('cid' => $cid, 'gamestatus' => 1);\n $channelgamelist = $jChannelGameModel->getData($where, 1000);\n\n $gids = '0';\n foreach ($channelgamelist as $val) {\n $gids .= ',' . $val['gid'];\n }\n\n //ๆŸฅ่ฏขๆธธๆˆ้™„ๅŠ ๆ•ฐๆฎ\n $jGameModel = new jGameModel();\n $where2 = array('gid' => array('in', $gids));\n $gamelist = $jGameModel->getData($where2, 1000);\n\n foreach ($channelgamelist as $o => $p) {\n foreach ($gamelist as $k => $v) {\n if ($p['gid'] == $v['gid']) {\n $temparr = json_decode($v['extra'], true);\n $temparr = empty($temparr) ? array() : $temparr;\n $channelgamelist[$o] = array_merge($p, $v, $temparr);\n }\n }\n }\n\n //้‡ๅปบๆ•ฐ็ป„\n $channelgameidlist = array_column($channelgamelist, 'gid');\n $channelgamelist = array_combine($channelgameidlist, $channelgamelist);\n\n //ๆŸฅ่ฏขๆธ ้“ๆ•ฐๆฎ\n $jChannelDataModel = new jChannelDataModel();\n $where = array('cid' => $cid, 'gid' => array('in', $gids), 'source' => 1, 'isblock' => 0, 'pubdate' => array('gt', $uptime));\n $channelData = $jChannelDataModel->getData($where, $size, $skip);\n $datacount = $jChannelDataModel->getCount($where);\n $aids = '0';\n foreach ($channelData as $val) {\n $aids .= ',' . $val['aid'];\n }\n //ๆŸฅ่ฏข้™„ๅŠ ๆ•ฐๆฎ\n $jSourceDataModel = new jSourceDataModel();\n $where2 = array('source' => 1, 'aid' => array('in', $aids));\n $sourcedata = $jSourceDataModel->getData($where2, $size);\n\n //ๆŸฅ่ฏขbody\n $cmsModel = new cmsModel();\n $bodys = $cmsModel->getBodyByIds($aids);\n //้‡ๅปบๆ•ฐ็ป„\n $bodys = array_column($bodys, 'body', 'aid');\n// var_dump($bodys);exit;\n //ๅˆๅนถๆ•ฐๆฎ\n foreach ($channelData as $k => $val) {\n if (empty($bodys[$val['aid']])) {\n continue;\n }\n foreach ($sourcedata as $row) {\n if ($val['aid'] == $row['aid']) {\n $tmp = json_decode($row['data'], true);\n if ($tmp) {\n $tmparr = array(\n 'url' => $val['url'],\n 'title' => $tmp['title'],\n 'author' => $tmp['writer'],\n 'game_name' => $channelgamelist[$val['gid']]['gameName'],\n 'topic_name' => '',\n 'tollgate_name' => '',\n 'description' => $tmp['description'],\n 'content' => $bodys[$val['aid']],\n 'img_item' => self::getCatImgList($bodys[$val['aid']]),\n 'cover_img' => $val['litpic'],\n 'publish_time' => date('Y-m-d H:i:s', $val['pubdate']),\n 'update_time' => date('Y-m-d H:i:s', $val['pubdate'])\n );\n\n $data['list'][] = $tmparr;\n }\n }\n }\n }\n $data['update_count'] = $datacount;\n\n }\n\n return $data;\n }\n\n public static function null2str($val)\n {\n $val = empty($val) ? 
'' : $val;\n        return $val;\n    }\n\n    public static function getCatImgList($content)\n    {\n        $res = array();\n        preg_match_all('/<img.*?src=\"(.*?)\".*?width=\"(.*?)\".*?height=\"(.*?)\".*?>/is', $content, $match);\n        preg_match_all('/<img.*?width=\"(.*?)\".*?height=\"(.*?)\".*?src=\"(.*?)\".*?\\/>/is', $content, $match2);\n        if (empty($match[1]) && empty($match2[3])) {\n            return array();\n        } else {\n            if (!empty($match[1])) {\n                foreach ($match[1] as $k => $src) {\n                    $res[] = $src;\n                }\n            }\n            if (!empty($match2[3])) {\n                foreach ($match2[3] as $k => $src) {\n                    $res[] = $src;\n                }\n            }\n            $res = array_values(array_unique($res));\n            return $res;\n        }\n    }\n\n\n}" }, { "alpha_fraction": 0.47450247406959534, "alphanum_fraction": 0.48383083939552307, "avg_line_length": 23.967741012573242, "blob_id": "e06a6b467a6971c8a4e4e5e1b51ddb4ff0189a70", "content_id": "619a3d7636edc4518c72b98cf9c07f7584408409", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "PHP", "length_bytes": 1608, "license_type": "no_license", "max_line_length": 99, "num_lines": 62, "path": "/model/jGameModel.class.php", "repo_name": "liu67224657/joyme-channel", "src_encoding": "UTF-8", "text": "<?php\r\n\r\n/**\r\n * Description:\r\n * Author: gradydong\r\n * Date: 2017/4/19\r\n * Time: 14:20\r\n * Copyright: Joyme.com\r\n */\r\nif (!defined('IN')) {\r\n    die('bad request');\r\n}\r\nuse Joyme\\db\\JoymeModel;\r\nuse Joyme\\net\\Curl;\r\n\r\nclass jGameModel extends JoymeModel\r\n{\r\n    public $tableName = 'game';\r\n\r\n    public function __construct()\r\n    {\r\n        $this->db_config = array(\r\n            'hostname' => $GLOBALS['config']['db']['db_host'],\r\n            'username' => $GLOBALS['config']['db']['db_user'],\r\n            'password' => $GLOBALS['config']['db']['db_password'],\r\n            'database' => $GLOBALS['config']['db']['channel_db_name']\r\n        );\r\n        parent::__construct();\r\n    }\r\n\r\n    public function getData($where,$limit=10,$skip=0){\r\n        return $this->select('*', $where, '', $limit, $skip);\r\n    }\r\n\r\n    public function getRowData($where){\r\n        return $this->selectRow('*', $where);\r\n    }\r\n    public function addData($data){\r\n        return $this->insert($data);\r\n    }\r\n\r\n\r\n    public function getsearchgame($gamename)\r\n    {\r\n        if ($gamename) {\r\n            $url = \"http://api.joyme.\".$GLOBALS['domain'].\"/collection/api/gamearchive/searchgame\";\r\n            $curl = new Curl();\r\n            $result = $curl->Get($url,array(\r\n                'searchtext'=> $gamename\r\n            ));\r\n            $result = json_decode($result,true);\r\n            if ($result['rs']=='1') {\r\n                return $result['result'];\r\n            } else {\r\n                return false;\r\n            }\r\n        } else {\r\n            return false;\r\n        }\r\n    }\r\n}" }, { "alpha_fraction": 0.5289767980575562, "alphanum_fraction": 0.5356317758560181, "avg_line_length": 36.700347900390625, "blob_id": "8a8dfbaa342720dcaffe71a8abad72a89d1a2efa", "content_id": "d717a434e94a064a691abd37da8630fadfad3125", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "PHP", "length_bytes": 11003, "license_type": "no_license", "max_line_length": 150, "num_lines": 287, "path": "/controller/joymewiki.class.php", "repo_name": "liu67224657/joyme-channel", "src_encoding": "UTF-8", "text": "<?php\n/**\n * Created by PhpStorm.\n * User: xinshi\n * Date: 2015/10/29\n * Time: 12:21\n */\nif( !defined('IN') ) die('bad request');\ninclude_once( AROOT . 
'controller'.DS.'app.class.php' );\nuse Joyme\\core\\Request;\n\nclass joymewikiController extends appController{\n\n protected $accessKeyId = 'm2LJu94lrAKPMGBm';\n protected $accessKeySecret = 'jO3aBvvxQKfBoBEHXadiLhG0YFi8OJ';\n protected $serverUrl = 'http://rds.aliyuncs.com/';\n protected $dBInstanceId = 'rdsnu7brenu7bre';\n private $accountName = 'wikiuser';\n\n private $dbName = '';\n private $charSet = 'utf8';\n private $dbDescription = '';\n\n public $table_num = 92;\n public $file_path = './public/data/wiki.sql';\n public $sh_path = './public/sh/';\n public $wiki_key = null;\n\n function index(){\n\n global $GLOBALS;\n $wikimodel = M('joymeWikiModel');\n $pb_show_num = 50; //ๆฏ้กตๆ˜พ็คบๆกๆ•ฐ\n $pb_page = Request::get('pb_page',1); //่Žทๅ–ๅฝ“ๅ‰้กต็ \n $conditions['wiki_name'] = Request::getParam('wiki_name');\n $total = $wikimodel->allWikiList($conditions,true);\n $data['item'] = $wikimodel->allWikiList($conditions,false,$pb_page,$pb_show_num);\n $page = M('pageModel');\n $page->mainPage(array('total' => $total,'perpage'=>$pb_show_num,'nowindex'=>$pb_page,'pagebarnum'=>10));\n $data['page_str'] = $page->show(2,$conditions);\n $data['static_url'] = $GLOBALS['static_url'];\n $data['wiki_name'] = $wikimodel->allWikiName();\n $data['param'] = $conditions['wiki_name'];\n render($data,'web','wiki/wiki_list');\n }\n\n //็ผ–่พ‘\n function showEditPage(){\n\n global $GLOBALS;\n $wiki_id = Request::get('wiki_id');\n if(empty($wiki_id)){\n return '';\n }\n $wikimodel = M('joymeWikiModel');\n $data['item'] = $wikimodel->selectInfoByWikiId($wiki_id);\n $data['static_url'] = $GLOBALS['static_url'];\n render($data,'web','wiki/edit_wiki');\n }\n\n function updateWikiData(){\n\n $update_id = Request::post('wiki_id');\n $data['joyme_wiki_key'] = Request::post('wiki_key');\n $data['context_path'] = Request::post('wiki_type');\n $data['joyme_wiki_domain'] = Request::post('wiki_domain');\n $data['joyme_wiki_name'] = Request::post('wiki_name');\n $data['support_subdomain'] = Request::post('supportSubDomain');\n $data['pc_keep_jscss'] = Request::post('pcKeepJscss');\n $data['m_keep_jscss'] = Request::post('mKeepJscss');\n $wikimodel = M('joymeWikiModel');\n if($wikimodel->updateWikiById($data,$update_id)){\n echo 'ๆ“ไฝœๆˆๅŠŸ <a href=\"?c=joymewiki&a=index\">่ฟ”ๅ›žๅˆ—่กจ</a>';\n }else{\n echo 'ๆ“ไฝœๅคฑ่ดฅ <a href=\"?c=joymewiki&a=index\">่ฟ”ๅ›žๅˆ—่กจ</a>';\n }\n }\n\n //ๆ–ฐๅผ€wiki\n function createWiki(){\n\n global $GLOBALS;\n if($_POST){\n $result = false;\n $data['site_name'] = Request::post('wiki_name');\n $wiki_key = strtolower(Request::post('wiki_key'));\n $data['site_type'] = Request::post('wiki_type');\n $data['second_domain'] = Request::post('is_secondary_domain');\n $data['create_reason'] = Request::post('create_reason');\n $data['create_remark'] = Request::post('create_note');\n $data['create_time'] = time();\n $data['user_name'] = $_COOKIE['joume_username'];\n $wiki_type = $data['site_type'];\n $site_name = $data['site_name'];\n $wiki_title = Request::post('wiki_title');\n $is_mobile = Request::post('is_mobile');\n $wiki_keywords = Request::post('wiki_keywords');\n $wiki_description = Request::post('wiki_description');\n if(empty($wiki_key)|| empty($data['site_name']) || empty($data['user_name']) || empty($data['site_type'])||empty($data['create_reason'])){\n jsonEncode(array('rs'=>1,'msg'=>'Important parameters to be empty','data'=>''));\n }\n $data['site_key'] = $wiki_key.'wiki';\n $user_editstatus = $wiki_type==1?1:0;\n $this->dbName = $data['site_key'];\n //ๅˆ›ๅปบๆ•ฐๆฎๅบ“\n 
$wiikimodel = M('createDatabaseModel');\n if($GLOBALS['domain'] == 'com'){\n $this->dbDescription = $wiki_description;\n if(count($this->alyfindDataBase())>=1){\n if($wiikimodel->getTableNumByDbName($this->dbName)==0){\n $result = true;\n }\n }else{\n $result = $this->alyCreateDatabase();\n sleep(5);\n }\n }else{\n $result = $wiikimodel->createDataBase($this->dbName);\n }\n if($result){\n //ๅˆ›ๅปบ่กจ\n $createmodel = M('wikiCreateModel');\n $wiikimodel->createTable($this->createSql(),$this->dbName);\n if($wiikimodel->getTableNumByDbName($this->dbName)==$this->table_num){\n $wiikimodel->insertSeoTable($wiki_type,$site_name,$wiki_title,$wiki_keywords,$wiki_description,$user_editstatus,$is_mobile);\n $data['site_key'] = $wiki_key;\n if($createmodel->addData($data)){\n jsonEncode(array('rs'=>0,'msg'=>'The table creation success','data'=>''));\n }else{\n jsonEncode(array('rs'=>2,'msg'=>'The wiki log write failed','data'=>''));\n }\n }else{\n jsonEncode(array('rs'=>3,'msg'=>'The table creation failed','data'=>''));\n }\n }else{\n jsonEncode(array('rs'=>4,'msg'=>'The database creation failed','data'=>''));\n }\n }else{\n $data['static_url'] = $GLOBALS['static_url'];\n render($data,'web','wiki/create_wiki');\n }\n }\n\t\n\t//ๅˆ›ๅปบๆˆๅŠŸๅŽๆ‰ง่กŒ็š„่„šๆœฌ\n\t//$wiki_type 1 ๅŽŸ็”Ÿwiki 2 ๆ•ฐๅญ—็ซ™wiki\n\tfunction addsh(){\n\n\t\tglobal $GLOBALS;\n $wikikey = strtolower(Request::getParam('wikikey'));\n $wiki_type = Request::getParam('wiki_type');\n\t\tif($wiki_type == '1'){\n\t\t\t$rs = shell_exec(\"/usr/bin/sudo python \".$this->sh_path.\"add_nginx_rule.py \".$wikikey.\" \".$GLOBALS['domain']);\n\t\t\tif(intval($rs) == 0){\n\t\t\t\tshell_exec('/usr/bin/sudo /usr/local/nginx/sbin/nginx -s reload -c /usr/local/nginx/conf/nginx.conf');\n\t\t\t}else{\n jsonEncode(array('rs'=>5,'msg'=>'nginx conf check fail','data'=>''));\n }\n\t\t}\n echo '<meta charset=\"UTF-8\">';\n echo 'DBๅˆ›ๅปบๅฎŒๆฏ•๏ผŒnginx้‡ๅฏๅฎŒๆฏ•๏ผŒ็ŽฐๅœจๅŽปๅˆ›ๅปบๅ†…็ฝ‘่งฃๆž...';\n\t\t$sign = md5($wikikey.$wiki_type.$GLOBALS['domain'].'*&^jfFPGN^5fsf#;');\n\t\t$url = 'http://t.enjoyf.com/wiki/createwiki.php?wikikey='.$wikikey.'&wikitype='.$wiki_type.'&domain='.$GLOBALS['domain'].'&sign='.$sign;\n\t\techo \"<meta http-equiv='refresh' content='1;url=\\\"{$url}\\\"'>\";exit;\n\t}\n\n\n\t//้‡ๅฏnginx\n\tfunction reloadnginx(){\n\n\t\tshell_exec('/usr/bin/sudo /usr/local/nginx/sbin/nginx -s reload -c /usr/local/nginx/conf/nginx.conf');\n global $GLOBALS;\n $data['static_url'] = $GLOBALS['static_url'];\n $time = time()+600;\n $data['y'] = date('Y',$time);\n $data['m'] = date('m',$time);\n $data['d'] = date('d',$time);\n $data['h'] = date('H',$time);\n $data['i'] = date('i',$time);\n $data['s'] = date('s',$time);\n render($data,'web','wiki/tip_success');\n\t}\n\n\n function againReloadNginx(){\n\n shell_exec('/usr/bin/sudo /usr/local/nginx/sbin/nginx -s reload -c /usr/local/nginx/conf/nginx.conf');\n $this->wikiList();\n }\n\n\n //Ali cloud query the database exists\n function alyfindDataBase(){\n\n include_once AROOT.'public'. DS .'aliyun'. DS .'TopSdk.php';\n $c = new AliyunClient;\n $c->accessKeyId = $this->accessKeyId;\n $c->accessKeySecret = $this->accessKeySecret;\n $c->serverUrl=$this->serverUrl;\n\n $req = new Rds20130528DescribeDatabasesRequest();\n $req->setdBInstanceId($this->dBInstanceId);\n $req->setdBName($this->dbName);\n\n $resp = $c->execute($req);\n return $resp->Databases->Database;\n }\n\n //Ali cloud create the database\n function alyCreateDatabase(){\n\n include_once AROOT.'public'. DS .'aliyun'. 
DS .'TopSdk.php';\n        $c = new AliyunClient;\n        $c->accessKeyId = $this->accessKeyId;\n        $c->accessKeySecret = $this->accessKeySecret;\n        $c->serverUrl=$this->serverUrl;\n        $req = new Rds20130528CreateDatabaseRequest();\n        $req->setAccountName($this->accountName);\n        $req->setdBName($this->dbName);\n        $req->setCharacterSetName($this->charSet);\n        $req->setdBDescription($this->dbDescription.' ');\n        $req->setdBInstanceId($this->dBInstanceId);\n        $resp = $c->execute($req);\n        if(!isset($resp->Code)){\n            return true;\n        }\n        return false;\n    }\n\n    //ๆ–ฐๅผ€wikiๅˆ—่กจ\n    function wikiList(){\n\n        $model = M('wikiCreateModel');\n        $pb_show_num = 50; //ๆฏ้กตๆ˜พ็คบๆกๆ•ฐ\n        $pb_page = Request::get('pb_page',1); //่Žทๅ–ๅฝ“ๅ‰้กต็ \n        $conditions['wiki_type'] = Request::getParam('wiki_type');\n        $conditions['create_reason'] = Request::getParam('create_reason');\n        $conditions['start_time'] = strtotime(Request::getParam('start_time'));\n        $conditions['end_time'] = strtotime(Request::getParam('end_time'));\n        $total = $model->allWikiList($conditions,true);\n        $data['item'] = $model->allWikiList($conditions,false,$pb_page,$pb_show_num);\n\n        $page = M('pageModel');\n        $conditions['start_time'] = Request::getParam('start_time');\n        $conditions['end_time'] = Request::getParam('end_time');\n        $page->mainPage(array('total' => $total,'perpage'=>$pb_show_num,'nowindex'=>$pb_page,'pagebarnum'=>10));\n        $data['page_str'] = $page->show(2,$conditions);\n        $data['static_url'] = $GLOBALS['static_url'];\n        $data['param'] = $conditions;\n        render($data,'web','wiki/wiki_log_list');\n    }\n\n    //get createSql\n    function createSql(){\n\n        if(file_exists($this->file_path)){\n            $lines=file($this->file_path);\n            $sqlstr=\"\";\n            foreach($lines as $line){\n                $line=trim($line);\n                if($line!=\"\"){\n                    if(!($line[0]==\"#\" || $line[0].$line[1]==\"--\")){\n                        $sqlstr.=$line;\n                    }\n                }\n            }\n            $sqls=explode(\";\",rtrim($sqlstr,\";\"));\n            return $sqls;\n        }else{\n            jsonEncode(array('rs'=>6,'msg'=>'Could not find wiki.sql','data'=>''));\n        }\n    }\n\n    //check wiki key\n    function checkWikiKeyIsExist(){\n\n        $wiki_key = Request::post('wiki_key');\n        $model = M('wikiCreateModel');\n        $result = $model->getWikiInfoByWikiKey($wiki_key);\n        $this->checkResult($result);\n    }\n\n\n    function test(){\n\n        print_r($this->createSql());\n    }\n}" }, { "alpha_fraction": 0.48906049132347107, "alphanum_fraction": 0.4993565082550049, "avg_line_length": 24.967741012573242, "blob_id": "56fee8a83dcff799fc113ba3457b744b02595495", "content_id": "0c50b7ff84188ddfdaee9c967c96c0c0c0c0c0c0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "PHP", "length_bytes": 1608, "license_type": "no_license", "max_line_length": 75, "num_lines": 64, "path": "/model/wikiDopinionModel.class.php", "repo_name": "liu67224657/joyme-channel", "src_encoding": "UTF-8", "text": "<?php\n\n/**\n * Created by PhpStorm.\n * User: xinshi\n * Date: 2015/10/29\n * Time: 17:08\n */\nif (!defined('IN'))\n    die('bad request');\n\nuse Joyme\\db\\JoymeModel;\n\nclass wikiDopinionModel extends JoymeModel{\n\n    public $fields = array();\n\n    public $tableName = 'wiki_card_opinion';\n\n    public function __construct() {\n\n        $this->db_config = array(\n            'hostname' => $GLOBALS['config']['rds']['db_host'],\n            'username' => $GLOBALS['config']['rds']['db_user'],\n            'password' => $GLOBALS['config']['rds']['db_password'],\n            'database' => $GLOBALS['config']['rds']['db_name']\n        );\n        parent::__construct();\n    }\n\n    //opinion list: returns the total count when $total is true, otherwise a page of rows\n    public function allOpinionList($conditions,$total,$page=1,$pageage=10){\n\n        $skip = intval(($page-1)*$pageage);\n\n        //filter by wiki name\n        if(!empty($conditions['wiki_name'])){\n            $where['wiki'] = $conditions['wiki_name'];\n        }\n        //filter by removal state\n        if(!empty($conditions['remove_state'])){\n            $where['remove_state'] = $conditions['remove_state'];\n        }\n        if($total){\n            $count = $this->count($where);\n        }else{\n            $arr = $this->select('*',$where,'',$pageage,$skip);\n        }\n        $result = $total?$count:$arr;\n        return $result;\n    }\n\n    //update the removal state\n    function updateState($remove_state,$opinion_id){\n\n        $data = array(\n            'remove_state'=>$remove_state\n        );\n        $where = array(\n            'opinion_id'=>$opinion_id\n        );\n        return $this->update($data, $where);\n    }\n}" }, { "alpha_fraction": 0.39873218536376953, "alphanum_fraction": 0.41109350323677063, "avg_line_length": 27.178571701049805, "blob_id": "bb7a44ee5f179574f43f295945b6dff526075eb0", "content_id": "21fc8b301bfe98b0ca558b8d02bd01f932b9089d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "PHP", "length_bytes": 3263, "license_type": "no_license", "max_line_length": 104, "num_lines": 112, "path": "/controller/sourceBaiduglb.class.php", "repo_name": "liu67224657/joyme-channel", "src_encoding": "UTF-8", "text": "<?php\n\nif (!defined('IN')) die('bad request');\n/**\n * Created by PhpStorm.\n * User: pengzhang\n * Date: 2017/4/12\n * Time: ไธ‹ๅˆ4:05\n */\ninclude_once(AROOT . 'controller' . DS . 'sourceAbstract.class.php');\n\nuse Joyme\\core\\Request;\nuse Joyme\\net\\RedisHelper;\n\nclass sourceBaiduglbController extends sourceAbstractController\n{\n    //ๆธ ้“ๅญ—ๆฎต\n    public $fields = array('indexData', 'title', 'image', 'url', 'pubtime', 'category');\n\n\n    /**\n     * ๆŽฅๅฃๆŸฅ่ฏข\n     */\n    public function query($cid)\n    {\n        //่Žทๅ–ๅ‚ๆ•ฐ\n        $gameid = Request::getParam('gameid', '');\n\n        if(empty($gameid)){\n            return 'no gameid';\n        }\n\n        //ๆŸฅ่ฏขๆธธๆˆไฟกๆฏ\n        $jChannelGameModel = new jChannelGameModel();\n        $where0 = array('cid' => $cid, 'gid'=>$gameid, 'gamestatus' => 1);\n        $gameinfo = $jChannelGameModel->getRowData($where0);\n\n        if(empty($gameinfo)){\n            return 'no this game';\n        }\n\n        //่Žทๅ–ๅ‚ๆ•ฐ\n        //atype:็ฑปๅž‹ 0่ต„่ฎฏ็ฑป 1่ง†้ข‘็ฑป 4ๆ”ป็•ฅ็ฑป 5่ต„ๆ–™็ฑป\n        $atype = Request::getParam('type', '');\n\n        switch ($atype){\n            case 'data':\n                $atype = 5;\n                break;\n            case 'news':\n                $atype = 0;\n                break;\n            case 'strategy':\n                $atype = 4;\n                break;\n            case 'video':\n                $atype = 1;\n                break;\n            default:\n                return 'no this type';\n        }\n\n        //ๆŸฅ่ฏขๆธ ้“ๆ•ฐๆฎ\n        $jChannelDataModel = new jChannelDataModel();\n        $where = array('atype' => $atype, 'gid'=>$gameid, 'cid' => $cid, 'source' => 1, 'isblock' => 0);\n        $data = $jChannelDataModel->getData($where, 300);\n\n        $aids = '0';\n        foreach ($data as $val) {\n            $aids .= ',' . 
$val['aid'];\n }\n //ๆŸฅ่ฏข้™„ๅŠ ๆ•ฐๆฎ\n $jSourceDataModel = new jSourceDataModel();\n $where2 = array('source' => 1, 'aid' => array('in', $aids));\n $sourcedata = $jSourceDataModel->getData($where2, 300);\n\n $newdata = \"\";\n //ๅˆๅนถๆ•ฐๆฎ\n foreach ($data as $k => $val) {\n foreach ($sourcedata as $row) {\n if ($val['aid'] == $row['aid']) {\n $tmp = json_decode($row['data'], true);\n if ($tmp) {\n $tag = explode(',',$tmp['keywords']);\n $tmparr = array(\n 'indexData' => $gameinfo['gamename'],\n 'title' => $tmp['title'],\n 'image' => $val['litpic'],\n 'url' => $val['url'],\n 'pubtime' => $val['pubdate'],\n 'category' => $tag[0]\n );\n if ($atype == 1) {\n $tmparr['playTime'] = 180;\n } else if ($atype == 5) {\n $tmparr['childCategory'] = '';\n }\n $newdata .= json_encode($tmparr).\"\\n\";\n }\n }\n }\n }\n\n if(empty($newdata)){\n return 'no data';\n }\n\n return $newdata;\n }\n\n\n}" }, { "alpha_fraction": 0.48647308349609375, "alphanum_fraction": 0.49367088079452515, "avg_line_length": 28.709922790527344, "blob_id": "c78349d12171aa9ce6cfb0712feccf7cef1a8426", "content_id": "433539828d64e6d53bdbdf0c38609fe24a3e8759", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 4177, "license_type": "no_license", "max_line_length": 134, "num_lines": 131, "path": "/static/script/style/action.js", "repo_name": "liu67224657/joyme-channel", "src_encoding": "UTF-8", "text": "\r\n$(function(){\r\n getConfig();\r\n\r\n $(\"#main-nav li ul li a\").live(\"click\",function(){\r\n\r\n $(this).addClass(\"current\").parents().siblings().find(\"a\").removeClass(\"current\");\r\n $(this).parents().parents().parents().siblings().find(\"a\").removeClass(\"current\");\r\n\r\n var url = $(this).attr(\"data-href\");\r\n var handler = 0;\r\n\r\n $(\".main-content-inner\").each(function(){\r\n var src = $(this).find(\"iframe\").attr(\"src\");\r\n if(url == src){\r\n $(this).show().siblings().hide();\r\n handler = 1;\r\n }\r\n })\r\n\r\n if(handler == 0){\r\n var factory = $('<div class=\"main-content-inner\" ><iframe src=\"\" width=\"100%\" height=\"\" frameborder=\"0\"></iframe></div>');\r\n factory.find(\"iframe\").attr(\"src\",url);\r\n $(\"#main-content\").append(factory);\r\n factory.show().siblings().hide();\r\n }\r\n\r\n myResize();\r\n\r\n\r\n })\r\n\r\n takeCookie();\r\n})\r\n/**\r\n * ๅพช็Žฏ้…็ฝฎๆ–‡ไปถ่ฟ›่กŒ่ต‹ๅ€ผ//ๆ‰“ๅŒ…็”Ÿๆˆๅทฆไพงๅˆ—่กจ\r\n * @method getConfig()\r\n * @param {empty} ๆฒกๆœ‰ๅ‚ๆ•ฐ\r\n * @return {empty} ๆฒกๆœ‰่ฟ”ๅ›žๅ€ผ\r\n */\r\nfunction getConfig(){\r\n var Oall = $('<li>' +\r\n '<a href=\"#\" class=\"nav-top-item\" style=\"padding-right: 15px;\"> ' +\r\n 'ๆ–‡็ซ ' +\r\n '</a>' +\r\n '<ul style=\"display: none;\">' +\r\n '</ul></li>');\r\n var Oli = $('<li><a class=\"\" href=\"#1/105\" data-href=\"http://www.baidu.com\">็ฎก็†ๆ–‡็ซ </a></li>');\r\n for(var i in Oconfigjson){\r\n var Oall = $('<li>' +\r\n '<a href=\"#\" class=\"nav-top-item\" style=\"padding-right: 15px;\"> ' +\r\n 'ๆ–‡็ซ ' +\r\n '</a>' +\r\n '<ul style=\"display: none;\">' +\r\n '</ul></li>');\r\n var Omain = Oconfigjson[i];\r\n var Oid = Omain.id;\r\n var OhomePage = Omain.homePage;\r\n var OmainText = Omain.menu[0].text;\r\n var Omenu = Omain.menu;\r\n var handle1 = i;\r\n Oall.find(\".nav-top-item\").text(OmainText);\r\n\r\n for(var j in Omain.menu[0].items){\r\n var Oreal = Omain.menu[0].items[j];\r\n var handle2 = Oreal.id;\r\n var Otext = Oreal.text;\r\n var Ohref = Oreal.href;\r\n var handler = \"#\"+handle1+\"/\"+handle2;\r\n var Oli = $('<li><a class=\"\" 
href=\"#1/105\" data-href=\"http://www.baidu.com\">็ฎก็†ๆ–‡็ซ </a></li>');\r\n\r\n Oli.find(\"a\").attr(\"data-href\",Ohref);\r\n Oli.find(\"a\").attr(\"href\",handler);\r\n Oli.find(\"a\").text(Otext);\r\n\r\n Oall.find(\"ul\").append(Oli);\r\n }\r\n\r\n $(\"#main-nav\").append(Oall);\r\n }\r\n}\r\n/**\r\n * ๅˆทๆ–ฐ็•™ไฝๅฝ“ๅ‰้กต้ข window.location.hash\r\n * @method takeCookie()\r\n * @param {empty} ๆฒกๆœ‰ๅ‚ๆ•ฐ\r\n * @return {empty} ๆฒกๆœ‰่ฟ”ๅ›žๅ€ผ\r\n */\r\nfunction takeCookie(){\r\n var Ocookie = window.location.hash;\r\n var mm = Ocookie.toString();\r\n var amm = mm.substr(1,Ocookie.length);\r\n// $(Ocookie).substr(1,$(Ocookie).length);\r\n var abmm = amm.split(\"/\");\r\n $(\"#main-nav li\").find(\".nav-top-item\").each(function(index){\r\n if(index == abmm[0]){\r\n $(this).trigger(\"click\");\r\n $(this).parents().find(\"ul li\").each(function(index){\r\n\r\n var newUrl = $(this).find(\"a\").attr(\"href\");\r\n if(Ocookie == newUrl){\r\n $(this).find(\"a\").trigger(\"click\");\r\n }\r\n })\r\n }\r\n })\r\n\r\n\r\n}\r\n\r\n\r\n/**\r\n * iframe ้š้กต้ขๅคงๅฐๆ”นๅ˜ๅคงๅฐ\r\n * @method myResize()\r\n * @param {empty} ๆฒกๆœ‰ๅ‚ๆ•ฐ\r\n * @return {empty} ๆฒกๆœ‰่ฟ”ๅ›žๅ€ผ\r\n */\r\nfunction myResize(){\r\n var pageWidth = window.innerWidth;\r\n var pageHeight = window.innerHeight;\r\n var resizeWidth;\r\n var resizeHeight;\r\n var chatcontentResizeHeight;\r\n var textareaResizeWidth;\r\n if(typeof pageWidth != \"number\"){\r\n pageWidth = document.documentElement.clientWidth || document.body.clientWidth;\r\n pageHeight = document.documentElement.clientHeight || document.body.clientHeight;\r\n }\r\n\r\n $(\".main-content-inner\").each(function(){\r\n $(this).find(\"iframe\").css(\"height\",pageHeight);\r\n })\r\n}\r\n\r\n\r\n" }, { "alpha_fraction": 0.48607146739959717, "alphanum_fraction": 0.49123522639274597, "avg_line_length": 25.950571060180664, "blob_id": "a76f06a7718e1f97f3eda2b49fe68463d82274f6", "content_id": "0fb723bf058a6b744d2cf54f6cd57a57d79ae9a8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "PHP", "length_bytes": 7367, "license_type": "no_license", "max_line_length": 198, "num_lines": 263, "path": "/_lp/core/lib/core.function.php", "repo_name": "liu67224657/joyme-channel", "src_encoding": "UTF-8", "text": "<?php\r\n\r\nfunction transcribe($aList, $aIsTopLevel = true)\r\n{\r\n $gpcList = array();\r\n $isMagic = get_magic_quotes_gpc();\r\n\r\n foreach ($aList as $key => $value) {\r\n if (is_array($value)) {\r\n $decodedKey = ($isMagic && !$aIsTopLevel) ? stripslashes($key) : $key;\r\n $decodedValue = transcribe($value, false);\r\n } else {\r\n $decodedKey = stripslashes($key);\r\n $decodedValue = ($isMagic) ? stripslashes($value) : $value;\r\n }\r\n $gpcList[$decodedKey] = $decodedValue;\r\n }\r\n return $gpcList;\r\n}\r\n\r\n$_GET = transcribe($_GET);\r\n$_POST = transcribe($_POST);\r\n$_REQUEST = transcribe($_REQUEST);\r\n\r\n\r\nfunction v($str)\r\n{\r\n return isset($_REQUEST[$str]) ? $_REQUEST[$str] : false;\r\n}\r\n\r\nfunction z($str)\r\n{\r\n return strip_tags($str);\r\n}\r\n\r\nfunction c($str)\r\n{\r\n return isset($GLOBALS['config'][$str]) ? $GLOBALS['config'][$str] : false;\r\n}\r\n\r\nfunction g($str)\r\n{\r\n return isset($GLOBALS[$str]) ? 
$GLOBALS[$str] : false;\r\n}\r\n\r\nfunction t($str)\r\n{\r\n return trim($str);\r\n}\r\n\r\nfunction u($str)\r\n{\r\n return urlencode($str);\r\n}\r\n\r\n// render functiones\r\nfunction render($data = NULL, $layout = NULL, $sharp = 'default')\r\n{\r\n if ($layout == null) {\r\n if (is_ajax_request()) {\r\n $layout = 'ajax';\r\n } elseif (is_mobile_request()) {\r\n $layout = 'mobile';\r\n } else {\r\n $layout = 'web';\r\n }\r\n }\r\n\r\n $GLOBALS['layout'] = $layout;\r\n $GLOBALS['sharp'] = $sharp;\r\n\r\n $layout_file = AROOT . 'view/layout/' . $layout . '/' . $sharp . '.tpl.html';\r\n\r\n if (file_exists($layout_file)) {\r\n @extract($data);\r\n require($layout_file);\r\n } else {\r\n $layout_file = CROOT . 'view/layout/' . $layout . '/' . $sharp . '.tpl.html';\r\n if (file_exists($layout_file)) {\r\n @extract($data);\r\n require($layout_file);\r\n }\r\n }\r\n}\r\n\r\nfunction ajax_echo($info)\r\n{\r\n if (!headers_sent()) {\r\n header(\"Content-Type:text/html;charset=utf-8\");\r\n header(\"Expires: Thu, 01 Jan 1970 00:00:01 GMT\");\r\n header(\"Cache-Control: no-cache, must-revalidate\");\r\n header(\"Pragma: no-cache\");\r\n }\r\n\r\n echo $info;\r\n}\r\n\r\n\r\nfunction info_page($info, $title = '็ณป็ปŸๆถˆๆฏ')\r\n{\r\n if (is_ajax_request())\r\n $layout = 'ajax';\r\n else\r\n $layout = 'web';\r\n\r\n $data['top_title'] = $data['title'] = $title;\r\n $data['info'] = $info;\r\n\r\n render($data, $layout, 'info');\r\n\r\n}\r\n\r\nfunction is_ajax_request()\r\n{\r\n $headers = apache_request_headers();\r\n return (isset($headers['X-Requested-With']) && ($headers['X-Requested-With'] == 'XMLHttpRequest')) || (isset($headers['x-requested-with']) && ($headers['x-requested-with'] == 'XMLHttpRequest'));\r\n}\r\n\r\nif (!function_exists('apache_request_headers')) {\r\n function apache_request_headers()\r\n {\r\n foreach ($_SERVER as $key => $value) {\r\n if (substr($key, 0, 5) == \"HTTP_\") {\r\n $key = str_replace(\" \", \"-\", ucwords(strtolower(str_replace(\"_\", \" \", substr($key, 5)))));\r\n $out[$key] = $value;\r\n } else {\r\n $out[$key] = $value;\r\n }\r\n }\r\n\r\n return $out;\r\n }\r\n}\r\n\r\nfunction is_mobile_request()\r\n{\r\n $_SERVER['ALL_HTTP'] = isset($_SERVER['ALL_HTTP']) ? 
$_SERVER['ALL_HTTP'] : '';\r\n\r\n $mobile_browser = '0';\r\n\r\n if (preg_match('/(up.browser|up.link|mmp|symbian|smartphone|midp|wap|phone|iphone|ipad|ipod|android|xoom)/i', strtolower($_SERVER['HTTP_USER_AGENT'])))\r\n $mobile_browser++;\r\n\r\n if ((isset($_SERVER['HTTP_ACCEPT'])) and (strpos(strtolower($_SERVER['HTTP_ACCEPT']), 'application/vnd.wap.xhtml+xml') !== false))\r\n $mobile_browser++;\r\n\r\n if (isset($_SERVER['HTTP_X_WAP_PROFILE']))\r\n $mobile_browser++;\r\n\r\n if (isset($_SERVER['HTTP_PROFILE']))\r\n $mobile_browser++;\r\n\r\n $mobile_ua = strtolower(substr($_SERVER['HTTP_USER_AGENT'], 0, 4));\r\n $mobile_agents = array(\r\n 'w3c ', 'acs-', 'alav', 'alca', 'amoi', 'audi', 'avan', 'benq', 'bird', 'blac',\r\n 'blaz', 'brew', 'cell', 'cldc', 'cmd-', 'dang', 'doco', 'eric', 'hipt', 'inno',\r\n 'ipaq', 'java', 'jigs', 'kddi', 'keji', 'leno', 'lg-c', 'lg-d', 'lg-g', 'lge-',\r\n 'maui', 'maxo', 'midp', 'mits', 'mmef', 'mobi', 'mot-', 'moto', 'mwbp', 'nec-',\r\n 'newt', 'noki', 'oper', 'palm', 'pana', 'pant', 'phil', 'play', 'port', 'prox',\r\n 'qwap', 'sage', 'sams', 'sany', 'sch-', 'sec-', 'send', 'seri', 'sgh-', 'shar',\r\n 'sie-', 'siem', 'smal', 'smar', 'sony', 'sph-', 'symb', 't-mo', 'teli', 'tim-',\r\n 'tosh', 'tsm-', 'upg1', 'upsi', 'vk-v', 'voda', 'wap-', 'wapa', 'wapi', 'wapp',\r\n 'wapr', 'webc', 'winw', 'winw', 'xda', 'xda-'\r\n );\r\n\r\n if (in_array($mobile_ua, $mobile_agents))\r\n $mobile_browser++;\r\n\r\n if (strpos(strtolower($_SERVER['ALL_HTTP']), 'operamini') !== false)\r\n $mobile_browser++;\r\n\r\n // Pre-final check to reset everything if the user is on Windows\r\n if (strpos(strtolower($_SERVER['HTTP_USER_AGENT']), 'windows') !== false)\r\n $mobile_browser = 0;\r\n\r\n // But WP7 is also Windows, with a slightly different characteristic\r\n if (strpos(strtolower($_SERVER['HTTP_USER_AGENT']), 'windows phone') !== false)\r\n $mobile_browser++;\r\n\r\n if ($mobile_browser > 0)\r\n return true;\r\n else\r\n return false;\r\n}\r\n\r\nfunction uses($m)\r\n{\r\n load('lib/' . basename($m));\r\n}\r\n\r\nfunction NC( $class )\r\n{\r\n include_once(AROOT . 'controller' . DS . $class .'.class.php');\r\n $className = $class.'Controller';\r\n return new $className();\r\n}\r\n\r\nfunction memckey(/*...*/ ){\r\n $args = func_get_args();\r\n $key = $GLOBALS['config']['redis']['prefix'];\r\n foreach ( $args as $arg ) {\r\n $arg = str_replace( ':', '%3A', $arg );\r\n $key = $key . ':' . $arg;\r\n }\r\n return strtr( $key, ' ', '_' );\r\n}\r\n\r\nfunction load($file_path)\r\n{\r\n $file = AROOT . $file_path;\r\n if (file_exists($file)) {\r\n //echo $file;\r\n require($file);\r\n\r\n } else {\r\n //echo CROOT . $file_path;\r\n require(CROOT . $file_path);\r\n }\r\n\r\n}\r\n\r\n// ===========================================\r\n// load db functions\r\n// ===========================================\r\nif (defined('SAE_APPNAME'))\r\n include_once(CROOT . 'lib/db.sae.function.php');\r\nelse\r\n include_once(CROOT . 'lib/db.function.php');\r\n\r\nif (!function_exists('_')) {\r\n function _($string, $data = null)\r\n {\r\n if (!isset($GLOBALS['i18n'])) {\r\n $c = c('default_language');\r\n if (strlen($c) < 1) $c = 'zh_cn';\r\n\r\n $lang_file = AROOT . 'local' . DS . basename($c) . 
'.lang.php';\r\n if (file_exists($lang_file)) {\r\n include_once($lang_file);\r\n $GLOBALS['i18n'] = $c;\r\n } else\r\n $GLOBALS['i18n'] = 'zh_cn';\r\n\r\n\r\n }\r\n\r\n //print_r( $GLOBALS['language'][$GLOBALS['i18n']] );\r\n\r\n\r\n if (isset($GLOBALS['language'][$GLOBALS['i18n']][$string]))\r\n $to = $GLOBALS['language'][$GLOBALS['i18n']][$string];\r\n else\r\n $to = $string;\r\n\r\n if ($data == null)\r\n return $to;\r\n else {\r\n if (!is_array($data)) $data = array($data);\r\n return vsprintf($to, $data);\r\n }\r\n\r\n }\r\n} \t\r\n\r\n\r\n\r\n" }, { "alpha_fraction": 0.3787696957588196, "alphanum_fraction": 0.3865290880203247, "avg_line_length": 38.726383209228516, "blob_id": "627bb59b6a6266985d4576bab03f0041a397c0b5", "content_id": "16264abff53282a8c34350be18356325750f1dce", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "PHP", "length_bytes": 12733, "license_type": "no_license", "max_line_length": 148, "num_lines": 307, "path": "/model/jChannelDataModel.class.php", "repo_name": "liu67224657/joyme-channel", "src_encoding": "UTF-8", "text": "<?php\r\n\r\n/**\r\n * Description:\r\n * Author: gradydong\r\n * Date: 2017/4/19\r\n * Time: 14:20\r\n * Copyright: Joyme.com\r\n */\r\nif (!defined('IN')) {\r\n die('bad request');\r\n}\r\nuse Joyme\\db\\JoymeModel;\r\n\r\nclass jChannelDataModel extends JoymeModel\r\n{\r\n public $tableName = 'channeldata';\r\n\r\n public static $atypes = array(\r\n \"0\" => \"่ต„่ฎฏ็ฑป\",\r\n \"1\" => \"่ง†้ข‘็ฑป\",\r\n \"4\" => \"ๆ”ป็•ฅ็ฑป\",\r\n \"5\" => \"่ต„ๆ–™็ฑป\",\r\n );\r\n\r\n public function __construct()\r\n {\r\n $this->db_config = array(\r\n 'hostname' => $GLOBALS['config']['db']['db_host'],\r\n 'username' => $GLOBALS['config']['db']['db_user'],\r\n 'password' => $GLOBALS['config']['db']['db_password'],\r\n 'database' => $GLOBALS['config']['db']['channel_db_name']\r\n );\r\n parent::__construct();\r\n }\r\n\r\n public function getData($where, $limit = 10, $skip = 0)\r\n {\r\n return $this->select('*', $where, 'pubdate desc', $limit, $skip);\r\n }\r\n\r\n public function addData($data)\r\n {\r\n return $this->insert($data);\r\n }\r\n\r\n public static function getAtype($key)\r\n {\r\n if (self::$atypes[$key]) {\r\n return self::$atypes[$key];\r\n } else {\r\n return '';\r\n }\r\n }\r\n\r\n public function getCount($where){\r\n return $this->count($where);\r\n }\r\n\r\n public static function getAllAtypes()\r\n {\r\n $atypes = self::$atypes;\r\n $allatypes = array();\r\n foreach ($atypes as $k => $v) {\r\n $allatypes[$k]['atype_desc'] = $v;\r\n $allatypes[$k]['checked'] = '';\r\n }\r\n return $allatypes;\r\n }\r\n\r\n //่Žทๅ–ๆธธๆˆ็›ธๅ…ณๆ–‡็ซ ๅˆ—่กจ\r\n public function getGameLists($where,$psize,$skip,$jchannellists,$constr)\r\n {\r\n $lists = $this->select(\"aid,gid,atype,isblock,url,datatype\", $where, 'pubdate DESC', $psize, $skip, 'aid');\r\n if ($lists) {\r\n $cids = array_column($jchannellists, 'cid');\r\n //ๆ•ฐๆฎid\r\n $aids = array_column($lists, 'aid');\r\n $jsourcedatamodel = new jSourceDataModel();\r\n $jsourcedatalists = $jsourcedatamodel->select('aid,data', array(\r\n 'source' => 1,\r\n 'aid' => array('in', $aids)\r\n ));\r\n $datas = array();\r\n if ($jsourcedatalists) {\r\n $datas = array_column($jsourcedatalists, 'data', 'aid');\r\n }\r\n //ๆธธๆˆid\r\n $gids = array_column($lists, 'gid');\r\n $jgamemodel = new jGameModel();\r\n $jgamelists = $jgamemodel->select('gid,extra', array(\r\n 'gid' => array('in', $gids)\r\n ));\r\n $gameinfos = array();\r\n if ($jgamelists) {\r\n $gameinfos = 
array_column($jgamelists, 'extra', 'gid');\r\n }\r\n //ๆŸฅ่ฏขๆธธๆˆๆทปๅŠ ็š„ๆธ ้“\r\n $jchannelgamemodel = new jChannelGameModel();\r\n $jchannelgamelists = $jchannelgamemodel->select(\"cid,gid\", array(\r\n 'gid' => array('in', $gids),\r\n 'gamestatus' => 1,\r\n 'datatype' => 1\r\n ));\r\n $jchannelgamecids = array();\r\n if ($jchannelgamelists) {\r\n foreach ($jchannelgamelists as $jk => $jchannelgamelist) {\r\n $jchannelgamecids[$jchannelgamelist['gid']][] = $jchannelgamelist['cid'];\r\n }\r\n }\r\n //ๅˆคๆ–ญๆธ ้“ๆ˜ฏๅฆๅทฒ้€‰\r\n $cidlists = $this->select(\"aid,cid\", array(\r\n 'source' => 1,\r\n 'aid' => array('in', $aids),\r\n 'datatype' => 1\r\n ), 'pubdate DESC', '', '');\r\n $jchannelcids = array();\r\n if ($cidlists) {\r\n foreach ($cidlists as $ck => $cidlist) {\r\n $jchannelcids[$cidlist['aid']][] = $cidlist['cid'];\r\n }\r\n }\r\n //ๅˆคๆ–ญ็Šถๆ€ๆ˜ฏๅฏ็”จ่ฟ˜ๆ˜ฏ็ฆ็”จ\r\n $isblocklists = $this->select(\"aid,cid,isblock\", array(\r\n 'source' => 1,\r\n 'aid' => array('in', $aids),\r\n 'datatype' => 1\r\n ), 'pubdate DESC', '', '');\r\n $jchannelisblocks = array();\r\n if ($isblocklists) {\r\n foreach ($isblocklists as $ik => $isblocklist) {\r\n $jchannelisblocks[$isblocklist['aid']][$isblocklist['cid']] = $isblocklist['isblock'];\r\n }\r\n }\r\n foreach ($lists as $k => $list) {\r\n if($list['datatype']==1){\r\n $lists[$k]['datatype'] = 'ๆธธๆˆ';\r\n }\r\n elseif ($list['datatype']==2){\r\n $lists[$k]['datatype'] = 'ๆ ็›ฎ';\r\n }\r\n if ($datas[$list['aid']]) {\r\n $data = json_decode($datas[$list['aid']], true);\r\n $lists[$k]['title'] = $data['title'];\r\n $lists[$k]['ctime'] = date(\"Y-m-d H:i:s\", $data['pubdate']);\r\n }\r\n $lists[$k]['atype_desc'] = self::getAtype($list['atype']);\r\n if ($gameinfos[$list['gid']]) {\r\n $gameinfo = json_decode($gameinfos[$list['gid']], true);\r\n $lists[$k]['gameName'] = $gameinfo['gameName'];\r\n }\r\n $channels = array();\r\n foreach ($cids as $cid) {\r\n if ($jchannelcids[$list['aid']]) {\r\n if (in_array($cid, $jchannelcids[$list['aid']])) {\r\n if ($jchannelgamecids[$list['gid']]) {\r\n if (in_array($cid, $jchannelgamecids[$list['gid']])) {\r\n if ($jchannelisblocks[$list['aid']][$cid] == '1') {\r\n $channels[$cid] = \"&#10006\";\r\n } else {\r\n $channels[$cid] = \"&#10004\";\r\n }\r\n } else {\r\n $channels[$cid] = \"&#10006\";\r\n }\r\n } else {\r\n $channels[$cid] = \"&#10006\";\r\n }\r\n } else {\r\n $channels[$cid] = \"&#10006\";\r\n }\r\n }\r\n }\r\n $lists[$k]['channels'] = $channels;\r\n if ($list['atype'] == 5) {\r\n if ($list['isblock'] == '1') {\r\n $lists[$k]['op'] = '<a href=\"javascript:;\" class=\"status\" data-aid=\"' . $list['aid'] . '\" data-isblock=\"0\">ๅฏ็”จ</a><br>';\r\n } else {\r\n $lists[$k]['op'] = '<a href=\"javascript:;\" class=\"status\" data-aid=\"' . $list['aid'] . '\" data-isblock=\"1\">็ฆ็”จ</a><br>';\r\n }\r\n } else {\r\n $lists[$k]['op'] = '<a href=\"?c=jchanneldata&a=aedit&aid=' . $list['aid'] . '&atype=' . $list['atype'] .$constr. 
'\">็ผ–่พ‘</a><br>';\r\n }\r\n }\r\n }\r\n return $lists;\r\n }\r\n\r\n //่Žทๅ–ๆ ็›ฎ็›ธๅ…ณๆ–‡็ซ ๅˆ—่กจ\r\n public function getLabelLists($where,$psize,$skip,$jchannellists,$constr)\r\n {\r\n $lists = $this->select(\"aid,gid,atype,isblock,url,datatype\", $where, 'pubdate DESC', $psize, $skip, 'aid,gid');\r\n if ($lists) {\r\n $cids = array_column($jchannellists, 'cid');\r\n //ๆ•ฐๆฎid\r\n $aids = array_column($lists, 'aid');\r\n $jsourcedatamodel = new jSourceDataModel();\r\n $jsourcedatalists = $jsourcedatamodel->select('aid,data', array(\r\n 'source' => 1,\r\n 'aid' => array('in', $aids)\r\n ));\r\n $datas = array();\r\n if ($jsourcedatalists) {\r\n $datas = array_column($jsourcedatalists, 'data', 'aid');\r\n }\r\n //ๆ ็›ฎid\r\n $gids = array_column($lists, 'gid');\r\n $jchannelgamemodel = new jChannelGameModel();\r\n $jchannelgamelists = $jchannelgamemodel->select('gid,gamename',array(\r\n 'gid' => array('in', $gids),\r\n 'datatype' => 2\r\n ),'','','','gid');\r\n $gamenames = array();\r\n if($jchannelgamelists){\r\n $gamenames = array_column($jchannelgamelists,'gamename','gid');\r\n }\r\n\r\n //ๆŸฅ่ฏขๆธธๆˆๆทปๅŠ ็š„ๆธ ้“\r\n $jchannelgamelists = $jchannelgamemodel->select(\"cid,gid\", array(\r\n 'gid' => array('in', $gids),\r\n 'gamestatus' => 1,\r\n 'datatype' => 2\r\n ));\r\n $jchannelgamecids = array();\r\n if ($jchannelgamelists) {\r\n foreach ($jchannelgamelists as $jk => $jchannelgamelist) {\r\n $jchannelgamecids[$jchannelgamelist['gid']][] = $jchannelgamelist['cid'];\r\n }\r\n }\r\n //ๅˆคๆ–ญๆธ ้“ๆ˜ฏๅฆๅทฒ้€‰\r\n $cidlists = $this->select(\"aid,cid\", array(\r\n 'source' => 1,\r\n 'aid' => array('in', $aids),\r\n 'datatype' => 2\r\n ), 'pubdate DESC', '', '');\r\n $jchannelcids = array();\r\n if ($cidlists) {\r\n foreach ($cidlists as $ck => $cidlist) {\r\n $jchannelcids[$cidlist['aid']][] = $cidlist['cid'];\r\n }\r\n }\r\n //ๅˆคๆ–ญ็Šถๆ€ๆ˜ฏๅฏ็”จ่ฟ˜ๆ˜ฏ็ฆ็”จ\r\n $isblocklists = $this->select(\"aid,cid,isblock\", array(\r\n 'source' => 1,\r\n 'aid' => array('in', $aids),\r\n 'datatype' => 2\r\n ), 'pubdate DESC', '', '');\r\n $jchannelisblocks = array();\r\n if ($isblocklists) {\r\n foreach ($isblocklists as $ik => $isblocklist) {\r\n $jchannelisblocks[$isblocklist['aid']][$isblocklist['cid']] = $isblocklist['isblock'];\r\n }\r\n }\r\n foreach ($lists as $k => $list) {\r\n if($list['datatype']==1){\r\n $lists[$k]['datatype'] = 'ๆธธๆˆ';\r\n }\r\n elseif ($list['datatype']==2){\r\n $lists[$k]['datatype'] = 'ๆ ็›ฎ';\r\n }\r\n if ($datas[$list['aid']]) {\r\n $data = json_decode($datas[$list['aid']], true);\r\n $lists[$k]['title'] = $data['title'];\r\n $lists[$k]['ctime'] = date(\"Y-m-d H:i:s\", $data['pubdate']);\r\n }\r\n $lists[$k]['atype_desc'] = self::getAtype($list['atype']);\r\n if ($gamenames[$list['gid']]) {\r\n $lists[$k]['gameName'] = $gamenames[$list['gid']];\r\n }\r\n $channels = array();\r\n foreach ($cids as $cid) {\r\n if ($jchannelcids[$list['aid']]) {\r\n if (in_array($cid, $jchannelcids[$list['aid']])) {\r\n if ($jchannelgamecids[$list['gid']]) {\r\n if (in_array($cid, $jchannelgamecids[$list['gid']])) {\r\n if ($jchannelisblocks[$list['aid']][$cid] == '1') {\r\n $channels[$cid] = \"&#10006\";\r\n } else {\r\n $channels[$cid] = \"&#10004\";\r\n }\r\n } else {\r\n $channels[$cid] = \"&#10006\";\r\n }\r\n } else {\r\n $channels[$cid] = \"&#10006\";\r\n }\r\n } else {\r\n $channels[$cid] = \"&#10006\";\r\n }\r\n }\r\n }\r\n $lists[$k]['channels'] = $channels;\r\n if ($list['atype'] == 5) {\r\n if ($list['isblock'] == '1') {\r\n $lists[$k]['op'] = '<a 
href=\"javascript:;\" class=\"status\" data-aid=\"' . $list['aid'] . '\" data-isblock=\"0\">ๅฏ็”จ</a><br>';\r\n } else {\r\n $lists[$k]['op'] = '<a href=\"javascript:;\" class=\"status\" data-aid=\"' . $list['aid'] . '\" data-isblock=\"1\">็ฆ็”จ</a><br>';\r\n }\r\n } else {\r\n $lists[$k]['op'] = '<a href=\"?c=jchanneldata&a=aedit&aid=' . $list['aid'] . '&atype=' . $list['atype'] . $constr.'\">็ผ–่พ‘</a><br>';\r\n }\r\n }\r\n }\r\n return $lists;\r\n }\r\n\r\n}" }, { "alpha_fraction": 0.5880281925201416, "alphanum_fraction": 0.5880281925201416, "avg_line_length": 21.83333396911621, "blob_id": "d9cb11891b02e464fdadb29e02ad58ad4e15526d", "content_id": "bd01e2d5545983964f651c02bf36d459c2a1123b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "PHP", "length_bytes": 284, "license_type": "no_license", "max_line_length": 56, "num_lines": 12, "path": "/controller/index.class.php", "repo_name": "liu67224657/joyme-channel", "src_encoding": "UTF-8", "text": "<?php\r\nif( !defined('IN') ) die('bad request');\r\ninclude_once( AROOT . 'controller'.DS.'app.class.php' );\r\n\r\nclass indexController extends appController{\r\n\r\n function index(){\r\n\r\n $data['username'] = $_COOKIE['joume_username'];\r\n render($data,'web','home');\r\n }\r\n}" }, { "alpha_fraction": 0.4432304799556732, "alphanum_fraction": 0.4497268497943878, "avg_line_length": 31.20098114013672, "blob_id": "0c6eb713d6808ef80021f60c525ad58fb824d4ae", "content_id": "7fa495ef80085c2d0ea7f90700ef871dd2e973d1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "PHP", "length_bytes": 7041, "license_type": "no_license", "max_line_length": 106, "num_lines": 204, "path": "/controller/douyuhezuo.class.php", "repo_name": "liu67224657/joyme-channel", "src_encoding": "UTF-8", "text": "<?php if (!defined('IN')) die('bad request');\r\n\r\n/**\r\n * Description of douyuHezuoModel\r\n * @author ISLANDER\r\n * @datetime 2016-9-19 18:58:03\r\n */\r\n\r\ninclude_once(AROOT . 'controller' . DS . 'app.class.php');\r\nuse Joyme\\core\\Request;\r\nuse Joyme\\net\\Curl;\r\nuse Joyme\\qiniu\\Qiniu_Utils;\r\n\r\nclass douyuhezuoController extends appController\r\n{\r\n\r\n public function __construct()\r\n {\r\n parent::__construct();\r\n }\r\n \r\n public function index(){\r\n $conditions = $where = array();\r\n //ๆธธๆˆๅ็งฐ\r\n $nickname = Request::getParam('nickname', '');\r\n if($nickname){\r\n $where['nickname'] = array('like', '%' . $nickname . '%');\r\n $conditions['nickname'] = $nickname;\r\n }\r\n //wikikey\r\n $wikikey = Request::getParam('wikikey', '');\r\n if($wikikey){\r\n $where['wikikey'] = array('like', '%' . $wikikey . 
'%');\r\n $conditions['wikikey'] = $wikikey;\r\n }\r\n $douyuHezuoModel = new douyuHezuoModel();\r\n $total = $douyuHezuoModel->getTotal($where);\r\n $psize = 20; //ๆฏ้กตๆ˜พ็คบๆกๆ•ฐ\r\n $pno = Request::get('pb_page', 1);\r\n $skip = 0;\r\n if ($pno) {\r\n $skip = (intval($pno) - 1) * $psize;\r\n }\r\n $lists = $douyuHezuoModel->select(\"*\", $where, 'ctime DESC', $psize, $skip);//getListData($where);\r\n $listdata = array();\r\n foreach($lists as $key=>$val){\r\n $info = $this->getRoomInfo($val['roomid']);\r\n $listdata[$key] = $val;\r\n if($info && $info['error'] == 0){\r\n $listdata[$key]['roomstatus'] = $info['data']['room_status'];\r\n }elseif($info && $info['error'] == 101){\r\n $listdata[$key]['roomstatus'] = 'ๆˆฟ้—ดไธๅญ˜ๅœจ';\r\n }elseif($info && $info['error'] == 102){\r\n $listdata[$key]['roomstatus'] = 'ๆˆฟ้—ดๆœชๆฟ€ๆดป';\r\n }elseif($info && $info['error'] == 103){\r\n $listdata[$key]['roomstatus'] = 'ๆˆฟ้—ด่Žทๅ–้”™่ฏฏ';\r\n }\r\n }\r\n \r\n $page = new pageModel();//M('pageModel');\r\n \r\n $page->mainPage(array('total' => $total,'perpage'=>$psize,'nowindex'=>$pno,'pagebarnum'=>10));\r\n $phtml = $page->show(2, $conditions);\r\n $data = array(\r\n 'nickname' => $nickname,\r\n 'wikikey' => $wikikey,\r\n 'total' => $total,\r\n 'list' => $listdata,\r\n 'phtml' => $phtml,\r\n 'pno' => $pno\r\n );\r\n render($data, 'web', 'zhibo/douyuhezuolist');\r\n }\r\n\r\n public function add()\r\n {\r\n $douyuHezuoModel = new douyuHezuoModel();\r\n $data = array();\r\n render($data, 'web', 'zhibo/douyuhezuoadd');\r\n }\r\n\r\n public function addPro()\r\n {\r\n $douyuHezuoModel = new douyuHezuoModel();\r\n $data = $this->getPostData();\r\n $data['userstatus'] = 1;\r\n $ret = $douyuHezuoModel->insertData($data);\r\n if ($ret) {\r\n addlog('joymewiki', 'add', 'ๆทปๅŠ ไบ†idไธบ'.$ret.'็š„ๆ–—้ฑผๅˆไฝœๆ•ฐๆฎ');\r\n echo 'ๆ“ไฝœๆˆๅŠŸ <a href=\"?c=douyuhezuo&a=index\">่ฟ”ๅ›žๅˆ—่กจ</a>';\r\n } else {\r\n echo 'ๆ“ไฝœๅคฑ่ดฅ <a href=\"?c=douyuhezuo&a=index\">่ฟ”ๅ›žๅˆ—่กจ</a>';\r\n }\r\n }\r\n\r\n public function edit()\r\n {\r\n $id = Request::get('id', 0);\r\n $douyuHezuoModel = new douyuHezuoModel();\r\n $item = $douyuHezuoModel->getDataById($id);\r\n $pno = Request::get('pno', 1);\r\n $data = array(\r\n 'item' => $item,\r\n 'pno' => $pno\r\n );\r\n render($data, 'web', 'zhibo/douyuhezuoedit');\r\n }\r\n\r\n public function editPro()\r\n {\r\n $id = Request::post('id');\r\n if(is_numeric($id)){\r\n $douyuHezuoModel = new douyuHezuoModel();\r\n $data = $this->getPostData();\r\n $data['id'] = $id;\r\n $errno = $douyuHezuoModel->updateData($data);\r\n if ($errno) {\r\n addlog('joymewiki', 'update', 'ไฟฎๆ”นไบ†idไธบ'.$id.'็š„ๆ–—้ฑผๅˆไฝœๆ•ฐๆฎ');\r\n echo 'ๆ“ไฝœๆˆๅŠŸ <a href=\"?c=douyuhezuo&a=index\">่ฟ”ๅ›žๅˆ—่กจ</a>';\r\n } else {\r\n echo 'ๆ“ไฝœๅคฑ่ดฅ <a href=\"?c=douyuhezuo&a=index\">่ฟ”ๅ›žๅˆ—่กจ</a>';\r\n }\r\n }else{\r\n echo 'ๅ‚ๆ•ฐ้”™่ฏฏ <a href=\"?c=douyuhezuo&a=index\">่ฟ”ๅ›žๅˆ—่กจ</a>';\r\n }\r\n }\r\n\r\n public function del()\r\n {\r\n $id = (int)Request::post('id');\r\n if ($id) {\r\n $douyuHezuoModel = new douyuHezuoModel();\r\n $errno = $douyuHezuoModel->delete(array(\r\n 'id' => $id\r\n ));\r\n if ($errno) {\r\n addlog('joymewiki', 'delete', 'ๆทปๅŠ ไบ†idไธบ'.$id.'็š„ๆ–—้ฑผๅˆไฝœๆ•ฐๆฎ');\r\n $res = array('rs' => 1, 'msg' => 'ๅˆ ้™คๆˆๅŠŸ');\r\n } else {\r\n $res = array('rs' => 2, 'msg' => 'ๅˆ ้™คๅคฑ่ดฅ');\r\n }\r\n } else {\r\n $res = array('rs' => 2, 'msg' => 'ๅ‚ๆ•ฐ้”™่ฏฏ');\r\n }\r\n\r\n jsonEncode($res);\r\n }\r\n \r\n public function updateMsg(){\r\n $id = 
(int)Request::post('id');\r\n $rank = Request::post('rank');\r\n $userstatus = Request::post('userstatus');\r\n $islock = Request::post('islock');\r\n $data = array();\r\n if($rank !== null){\r\n $data = array('rank'=>intval($rank));\r\n }else if($userstatus !== null){\r\n $data = array('userstatus'=>intval($userstatus));\r\n }else if($islock !== null){\r\n $data = array('islock'=>intval($islock));\r\n }\r\n \r\n if ($id) {\r\n $douyuHezuoModel = new douyuHezuoModel();\r\n $errno = $douyuHezuoModel->update($data, array(\r\n 'id' => $id\r\n ));\r\n if ($errno) {\r\n addlog('joymewiki', 'update', 'ๆทปๅŠ ไบ†idไธบ'.$id.'็š„ๆ–—้ฑผๅˆไฝœๆ•ฐๆฎ');\r\n $res = array('rs' => 1, 'msg' => 'ไฟฎๆ”นๆˆๅŠŸ');\r\n } else {\r\n $res = array('rs' => 2, 'msg' => 'ไฟฎๆ”นๅคฑ่ดฅ');\r\n }\r\n } else {\r\n $res = array('rs' => 2, 'msg' => 'ๅ‚ๆ•ฐ้”™่ฏฏ');\r\n }\r\n\r\n jsonEncode($res);\r\n }\r\n \r\n public function getRoomInfo($roomid){\r\n $roomid = intval($roomid);\r\n if($roomid == 0) return false;\r\n $curl = new Curl();\r\n $json = $curl->Get('http://open.douyucdn.cn/api/RoomApi/room/'.$roomid);\r\n $data = json_decode($json, true);\r\n return $data;\r\n }\r\n\r\n private function getPostData()\r\n {\r\n $data = array(\r\n 'wikikey' => Request::post('wikikey', ''),\r\n 'userid' => Request::post('userid', 0),\r\n 'roomid' => Request::post('roomid', 0),\r\n 'nickname' => Request::post('nickname', ''),\r\n 'qq' => Request::post('qq', 0),\r\n 'cellphone' => Request::post('cellphone', ''),\r\n 'userdesc' => Request::post('userdesc', ''),\r\n 'ctime' => time()\r\n );\r\n return $data;\r\n }\r\n}\r\n" }, { "alpha_fraction": 0.5587288737297058, "alphanum_fraction": 0.5905068516731262, "avg_line_length": 33.485713958740234, "blob_id": "824251099028dc3647a1c6b873093481bbdf302f", "content_id": "4ce785cd0eaa51496db678f8075503fc827460d6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "PHP", "length_bytes": 2594, "license_type": "no_license", "max_line_length": 86, "num_lines": 70, "path": "/config/app.config.php", "repo_name": "liu67224657/joyme-channel", "src_encoding": "UTF-8", "text": "<?php\r\n$GLOBALS['config']['site_name'] = 'LazyPHP3';\r\n$GLOBALS['config']['site_domain'] = 'lazyphp3.sinaapp.com';\r\n\r\n\r\n//่ฎพ็ฝฎๆ—ถๅŒบ\r\ndate_default_timezone_set('PRC');\r\nerror_reporting(E_ALL);\r\n//ไธƒ็‰›ๅ›พ็‰‡ๅœฐๅ€\r\n\r\n//้œ€่ฆ่ทณ่ฝฌ็š„็Žฏๅขƒ\r\n$strpos = explode(\".\",$_SERVER['SERVER_NAME']);\r\n$GLOBALS['domain'] = $strpos[2];\r\n\r\n//่ฎพ็ฝฎsourceๆŽฅๅฃ็ผ“ๅญ˜ๆœ‰ๆ•ˆๆœŸ\r\n$GLOBALS['config']['source']['expiration'] = 3600;\r\n$GLOBALS['config']['redis']['password'] = '';\r\n\r\nif($GLOBALS['domain']=='dev'){\r\n $wgQiNiuPath = 'joymetest.joyme.com';\r\n $pathkey ='dev';\r\n $secrectkey = '7ejw!9d#';\r\n $GLOBALS['config']['qiniu']['bucket'] = 'joymetest';\r\n $GLOBALS['config']['redis']['host'] = '172.16.75.32';\r\n $GLOBALS['config']['redis']['port'] = 6379;\r\n $GLOBALS['config']['source']['expiration'] = 1;\r\n}\r\nif($GLOBALS['domain']=='alpha'){\r\n $wgQiNiuPath = 'joymetest.qiniudn.com';\r\n $pathkey ='alpha';\r\n $secrectkey = '8F5&JL3';\r\n $GLOBALS['config']['qiniu']['bucket'] = 'joymetest';\r\n $GLOBALS['config']['redis']['host'] = '172.16.75.32';\r\n $GLOBALS['config']['redis']['port'] = 6379;\r\n $GLOBALS['config']['source']['expiration'] = 1;\r\n}\r\nif($GLOBALS['domain']=='beta'){\r\n $wgQiNiuPath = 'joymepic.joyme.com';\r\n $pathkey ='beta';\r\n $secrectkey = '#4g%klwe';\r\n $GLOBALS['config']['qiniu']['bucket'] = 'joymepic';\r\n $GLOBALS['config']['redis']['host'] 
= 'r-2ze25cf88632c7b4.redis.rds.aliyuncs.com';\r\n $GLOBALS['config']['redis']['port'] = 6379;\r\n $GLOBALS['config']['redis']['password'] = 'FHW2n2Gh';\r\n $GLOBALS['config']['source']['expiration'] = 60;\r\n}\r\nif($GLOBALS['domain']=='com'){\r\n $wgQiNiuPath = 'joymepic.joyme.com';\r\n $pathkey ='prod';\r\n $secrectkey = 'yh87&sw2';\r\n $GLOBALS['config']['qiniu']['bucket'] = 'joymepic';\r\n $GLOBALS['config']['redis']['host'] = 'r-2zef16817404a374.redis.rds.aliyuncs.com';\r\n $GLOBALS['config']['redis']['port'] = 6379;\r\n $GLOBALS['config']['redis']['password'] = 'zIGMyY12';\r\n}\r\n//้…็ฝฎๅŠ ่ฝฝPHPๅ…ฌๅ…ฑๅบ“็š„ๅ…ทไฝ“่ทฏๅพ„\r\n$GLOBALS['libPath'] = '/opt/www/joymephplib/'.$pathkey.'/phplib.php';\r\n//$GLOBALS['libPath'] = '/opt/www/joymephplib/phplib.php';\r\n//$GLOBALS['libPath'] = 'D:/wamp/www/joymephplib/trunk/phplib.php';\r\n$GLOBALS['static_url'] = 'http://static.joyme.'.$GLOBALS['domain'];\r\n$GLOBALS['config']['staticurl'] = 'http://static.joyme.'.$GLOBALS['domain'];\r\n\r\n//redisๅ‰็ผ€\r\n$GLOBALS['config']['redis']['prefix'] = 'channel';\r\n\r\n//ๆฃ€ๆต‹ๆ˜ฏๅฆ้œ€่ฆ็™ปๅฝ•\r\n$GLOBALS['config']['checklogin'] = true;\r\n\r\n//ๅฎšไน‰็พŽๅ›พๆ ็›ฎID\r\n$GLOBALS['config']['meituID'] = 1334;\r\n\r\n" }, { "alpha_fraction": 0.633152186870575, "alphanum_fraction": 0.6426630616188049, "avg_line_length": 22.74193572998047, "blob_id": "00dfa2cd4fe79d36b0440aec6bdb63605f6a2228", "content_id": "82c7d002e073324a5d92c3f3338ac9793b2ac13f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1480, "license_type": "no_license", "max_line_length": 124, "num_lines": 62, "path": "/public/sh/add_nginx_rule.py", "repo_name": "liu67224657/joyme-channel", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n# File add_nginx_rule.py\n\nimport time\nimport sys\nimport os\nimport commands\n\n\ndomain=sys.argv[1]\nenv=sys.argv[2]\ntext='''\nlocation /%s/{\nif ($uri = '/%s/' ) {\nrewrite . /%s/้ฆ–้กต permanent;\n}\nif ($uri != '/%s/' ) {\nrewrite /%s/(.*)$ /$1 break;\nproxy_pass http://%s.joyme.%s;\n}\n}\n''' %(domain,domain,domain,domain,domain,domain,env)\n\ntext2='''\nlocation /%s/{\nif ($uri = '/%s/' ) {\nrewrite . 
/%s/้ฆ–้กต permanent;\n}\nproxy_set_header wikitype mwiki;\nif ($uri != '/%s/' ) {\nrewrite /%s/(.*)$ /$1 break;\nproxy_pass http://%s.joyme.%s;\n}\n}\n''' %(domain,domain,domain,domain,domain,domain,env)\n\n\n#curtime=time.strftime('%Y-%m-%d-%H:%M',time.localtime(time.time()))\n#subprocess.Popen('cp rule_ugc_rules rule_ugc_rules_%s' %curtime,shell=True,stdout=subprocess.PIPE,stderr=subprocess.STDOUT)\n#os.popen('cp rule_ugc_rules rule_ugc_rules_%s' %curtime)\n\nif env == \"beta\":\n file=open('/usr/local/nginx/conf/vhost/rule_ugc_rules','a')\n file.write(text)\n file.close()\n\n file2=open('/usr/local/nginx/conf/vhost/rule_ugc_rules_mobile','a')\n file2.write(text2)\n file2.close()\nelse:\n file=open('/usr/local/nginx/conf/rule_ugc_rules','a')\n file.write(text)\n file.close()\n\n file2=open('/usr/local/nginx/conf/rule_ugc_rules_mobile','a')\n file2.write(text2)\n file2.close()\n \n(status,output)=commands.getstatusoutput('/usr/bin/sudo /usr/local/nginx/sbin/nginx -t -c /usr/local/nginx/conf/nginx.conf')\n\nprint status\n" }, { "alpha_fraction": 0.4564681053161621, "alphanum_fraction": 0.47009751200675964, "avg_line_length": 33.32258224487305, "blob_id": "e9d55f272444db8f8807ccdd3b6e153ca0d21b0c", "content_id": "15467a292dc06c123b76fff60d943be34e80ebcc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "PHP", "length_bytes": 8831, "license_type": "no_license", "max_line_length": 155, "num_lines": 248, "path": "/controller/job.class.php", "repo_name": "liu67224657/joyme-channel", "src_encoding": "UTF-8", "text": "<?php\n\nif (!defined('IN')) die('bad request');\n/**\n * Created by PhpStorm.\n * User: pengzhang\n * Date: 2017/4/12\n * Time: ไธ‹ๅˆ4:05\n */\ninclude_once(CROOT . 'controller' . DS . 'core.class.php');\n\nuse Joyme\\core\\Request;\nuse Joyme\\core\\Log;\n\nclass jobController extends coreController\n{\n public function index()\n {\n $this->jobprint('ๅผ€ๅง‹ๅฏผๅ…ฅ');\n //ๆŸฅ่ฏขๆธ ้“ไฟกๆฏ\n $this->jobprint('ๅผ€ๅง‹ๆŸฅ่ฏขๆธ ้“ไฟกๆฏ');\n $jchannelmodel = new jChannelModel();\n $tplArr = $jchannelmodel->select(\"cid,template,size\", array());\n\n $tplCidArr = array_column($tplArr, 'cid');\n $tplArr = array_combine($tplCidArr, $tplArr);\n\n //ๆŸฅ่ฏขๆ ็›ฎไฟกๆฏ\n $this->jobprint('ๅผ€ๅง‹ๆŸฅ่ฏขๆ ็›ฎไฟกๆฏ');\n $lanmuArr = array(1339,417,1334);\n $arctypeModel = M('cmsArctypeModel');\n $cidlist = $arctypeModel->select('id,typedir,namerule,channelcids',array('id'=>array('in',$lanmuArr)));\n\n $cids = array_column($cidlist,'id');\n $cidlist = array_combine($cids,$cidlist);\n $oknum = $failnum = 0;\n\n //ๆŸฅ่ฏขๆ‰€ๅฑž็š„ๆ ็›ฎๅˆ—่กจ\n $channelGameModel = new jChannelGameModel();\n $channelgameRs = $channelGameModel->select(\"cid,gid\", array('cid' => 6, 'datatype' => 2));\n\n $channelgameArr = array();\n foreach($channelgameRs as $v){\n $channelgameArr[$v['cid']][] = $v['gid'];\n }\n\n /*\n //ๆŸฅ่ฏขๆธธๆˆๅบ“็š„ๆ–‡็ซ ๅˆ—่กจ\n $this->jobprint('ๅผ€ๅง‹ๆŸฅ่ฏขๆธธๆˆๅบ“ๆ–‡็ซ ');\n $atype = 0;\n $cmsArcModel = M('cmsArcModel');\n $sql = $this->getsql();\n $taglistrs = $cmsArcModel->excuteSql($sql);\n\n $aids = array_column($taglistrs,'aid');\n $taglist = array_combine($aids,$taglistrs);\n\n //ๆŸฅ่ฏขcmsๅŸบ็ก€ไฟกๆฏ\n $cmsDataList = $cmsArcModel->select('*',array('id'=>array('in',$aids),'typeid'=>array('in',$lanmuArr)),'',10000);\n\n foreach($cmsDataList as $v){\n $gid = empty($taglist[$v['id']]['gid'])?'':$taglist[$v['id']]['gid'];\n $cid = 5;\n $extra = json_encode($v);\n $columninfo = $cidlist[$v['typeid']];\n $aurl = 
$this->getUrl($v['id'],$v['senddate'],$columninfo);\n $rs = $this->savedata($v['id'], $cid, $gid, $atype, $extra, $v['pubdate'], $aurl, $tplArr,$channelgameArr);\n if($rs){\n $oknum+=1;\n }else{\n $failnum+=1;\n $this->jobprint('ID: '.$v['id'].'ๅฏผๅ…ฅๅคฑ่ดฅ');\n }\n }\n $this->jobprint('ๅฏผๅ…ฅๆธธๆˆๅบ“ๆ–‡็ซ ๅฎŒๆฏ•๏ผŒๆˆๅŠŸไบ†'.$oknum.'ๆก๏ผŒๅคฑ่ดฅไบ†'.$failnum.'ๆกใ€‚');\n */\n //ๆŸฅ่ฏขๆ ็›ฎ็š„ๆ–‡็ซ ๅˆ—่กจ\n $atype = 0;\n $cmsArcModel = M('cmsArcModel');\n $this->jobprint('ๅผ€ๅง‹ๆŸฅ่ฏขๆ ็›ฎๆ–‡็ซ ');\n $sql1 = $this->getlanmusql(1339);\n $sql2 = $this->getlanmusql(417);\n $sql3 = $this->getlanmusql(1334);\n $taglist1 = $cmsArcModel->excuteSql($sql1);\n $taglist2 = $cmsArcModel->excuteSql($sql2);\n $taglist3 = $cmsArcModel->excuteSql($sql3);\n\n $taglistall = array_merge($taglist1,$taglist2,$taglist3);\n\n $aids = array_column($taglistall,'aid');\n\n //ๆŸฅ่ฏขcmsๅŸบ็ก€ไฟกๆฏ\n $cmsDataList = $cmsArcModel->select('*',array('id'=>array('in',$aids)),'',10000);\n\n foreach($cmsDataList as $v){\n $gid = '';\n $cid = 6;\n $extra = json_encode($v);\n $columninfo = $cidlist[$v['typeid']];\n $aurl = $this->getUrl($v['id'],$v['senddate'],$columninfo);\n $rs = $this->savedata($v['id'], $cid, $gid, $atype, $extra, $v['pubdate'], $aurl, $tplArr,$channelgameArr);\n if($rs){\n $oknum+=1;\n }else{\n $failnum+=1;\n $this->jobprint('ID: '.$v['id'].'ๅฏผๅ…ฅๅคฑ่ดฅ');\n }\n }\n\n $this->jobprint('ๅ…จ้ƒจๅฏผๅ…ฅๅฎŒๆฏ•๏ผŒๆˆๅŠŸไบ†'.$oknum.'ๆก๏ผŒๅคฑ่ดฅไบ†'.$failnum.'ๆกใ€‚');\n\n\n }\n public function getUrl($aid, $pubdate,$columninfo){\n if(!empty($columninfo['namerule']) && !empty($columninfo['typedir'])){\n $namerule = $columninfo['namerule'];\n $typedir = $columninfo['typedir'];\n }else{\n return '';\n }\n $url = $namerule;\n $Y = date('Y', $pubdate);\n $M = date('m', $pubdate);\n $D = date('d', $pubdate);\n\n $url = str_replace('{typedir}', $typedir, $url);\n $url = str_replace('{Y}', $Y, $url);\n $url = str_replace('{M}', $M, $url);\n $url = str_replace('{D}', $D, $url);\n $url = str_replace('{aid}', $aid, $url);\n $url = str_replace('{cmspath}', '', $url);\n return $url;\n }\n public function jobprint($str){\n echo $str.\" \\n\\n\";\n }\n public function getsql(){\n //tagid ๆธธๆˆๅบ“ID\n $sql = 'SELECT r1.dede_archives_id as aid,r1.tagid as gid\nFROM joymeapp.tag_dede_archives r1 \nWHERE tagid in(72045,100665) and relation_type=1 and dede_archives_pubdate>=1488297600000;\n';\n return $sql;\n }\n public function getlanmusql($typeids){\n $sql = 'select id as aid,typeid as gid from dede_archives where arcrank>-1 and ismake=1 and typeid in( '.$typeids.' ) order by id desc limit 800;';\n return $sql;\n }\n\n //ไธŠๆŠฅcms\n public function savedata($aid, $cid, $gid, $atype, $extra, $pubdate, $aurl, $tplArr,$channelgameArr)\n {\n $source = 1;\n $extraArr = json_decode($extra, true);\n\n $litpic = empty($extraArr['litpic']) ? 
'' : $extraArr['litpic'];\n\n $sourcedata = array(\n 'aid' => $aid,\n 'data' => $extra,\n 'source' => $source\n );\n\n\n $sourceDataModel = new jSourceDataModel();\n $sourceDataRs = $sourceDataModel->getRowData(array('aid' => $aid, 'source' => $source));\n if ($sourceDataRs) {\n $rs = $sourceDataModel->update(array('data' => $extra), array('aid' => $aid, 'source' => $source));\n } else {\n $rs = $sourceDataModel->addData($sourcedata);\n }\n\n //ๅˆคๆ–ญๆ˜ฏๅฆๅˆ ้™ค\n $isblock = 0;\n\n\n if ($rs !== false) {\n $channelDataModel = new jChannelDataModel();\n $url = '';\n $channeldata = array(\n 'cid' => '',\n 'gid' => $gid,\n 'aid' => $aid,\n 'atype' => $atype,\n 'litpic' => $litpic,\n 'source' => $source,\n 'url' => $url,\n 'pubdate' => $pubdate,\n 'isblock' => $isblock\n );\n $cidArr = explode(',', $cid);\n\n\n foreach ($cidArr as $v) {\n if ($tplArr[$v]['template'] == 'pc') {\n $urlpre = 'http://www.joyme.' . $GLOBALS['domain'];\n } else if ($tplArr[$v]['template'] == 'wap') {\n $urlpre = 'http://m.joyme.' . $GLOBALS['domain'];\n } else {\n $urlpre = 'http://www.joyme.' . $GLOBALS['domain'] . '/' . $tplArr[$v]['template'];\n }\n\n if ($atype == 5) {\n $channeldata['url'] = $aurl;\n } else {\n $channeldata['url'] = $urlpre . str_replace('article/pc/', '', $aurl);\n }\n $size = json_decode($tplArr[$v]['size'], true);\n if ($size && !empty($size[$atype])) {\n $channeldata['litpic'] = $litpic . '?imageView2/1/w/' . $size[$atype]['w'] . '/h/' . $size[$atype]['h'];\n }\n\n $channeldata['cid'] = $v;\n\n $typeid = empty($extraArr['typeid']) ? 0 : $extraArr['typeid'];\n\n //ๅˆคๆ–ญๆธ ้“ๆ˜ฏๅฆๅผ€ๅฏๆธธๆˆๆˆ–ๆ ็›ฎ\n if ($gid) {\n $channeldata['datatype'] = 1;\n $channeldata['gid'] = $gid;\n $channelDataModel->addData($channeldata);\n }else if ($channelgameArr[$v] && in_array($typeid, $channelgameArr[$v])) {\n $channeldata['datatype'] = 2;\n\n $channeldata['gid'] = $typeid;\n $channelDataModel->addData($channeldata);\n\n $typeid2 = empty($extraArr['typeid2']) ? '' : $extraArr['typeid2'];\n $typeid2Arr = explode(',', $typeid2);\n if (!empty($typeid2Arr)) {\n foreach ($typeid2Arr as $v2) {\n if (in_array($v2, $channelgameArr[$v])) {\n $channeldata['gid'] = $v2;\n $channelDataModel->addData($channeldata);\n }\n }\n\n }\n\n }\n }\n return true;\n } else {\n return false;\n }\n }\n\n}" }, { "alpha_fraction": 0.5349675416946411, "alphanum_fraction": 0.5508291125297546, "avg_line_length": 24.673076629638672, "blob_id": "ac42f3c5b1a0c29955bfb1c4a424eb3f9d0a200d", "content_id": "1ce9952ab7b6266a5dc7502514a40ad6f8359bd5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "PHP", "length_bytes": 1525, "license_type": "no_license", "max_line_length": 101, "num_lines": 52, "path": "/controller/app.class.php", "repo_name": "liu67224657/joyme-channel", "src_encoding": "UTF-8", "text": "<?php\r\nif (!defined('IN')) die('bad request');\r\ninclude_once(CROOT . 'controller' . DS . 
'core.class.php');\r\n/**\r\n * Created by JetBrains PhpStorm.\r\n * User: xinshi\r\n * Date: 15-4-15\r\n * Time: ไธ‹ๅˆ2:51\r\n * To change this template use File | Settings | File Templates.\r\n * //่ฟ™้‡Œๆ˜ฏๅฏนไธ€ไบ›ๅบ”็”จ็š„ๅ›บๅฎšๅˆคๆ–ญ๏ผŒๅฆ‚ๆžœไฝ ้œ€่ฆ่ฟ™ไบ›ๅˆคๆ–ญ๏ผŒๅˆ™่ฏท็ปงๆ‰ฟappController\r\n */\r\nuse Joyme\\core\\JoymeToolsUser;\r\n\r\nclass appController extends coreController\r\n{\r\n function __construct()\r\n {\r\n // ่ฝฝๅ…ฅ้ป˜่ฎค็š„\r\n if ($GLOBALS['config']['checklogin']) {\r\n $this->loginCheck();\r\n }\r\n parent::__construct();\r\n }\r\n\r\n //ๅˆคๆ–ญๅˆคๆ–ญ็™ปๅฝ•\r\n function loginCheck()\r\n {\r\n\r\n //ๅฟ…้กปๆŒ‡ๅฎšๅ›ž่ทณๅˆฐ joymeๅŸŸๅไธ‹๏ผŒๅฆๅˆ™่Žทๅ–ไธๅˆฐcookie๏ผ\r\n JoymeToolsUser::init($GLOBALS['domain'], $this->redirect_url());\r\n JoymeToolsUser::check(101, 102);\r\n if (!$_COOKIE['joume_username']) {\r\n setcookie(\"joume_username\", JoymeToolsUser::getUsername(), time() + 3600 * 24 * 7);\r\n }\r\n }\r\n\r\n //้€€ๅ‡บ\r\n function logout()\r\n {\r\n //ๅ…ทไฝ“้€€ๅ‡บ่ฐƒ็”จJavaๆŽฅๅฃ\r\n setcookie(\"joume_username\", $_COOKIE['joume_username'], time() - 1);\r\n $url = \"http://tools.joyme.\" . $GLOBALS['domain'] . \"/logout?reurl=\" . $this->redirect_url();\r\n header(\"location:$url\");\r\n }\r\n\r\n function redirect_url()\r\n {\r\n\r\n $redirect_url = \"http://\" . $_SERVER['SERVER_NAME'];\r\n return $redirect_url;\r\n }\r\n}\r\n" }, { "alpha_fraction": 0.5455999970436096, "alphanum_fraction": 0.5600000023841858, "avg_line_length": 24, "blob_id": "6989b4079354f39801a65be0f767563706d82da8", "content_id": "13c561b867d82efbd8663d0e077bbd2d64c13628", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "PHP", "length_bytes": 627, "license_type": "no_license", "max_line_length": 60, "num_lines": 25, "path": "/runjob.php", "repo_name": "liu67224657/joyme-channel", "src_encoding": "UTF-8", "text": "#!/usr/bin/env php\n<?php\n/* lp app root */\n// โ†‘____ for aoi . Do Not Delete it.\n/**** load lp framework ***/\ndefine( 'DS' , DIRECTORY_SEPARATOR );\n\ndefine( 'AROOT' , dirname( __FILE__ ) . DS );\n\nif(empty($argv[1])){\n echo 'no argv[1] controller';exit;\n}\nif(empty($argv[2])){\n echo 'no argv[2] function';exit;\n}\nif(empty($argv[3])){\n echo 'no argv[3] env';exit;\n}\n$_SERVER['SERVER_NAME'] = \"joymewiki.joyme.\".$argv[3];\n$_REQUEST['c'] = $argv[1];\n$_REQUEST['a'] = $argv[2];\n\n//ini_set('include_path', dirname( __FILE__ ) . DS .'_lp' );\ninclude_once( '_lp'.DS .'lp.init.php' );\n/**** lp framework init finished ***/\n" }, { "alpha_fraction": 0.4126879572868347, "alphanum_fraction": 0.4200124144554138, "avg_line_length": 38.00930404663086, "blob_id": "caf269a9d8ef3c3306c6dea6487c791970161845", "content_id": "ab31c46f8c9cdb2c1f078698af368702f7b64078", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "PHP", "length_bytes": 26442, "license_type": "no_license", "max_line_length": 332, "num_lines": 645, "path": "/controller/jchanneldata.class.php", "repo_name": "liu67224657/joyme-channel", "src_encoding": "UTF-8", "text": "<?php\r\n/**\r\n * Description:ๆธ ้“็ฎก็†ๆ–‡็ซ ็›ธๅ…ณ\r\n * Author: gradydong\r\n * Date: 2017/4/19\r\n * Time: 16:02\r\n * Copyright: Joyme.com\r\n */\r\nif (!defined('IN')) die('bad request');\r\ninclude_once(AROOT . 'controller' . DS . 
'app.class.php');\r\nuse Joyme\\core\\Request;\r\nuse Joyme\\qiniu\\Qiniu_Utils;\r\nuse Joyme\\net\\Curl;\r\n\r\nclass jchanneldataController extends appController\r\n{\r\n public function __construct()\r\n {\r\n parent::__construct();\r\n }\r\n\r\n //ๆ–‡็ซ ๅˆ—่กจ\r\n public function alist()\r\n {\r\n $conditions = array();\r\n $where = array('source' => 1);\r\n $where['isblock'] = 0;\r\n //ๅˆ›ๅปบๆ—ถ้—ดๅผ€ๅง‹\r\n $begintime = Request::getParam('begintime');\r\n //ๅˆ›ๅปบๆ—ถ้—ด็ป“ๆŸ\r\n $endtime = Request::getParam('endtime');\r\n if ($begintime && empty($endtime)) {\r\n $gbegintime = strtotime($begintime);\r\n $where['pubdate'] = array('gt', $gbegintime);\r\n $conditions['begintime'] = $begintime;\r\n } elseif (empty($begintime) && $endtime) {\r\n $gendtime = strtotime(\"$endtime +1 days\");\r\n $where['pubdate'] = array('lt', $gendtime);\r\n $conditions['endtime'] = $endtime;\r\n } elseif ($begintime && $endtime) {\r\n $gbegintime = strtotime($begintime);\r\n $gendtime = strtotime(\"$endtime +1 days\");\r\n $where['pubdate'] = array(array('gt',$gbegintime),array('lt',$gendtime),'and');\r\n $conditions['begintime'] = $begintime;\r\n $conditions['endtime'] = $endtime;\r\n }\r\n\r\n $game_cids = '';\r\n $jchannelmodel = new jChannelModel();\r\n $jchannellists = $jchannelmodel->select(\"cid,ckey,channel_name\", array(\r\n 'isshow'=>1\r\n ), 'cid ASC', '', '');\r\n //ๅˆไฝœๆธ ้“\r\n $cid = Request::getParam('cid');\r\n if ($cid) {\r\n $game_cids = $cid;\r\n $where['cid'] = array('in', $cid);\r\n $conditions['pcid'] = implode(\",\",$cid);\r\n if ($jchannellists) {\r\n foreach ($jchannellists as $k => $jchannellist) {\r\n if (in_array($jchannellist['cid'], $cid)) {\r\n $jchannellists[$k]['checked'] = 'checked=\"checked\"';\r\n } else {\r\n $jchannellists[$k]['checked'] = '';\r\n }\r\n }\r\n }\r\n } else {\r\n $pcid = Request::getParam('pcid');\r\n if($pcid){\r\n $cid = explode(',',$pcid);\r\n $game_cids = $cid;\r\n $where['cid'] = array('in', $cid);\r\n $conditions['pcid'] = implode(\",\",$cid);\r\n if ($jchannellists) {\r\n foreach ($jchannellists as $k => $jchannellist) {\r\n if (in_array($jchannellist['cid'], $cid)) {\r\n $jchannellists[$k]['checked'] = 'checked=\"checked\"';\r\n } else {\r\n $jchannellists[$k]['checked'] = '';\r\n }\r\n }\r\n }\r\n }else{\r\n $cids = array_column($jchannellists, 'cid');\r\n if ($cids) {\r\n $where['cid'] = array('in', $cids);\r\n }\r\n if ($jchannellists) {\r\n foreach ($jchannellists as $k => $jchannellist) {\r\n $jchannellists[$k]['checked'] = '';\r\n }\r\n }\r\n $where['isblock'] = array('in', array(0, 1));\r\n }\r\n }\r\n\r\n if ($game_cids) {\r\n $jchannelgame = new jChannelGameModel();\r\n $jchannelgamelists = $jchannelgame->select('gid', array(\r\n 'cid' => array('in', $game_cids)\r\n ), '', '', '');\r\n if ($jchannelgamelists) {\r\n $gids = array_column($jchannelgamelists, 'gid');\r\n $gids = array_unique($gids);\r\n $where['gid'] = array('in', $gids);\r\n }\r\n }\r\n\r\n //ๆ ‡็ญพ็ฑปๅž‹,ๆ ‡็ญพๅ็งฐ\r\n $group = 'aid';\r\n $labeltype = (int)Request::getParam('labeltype','1');\r\n $labelname = Request::getParam('labelname','');\r\n $conditions['labeltype'] = $labeltype;\r\n $conditions['labelname'] = $labelname;\r\n if($labeltype&&$labelname){\r\n if($labeltype==1){\r\n $where['datatype'] = 1;\r\n $group = 'aid';\r\n $jgamemodel = new jGameModel();\r\n $games = $jgamemodel->getsearchgame($labelname);\r\n if ($games) {\r\n $gameIds = array_column($games, 'gameId');\r\n $where['gid'] = array('in', $gameIds);\r\n $jcgwhere['gid'] = array('in', 
$gameIds);\r\n }\r\n }elseif ($labeltype==2){\r\n $where['datatype'] = 2;\r\n $group = 'aid,gid';\r\n $jchannelgame = new jChannelGameModel();\r\n $jchannelgamelists = $jchannelgame->select('gid,gamename',array(\r\n 'datatype' => 2,\r\n 'gamename' => array('like','%'.$labelname.'%')\r\n ),'','','');\r\n if($jchannelgamelists){\r\n $labelids = array_column($jchannelgamelists,'gid');\r\n $where['gid'] = array('in', $labelids);\r\n $jcgwhere['gid'] = array('in', $labelids);\r\n }else{\r\n $where[1]=0;\r\n }\r\n }else{\r\n $where[1]=0;\r\n }\r\n }elseif ($labeltype&&empty($labelname)){\r\n $where['datatype'] = $labeltype;\r\n if($labeltype==1){\r\n $group = 'aid';\r\n }elseif ($labeltype==2){\r\n $group = 'aid,gid';\r\n }\r\n }elseif (empty($labeltype)&&$labelname){\r\n $where[1]=0;\r\n }else{\r\n $where['datatype'] = 1;\r\n $group = 'aid';\r\n }\r\n\r\n //ๆ–‡็ซ ๆ ‡้ข˜\r\n $arctitle = Request::getParam('arctitle');\r\n if ($arctitle) {\r\n $articleinfo = $this->getarticleinfo($arctitle);\r\n if ($articleinfo) {\r\n $aids = array_column($articleinfo, 'id');\r\n $where['aid'] = array('in', $aids);\r\n $conditions['arctitle'] = $arctitle;\r\n } else {\r\n $where[1] = 0;\r\n }\r\n }\r\n\r\n $allatypes = jChannelDataModel::getAllAtypes();\r\n //ๆ–‡็ซ ็ฑปๅž‹\r\n $atype = Request::getParam('atype');\r\n if ($atype) {\r\n $where['atype'] = array('in', $atype);\r\n $conditions['patype'] = implode(\",\",$atype);\r\n foreach ($allatypes as $k => $allatype) {\r\n if (in_array($k, $atype)) {\r\n $allatypes[$k]['checked'] = 'checked=\"checked\"';\r\n }\r\n }\r\n }else{\r\n $patype = Request::getParam('patype');\r\n if($patype){\r\n $atype = explode(',',$patype);\r\n $where['atype'] = array('in', $atype);\r\n $conditions['patype'] = implode(\",\",$atype);\r\n foreach ($allatypes as $k => $allatype) {\r\n if (in_array($k, $atype)) {\r\n $allatypes[$k]['checked'] = 'checked=\"checked\"';\r\n }\r\n }\r\n }\r\n }\r\n $jchanneldatamodel = new jChannelDataModel();\r\n $psize = 20; //ๆฏ้กตๆ˜พ็คบๆกๆ•ฐ\r\n $pno = Request::get('pb_page', 1);\r\n $skip = 0;\r\n if ($pno) {\r\n $skip = (intval($pno) - 1) * $psize;\r\n }\r\n $constr = \"\";\r\n if($conditions){\r\n foreach ($conditions as $ck => $condition){\r\n $constr.=\"&\".$ck.\"=\".$condition;\r\n }\r\n }\r\n if($where['datatype'] == 1){\r\n $lists = $jchanneldatamodel->getGameLists($where,$psize, $skip,$jchannellists,$constr);\r\n }elseif ($where['datatype'] == 2){\r\n $lists = $jchanneldatamodel->getLabelLists($where,$psize, $skip,$jchannellists,$constr);\r\n }else{\r\n $lists = array();\r\n }\r\n $jchanneldatacount = $jchanneldatamodel->select('count(*) as num',$where,'','','',$group);\r\n $total = count($jchanneldatacount);\r\n $page = new pageModel();\r\n $page->mainPage(array('total' => $total, 'perpage' => $psize, 'nowindex' => $pno, 'pagebarnum' => 10));\r\n $phtml = $page->show(2, $conditions);\r\n $data = array(\r\n 'begintime' => $begintime,\r\n 'endtime' => $endtime,\r\n 'arctitle' => $arctitle,\r\n 'labeltype' => $labeltype,\r\n 'labelname' => $labelname,\r\n 'allatypes' => $allatypes,\r\n 'jchannellists' => $jchannellists,\r\n 'list' => $lists,\r\n 'total' => $total,\r\n 'phtml' => $phtml,\r\n 'pno' => $pno,\r\n 'constr' => $constr\r\n );\r\n render($data, 'web', 'jchanneldata/alist');\r\n }\r\n\r\n public function aedit()\r\n {\r\n $aid = Request::getParam('aid');\r\n $atype = (int)Request::getParam('atype');\r\n if ($aid && $atype >= 0) {\r\n $jchanneldata = new jChannelDataModel();\r\n $jchanneldatalists = 
$jchanneldata->select('cid,gid,atype,litpic,isblock', array(\r\n 'aid' => $aid,\r\n 'atype' => $atype,\r\n 'source' => 1\r\n ));\r\n $litpics = array();\r\n $isblock = array();\r\n $gid = array();\r\n if ($jchanneldatalists) {\r\n $litpics = array_column($jchanneldatalists, 'litpic', 'cid');\r\n $isblock = array_column($jchanneldatalists, 'isblock', 'cid');\r\n $gid = array_column($jchanneldatalists, 'gid');\r\n }\r\n $jchannelgame = new jChannelGameModel();\r\n $jchannelgamelists = $jchannelgame->select('cid', array(\r\n 'gid' => array('in', $gid)\r\n ), '', '', '');\r\n $jcgcids = array();\r\n if ($jchannelgamelists) {\r\n $jcgcids = array_column($jchannelgamelists, 'cid');\r\n }\r\n\r\n $jchannel = new jChannelModel();\r\n $jchannellists = $jchannel->select('cid,ckey,channel_name', array(\r\n 'isshow'=>1\r\n ), '', '');\r\n if ($jchannellists) {\r\n foreach ($jchannellists as $jk => $jchannellist) {\r\n if (isset($litpics[$jchannellist['cid']])) {\r\n $jchannellists[$jk]['litpic'] = $litpics[$jchannellist['cid']];\r\n }\r\n if (in_array($jchannellist['cid'], $jcgcids)) {\r\n if (isset($isblock[$jchannellist['cid']])&&$isblock[$jchannellist['cid']] === '1') {\r\n $jchannellists[$jk]['isblock_desc'] = \"็ฆ็”จ\";\r\n $jchannellists[$jk]['isblock_op'] = '<a href=\"javascript:;\" class=\"status\" data-cid=\"' . $jchannellist['cid'] . '\" data-isblock=\"0\">ๅฏ็”จ</a><br>';\r\n $jchannellists[$jk]['litpic_desc'] = '<i id=\"channel_' . $jchannellist['cid'] . '_' . $aid . '\" class=\"table-in-upload\" data-ckey=\"' . $jchannellist['ckey'] . '\" style=\"width: 80px;height: 34px;display: inline-block; cursor: pointer; text-align: center;line-height: 34px;border-radius: 4px;\">้€‰ๆ‹ฉๅ›พ็‰‡</i>';\r\n } elseif (isset($isblock[$jchannellist['cid']])&&$isblock[$jchannellist['cid']] === '0') {\r\n $jchannellists[$jk]['isblock_desc'] = \"ๅฏ็”จ\";\r\n $jchannellists[$jk]['isblock_op'] = '<a href=\"javascript:;\" class=\"status\" data-cid=\"' . $jchannellist['cid'] . '\" data-isblock=\"1\">็ฆ็”จ</a><br>';\r\n $jchannellists[$jk]['litpic_desc'] = '<i id=\"channel_' . $jchannellist['cid'] . '_' . $aid . '\" class=\"table-in-upload\" data-ckey=\"' . $jchannellist['ckey'] . 
'\" style=\"width: 80px;height: 34px;display: inline-block; cursor: pointer; text-align: center;line-height: 34px;border-radius: 4px;\">้€‰ๆ‹ฉๅ›พ็‰‡</i>';\r\n } else {\r\n $jchannellists[$jk]['isblock_desc'] = \"ๆœชๅผ€้€š\";\r\n $jchannellists[$jk]['isblock_op'] = '<span>ๆœชๅผ€้€š</span>';\r\n $jchannellists[$jk]['litpic_desc'] = '<span>้€‰ๆ‹ฉๅ›พ็‰‡</span>';\r\n }\r\n } else {\r\n $jchannellists[$jk]['isblock_desc'] = \"ๆœชๅผ€้€š\";\r\n $jchannellists[$jk]['isblock_op'] = '<span>ๆœชๅผ€้€š</span>';\r\n $jchannellists[$jk]['litpic_desc'] = '<span>้€‰ๆ‹ฉๅ›พ็‰‡</span>';\r\n }\r\n $jchannellists[$jk]['litpic_rule'] = $this->getlitpicrule($jchannellist['ckey'], $atype);\r\n }\r\n }\r\n $jsourcedata = new jSourceDataModel();\r\n $jsourcedatalist = $jsourcedata->selectRow('data', array(\r\n 'aid' => $aid,\r\n 'source' => 1\r\n ));\r\n $alist = array();\r\n if ($jsourcedatalist) {\r\n $alist = json_decode($jsourcedatalist['data'], true);\r\n }\r\n $uptoken = $this->getUptoken();\r\n $data = array(\r\n 'aid' => $aid,\r\n 'atype' => $atype,\r\n 'atype_desc' => jChannelDataModel::getAtype($atype),\r\n 'jchannellists' => $jchannellists,\r\n 'alist' => $alist,\r\n 'uptoken' => $uptoken,\r\n 'begintime' => Request::getParam('begintime',''),\r\n 'endtime' => Request::getParam('endtime',''),\r\n 'pcid' => Request::getParam('pcid',''),\r\n 'labeltype' => Request::getParam('labeltype',1),\r\n 'labelname' => Request::getParam('labelname',''),\r\n 'arctitle' => Request::getParam('arctitle',''),\r\n 'patype' => Request::getParam('patype','')\r\n );\r\n render($data, 'web', 'jchanneldata/aedit');\r\n } else {\r\n echo 'ๅ‚ๆ•ฐ้”™่ฏฏ <a href=\"?c=jchanneldata&a=alist\">่ฟ”ๅ›žๅˆ—่กจ</a>';\r\n }\r\n }\r\n\r\n //่Žทๅ–\r\n public function getlitpicrule($ckey, $atype)\r\n {\r\n $litpicrule = '';\r\n if ($ckey == 'baiduglb') {\r\n if ($atype == '0') {\r\n $litpicrule = 'ๅฐบๅฏธ่ฆๆฑ‚๏ผšๆฏ”ไพ‹1:1๏ผŒไธไฝŽไบŽ120*120';\r\n } elseif ($atype == '1') {\r\n $litpicrule = 'ๅฐบๅฏธ่ฆๆฑ‚๏ผšๆฏ”ไพ‹4:3๏ผŒไธไฝŽไบŽ180*135๏ผŒๅปบ่ฎฎ200*150ไปฅไธŠ';\r\n } elseif ($atype == '4') {\r\n $litpicrule = 'ๅฐบๅฏธ่ฆๆฑ‚๏ผšๆฏ”ไพ‹1:1๏ผŒไธไฝŽไบŽ120*120';\r\n }\r\n }\r\n elseif ($ckey == 'sougoupc') {\r\n $litpicrule = 'ๅฐบๅฏธ่ฆๆฑ‚๏ผš202*152';\r\n }\r\n elseif ($ckey == 'sougouwap') {\r\n $litpicrule = 'ๅฐบๅฏธ่ฆๆฑ‚๏ผš120*90';\r\n }\r\n elseif ($ckey == 'baidugl') {\r\n $litpicrule = 'ๅฐบๅฏธ่ฆๆฑ‚๏ผš90*65';\r\n }\r\n elseif ($ckey == 'aliyunos'){\r\n $litpicrule = 'ๅปบ่ฎฎๅฐบๅฏธ๏ผšๅฎฝ้ซ˜ๆฏ”26:19๏ผŒๅฐบๅฏธ312x228';\r\n }\r\n elseif ($ckey == 'sougouclient'){\r\n $litpicrule = 'ๅปบ่ฎฎๅฐบๅฏธ๏ผšๅฎฝๅบฆๅคงไบŽ็ญ‰ไบŽ 720px๏ผŒๆ–‡ไปถ้™ 300kb';\r\n }\r\n return $litpicrule;\r\n }\r\n\r\n\r\n //ไฟฎๆ”น่ต„ๆ–™็ฑปๆ–‡็ซ ็Šถๆ€\r\n public function articlestatus()\r\n {\r\n $aid = Request::getParam('aid');\r\n $isblock = Request::getParam('isblock');\r\n if ($aid && is_numeric($aid)\r\n && is_numeric($isblock)\r\n ) {\r\n $jchanneldatamodel = new jChannelDataModel();\r\n $errno = $jchanneldatamodel->update(\r\n array(\r\n 'isblock' => $isblock\r\n ),\r\n array(\r\n 'aid' => (int)$aid,\r\n 'source' => 1,\r\n ));\r\n if ($errno) {\r\n $res = array('rs' => 1, 'msg' => 'ไฟฎๆ”นๆˆๅŠŸ');\r\n } else {\r\n $res = array('rs' => 2, 'msg' => 'ไฟฎๆ”นๅคฑ่ดฅ');\r\n }\r\n } else {\r\n $res = array('rs' => 2, 'msg' => 'ๅ‚ๆ•ฐ้”™่ฏฏ');\r\n }\r\n jsonEncode($res);\r\n }\r\n\r\n\r\n //ไฟฎๆ”นๅ…ถไป–็ฑปๅž‹ๆ–‡็ซ ็Šถๆ€\r\n public function aeditisblock()\r\n {\r\n $aid = Request::getParam('aid');\r\n $cid = Request::getParam('cid');\r\n $isblock = (int)Request::getParam('isblock');\r\n if 
($cid && $isblock >= 0) {\r\n $jchanneldatamodel = new jChannelDataModel();\r\n $errno = $jchanneldatamodel->update(\r\n array(\r\n 'isblock' => $isblock\r\n ),\r\n array(\r\n 'aid' => (int)$aid,\r\n 'cid' => (int)$cid,\r\n 'source' => 1,\r\n ));\r\n if ($errno) {\r\n $res = array('rs' => 1, 'msg' => 'ไฟฎๆ”นๆˆๅŠŸ');\r\n } else {\r\n $res = array('rs' => 2, 'msg' => 'ไฟฎๆ”นๅคฑ่ดฅ');\r\n }\r\n } else {\r\n $res = array('rs' => 2, 'msg' => 'ๅ‚ๆ•ฐ้”™่ฏฏ');\r\n }\r\n jsonEncode($res);\r\n }\r\n\r\n //ไฟฎๆ”นๆธ ้“็ผฉ็•ฅๅ›พๅœฐๅ€\r\n public function aeditlitpic()\r\n {\r\n $cid = (int)Request::getParam('cid');\r\n $aid = (int)Request::getParam('aid');\r\n $litpic = Request::getParam('litpic');\r\n if ($cid && $aid && $litpic) {\r\n $jchanneldatamodel = new jChannelDataModel();\r\n $errno = $jchanneldatamodel->update(\r\n array(\r\n 'litpic' => $litpic\r\n ),\r\n array(\r\n 'cid' => $cid,\r\n 'aid' => $aid,\r\n 'source' => 1,\r\n ));\r\n if ($errno) {\r\n $res = array('rs' => 1, 'msg' => 'ไฟฎๆ”นๆˆๅŠŸ');\r\n } else {\r\n $res = array('rs' => 2, 'msg' => 'ไฟฎๆ”นๅคฑ่ดฅ');\r\n }\r\n } else {\r\n $res = array('rs' => 2, 'msg' => 'ๅ‚ๆ•ฐ้”™่ฏฏ');\r\n }\r\n jsonEncode($res);\r\n }\r\n\r\n\r\n //็คผๅŒ…ๅˆ—่กจ\r\n public function giftlist()\r\n {\r\n global $GLOBALS;\r\n $conditions = $where = array('source' => 2);\r\n //ๅˆ›ๅปบๆ—ถ้—ดๅผ€ๅง‹\r\n $begintime = Request::getParam('begintime');\r\n //ๅˆ›ๅปบๆ—ถ้—ด็ป“ๆŸ\r\n $endtime = Request::getParam('endtime');\r\n if ($begintime && empty($endtime)) {\r\n $gbegintime = strtotime($begintime);\r\n $where['pubdate'] = array('gt', $gbegintime);\r\n $conditions['begintime'] = $begintime;\r\n } elseif (empty($begintime) && $endtime) {\r\n $gendtime = strtotime(\"$endtime +1 days\");\r\n $where['pubdate'] = array('lt', $gendtime);\r\n $conditions['endtime'] = $endtime;\r\n } elseif ($begintime && $endtime) {\r\n $gbegintime = strtotime($begintime);\r\n $gendtime = strtotime(\"$endtime +1 days\");\r\n $where['pubdate'] = array(array('gt',$gbegintime),array('lt',$gendtime),'and');\r\n $conditions['begintime'] = $begintime;\r\n $conditions['endtime'] = $endtime;\r\n }\r\n //ๆธธๆˆๅ็งฐ\r\n $gamename = Request::getParam('gamename');\r\n $gid = (int)Request::getParam('gid');\r\n if ($gid) {\r\n $where['gid'] = $gid;\r\n $conditions['gid'] = $gid;\r\n } else {\r\n if ($gamename) {\r\n $jgamemodel = new jGameModel();\r\n $games = $jgamemodel->getsearchgame($gamename);\r\n if ($games) {\r\n $gameids = array_column($games, 'gameId');\r\n $where['gid'] = array('in', $gameids);\r\n $conditions['gamename'] = $gamename;\r\n } else {\r\n $where['1'] = 0;\r\n }\r\n }\r\n }\r\n $jchanneldatamodel = new jChannelDataModel();\r\n $total = $jchanneldatamodel->count($where);\r\n $psize = 20; //ๆฏ้กตๆ˜พ็คบๆกๆ•ฐ\r\n $pno = Request::get('pb_page', 1);\r\n $skip = 0;\r\n if ($pno) {\r\n $skip = (intval($pno) - 1) * $psize;\r\n }\r\n $lists = $jchanneldatamodel->select(\"aid,gid,isblock\", $where, 'pubdate DESC', $psize, $skip);\r\n if ($lists) {\r\n //็คผๅŒ…ๅ†…ๅฎน\r\n $aids = array_column($lists, 'aid');\r\n $jsourcedatamodel = new jSourceDataModel();\r\n $jsourcedatalists = $jsourcedatamodel->select('aid,data', array(\r\n 'source' => 2,\r\n 'aid' => array('in', $aids)\r\n ));\r\n $datas = array();\r\n if ($jsourcedatalists) {\r\n $datas = array_column($jsourcedatalists, 'data', 'aid');\r\n }\r\n //ๆธธๆˆๅ†…ๅฎน\r\n $gids = array_column($lists, 'gid');\r\n $jgamemodel = new jGameModel();\r\n $jgamelists = $jgamemodel->select('gid,extra', array(\r\n 'gid' => array('in', $gids)\r\n ));\r\n 
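// Join idiom used throughout these controllers: rather than SQL joins, each
// related table is fetched with an IN(...) query and folded into an id-keyed
// map via array_column($rows, 'value', 'key'), so the per-row loop below can
// look up gift payloads ($datas[$aid]) and game extras ($gameinfos[$gid]) in
// O(1). The paging above follows the same pattern as alist() and
// douyuhezuo::index(): pb_page is 1-based, the offset is
// $skip = (intval($pno) - 1) * $psize, and select()'s trailing arguments are
// (limit, skip) in that order, so psize 20 with pb_page=3 reads rows 40..59.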
$gameinfos = array();\r\n if ($jgamelists) {\r\n $gameinfos = array_column($jgamelists, 'extra', 'gid');\r\n }\r\n //ๆœ็‹—wapๅฏนๅบ”็š„ๆธธๆˆๆ˜ฏๅฆ็ฆ็”จ\r\n $jchannelmodel = new jChannelModel();\r\n $jchannellist = $jchannelmodel->selectRow('cid',array(\r\n 'ckey' => 'sougouwap'\r\n ));\r\n $cid = 0;\r\n if($jchannellist){\r\n $cid = $jchannellist['cid'];\r\n }\r\n $jchannelgame = new jChannelGameModel();\r\n $jchannelgamelists = $jchannelgame->select('gid', array(\r\n 'gid' => array('in', $gids),\r\n 'cid' => $cid,\r\n 'gamestatus' => 1\r\n ),'','','');\r\n $jchannelgids = array();\r\n if($jchannelgamelists){\r\n $jchannelgids = array_column($jchannelgamelists,'gid');\r\n }\r\n foreach ($lists as $k => $list) {\r\n if ($datas[$list['aid']]) {\r\n $data = json_decode($datas[$list['aid']], true);\r\n $lists[$k]['activityUrl'] = \"http://www.joyme.\" . $GLOBALS['domain'] . \"/gift/\" . $data['activityGoodsId'];\r\n $lists[$k]['activitySubject'] = $data['activitySubject'];\r\n $lists[$k]['start_time'] = date(\"Y-m-d H:i:s\", strtotime($data['startTime']));\r\n $endTime = strtotime($data['endTime']);\r\n $lists[$k]['end_time'] = date(\"Y-m-d H:i:s\", $endTime);\r\n if ($data['actStatus']['code'] == 'y') {\r\n $lists[$k]['remove_status'] = 'ๅฏ็”จ';\r\n if ($list['isblock'] == 1) {\r\n $lists[$k]['op_desc'] = '<a href=\"javascript:;\" class=\"status\" data-aid=\"' . $list['aid'] . '\" data-isblock=\"0\">ๅฏ็”จ</a><br>';\r\n $lists[$k]['sougouwap_desc'] = '&#10006';\r\n } else {\r\n $lists[$k]['op_desc'] = '<a href=\"javascript:;\" class=\"status\" data-aid=\"' . $list['aid'] . '\" data-isblock=\"1\">็ฆ็”จ</a><br>';\r\n if(in_array($list['gid'],$jchannelgids)){\r\n $lists[$k]['sougouwap_desc'] = '&#10004';\r\n }else{\r\n $lists[$k]['sougouwap_desc'] = '&#10006';\r\n }\r\n }\r\n } else {\r\n $lists[$k]['remove_status'] = 'ไธๅฏ็”จ';\r\n $lists[$k]['op_desc'] = '<span>---</span>';\r\n $lists[$k]['sougouwap_desc'] = '&#10006';\r\n }\r\n }\r\n if ($gameinfos[$list['gid']]) {\r\n $gameinfo = json_decode($gameinfos[$list['gid']], true);\r\n $lists[$k]['wikiKey'] = $gameinfo['wikiKey'];\r\n $lists[$k]['gameName'] = $gameinfo['gameName'];\r\n }\r\n }\r\n }\r\n $page = new pageModel();\r\n $page->mainPage(array('total' => $total, 'perpage' => $psize, 'nowindex' => $pno, 'pagebarnum' => 10));\r\n $phtml = $page->show(2, $conditions);\r\n $data = array(\r\n 'begintime' => $begintime,\r\n 'endtime' => $endtime,\r\n 'gamename' => $gamename,\r\n 'total' => $total,\r\n 'list' => $lists,\r\n 'phtml' => $phtml,\r\n 'pno' => $pno\r\n );\r\n render($data, 'web', 'jchanneldata/giftlist');\r\n }\r\n\r\n //ไฟฎๆ”น็คผๅŒ…็Šถๆ€\r\n public function giftstatus()\r\n {\r\n $aid = Request::getParam('aid');\r\n $isblock = Request::getParam('isblock');\r\n if ($aid && is_numeric($aid)\r\n && is_numeric($isblock)\r\n ) {\r\n $jchanneldatamodel = new jChannelDataModel();\r\n $errno = $jchanneldatamodel->update(\r\n array(\r\n 'isblock' => $isblock\r\n ),\r\n array(\r\n 'aid' => (int)$aid,\r\n 'source' => 2,\r\n ));\r\n if ($errno) {\r\n $res = array('rs' => 1, 'msg' => 'ไฟฎๆ”นๆˆๅŠŸ');\r\n } else {\r\n $res = array('rs' => 2, 'msg' => 'ไฟฎๆ”นๅคฑ่ดฅ');\r\n }\r\n } else {\r\n $res = array('rs' => 2, 'msg' => 'ๅ‚ๆ•ฐ้”™่ฏฏ');\r\n }\r\n jsonEncode($res);\r\n }\r\n\r\n //่Žทๅ–ๆ–‡็ซ ็›ธๅ…ณไฟกๆฏ\r\n public function getarticleinfo($title)\r\n {\r\n global $GLOBALS;\r\n if ($title) {\r\n $url = \"http://article.joyme.\" . $GLOBALS['domain'] . 
\"/plus/channelapi.php\";\r\n $curl = new Curl();\r\n $result = $curl->Get($url, array(\r\n 'action' => 'searchtitle',\r\n 'title' => $title\r\n ));\r\n $result = json_decode($result, true);\r\n if ($result['rs'] == '1') {\r\n return $result['result'];\r\n } else {\r\n return false;\r\n }\r\n } else {\r\n return false;\r\n }\r\n }\r\n\r\n\r\n private function getUptoken()\r\n {\r\n $bucket = $GLOBALS['config']['qiniu']['bucket'];\r\n return Qiniu_Utils::Qiniu_UploadToken($bucket);\r\n }\r\n\r\n}" }, { "alpha_fraction": 0.36569541692733765, "alphanum_fraction": 0.3777279555797577, "avg_line_length": 35.701297760009766, "blob_id": "37b77df9eba9dc75234b299868c7dce0dd9997b5", "content_id": "de129ecb57e1e7c8085a6c10d4f4c825266d3292", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "PHP", "length_bytes": 8609, "license_type": "no_license", "max_line_length": 122, "num_lines": 231, "path": "/controller/sourceAliyunos.class.php", "repo_name": "liu67224657/joyme-channel", "src_encoding": "UTF-8", "text": "<?php\n\nif (!defined('IN')) die('bad request');\n/**\n * Created by PhpStorm.\n * User: pengzhang\n * Date: 2017/4/12\n * Time: ไธ‹ๅˆ4:05\n */\ninclude_once(AROOT . 'controller' . DS . 'sourceAbstract.class.php');\n\nuse Joyme\\core\\Request;\n\nclass sourceAliyunosController extends sourceAbstractController\n{\n //ๆธ ้“ๅญ—ๆฎต\n public $fields = array('indexData', 'title', 'image', 'url', 'pubtime', 'category');\n\n /**\n * ๆŽฅๅฃๆŸฅ่ฏข\n * $type: 1ๆ–ฐ้—ป 2็พŽๅ›พ\n */\n public function query($cid)\n {\n global $GLOBALS;\n\n $type = Request::getParam('type', 1); // 1ๆ–ฐ้—ป 2็พŽๅ›พ\n $traceId = Request::getParam('traceId', '');\n $offset = Request::getParam('offset', '');\n\n //ๆŸฅ่ฏขๆ‰€ๆœ‰็š„ๆธธๆˆ-ๆ ็›ฎ\n $jChannelGameModel = new jChannelGameModel();\n $where = array('cid' => $cid, 'gamestatus' => 1);\n\n $channelgamelist = $jChannelGameModel->getData($where, 1000);\n\n $gids = $gidall = '0';\n foreach ($channelgamelist as $val) {\n if ($val['datatype'] == 1) {\n $gids .= ',' . $val['gid'];\n }\n $gidall .= ',' . $val['gid'];\n }\n\n //ๆŸฅ่ฏขๆธธๆˆ้™„ๅŠ ๆ•ฐๆฎ\n $jGameModel = new jGameModel();\n $where2 = array('gid' => array('in', $gids));\n $gamelist = $jGameModel->getData($where2, 1000);\n\n foreach ($channelgamelist as $o => $p) {\n foreach ($gamelist as $k => $v) {\n if ($p['gid'] == $v['gid']) {\n $temparr = json_decode($v['extra'], true);\n $temparr = empty($temparr) ? array() : $temparr;\n if (empty($temparr['wikiUrl'])) {\n $temparr['wikiUrl'] = \"http://www.joyme.\" . $GLOBALS['domain'] . \"/collection/\" . $v['gid'];\n }\n $p['gamename'] = $temparr['gameName'];\n $channelgamelist[$o] = array_merge($p, $v, $temparr);\n }\n }\n }\n //้‡ๅปบๆ•ฐ็ป„\n $channelgameidlist = array_column($channelgamelist, 'gid');\n $channelgamelist = array_combine($channelgameidlist, $channelgamelist);\n\n //ๆŸฅ่ฏขๆธ ้“ๆ•ฐๆฎ\n $jChannelDataModel = new jChannelDataModel();\n $where3 = array('cid' => $cid, 'gid' => array('in', $gidall), 'isblock' => 0);\n if ($offset > 0) {\n $where3['pubdate'] = array('gt', $offset);\n }\n $data = $jChannelDataModel->getData($where3, 1000);\n\n $aids = $aids1 = $aids2 = '0';\n foreach ($data as $val) {\n if ($val['gid'] == $GLOBALS['config']['meituID']) {\n $aids2 .= ',' . $val['aid'];\n } else {\n $aids1 .= ',' . $val['aid'];\n }\n $aids .= ',' . 
$val['aid'];\n }\n $jSourceDataModel = new jSourceDataModel();\n $where4 = array('aid' => array('in', $aids));\n $sourcedata = $jSourceDataModel->getData($where4, 1000);\n\n //ๆŸฅ่ฏขbody\n $cmsModel = new cmsModel();\n $bodys1 = $cmsModel->getBodyByIds($aids);\n $cmsimgModel = new cmsimageModel();\n $bodys2 = $cmsimgModel->getBodyByIds($aids);\n //้‡ๅปบๆ•ฐ็ป„\n $bodys1 = array_column($bodys1, 'body', 'aid');\n $bodys2 = array_column($bodys2, 'body', 'aid');\n\n $newdata = array();\n //ๅˆๅนถๆ•ฐๆฎ\n foreach ($data as $k => $val) {\n foreach ($sourcedata as $row) {\n if ($val['aid'] == $row['aid'] && !empty($channelgamelist[$val['gid']])) {\n $tmp = json_decode($row['data'], true);\n if ($tmp) {\n $cat_id_pre = $val['datatype'] == 1 ? 'Youxi' : 'Lanmu';\n\n if ($val['gid'] != $GLOBALS['config']['meituID']) {\n $piclist = self::getCatImgList($bodys1[$val['aid']]);\n if ($val['atype'] == 1) {\n $media_type = 'video';\n } else if ($piclist) {\n $media_type = 'image_text';\n } else {\n $media_type = 'text';\n }\n } else {\n $piclist = self::getTagImgList($bodys2[$val['aid']]);\n\n $media_type = 'image';\n foreach ($piclist as $v) {\n if (substr($v['uri'], -3) == 'gif') {\n $media_type = 'image_gif';\n break;\n }\n }\n }\n $tmparr = array(\n 'id' => $val['aid'],\n 'gmt_create' => $tmp['pubdate'] * 1000,\n 'gmt_modified' => $tmp['pubdate'] * 1000,\n 'title' => $tmp['title'],\n 'description' => $tmp['description'],\n 'author' => $tmp['writer'],\n 'cat_id' => $cat_id_pre . $val['gid'],//ๅŒบๅˆ†ๆ ็›ฎๅ’Œๆธธๆˆๅ“Ÿ\n 'category' => $channelgamelist[$val['gid']]['gamename'],\n 'media_type' => $media_type,\n 'labels' => $tmp['keywords'],\n 'pic_default' => $val['litpic'],\n 'pic_list' => $piclist,\n 'm_url' => $val['url'].'?source=aliyunos',\n 'source' => '็€่ฟท็ฝ‘',\n 'status' => 'online'\n );\n\n $newdata[] = $tmparr;\n }\n }\n }\n\n\n }\n $code = empty($newdata) ? 
404 : 200;\n $newdata = json_encode($newdata);\n $result = array(\n 'data' => array(\n 'code' => $code,\n 'msg' => 'OK',\n 'traceId' => $traceId,\n 'jsonModel' => $newdata,\n 'sign' => md5($newdata),\n 'nextOffset' => time()\n )\n );\n return $result;\n }\n\n public static function getCatImgList($content)\n {\n $res = array();\n preg_match_all('/<img.*?src=\"(.*?)\".*?width=\"(.*?)\".*?height=\"(.*?)\".*?>/is', $content, $match);\n preg_match_all('/<img.*?width=\"(.*?)\".*?height=\"(.*?)\".*?src=\"(.*?)\".*?\\/>/is', $content, $match2);\n if (empty($match[1]) && empty($match2[3])) {\n return array();\n } else {\n $imglist = array();\n if (!empty($match[1])) {\n foreach ($match[1] as $k => $src) {\n $imginfo = array('uri' => $src);\n if (!empty($match[2][$k])) {\n $imginfo['width'] = $match[2][$k];\n }\n if (!empty($match[3][$k])) {\n $imginfo['height'] = $match[3][$k];\n }\n if (!in_array($src, $imglist)) {\n $res[] = $imginfo;\n $imglist[] = $src;\n }\n }\n }\n if (!empty($match2[3])) {\n foreach ($match2[3] as $k => $src) {\n $imginfo = array('uri' => $src);\n if (!empty($match2[1][$k])) {\n $imginfo['width'] = $match2[1][$k];\n }\n if (!empty($match2[2][$k])) {\n $imginfo['height'] = $match2[2][$k];\n }\n if (!in_array($src, $imglist)) {\n $res[] = $imginfo;\n $imglist[] = $src;\n }\n }\n }\n return $res;\n }\n }\n\n public static function getTagImgList($content)\n {\n $res = array();\n preg_match_all(\"/{dede:img.*?ddimg='(.*?)'.*?width='(.*?)'.*?height='(.*?)'.*?{\\/dede:img}/is\", $content, $match);\n if (empty($match[1])) {\n return array();\n } else {\n foreach ($match[1] as $k => $src) {\n $imginfo = array('uri' => $src);\n if (!empty($match[2][$k])) {\n $imginfo['width'] = $match[2][$k];\n }\n if (!empty($match[3][$k])) {\n $imginfo['height'] = $match[3][$k];\n }\n $res[] = $imginfo;\n }\n return $res;\n }\n }\n\n\n}" }, { "alpha_fraction": 0.5319949984550476, "alphanum_fraction": 0.5470514297485352, "avg_line_length": 19.461538314819336, "blob_id": "28f004544ac01182d6a4f0602c4abee1db7484d5", "content_id": "35129ca8b4ad7185d5b862c360882d9b6fd4040c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "PHP", "length_bytes": 797, "license_type": "no_license", "max_line_length": 67, "num_lines": 39, "path": "/model/joymeChannelModel.class.php", "repo_name": "liu67224657/joyme-channel", "src_encoding": "UTF-8", "text": "<?php\n\n/**\n * Created by PhpStorm.\n * User: xinshi\n * Date: 2015/10/29\n * Time: 17:08\n */\nif (!defined('IN'))\n die('bad request');\n\nuse Joyme\\db\\JoymeModel;\n\nuse Joyme\\core\\Log;\n\nclass joymeChannelModel extends JoymeModel{\n\n public $fields = array();\n\n public $tableName = 'joyme_channel';\n\n public function __construct() {\n\n $this->db_config = array(\n 'hostname' => $GLOBALS['config']['rds']['db_host'],\n 'username' => $GLOBALS['config']['rds']['db_user'],\n 'password' => $GLOBALS['config']['rds']['db_password'],\n 'database' => $GLOBALS['config']['rds']['db_name']\n );\n parent::__construct();\n }\n\n public function allChannelName(){\n\n $files = 'channel_name';\n return $this->select($files);\n }\n\n}" }, { "alpha_fraction": 0.4454917907714844, "alphanum_fraction": 0.4512295126914978, "avg_line_length": 32.38028335571289, "blob_id": "f6e766a5f11024c4e91214a6bbe03b9628b0f681", "content_id": "7550c2c12cbc94aeee0ebb0862ad4fe556a0d2fe", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "PHP", "length_bytes": 4992, "license_type": "no_license", "max_line_length": 117, "num_lines": 142, 
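The sourceAliyunos record that closes above extracts inline images from CMS bodies in `getCatImgList()` with two regex passes (one for `src`-first and one for `width`-first attribute order), de-duplicating by URL. A minimal sketch of the same extraction done with `DOMDocument`, which reads attributes in any order so a single loop suffices; the helper name and standalone shape are my assumptions, not code from the repo:

```php
<?php
// Order-insensitive variant of getCatImgList(): DOMDocument exposes each
// attribute regardless of its position in the tag, so the two regex passes
// in the original collapse into one loop. Hypothetical helper, not from the
// repository.
function extractImages(string $html): array
{
    $doc = new DOMDocument();
    // Suppress warnings from the tag soup CMS bodies usually contain;
    // the XML prolog trick keeps multi-byte text decoded as UTF-8.
    @$doc->loadHTML('<?xml encoding="utf-8"?>' . $html);
    $seen = $res = array();
    foreach ($doc->getElementsByTagName('img') as $img) {
        $src = trim($img->getAttribute('src'));
        if ($src === '' || isset($seen[$src])) {
            continue; // same URL-level dedup rule as the original
        }
        $seen[$src] = true;
        $info = array('uri' => $src);
        if ($img->getAttribute('width') !== '') {
            $info['width'] = $img->getAttribute('width');
        }
        if ($img->getAttribute('height') !== '') {
            $info['height'] = $img->getAttribute('height');
        }
        $res[] = $info;
    }
    return $res;
}
```

`extractImages($body)` yields `[['uri' => ..., 'width' => ..., 'height' => ...], ...]`, the same shape the controller feeds into `pic_list`.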
"path": "/controller/baiduoriginal.class.php", "repo_name": "liu67224657/joyme-channel", "src_encoding": "UTF-8", "text": "<?php\r\n/**\r\n * Description:็™พๅบฆๅŽŸๅˆ›็ฎก็†\r\n * Author: gradydong\r\n * Date: 2017/6/30\r\n * Time: 15:43\r\n * Copyright: Joyme.com\r\n */\r\nif (!defined('IN')) die('bad request');\r\ninclude_once(AROOT . 'controller' . DS . 'app.class.php');\r\nuse Joyme\\core\\Request;\r\n\r\nclass baiduoriginalController extends appController\r\n{\r\n public function __construct()\r\n {\r\n parent::__construct();\r\n }\r\n\r\n public function index()\r\n {\r\n $conditions = $where = array();\r\n //้กน็›ฎ\r\n $source = Request::getParam('source', 0);\r\n if ($source) {\r\n $where['source'] = $source;\r\n $conditions['source'] = $source;\r\n }\r\n //ๆ–‡็ซ ๆ ‡้ข˜\r\n $title = Request::getParam('title');\r\n if ($title) {\r\n $where['title'] = array('like', '%' . $title . '%');\r\n $conditions['title'] = $title;\r\n }\r\n //ๆไบคๆ—ถ้—ดๅผ€ๅง‹\r\n $begintime = Request::getParam('begintime');\r\n //ๆไบคๆ—ถ้—ด็ป“ๆŸ\r\n $endtime = Request::getParam('endtime');\r\n if ($begintime && empty($endtime)) {\r\n $gbegintime = strtotime($begintime);\r\n $where['addtime'] = array('gt', $gbegintime);\r\n $conditions['begintime'] = $begintime;\r\n }\r\n elseif (empty($begintime) && $endtime) {\r\n $gendtime = strtotime(\"$endtime +1 days\");\r\n $where['addtime'] = array('lt', $gendtime);\r\n $conditions['endtime'] = $endtime;\r\n }\r\n elseif ($begintime && $endtime) {\r\n $gbegintime = strtotime($begintime);\r\n $gendtime = strtotime(\"$endtime +1 days\");\r\n $where['addtime'] = array(array('gt',$gbegintime),array('lt',$gendtime),'and');\r\n $conditions['begintime'] = $begintime;\r\n $conditions['endtime'] = $endtime;\r\n }\r\n\r\n $baiduoriginalmodel = new baiduOriginalModel();\r\n $total = $baiduoriginalmodel->count($where);\r\n $psize = 20; //ๆฏ้กตๆ˜พ็คบๆกๆ•ฐ\r\n $pno = Request::get('pb_page', 1);\r\n $skip = 0;\r\n if ($pno) {\r\n $skip = (intval($pno) - 1) * $psize;\r\n }\r\n $lists = $baiduoriginalmodel->select(\"id,source,title,url,result,addtime\", $where, 'id DESC', $psize, $skip);\r\n if($lists){\r\n foreach ($lists as $k => $list){\r\n if($list['source']==1){\r\n $lists[$k]['source'] = 'wiki';\r\n }\r\n elseif ($list['source']==2){\r\n $lists[$k]['source'] = 'cms';\r\n }\r\n $lists[$k]['addtime'] = date('Y-m-d',$list['addtime']);\r\n }\r\n }\r\n $page = new pageModel();\r\n $page->mainPage(array('total' => $total, 'perpage' => $psize, 'nowindex' => $pno, 'pagebarnum' => 10));\r\n $phtml = $page->show(2, $conditions);\r\n $data = array(\r\n 'source' => $source,\r\n 'title' => $title,\r\n 'begintime' => $begintime,\r\n 'endtime' => $endtime,\r\n 'total' => $total,\r\n 'list' => $lists,\r\n 'phtml' => $phtml,\r\n 'pno' => $pno\r\n );\r\n render($data, 'web', 'baiduoriginal/list');\r\n }\r\n\r\n\r\n public function add()\r\n {\r\n render(array(), 'web', 'baiduoriginal/add');\r\n }\r\n\r\n public function addPro()\r\n {\r\n $source = Request::getParam('source');\r\n $title = Request::getParam('title');\r\n $url = Request::getParam('url');\r\n if($source&&$title&&$url){\r\n $opt_url = 'http://data.zz.baidu.com/urls?site=';\r\n if($source==1){\r\n $opt_url .= 'http://wiki.joyme.com/';\r\n }\r\n elseif ($source==2){\r\n $opt_url .= 'http://www.joyme.com/';\r\n }\r\n $opt_url .= '&token=WzGLirMD1oFFXN4n&type=original';\r\n $ch = curl_init();\r\n curl_setopt_array($ch,array(\r\n CURLOPT_URL=>$opt_url,\r\n CURLOPT_POST=>true,\r\n CURLOPT_RETURNTRANSFER=>true,\r\n 
CURLOPT_POSTFIELDS=>$url,\r\n CURLOPT_HTTPHEADER=>array('Content-Type: text/plain'),\r\n CURLOPT_TIMEOUT=>60\r\n ));\r\n $result = curl_exec($ch);\r\n curl_close($ch);\r\n $data = array(\r\n 'source' => $source,\r\n 'title' => $title,\r\n 'url' => $url,\r\n 'result' => $result,\r\n 'addtime' => time()\r\n );\r\n $baiduoriginalmodel = new baiduOriginalModel();\r\n $ret = $baiduoriginalmodel->insert($data);\r\n if ($ret) {\r\n echo 'ๆ“ไฝœๆˆๅŠŸ <a href=\"?c=baiduoriginal&a=index\">่ฟ”ๅ›žๅˆ—่กจ</a>';\r\n } else {\r\n echo 'ๆ“ไฝœๅคฑ่ดฅ <a href=\"?c=baiduoriginal&a=index\">่ฟ”ๅ›žๅˆ—่กจ</a>';\r\n }\r\n }else{\r\n echo 'ๅ‚ๆ•ฐไธ่ƒฝไธบ็ฉบ <a href=\"?c=baiduoriginal&a=index\">่ฟ”ๅ›žๅˆ—่กจ</a>';\r\n }\r\n }\r\n}" }, { "alpha_fraction": 0.37133485078811646, "alphanum_fraction": 0.38385847210884094, "avg_line_length": 39.24800109863281, "blob_id": "9d937dab4ad7419cd2aa99d4bb4ebd20f0a4181c", "content_id": "79e6d552cafa16c2e544a94ff9bd8813dc176e70", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "PHP", "length_bytes": 10215, "license_type": "no_license", "max_line_length": 146, "num_lines": 250, "path": "/controller/sourceSougouclient.class.php", "repo_name": "liu67224657/joyme-channel", "src_encoding": "UTF-8", "text": "<?php\n\nif (!defined('IN')) die('bad request');\n/**\n * Created by PhpStorm.\n * User: pengzhang\n * Date: 2017/4/12\n * Time: ไธ‹ๅˆ4:05\n */\ninclude_once(AROOT . 'controller' . DS . 'sourceAbstract.class.php');\n\nuse Joyme\\core\\Request;\n\nclass sourceSougouclientController extends sourceAbstractController\n{\n //ๆธ ้“ๅญ—ๆฎต\n public $fields = array('indexData', 'title', 'image', 'url', 'pubtime', 'category');\n\n /**\n * ๆŽฅๅฃๆŸฅ่ฏข\n */\n public function query($cid)\n {\n global $GLOBALS;\n $type = Request::getParam('type', 1); // 1ๅˆ—่กจ 2ๆ•ฐๆฎ\n $page = Request::getParam('page', 1);\n $size = 1000;\n $skip = ($page - 1) * $size;\n\n $offset = time() - 30 * 24 * 3600;\n\n //ๆŸฅ่ฏขๆ‰€ๆœ‰็š„ๆธธๆˆ-ๆ ็›ฎ\n $jChannelGameModel = new jChannelGameModel();\n $where = array('cid' => $cid, 'gamestatus' => 1);\n\n $channelgamelist = $jChannelGameModel->getData($where, 1000);\n\n $gids = $gidall = '0';\n foreach ($channelgamelist as $val) {\n if ($val['datatype'] == 1) {\n $gids .= ',' . $val['gid'];\n }\n $gidall .= ',' . $val['gid'];\n }\n\n //ๆŸฅ่ฏขๆธธๆˆ้™„ๅŠ ๆ•ฐๆฎ\n $jGameModel = new jGameModel();\n $where2 = array('gid' => array('in', $gids));\n $gamelist = $jGameModel->getData($where2, 1000);\n\n foreach ($channelgamelist as $o => $p) {\n foreach ($gamelist as $k => $v) {\n if ($p['gid'] == $v['gid']) {\n $temparr = json_decode($v['extra'], true);\n $temparr = empty($temparr) ? array() : $temparr;\n if (empty($temparr['wikiUrl'])) {\n $temparr['wikiUrl'] = \"http://www.joyme.\" . $GLOBALS['domain'] . \"/collection/\" . 
$v['gid'];\n }\n $p['gamename'] = $temparr['gameName'];\n $channelgamelist[$o] = array_merge($p, $v, $temparr);\n }\n }\n }\n //้‡ๅปบๆ•ฐ็ป„\n $channelgameidlist = array_column($channelgamelist, 'gid');\n $channelgamelist = array_combine($channelgameidlist, $channelgamelist);\n\n //ๆŸฅ่ฏขๆธ ้“ๆ•ฐๆฎ\n $jChannelDataModel = new jChannelDataModel();\n $where3 = array('cid' => $cid, 'gid' => array('in', $gidall), 'isblock' => 0);\n if ($offset > 0) {\n $where3['pubdate'] = array('gt', $offset);\n }\n\n if ($type == 1) {\n $count = $jChannelDataModel->count($where3);\n $total = ceil($count/$size);\n $newdata = array('type'=>1,'total'=>$total);\n } else {\n $data = $jChannelDataModel->getData($where3, $size, $skip);\n\n $aids = $aids1 = $aids2 = '0';\n foreach ($data as $val) {\n if ($val['gid'] == $GLOBALS['config']['meituID']) {\n $aids2 .= ',' . $val['aid'];\n } else {\n $aids1 .= ',' . $val['aid'];\n }\n $aids .= ',' . $val['aid'];\n }\n $jSourceDataModel = new jSourceDataModel();\n $where4 = array('aid' => array('in', $aids));\n $sourcedata = $jSourceDataModel->getData($where4, 10000);\n\n //ๆŸฅ่ฏขbody\n $cmsModel = new cmsModel();\n $bodys1 = $cmsModel->getBodyByIds($aids);\n $cmsimgModel = new cmsimageModel();\n $bodys2 = $cmsimgModel->getBodyByIds($aids);\n //้‡ๅปบๆ•ฐ็ป„\n $bodys1 = array_column($bodys1, 'body', 'aid');\n $bodys2 = array_column($bodys2, 'body', 'aid');\n\n $newdata = array();\n //ๅˆๅนถๆ•ฐๆฎ\n foreach ($data as $k => $val) {\n foreach ($sourcedata as $row) {\n if ($val['aid'] == $row['aid'] && !empty($channelgamelist[$val['gid']])) {\n $tmp = json_decode($row['data'], true);\n if ($tmp) {\n $cat_id_pre = $val['datatype'] == 1 ? 'Youxi' : 'Lanmu';\n\n $read_num = ceil((time()-$tmp['pubdate'])/3600/24)*87+intval(intval(substr($val['aid'],-4))*0.8);\n\n if ($val['gid'] != $GLOBALS['config']['meituID']) {\n $piclistr = self::getCatImgList($bodys1[$val['aid']]);\n $piclist = array();\n $media_type = 'ๆ–ฐ้—ป';\n $body = $bodys1[$val['aid']];\n $pic_defaultsrc = empty($val['litpic'])?$piclistr[0]['uri']:$val['litpic'];\n $pic_default = array('url' => $pic_defaultsrc, 'width' => 230, 'height' => 130);\n } else {\n $piclist = self::getTagImgList($bodys2[$val['aid']],$tmp['title']);\n $media_type = 'ๅ›พ้›†';\n $body = $tmp['description'];\n foreach ($piclist as $v) {\n $body .= '<br/><img src=\"' . $v['uri'] . '\" width=\"' . $v['width'] . 'px\" height=\"' . $v['height'] . 'px\" />';\n }\n $pic_defaultsrc = empty($val['litpic'])?$piclist[0]['uri']:$val['litpic'];\n $pic_default = array('url' => $pic_defaultsrc, 'width' => 210, 'height' => 318);\n }\n $description = self::getStrTrimTag($tmp['description']);\n $keywords = self::getStrTrimTag($tmp['keywords']);\n $tmparr = array(\n 'id' => $val['aid'],\n 'update_time' => $tmp['pubdate'],\n 'title' => $tmp['title'],\n 'shorttitle' => $tmp['shorttitle'],\n 'description' => $description,\n 'body' => $body,\n 'keywords' => $keywords,\n 'author' => $tmp['writer'],\n 'cat_id' => $cat_id_pre . $val['gid'],//ๅŒบๅˆ†ๆ ็›ฎๅ’Œๆธธๆˆๅ“Ÿ\n 'category' => $channelgamelist[$val['gid']]['gamename'],\n 'media_type' => $media_type,\n 'labels' => $tmp['keywords'],\n 'pic_default' => $pic_default,\n 'pic_list' => $piclist,\n 'm_url' => self::getStrTrimTag($val['url'] . 
'?source=sougouclient'),\n 'source' => '็€่ฟท็ฝ‘',\n 'read_num' => $read_num\n );\n\n $newdata[] = $tmparr;\n }\n }\n }\n\n\n }\n }\n return $newdata;\n }\n\n public static function getCatImgList($content)\n {\n $res = array();\n preg_match_all('/<img.*?src=\"(.*?)\".*?width=\"(.*?)\".*?height=\"(.*?)\".*?>/is', $content, $match);\n preg_match_all('/<img.*?width=\"(.*?)\".*?height=\"(.*?)\".*?src=\"(.*?)\".*?\\/>/is', $content, $match2);\n if (empty($match[1]) && empty($match2[3])) {\n return array();\n } else {\n $imglist = array();\n if (!empty($match[1])) {\n foreach ($match[1] as $k => $src) {\n $src = trim($src);\n $imginfo = array('uri' => $src);\n if (!empty($match[2][$k])) {\n $imginfo['width'] = $match[2][$k];\n }\n if (!empty($match[3][$k])) {\n $imginfo['height'] = $match[3][$k];\n }\n if (!in_array($src, $imglist)) {\n $res[] = $imginfo;\n $imglist[] = $src;\n }\n }\n }\n if (!empty($match2[3])) {\n foreach ($match2[3] as $k => $src) {\n $src = trim($src);\n $imginfo = array('uri' => $src);\n if (!empty($match2[1][$k])) {\n $imginfo['width'] = $match2[1][$k];\n }\n if (!empty($match2[2][$k])) {\n $imginfo['height'] = $match2[2][$k];\n }\n if (!in_array($src, $imglist)) {\n $res[] = $imginfo;\n $imglist[] = $src;\n }\n }\n }\n return $res;\n }\n }\n\n public static function getTagImgList($content,$title)\n {\n $res = array();\n preg_match_all(\"/{dede:img.*?ddimg='(.*?)'.*?text='(.*?)'.*?width='(.*?)'.*?height='(.*?)'.*?{\\/dede:img}/is\", $content, $match);\n if (empty($match[1])) {\n return array();\n } else {\n foreach ($match[1] as $k => $src) {\n $src = trim($src);\n $imginfo = array('uri' => $src);\n if (!empty($match[2][$k])) {\n// $imginfo['text'] = $match[2][$k];\n $imginfo['text'] = self::getStrTrimTag($match[2][$k]);\n }else{\n $imginfo['text'] = $title;\n }\n if (!empty($match[3][$k])) {\n $imginfo['width'] = $match[3][$k];\n }\n if (!empty($match[4][$k])) {\n $imginfo['height'] = $match[4][$k];\n }\n $res[] = $imginfo;\n }\n return $res;\n }\n }\n\n //ๅŽปๆŽ‰็ฉบๆ ผ๏ผŒๅ›ž่ฝฆ๏ผŒๆข่กŒ\n public static function getStrTrimTag($content)\n {\n $content = str_replace(\" \",'',$content);\n $content = str_replace(\"ใ€€\",'',$content);\n// $content = str_replace(\"\\r\",'',$content);\n// $content = str_replace(\"\\n\",'',$content);\n// $content = str_replace(\"\\r\\n\",'',$content);\n// $content = str_replace(\"\\t\",'',$content);\n return $content;\n }\n\n}" }, { "alpha_fraction": 0.38738808035850525, "alphanum_fraction": 0.38978853821754456, "avg_line_length": 37.409908294677734, "blob_id": "e462f16a7134e200071c15340e7c99e80b8a172f", "content_id": "e32403648d5f4a900b89c764ff584e3b46835c86", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "PHP", "length_bytes": 26895, "license_type": "no_license", "max_line_length": 259, "num_lines": 666, "path": "/controller/jgame.class.php", "repo_name": "liu67224657/joyme-channel", "src_encoding": "UTF-8", "text": "<?php\r\n\r\n/**\r\n * Description:ๆธ ้“็ฎก็†ๆธธๆˆ็›ธๅ…ณ\r\n * Author: gradydong\r\n * Date: 2017/4/19\r\n * Time: 16:20\r\n * Copyright: Joyme.com\r\n */\r\nif (!defined('IN')) die('bad request');\r\ninclude_once(AROOT . 'controller' . DS . 
'app.class.php');\r\nuse Joyme\\core\\Request;\r\nuse Joyme\\qiniu\\Qiniu_Utils;\r\nuse Joyme\\net\\Curl;\r\n\r\nclass jgameController extends appController\r\n{\r\n public function __construct()\r\n {\r\n parent::__construct();\r\n }\r\n\r\n public function index()\r\n {\r\n $conditions = $jcgwhere = $where = array();\r\n\r\n $jchannelmodel = new jChannelModel();\r\n //ๆ ‡็ญพ็ฑปๅž‹\r\n $labeltype = (int)Request::getParam('labeltype','1');\r\n $labelname = Request::getParam('labelname');\r\n if($labeltype&&$labelname){\r\n $conditions['labeltype'] = $labeltype;\r\n $conditions['labelname'] = $labelname;\r\n if($labeltype==1){\r\n $jcgwhere['datatype'] = $where['datatype'] = 1;\r\n $jgamemodel = new jGameModel();\r\n $games = $jgamemodel->getsearchgame($labelname);\r\n if ($games) {\r\n $gameIds = array_column($games, 'gameId');\r\n $where['gid'] = array('in', $gameIds);\r\n $jcgwhere['gid'] = array('in', $gameIds);\r\n }else{\r\n $jcgwhere[1]=$where[1]=0;\r\n }\r\n }elseif ($labeltype==2){\r\n $jcgwhere['datatype'] = $where['datatype'] = 2;\r\n $jcgwhere['gamename'] = $where['gamename'] = array('like','%'.$labelname.'%');\r\n }else{\r\n $where[1]=0;\r\n }\r\n }elseif ($labeltype&&empty($labelname)){\r\n $conditions['labeltype'] = $labeltype;\r\n $jcgwhere['datatype'] = $where['datatype'] = $labeltype;\r\n }elseif (empty($labeltype)&&$labelname){\r\n $jcgwhere[1]=$where[1]=0;\r\n }\r\n\r\n //ๆธ ้“id\r\n $cid = (int)Request::getParam('cid');\r\n if ($cid) {\r\n $where['cid'] = $cid;\r\n $conditions['cid'] = $cid;\r\n } else {\r\n $jchannellist = $jchannelmodel->selectRow(\"cid,ckey,channel_name\",array(\r\n 'isshow'=>1\r\n ));\r\n if ($jchannellist) {\r\n $cid = $where['cid'] = $jchannellist['cid'];\r\n }\r\n }\r\n\r\n $lists = array();\r\n $jchannelgamemodel = new jChannelGameModel();\r\n $total = $jchannelgamemodel->count($where);\r\n $psize = 20; //ๆฏ้กตๆ˜พ็คบๆกๆ•ฐ\r\n $pno = Request::get('pb_page', 1);\r\n $skip = 0;\r\n if ($pno) {\r\n $skip = (intval($pno) - 1) * $psize;\r\n }\r\n $jchannelgamelists = $jchannelgamemodel->select(\"cid,gid,gamestatus,gamename,datatype\", $where, 'id DESC', $psize, $skip);\r\n if ($jchannelgamelists) {\r\n $gids = array();\r\n foreach ($jchannelgamelists as $k => $v){\r\n if($v['datatype']==1){\r\n $gids[] = $v['gid'];\r\n }\r\n }\r\n $extras = array();\r\n if($gids){\r\n $jgamemodel = new jGameModel();\r\n $jgamelists = $jgamemodel->select('gid,extra', array(\r\n 'gid' => array('in', $gids)\r\n ));\r\n if ($jgamelists) {\r\n $extras = array_column($jgamelists, 'extra', 'gid');\r\n }\r\n }\r\n $lists = array();\r\n foreach ($jchannelgamelists as $jk => $jchannelgamelist) {\r\n $lists[$jk]['gid'] = $jchannelgamelist['gid'];\r\n if($jchannelgamelist['datatype']==1){\r\n $lists[$jk]['datatype'] = 'ๆธธๆˆ';\r\n if ($extras[$jchannelgamelist['gid']]) {\r\n $extra = json_decode($extras[$jchannelgamelist['gid']], true);\r\n if($extra['gameName']){\r\n $lists[$jk]['gameName'] = $extra['gameName'];\r\n }else{\r\n $lists[$jk]['gameName'] = \"\";\r\n }\r\n $validstatus = $extra['validStatus']['code'];\r\n if ($validstatus == 'invalid') {\r\n $lists[$jk]['validstatus'] = 'ๅฎกๆ ธไธญ';\r\n } elseif ($validstatus == 'valid') {\r\n $lists[$jk]['validstatus'] = 'ๅฎกๆ ธ้€š่ฟ‡';\r\n } elseif ($validstatus == 'notvalid') {\r\n $lists[$jk]['validstatus'] = 'ๅฎกๆ ธๆœช้€š่ฟ‡';\r\n } elseif ($validstatus == 'removed') {\r\n $lists[$jk]['validstatus'] = 'ๅˆ ้™ค';\r\n } else {\r\n $lists[$jk]['validstatus'] = 'ๅฎกๆ ธไธญ';\r\n }\r\n }\r\n }elseif 
($jchannelgamelist['datatype']==2){\r\n $lists[$jk]['datatype'] = 'ๆ ็›ฎ';\r\n $lists[$jk]['gameName'] = $jchannelgamelist['gamename'];\r\n $lists[$jk]['validstatus'] = '';\r\n }\r\n $op_desc = '';\r\n if ($jchannelgamelist['gamestatus'] == 1) {\r\n $op_desc .= '<a href=\"?c=jgame&a=edit&gid=' . $jchannelgamelist['gid'] . '&datatype='.$jchannelgamelist['datatype'].'\">็ผ–่พ‘</a><br><a href=\"javascript:;\" class=\"status\" data-gid=\"' . $jchannelgamelist['gid'] . '\" data-status=\"0\">็ฆ็”จ</a><br>';\r\n } else {\r\n $op_desc .= '<span>็ผ–่พ‘</span><br><a href=\"javascript:;\" class=\"status\" data-gid=\"' . $jchannelgamelist['gid'] . '\" data-status=\"1\">ๅฏ็”จ</a><br>';\r\n }\r\n $lists[$jk]['op_desc'] = $op_desc;\r\n }\r\n }\r\n $page = new pageModel();\r\n $page->mainPage(array('total' => $total, 'perpage' => $psize, 'nowindex' => $pno, 'pagebarnum' => 10));\r\n $phtml = $page->show(2, $conditions);\r\n\r\n $jchannellists = $jchannelmodel->select(\"cid,ckey,channel_name\", array(\r\n 'isshow'=>1\r\n ), '', '');\r\n if ($jchannellists) {\r\n $jchannelgamecount = $jchannelgamemodel->select(\"cid,count(*) as num\", $jcgwhere, '', '', '', 'cid');\r\n if ($jchannelgamecount) {\r\n $nums = array_column($jchannelgamecount, 'num', 'cid');\r\n foreach ($jchannellists as $jk => $jchannellist) {\r\n if ($nums[$jchannellist['cid']]) {\r\n $jchannellists[$jk]['num'] = $nums[$jchannellist['cid']];\r\n } else {\r\n $jchannellists[$jk]['num'] = 0;\r\n }\r\n }\r\n }\r\n }\r\n\r\n $data = array(\r\n 'cid' => $cid,\r\n 'labeltype' => $labeltype,\r\n 'labelname' => $labelname,\r\n 'jchannellists' => $jchannellists,\r\n 'list' => $lists,\r\n 'total' => $total,\r\n 'phtml' => $phtml,\r\n 'pno' => $pno\r\n );\r\n render($data, 'web', 'jgame/list');\r\n }\r\n\r\n\r\n public function add()\r\n {\r\n $jchannelmodel = new jChannelModel();\r\n $jchannellists = $jchannelmodel->select(\"cid,ckey,channel_name\", array(\r\n 'isshow'=>1\r\n ), '', '', '');\r\n $uptoken = $this->getUptoken();\r\n $data = array(\r\n 'jchannellists' => $jchannellists,\r\n 'uptoken' => $uptoken\r\n );\r\n render($data, 'web', 'jgame/add');\r\n }\r\n\r\n public function addPro()\r\n {\r\n $cids = Request::getParam('cids');\r\n $datatype = (int)Request::getParam('datatype');\r\n if (!empty($cids) && !empty($datatype)) {\r\n if($datatype==1){\r\n $gid = (int)Request::getParam('gid');\r\n $jchannelmodel = new jChannelModel();\r\n $jchannellists = $jchannelmodel->select(\"cid,ckey\", array(\r\n 'isshow'=>1\r\n ), '', '', '');\r\n $ckeys = array();\r\n if ($jchannellists) {\r\n $ckeys = array_column($jchannellists, 'ckey', 'cid');\r\n }\r\n //ๆทปๅŠ ๆธ ้“ๆธธๆˆๆ•ฐๆฎ\r\n $jchannelgame = new jChannelGameModel();\r\n foreach ($cids as $cid) {\r\n $cgdata = array(\r\n 'cid' => $cid,\r\n 'gid' => $gid,\r\n 'datatype' => $datatype\r\n );\r\n if (isset($ckeys[$cid])) {\r\n $keygamename = Request::getParam($ckeys[$cid] . 
'gamename');\r\n if ($keygamename) {\r\n $cgdata['gamename'] = $keygamename;\r\n }\r\n }\r\n $jchannelgame->addData($cgdata);\r\n }\r\n\r\n\r\n //ๆทปๅŠ ๆธธๆˆๆ•ฐๆฎ\r\n $jgame = new jGameModel();\r\n $gdata = array();\r\n //็ผฉ็•ฅๅ›พๅœฐๅ€\r\n $litpic = Request::getParam('litpic');\r\n if (!empty($litpic)) {\r\n $gdata['litpic'] = $litpic;\r\n }\r\n //ๆธธๆˆๅˆ†็ฑปๅœฐๅ€\r\n $gametypeurl = Request::getParam('gametypeurl');\r\n if (!empty($gametypeurl)) {\r\n $gdata['gametypeurl'] = $gametypeurl;\r\n }\r\n //่ต„่ฎฏๅœฐๅ€\r\n $listnewsurl = Request::getParam('listnewsurl');\r\n if (!empty($listnewsurl)) {\r\n $gdata['listnewsurl'] = $listnewsurl;\r\n }\r\n //ๆ”ป็•ฅๅœฐๅ€\r\n $liststrategyurl = Request::getParam('liststrategyurl');\r\n if (!empty($liststrategyurl)) {\r\n $gdata['liststrategyurl'] = $liststrategyurl;\r\n }\r\n //่ง†้ข‘ๅœฐๅ€\r\n $listvideourl = Request::getParam('listvideourl');\r\n if (!empty($listvideourl)) {\r\n $gdata['listvideourl'] = $listvideourl;\r\n }\r\n //็คผๅŒ…ๅœฐๅ€\r\n $listgifturl = Request::getParam('listgifturl');\r\n if (!empty($listgifturl)) {\r\n $gdata['listgifturl'] = $listgifturl;\r\n }\r\n //ๆŽ’่กŒ\r\n $rank = Request::getParam('rank');\r\n if (!empty($rank)) {\r\n $gdata['rank'] = $rank;\r\n }\r\n //ๆŽ’่กŒ้“พๆŽฅ\r\n $rankurl = Request::getParam('rankurl');\r\n if (!empty($rankurl)) {\r\n $gdata['rankurl'] = $rankurl;\r\n }\r\n //ไบบๆฐ”\r\n $follow = Request::getParam('follow');\r\n if (!empty($follow)) {\r\n $gdata['follow'] = $follow;\r\n }\r\n\r\n //ๆธธๆˆ็ฑปๅž‹\r\n $sougoupcgametype = Request::getParam('sougoupcgametype');\r\n if (!empty($sougoupcgametype)) {\r\n //ๆธธๆˆ้™„ๅŠ ๅญ—ๆฎตjsonๅญ˜ๅ‚จ\r\n $extra = array();\r\n $jgamelist = $jgame->selectRow('extra', array('gid' => $gid));\r\n if ($jgamelist) {\r\n $extra = json_decode($jgamelist['extra'], true);\r\n }\r\n $extra['sougoupcgametype'] = $sougoupcgametype;\r\n $gdata['extra'] = json_encode($extra);\r\n }\r\n $res = $jgame->update($gdata, array(\r\n 'gid' => $gid\r\n ));\r\n if ($res !== false) {\r\n echo 'ๆทปๅŠ ๆˆๅŠŸ <a href=\"?c=jgame&a=index\">่ฟ”ๅ›žๅˆ—่กจ</a>';\r\n exit();\r\n } else {\r\n echo 'ๆทปๅŠ ๅคฑ่ดฅ <a href=\"?c=jgame&a=index\">่ฟ”ๅ›žๅˆ—่กจ</a>';\r\n exit();\r\n }\r\n }elseif ($datatype==2){\r\n $gid = Request::getParam('labelid');\r\n $gamename = Request::getParam('labelname');\r\n //ๆทปๅŠ ๆธ ้“ๆธธๆˆๆ•ฐๆฎ\r\n $jchannelgame = new jChannelGameModel();\r\n $addflag = true;\r\n foreach ($cids as $cid) {\r\n $cgdata = array(\r\n 'cid' => $cid,\r\n 'gid' => $gid,\r\n 'gamename' => $gamename,\r\n 'datatype' => $datatype\r\n );\r\n $res = $jchannelgame->addData($cgdata);\r\n if($res===false){\r\n $addflag =false;\r\n break;\r\n }\r\n }\r\n if ($addflag) {\r\n echo 'ๆทปๅŠ ๆˆๅŠŸ <a href=\"?c=jgame&a=index\">่ฟ”ๅ›žๅˆ—่กจ</a>';\r\n exit();\r\n } else {\r\n echo 'ๆทปๅŠ ๅคฑ่ดฅ <a href=\"?c=jgame&a=index\">่ฟ”ๅ›žๅˆ—่กจ</a>';\r\n exit();\r\n }\r\n }else{\r\n echo 'ๅ‚ๆ•ฐ้”™่ฏฏ <a href=\"?c=jgame&a=index\">่ฟ”ๅ›žๅˆ—่กจ</a>';\r\n exit();\r\n }\r\n } else {\r\n echo 'ๅ‚ๆ•ฐ้”™่ฏฏ <a href=\"?c=jgame&a=index\">่ฟ”ๅ›žๅˆ—่กจ</a>';\r\n exit();\r\n }\r\n }\r\n\r\n public function edit()\r\n {\r\n $datatype = (int)Request::getParam('datatype');\r\n $gid = (int)Request::getParam('gid');\r\n if ($gid && $datatype) {\r\n $jchannelmodel = new jChannelModel();\r\n $jchannellists = $jchannelmodel->select(\"cid,ckey,channel_name\", array(\r\n 'isshow'=>1\r\n ), 'cid DESC', '', '');\r\n $jchannelgamemodel = new jChannelGameModel();\r\n $jchannelgamelists = $jchannelgamemodel->select('cid,gamename', array(\r\n 
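// [editor's note] added comment: loads the existing channel bindings (cid + per-channel display name) for this gid/datatype, used to pre-fill the edit form's checkboxes\r\n            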
'gid' => $gid,\r\n 'datatype' => $datatype\r\n ));\r\n $jcgcids = array();\r\n $jcggamenames = array();\r\n $labelname = '';\r\n if ($jchannelgamelists) {\r\n $labelname = $jchannelgamelists[0]['gamename'];\r\n $jcgcids = array_column($jchannelgamelists, 'cid');\r\n $jcggamenames = array_column($jchannelgamelists, 'gamename', 'cid');\r\n }\r\n $baiduglbgroup = \"none\";\r\n $sougoupcgroup = \"none\";\r\n $sougouwapgroup = \"none\";\r\n $sgcomgroup = \"none\";\r\n $baiduglbgamename = \"\";\r\n $sougoupcgamename = \"\";\r\n $sougouwapgamename = \"\";\r\n foreach ($jchannellists as $jk => $jchannellist) {\r\n if (in_array($jchannellist['cid'], $jcgcids)) {\r\n $jchannellists[$jk]['checked'] = 'checked=\"checked\"';\r\n if ($jchannellist['ckey'] == \"baiduglb\") {\r\n $baiduglbgroup = \"block\";\r\n if ($jcggamenames[$jchannellist['cid']]) {\r\n $baiduglbgamename = $jcggamenames[$jchannellist['cid']];\r\n }\r\n }\r\n if ($jchannellist['ckey'] == \"sougoupc\") {\r\n $sougoupcgroup = \"block\";\r\n if ($jcggamenames[$jchannellist['cid']]) {\r\n $sougoupcgamename = $jcggamenames[$jchannellist['cid']];\r\n }\r\n }\r\n if ($jchannellist['ckey'] == \"sougouwap\") {\r\n $sougouwapgroup = \"block\";\r\n if ($jcggamenames[$jchannellist['cid']]) {\r\n $sougouwapgamename = $jcggamenames[$jchannellist['cid']];\r\n }\r\n }\r\n } else {\r\n $jchannellists[$jk]['checked'] = '';\r\n }\r\n }\r\n if ($sougoupcgroup == \"block\") {\r\n $sgcomgroup = \"block\";\r\n } else {\r\n if ($sougouwapgroup == \"block\") {\r\n $sgcomgroup = \"block\";\r\n }\r\n }\r\n $gamename = \"\";\r\n $wikikey = \"\";\r\n $jgamemodel = new jGameModel();\r\n $jgamelist = $jgamemodel->selectRow('*', array(\r\n 'gid' => $gid\r\n ));\r\n $gtypechecked = \"ONLINEGAME\";\r\n if ($jgamelist) {\r\n $extra = json_decode($jgamelist['extra'], true);\r\n if ($extra['sougoupcgametype']) {\r\n if ($extra['sougoupcgametype'] == \"PCGAME\") {\r\n $gtypechecked = \"PCGAME\";\r\n } else {\r\n $gtypechecked = \"ONLINEGAME\";\r\n }\r\n }\r\n $gamename = $extra['gameName'];\r\n $wikikey = $extra['wikiKey'];\r\n }\r\n $uptoken = $this->getUptoken();\r\n $data = array(\r\n 'gid' => $gid,\r\n 'datatype' => $datatype,\r\n 'jchannellists' => $jchannellists,\r\n 'jgamelist' => $jgamelist,\r\n 'baiduglbgroup' => $baiduglbgroup,\r\n 'sougoupcgroup' => $sougoupcgroup,\r\n 'sougouwapgroup' => $sougouwapgroup,\r\n 'sgcomgroup' => $sgcomgroup,\r\n 'baiduglbgamename' => $baiduglbgamename,\r\n 'sougoupcgamename' => $sougoupcgamename,\r\n 'sougouwapgamename' => $sougouwapgamename,\r\n 'uptoken' => $uptoken,\r\n 'labelname' => $labelname,\r\n 'gamename' => $gamename,\r\n 'wikikey' => $wikikey,\r\n 'gtypechecked' => $gtypechecked,\r\n );\r\n render($data, 'web', 'jgame/edit');\r\n } else {\r\n echo 'ๅ‚ๆ•ฐ้”™่ฏฏ <a href=\"?c=jgame&a=index\">่ฟ”ๅ›žๅˆ—่กจ</a>';\r\n exit();\r\n }\r\n }\r\n\r\n public function editPro()\r\n {\r\n $cids = Request::getParam('cids');\r\n $gid = Request::getParam('gid');\r\n $datatype = Request::getParam('datatype');\r\n if (!empty($cids) && !empty($gid) && !empty($datatype)) {\r\n $jchannelmodel = new jChannelModel();\r\n $jchannellists = $jchannelmodel->select(\"cid,ckey\", array(\r\n 'isshow'=>1\r\n ), '', '', '');\r\n $ckeys = array();\r\n if ($jchannellists) {\r\n $ckeys = array_column($jchannellists, 'ckey', 'cid');\r\n }\r\n //ๆทปๅŠ ๆธ ้“ๆธธๆˆๆ•ฐๆฎ\r\n $jchannelgame = new jChannelGameModel();\r\n $jchannelgamelists = $jchannelgame->select('cid',array(\r\n 'gid' => $gid\r\n ), '', '', '');\r\n if($jchannelgamelists){\r\n foreach 
($jchannelgamelists as $jchannelgamelist){\r\n if(!in_array($jchannelgamelist['cid'],$cids)){\r\n $jchannelgame->delete(array(\r\n 'cid' => $jchannelgamelist['cid'],\r\n 'gid' => $gid,\r\n 'datatype' => $datatype\r\n ));\r\n }\r\n }\r\n }\r\n if($datatype==1){\r\n foreach ($cids as $cid) {\r\n $cgdata = array(\r\n 'cid' => $cid,\r\n 'gid' => $gid,\r\n 'datatype' => $datatype\r\n );\r\n if (isset($ckeys[$cid])) {\r\n $keygamename = Request::getParam($ckeys[$cid] . 'gamename');\r\n if ($keygamename) {\r\n $cgdata['gamename'] = $keygamename;\r\n }\r\n }\r\n $jchannelgame->addData($cgdata);\r\n }\r\n //ๆทปๅŠ ๆธธๆˆๆ•ฐๆฎ\r\n $jgame = new jGameModel();\r\n $gdata = array();\r\n //็ผฉ็•ฅๅ›พๅœฐๅ€\r\n $litpic = Request::getParam('litpic');\r\n if (!empty($litpic)) {\r\n $gdata['litpic'] = $litpic;\r\n }\r\n //ๆธธๆˆๅˆ†็ฑปๅœฐๅ€\r\n $gametypeurl = Request::getParam('gametypeurl');\r\n if (!empty($gametypeurl)) {\r\n $gdata['gametypeurl'] = $gametypeurl;\r\n }\r\n //่ต„่ฎฏๅœฐๅ€\r\n $listnewsurl = Request::getParam('listnewsurl');\r\n if (!empty($listnewsurl)) {\r\n $gdata['listnewsurl'] = $listnewsurl;\r\n }\r\n //ๆ”ป็•ฅๅœฐๅ€\r\n $liststrategyurl = Request::getParam('liststrategyurl');\r\n if (!empty($liststrategyurl)) {\r\n $gdata['liststrategyurl'] = $liststrategyurl;\r\n }\r\n //่ง†้ข‘ๅœฐๅ€\r\n $listvideourl = Request::getParam('listvideourl');\r\n if (!empty($listvideourl)) {\r\n $gdata['listvideourl'] = $listvideourl;\r\n }\r\n //็คผๅŒ…ๅœฐๅ€\r\n $listgifturl = Request::getParam('listgifturl');\r\n if (!empty($listgifturl)) {\r\n $gdata['listgifturl'] = $listgifturl;\r\n }\r\n //ๆŽ’่กŒ\r\n $rank = Request::getParam('rank');\r\n if (!empty($rank)) {\r\n $gdata['rank'] = $rank;\r\n }\r\n //ๆŽ’่กŒ้“พๆŽฅ\r\n $rankurl = Request::getParam('rankurl');\r\n if (!empty($rankurl)) {\r\n $gdata['rankurl'] = $rankurl;\r\n }\r\n //ไบบๆฐ”\r\n $follow = Request::getParam('follow');\r\n if (!empty($follow)) {\r\n $gdata['follow'] = $follow;\r\n }\r\n //ๆธธๆˆ้™„ๅŠ ๅญ—ๆฎตjsonๅญ˜ๅ‚จ\r\n $extra = array();\r\n $jgamelist = $jgame->selectRow('extra', array('gid' => $gid));\r\n if ($jgamelist) {\r\n $extra = json_decode($jgamelist['extra'], true);\r\n }\r\n //ๆธธๆˆ็ฑปๅž‹\r\n $sougoupcgametype = Request::getParam('sougoupcgametype');\r\n if (!empty($sougoupcgametype)) {\r\n $extra['sougoupcgametype'] = $sougoupcgametype;\r\n }\r\n if ($extra) {\r\n $gdata['extra'] = json_encode($extra);\r\n }\r\n $res = $jgame->update($gdata, array(\r\n 'gid' => $gid\r\n ));\r\n if ($res !== false) {\r\n echo 'ไฟฎๆ”นๆˆๅŠŸ <a href=\"?c=jgame&a=index\">่ฟ”ๅ›žๅˆ—่กจ</a>';\r\n } else {\r\n echo 'ไฟฎๆ”นๅคฑ่ดฅ <a href=\"?c=jgame&a=index\">่ฟ”ๅ›žๅˆ—่กจ</a>';\r\n }\r\n }\r\n elseif ($datatype==2){\r\n $labelname = Request::getParam('labelname');\r\n $editflag = true;\r\n foreach ($cids as $cid) {\r\n $cgdata = array(\r\n 'cid' => $cid,\r\n 'gid' => $gid,\r\n 'gamename' => $labelname,\r\n 'datatype' => $datatype\r\n );\r\n $res = $jchannelgame->addData($cgdata);\r\n if($res===false){\r\n $editflag = false;\r\n break;\r\n }\r\n }\r\n if ($editflag) {\r\n echo 'ไฟฎๆ”นๆˆๅŠŸ <a href=\"?c=jgame&a=index\">่ฟ”ๅ›žๅˆ—่กจ</a>';\r\n } else {\r\n echo 'ไฟฎๆ”นๅคฑ่ดฅ <a href=\"?c=jgame&a=index\">่ฟ”ๅ›žๅˆ—่กจ</a>';\r\n }\r\n }\r\n } else {\r\n echo 'ๅ‚ๆ•ฐ้”™่ฏฏ <a href=\"?c=jgame&a=index\">่ฟ”ๅ›žๅˆ—่กจ</a>';\r\n }\r\n exit();\r\n }\r\n\r\n\r\n public function editgamestatus()\r\n {\r\n $gid = (int)Request::getParam('gid');\r\n $cid = (int)Request::getParam('cid');\r\n $status = (int)Request::getParam('status');\r\n if ($gid && $cid) {\r\n 
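// [editor's note] added comment: cid+gid identifies one binding row; only gamestatus is flipped (1 = enabled, 0 = disabled), nothing is deleted\r\n            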
$jchannelgamemodel = new jChannelGameModel();\r\n $errno = $jchannelgamemodel->update(\r\n array(\r\n 'gamestatus' => $status\r\n ),\r\n array(\r\n 'gid' => $gid,\r\n 'cid' => $cid,\r\n ));\r\n if ($errno) {\r\n $res = array('rs' => 1, 'msg' => 'ไฟฎๆ”นๆˆๅŠŸ');\r\n } else {\r\n $res = array('rs' => 2, 'msg' => 'ไฟฎๆ”นๅคฑ่ดฅ');\r\n }\r\n } else {\r\n $res = array('rs' => 2, 'msg' => 'ๅ‚ๆ•ฐ้”™่ฏฏ');\r\n }\r\n jsonEncode($res);\r\n }\r\n\r\n public function searchgame()\r\n {\r\n $searchtext = Request::getParam('searchtext');\r\n if ($searchtext) {\r\n $jgamemodel = new jGameModel();\r\n $result = $jgamemodel->getsearchgame($searchtext);\r\n if ($result) {\r\n $res = array('rs' => 1, 'msg' => $result);\r\n } else {\r\n $res = array('rs' => 2, 'msg' => 'ๆŸฅ่ฏขๅคฑ่ดฅ');\r\n }\r\n } else {\r\n $res = array('rs' => 2, 'msg' => 'ๅ‚ๆ•ฐไธ่ƒฝไธบ็ฉบ');\r\n }\r\n jsonEncode($res);\r\n }\r\n\r\n\r\n //่Žทๅ–ๆ–‡็ซ ๆ ็›ฎ\r\n public function getarctype()\r\n {\r\n global $GLOBALS;\r\n $typeid = Request::getParam('typeid');\r\n if ($typeid) {\r\n $jchannelgame = new jChannelGameModel();\r\n $jchannelgamecount = $jchannelgame->count(array(\r\n 'gid' => $typeid,\r\n 'datatype' => 2\r\n ));\r\n if($jchannelgamecount){\r\n $res = array('rs' => 3, 'msg' => '่ฏฅๆ ็›ฎๅทฒๆทปๅŠ ');\r\n }else{\r\n $url = \"http://article.joyme.\" . $GLOBALS['domain'] . \"/plus/channelapi.php\";\r\n $curl = new Curl();\r\n $result = $curl->Get($url, array(\r\n 'action' => 'searcharctype',\r\n 'typeid' => $typeid\r\n ));\r\n $result = json_decode($result, true);\r\n if ($result['rs'] == '1') {\r\n if($result['result']){\r\n $res = array('rs' => 1, 'msg' => $result['result']);\r\n }else{\r\n $res = array('rs' => 2, 'msg' => 'ๆŸฅ่ฏข็ป“ๆžœไธบ็ฉบ');\r\n }\r\n } else {\r\n $res = array('rs' => 2, 'msg' => 'ๆŸฅ่ฏขๅคฑ่ดฅ');\r\n }\r\n }\r\n } else {\r\n $res = array('rs' => 2, 'msg' => 'ๅ‚ๆ•ฐไธ่ƒฝไธบ็ฉบ');\r\n }\r\n jsonEncode($res);\r\n }\r\n\r\n\r\n private function getUptoken()\r\n {\r\n $bucket = $GLOBALS['config']['qiniu']['bucket'];\r\n return Qiniu_Utils::Qiniu_UploadToken($bucket);\r\n }\r\n\r\n}" }, { "alpha_fraction": 0.4956521689891815, "alphanum_fraction": 0.5009817481040955, "avg_line_length": 24.169116973876953, "blob_id": "df66359a7bd98300bde48aa88769ff661d595dcc", "content_id": "508b15f36b92a533c8bbe9ba7302d3fdca8bcae1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "PHP", "length_bytes": 3749, "license_type": "no_license", "max_line_length": 117, "num_lines": 136, "path": "/_lp/core/controller/core.class.php", "repo_name": "liu67224657/joyme-channel", "src_encoding": "UTF-8", "text": "<?php\r\n\r\nif( !defined('IN') ) die('bad request');\r\nuse Joyme\\qiniu\\Qiniu_ImageView;\r\n\r\nclass coreController \r\n{\r\n\tfunction __construct()\r\n\t{\r\n\r\n\t}\r\n //ๅˆคๆ–ญtoken ๆ˜ฏๅฆๆญฃ็กฎ๏ผŒ ้ป˜่ฎค่ฟ”ๅ›ž jsonๆ•ฐๆฎ๏ผ› ๅฆ‚ๆžœ jsonpไธบtrue ๅˆ™่ฟ”ๅ›žjsonp\r\n protected function checkToken($wikikey, $token, $callback = '') {\r\n\r\n if ($token !== $this->Protection($wikikey)) {\r\n $data = array('rs' => 1, 'msg' => \"token ้ชŒ่ฏๅคฑ่ดฅ\", 'result' => 'fail');\r\n if (strlen($callback) > 1) {\r\n echo $callback . \"([\" . json_encode($data) . 
\"])\";\r\n } else {\r\n echo json_encode($data);\r\n }\r\n exit;\r\n }\r\n }\r\n\r\n //ๅˆคๆ–ญ็ป“ๆžœ ๆ˜ฏๅฆๆญฃ็กฎ๏ผŒ ้ป˜่ฎค่ฟ”ๅ›ž jsonๆ•ฐๆฎ๏ผ› ๅฆ‚ๆžœ jsonpไธบtrue ๅˆ™่ฟ”ๅ›žjsonp\r\n protected function checkResult($data,$callback = '') {\r\n\r\n if($data){\r\n $data = is_array($data)?$data:'success';\r\n $data = array('rs' => 0, 'msg' => \"ๆ“ไฝœๆˆๅŠŸ\", 'result' => $data);\r\n }else{\r\n $data = array('rs' => 2, 'msg' => \"ๆ“ไฝœๅคฑ่ดฅ\", 'result' => 'fail');\r\n }\r\n if (strlen($callback) > 1) {\r\n echo $callback . \"([\" . json_encode($data) . \"])\";\r\n } else {\r\n echo json_encode($data);\r\n }\r\n exit;\r\n }\r\n}\r\n\r\n//ๆ“ไฝœๆ—ฅๅฟ—\r\nfunction addlog($btype,$stype,$opafter){\r\n global $secrectkey;\r\n $userinfo = explode(\"|\",$_COOKIE['t_jm_message']);\r\n $userid = $userinfo[2];\r\n $ip = $_SERVER[\"REMOTE_ADDR\"];\r\n $encrypt = md5($secrectkey.$userid);\r\n $url = \"tools.joyme.\". $GLOBALS['domain'].\"/log/addlog\";\r\n $data = \"userid=\".$userid.\"&btype=\".$btype.\"&stype=\".$stype.\"&ip=\".$ip.\"&encrypt=\".$encrypt.\"&opafter=\".$opafter;\r\n sendRequest($url,$data);\r\n}\r\n\r\n//ๅŠ ๅฏ†้ชŒ่ฏ\r\nfunction Encryption(){\r\n\r\n $info = func_get_args();\r\n //็บฆๅฎš็ง˜้’ฅ\r\n $arr = implode(\"\",$info);\r\n $key = \"zmsj&cz\";\r\n $dense = md5($arr.$key);\r\n return $dense;\r\n}\r\n\r\n//่ฎก็ฎ—ๅ›พ็‰‡ๅœฐๅ€\r\nfunction jget_save_path($file_name,$lvl=2){\r\n $name =$file_name;\r\n $levels = $lvl;\r\n if ( $levels == 0 ) {\r\n return '';\r\n } else {\r\n $hash = md5( $name );\r\n $path = '';\r\n for ( $i = 1; $i <= $levels; $i++ ) {\r\n $path .= substr( $hash, 0, $i ) . '/';\r\n }\r\n return $path;\r\n }\r\n}\r\n\r\n//่Žทๅ–็ผฉ็•ฅๅ›พurl\r\nfunction getImageThumbUrl($baseUrl,$width,$height){\r\n //็”ŸๆˆfopUrl\r\n $imgView = new Qiniu_ImageView;\r\n $imgView->Mode = 1;\r\n $imgView->Width = $width;\r\n $imgView->Height = $height;\r\n $imgViewUrl = $imgView->MakeRequest($baseUrl);\r\n return $imgViewUrl;\r\n}\r\n\r\n//curl่ฏทๆฑ‚\r\nfunction sendRequest($url,$data = false){\r\n if(empty($url)){\r\n return false;\r\n }\r\n $ch = curl_init();\r\n curl_setopt($ch, CURLOPT_URL, $url);\r\n curl_setopt($ch, CURLOPT_TIMEOUT, 60);\r\n curl_setopt($ch, CURLOPT_HEADER, 0);\r\n if($data){\r\n curl_setopt($ch, CURLOPT_RETURNTRANSFER, 1);\r\n curl_setopt ( $ch, CURLOPT_POSTFIELDS, $data );\r\n }\r\n curl_setopt($ch, CURLOPT_RETURNTRANSFER, 1);\r\n $result = trim(curl_exec($ch));\r\n curl_close($ch);\r\n return $result;\r\n}\r\n\r\n//ๅฎžไพ‹ๅŒ–Model\r\nfunction M($model_name){\r\n //ๅ…ˆๅˆคๆ–ญ็ฑปๆ˜ฏๅฆๅญ˜ๅœจ\r\n if(class_exists($model_name)){\r\n $_model = new $model_name();\r\n return $_model;\r\n }\r\n $suffix_name = '.class.php';\r\n $model_file = AROOT . 'model' . DS . 
$model_name.$suffix_name;\r\n if(file_exists($model_file)){\r\n include_once($model_file);\r\n $_model = new $model_name();\r\n return $_model;\r\n }\r\n}\r\n\r\n\r\nfunction jsonEncode($array){\r\n if(empty($array)){\r\n return '';\r\n }\r\n echo json_encode($array);\r\n exit();\r\n}\r\n\r\n\r\n\r\n" }, { "alpha_fraction": 0.565152645111084, "alphanum_fraction": 0.5698684453964233, "avg_line_length": 37.01886749267578, "blob_id": "c3524366441bbe378c285ebf16e1fbaf2db98c45", "content_id": "a758c4ddf3bfcbd73f5df8c5df2a820b3dc82b5b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "PHP", "length_bytes": 4193, "license_type": "no_license", "max_line_length": 112, "num_lines": 106, "path": "/controller/template.class.php", "repo_name": "liu67224657/joyme-channel", "src_encoding": "UTF-8", "text": "<?php\n/**\n * Created by PhpStorm.\n * User: xinshi\n * Date: 2015/10/29\n * Time: 12:19\n */\nif( !defined('IN') ) die('bad request');\ninclude_once( AROOT . 'controller'.DS.'app.class.php' );\nuse Joyme\\core\\Request;\n\nclass templateController extends appController{\n\n function index(){\n\n global $GLOBALS;\n $wikimodel = M('joymeWikiModel');\n $channemode = M('joymeChannelModel');\n $tempmodel = M('joymeTemplateModel');\n $pb_show_num = 50; //ๆฏ้กตๆ˜พ็คบๆกๆ•ฐ\n $pb_page = Request::get('pb_page',1); //่Žทๅ–ๅฝ“ๅ‰้กต็ \n $conditions['wiki_name'] = Request::getParam('wiki_name');\n $conditions['wiki_channe_name'] = Request::getParam('wiki_channe_name');\n $conditions['is_home'] = Request::getParam('is_home');\n $conditions['is_pc'] = Request::getParam('is_pc');\n $total = $tempmodel->allTemplateList($conditions,true);\n $data['item'] = $tempmodel->allTemplateList($conditions,false,$pb_page,$pb_show_num);\n $page = M('pageModel');\n $page->mainPage(array('total' => $total,'perpage'=>$pb_show_num,'nowindex'=>$pb_page,'pagebarnum'=>10));\n $data['page_str'] = $page->show(2,$conditions);\n $data['static_url'] = $GLOBALS['static_url'];\n $data['wiki_name'] = $wikimodel->allWikiName();\n $data['channe_name'] = $channemode->allChannelName();\n $data['param'] = $conditions;\n render($data,'web','template/template_list');\n }\n\n //ๆŸฅ็œ‹ๆจกๆฟๅ†…ๅฎน\n function showTemplatePage(){\n\n $temp_id = Request::get('temp_id'); //่Žทๅ–ๆจกๆฟID\n $templateContext = Request::get('templateContext');//้ข„่งˆๅ†…ๅฎน\n $data['callback'] = Request::get('callback');//ๅ›ž่ทณๅœฐๅ€\n if(empty($temp_id) && $templateContext){\n $data['item']['template_context'] = $templateContext;\n }else{\n $tempmodel = M('joymeTemplateModel');\n $data['item'] = $tempmodel->selectInfoByTempId($temp_id);\n }\n render($data,'web','template/preview');\n }\n\n //ๅˆ›ๅปบๆจกๆฟ\n function createTemplatePage(){\n\n global $GLOBALS;\n $channemode = M('joymeChannelModel');\n $wikimodel = M('joymeWikiModel');\n $data['wiki_name'] = $wikimodel->allWikiName();\n $data['channe_name'] = $channemode->allChannelName();\n $data['static_url'] = $GLOBALS['static_url'];\n render($data,'web','template/create_template');\n }\n\n //ๅค„็†ๅˆ›ๅปบๆจกๆฟๆ•ฐๆฎ\n function addtemplateData(){\n $update_id = Request::post('update_id');\n $data['template_name'] = Request::post('template_name'); //ๆจกๆฟๅ็งฐ\n $data['wiki'] = Request::post('wiki_name'); //ๆ‰€ๅฑžwiki\n $data['context_path'] = Request::post('is_pc'); //ๆ˜ฏๅฆPC\n $data['channel'] = Request::post('wiki_channel_name'); //ๆธ ้“\n $data['is_index'] = Request::post('is_home'); //ๅญ้กต/้ฆ–้กต\n $data['template_context'] = Request::post('templateContext'); //ๆจกๆฟๅ†…ๅฎน\n 
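// [editor's note] added comment: templateContext is the full HTML body of the template and is stored as-is; is_enable on the next line defaults to 1 (enabled)\n        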
$data['is_enable'] = Request::post('is_enable',1); //ๆจกๆฟๅ†…ๅฎน\n $data['create_time'] = date('Y-m-d H:i:s ',time());\n $tempmodel = M('joymeTemplateModel');\n if($update_id){\n $result = $tempmodel->updateTempById($data,$update_id);\n }else{\n $result = $tempmodel->insertTemplate($data);\n }\n if($result){\n echo 'ๆ“ไฝœๆˆๅŠŸ <a href=\"?c=template&a=index\">่ฟ”ๅ›žๅˆ—่กจ</a>';\n }else{\n echo 'ๆ“ไฝœๅคฑ่ดฅ <a href=\"?c=template&a=createTemplatePage\">่ฟ”ๅ›žๆทปๅŠ </a>';\n }\n }\n\n //็ผ–่พ‘\n function editTemplate(){\n\n global $GLOBALS;\n $template_id = Request::get('template_id');\n if(empty($template_id)){\n return '';\n }\n $tempmodel = M('joymeTemplateModel');\n $channemode = M('joymeChannelModel');\n $wikimodel = M('joymeWikiModel');\n $data['wiki_name'] = $wikimodel->allWikiName();\n $data['channe_name'] = $channemode->allChannelName();\n $data['item'] = $tempmodel->selectInfoByTempId($template_id);\n $data['static_url'] = $GLOBALS['static_url'];\n render($data,'web','template/edit_template');\n }\n}" }, { "alpha_fraction": 0.5032051205635071, "alphanum_fraction": 0.5224359035491943, "avg_line_length": 22.05128288269043, "blob_id": "39d76eefb050b21b7fa5b507aca0a8d9d9ecf1fa", "content_id": "fcd4a687a48f61ef966752d1ca017bd233925db3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "PHP", "length_bytes": 944, "license_type": "no_license", "max_line_length": 99, "num_lines": 39, "path": "/model/cmsModel.class.php", "repo_name": "liu67224657/joyme-channel", "src_encoding": "UTF-8", "text": "<?php\r\n\r\n/**\r\n * Description:\r\n * Author: gradydong\r\n * Date: 2017/4/19\r\n * Time: 14:20\r\n * Copyright: Joyme.com\r\n */\r\nif (!defined('IN')) {\r\n die('bad request');\r\n}\r\nuse Joyme\\db\\JoymeModel;\r\n\r\nclass cmsModel extends JoymeModel\r\n{\r\n public $tableName = 'dede_addon17_lanmu';\r\n\r\n public function __construct()\r\n {\r\n $this->db_config = array(\r\n 'hostname' => $GLOBALS['config']['rds']['db_host'],\r\n 'username' => $GLOBALS['config']['rds']['db_user'],\r\n 'password' => $GLOBALS['config']['rds']['db_password'],\r\n 'database' => 'article_cms'\r\n );\r\n parent::__construct();\r\n }\r\n\r\n public function seltable($tablename){\r\n $this->tableName = $tablename;\r\n }\r\n\r\n //ๆ นๆฎid่Žทๅ–body\r\n public function getBodyByIds($ids){\r\n $rs = $this->select(array('aid','body'), $where = array('aid'=>array('in',$ids)),'',10000);\r\n return $rs;\r\n }\r\n}" }, { "alpha_fraction": 0.4023605287075043, "alphanum_fraction": 0.409155935049057, "avg_line_length": 52.86274337768555, "blob_id": "1709ce973208dc6f4a4e88bef2a38ef22490cb1d", "content_id": "4cae6a68bc5e45ac7042a64ef32730c64b9475f7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 2848, "license_type": "no_license", "max_line_length": 296, "num_lines": 51, "path": "/static/script/jgame.js", "repo_name": "liu67224657/joyme-channel", "src_encoding": "UTF-8", "text": "/**\r\n * Created by kexuedong on 2017/5/3.\r\n */\r\n$(function () {\r\n $(\"#selectgname\").click(function () {\r\n var html = '<div class=\"clearfix\"><input type=\"text\" name=\"searchtext\" id=\"searchtext\"> <i id=\"view_searchtext\" style=\"margin-left: 60px;\">ๆŸฅ่ฏข</i><div class=\"clearfix\"><ul class=\"searchresults\"></ul></div><hr><div class=\"clearfix selecteddiv\"><ul class=\"selectedresult\"></ul></div></div>';\r\n layer.alert('', {\r\n title: \"ๆŸฅ่ฏขๆธธๆˆ\",\r\n content: html,\r\n area: [\"400px\", \"500px\"],\r\n success: 
function(layero, index){\r\n $(\"#view_searchtext\").click(function () {\r\n var searchtext = $(\"#searchtext\").val();\r\n if(searchtext==''){\r\n alert(\"ๆธธๆˆๅ็งฐไธ่ƒฝไธบ็ฉบ\");\r\n return false;\r\n }\r\n $.post('/index.php', {c:'jgame', a:'searchgame', searchtext:searchtext}, function(res){\r\n res = JSON.parse(res);\r\n console.log(res);\r\n $(\".searchresults\").html(\" \");\r\n if(res.rs == 1){\r\n var ulhtml = '';\r\n for (x in res.msg) {\r\n console.log(\"x\",x,res.msg[x]);\r\n var ht = '<li><label style=\"display: inline-block;\" class=\"searchli\" data-gameid=\"'+res.msg[x].gameId+'\" data-gamename=\"'+res.msg[x].gameName+'\">'+res.msg[x].gameName+'</label></li>';\r\n ulhtml+=ht;\r\n }\r\n $(\".searchresults\").append(ulhtml);\r\n $(\".searchli\").click(function () {\r\n var gameid = $(this).data(\"gameid\");\r\n $(\"#gid\").val(gameid);\r\n var gamename = $(this).data(\"gamename\");\r\n $(\"#hiddengamename\").val(gamename);\r\n var ht = '<li><label style=\"display: inline-block;\" class=\"selectedli\" data-gameid=\"'+gameid+'\" data-gamename=\"'+gamename+'\">'+gamename+'</label></li>';\r\n $(\".selectedresult\").html(ht);\r\n });\r\n }else {\r\n var msght = '<span style=\"display:block;text-align: center;padding-right:25px;margin-top: 20px;\">ๆธธๆˆๅบ“ไธญๆฒกๆœ‰ๆŸฅๅˆฐ็›ธๅ…ณๆธธๆˆ</span>';\r\n $(\".searchresults\").append(msght);\r\n }\r\n });\r\n });\r\n }\r\n }, function (index) {\r\n var gamename = $(\"#hiddengamename\").val();\r\n $(\"#gamename\").val(gamename);\r\n layer.close(index);\r\n });\r\n });\r\n});" }, { "alpha_fraction": 0.6005024909973145, "alphanum_fraction": 0.6281406879425049, "avg_line_length": 16.34782600402832, "blob_id": "24c77c4ae1eb6a11641c0d6ee09a138b139825af", "content_id": "dfdfa197e663ebfad03050033c34c8072cae95ab", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "PHP", "length_bytes": 418, "license_type": "no_license", "max_line_length": 62, "num_lines": 23, "path": "/controller/sourceAbstract.class.php", "repo_name": "liu67224657/joyme-channel", "src_encoding": "UTF-8", "text": "<?php\nif (!defined('IN')) die('bad request');\n/**\n * Created by PhpStorm.\n * User: pengzhang\n * Date: 2017/4/13\n * Time: ไธŠๅˆ10:14\n */\ninclude_once(CROOT . 'controller' . DS . 'core.class.php');\n\nuse Joyme\\core\\Request;\n\nabstract class sourceAbstractController extends coreController\n{\n //ๆธ ้“ๅญ—ๆฎต\n public $fields = array();\n\n /**\n * ๆŽฅๅฃๆŸฅ่ฏข\n */\n abstract public function query($cid);\n\n}" }, { "alpha_fraction": 0.5174314379692078, "alphanum_fraction": 0.5197076797485352, "avg_line_length": 44.124324798583984, "blob_id": "65054b2b76c54fbdfc9d2cf42beaba1ffab4c8f8", "content_id": "fd186e09ef181d6c8c3157f71c5d1f47aa73d98e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "PHP", "length_bytes": 8649, "license_type": "no_license", "max_line_length": 122, "num_lines": 185, "path": "/controller/lable.class.php", "repo_name": "liu67224657/joyme-channel", "src_encoding": "UTF-8", "text": "<?php\n/**\n * Created by PhpStorm.\n * User: xinshi\n * Date: 2015/10/29\n * Time: 12:12\n */\nif( !defined('IN') ) die('bad request');\ninclude_once( AROOT . 
'controller'.DS.'app.class.php' );\nuse Joyme\\core\\Request;\n\nclass lableController extends appController{\n\n    //้ฆ–้กตๆ˜พ็คบ\n    public function index(){\n\n        global $GLOBALS;\n        $wikimodel = M('joymeWikiModel');\n        $channemode = M('joymeChannelModel');\n        $itemmodel = M('joymeItemModel');\n        $pb_show_num = 50; //ๆฏ้กตๆ˜พ็คบๆกๆ•ฐ\n        $pb_page = Request::get('pb_page',1); //่Žทๅ–ๅฝ“ๅ‰้กต็ \n        $conditions['wiki_name'] = Request::getParam('wiki_name');\n        $conditions['wiki_channe_name'] = Request::getParam('wiki_channe_name');\n        $conditions['is_home'] = Request::getParam('is_home');\n        $conditions['wiki_key'] = Request::getParam('wiki_key');\n        $conditions['is_pc'] = Request::getParam('is_pc');\n        $total = $itemmodel->allItemList($conditions,true);\n        $data['item'] = $itemmodel->allItemList($conditions,false,$pb_page,$pb_show_num);\n        $page = M('pageModel');\n        $page->mainPage(array('total' => $total,'perpage'=>$pb_show_num,'nowindex'=>$pb_page,'pagebarnum'=>10));\n        $data['page_str'] = $page->show(2,$conditions);\n        $data['static_url'] = $GLOBALS['static_url'];\n        $data['wiki_name'] = $wikimodel->allWikiName();\n        $data['channe_name'] = $channemode->allChannelName();\n        $data['param'] = $conditions;\n        render($data,'web','digital/lable_list');\n    }\n\n    //ๆทปๅŠ ๆ ‡็ญพ้กต้ขๆ˜พ็คบ\n    function showAddLablePage(){\n\n        global $GLOBALS;\n        $wikimodel = M('joymeWikiModel');\n        $channemode = M('joymeChannelModel');\n        $data['wiki_name'] = $wikimodel->allWikiName();\n        $data['channe_name'] = $channemode->allChannelName();\n        $data['static_url'] = $GLOBALS['static_url'];\n        render($data,'web','digital/add_lable');\n    }\n\n    //ๅค„็†ๆทปๅŠ ้กต้ขๆ•ฐๆฎ\n    function addLableData(){\n\n        $update_id = Request::post('update_id'); //ๆ˜ฏๅฆๆ˜ฏไฟฎๆ”น\n        $data['context_path'] = Request::post('is_pc'); //PC/ๆ‰‹ๆœบ\n        $data['wiki'] = Request::post('wiki_name'); //ๆ‰€ๅฑžwiki\n        $data['channel'] = Request::post('wiki_channel_name'); //ๆ‰€ๅฑžๆธ ้“\n        $data['is_index'] = Request::post('is_home'); //ๆ˜ฏๅฆ้ฆ–้กต\n        $isDefaultKey = Request::post('isDefaultKey'); //้ป˜่ฎคitem key\n        if($isDefaultKey==1){\n            $data['item_key'] = Request::post('defaultIndexKey'); //้ป˜่ฎคitem key\n        }else{\n            $data['item_key'] = Request::post('set_item_key'); //่‡ชๅฎšไน‰item key\n        }\n        $data['item_description'] = Request::post('describe_item_key'); //key็š„ๆ่ฟฐ\n        $item_type = Request::post('itemType'); //ๆ‰€ๅฑž็ฑปๅž‹\n        $data['item_type'] = $item_type;\n        if($item_type=='image'){\n            //ๅ›พ็‰‡\n            $json['imageUrl'] = Request::post('imageUrl'); //ๅ›พ็‰‡ๅœฐๅ€\n            $json['imageLinkUrl'] = Request::post('imageLinkUrl'); //ๅ›พ็‰‡ๅค–้“พ\n            $json['imageIsBlank'] = Request::post('imageIsBlank'); //ๆ˜ฏๅฆๆ–ฐ้กต้ขๆ‰“ๅผ€\n            $json['imageAlt'] = Request::post('imageAlt'); //ๅ›พ็‰‡ALTไฟกๆฏ\n            $json['imageWidth'] = Request::post('imageWidth'); //ๅ›พ็‰‡ๅฎฝๅบฆไฟกๆฏ\n            $json['imageHeight'] = Request::post('imageHeight'); //ๅ›พ็‰‡้ซ˜ๅบฆไฟกๆฏ\n            $json['imageId'] = Request::post('imageId'); //ๅ›พ็‰‡ID\n            $json['imageClass'] = Request::post('imageClass'); //ๅ›พ็‰‡Class\n            $str = '';\n            $hasLink = !empty($json['imageLinkUrl']) && !empty($json['imageUrl']);\n            if($hasLink){\n                $str.='<a href=\"'.$json['imageLinkUrl'].'\"';\n                if(!empty($json['imageIsBlank'])){\n                    $str.=' target=\"_blank\"';\n                }\n                $str.='>';\n            }\n            $str.='<img src=\"'.$json['imageUrl'].'\" ';\n            if(!empty($json['imageId'])){\n                $str.= 'id=\"'.$json['imageId'].'\" ';\n            }\n            if(!empty($json['imageClass'])){\n                $str.=' class=\"'.$json['imageClass'].'\" ';\n            }\n            $str.=' width=\"'.$json['imageWidth'].'\" height=\"'.$json['imageHeight'].'\" alt=\"'.$json['imageAlt'].'\" />';\n            if($hasLink){\n                $str.='</a>';\n            }\n            
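// [editor's note] added comment: item_properties keeps the raw submitted fields as JSON, while item_context stores the assembled HTML fragment\n            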
$data['item_properties'] = json_encode($json);\n            $data['item_context'] = $str;\n        }elseif($item_type=='textlink'){\n            //ๆ–‡ๆœฌ้“พ\n            $json['texturl'] = Request::post('texturl'); //ๆ–‡ๆœฌ้“พๆŽฅ\n            $json['textInfo'] = Request::post('textInfo'); //ๆ–‡ๆœฌไฟกๆฏ\n            $json['textIsBlank'] = Request::post('textIsBlank'); //ๆ˜ฏๅฆๆ–ฐ้กต้ขๆ‰“ๅผ€\n            $json['textId'] = Request::post('textId'); //ๆ–‡ๆœฌID\n            $json['textClass'] = Request::post('textClass'); //ๆ–‡ๆœฌclass\n            $str = '<a';\n            if(!empty($json['textId'])){\n                $str.= ' id=\"'.$json['textId'].'\"';\n            }\n            if(!empty($json['textClass'])){\n                $str.= ' class=\"'.$json['textClass'].'\"';\n            }\n            if(!empty($json['textIsBlank'])){\n                $str.= ' target=\"_blank\"';\n            }\n            $str.= ' href=\"'.$json['texturl'].'\">'.$json['textInfo'].'</a>';\n            $data['item_properties'] = json_encode($json);\n            $data['item_context'] = $str;\n        }elseif($item_type=='flash'){\n            //flash\n            $json['flashUrl'] = Request::post('flashUrl'); //flashURL\n            $json['flashWidth'] = Request::post('flashWidth'); //flashๅฎฝๅบฆ\n            $json['flashHeight'] = Request::post('flashHeight'); //flash้ซ˜ๅบฆ\n            $str = '<object width=\"'.$json['flashWidth'].'\" height=\"'.$json['flashHeight'].'\">';\n            $str.='<param name=\"movie\" value=\"'.$json['flashUrl'].'\"></param> ';\n            $str.='<param name=\"flashvars\">';\n            $str.='<param name=\"allowFullScreen\" value=\"true\"></param>';\n            $str.='<param name=\"allowscriptaccess\" value=\"always\"></param>';\n            $str.='<embed src=\"'.$json['flashUrl'].'\" type=\"application/x-shockwave-flash\"';\n            $str.=' allowscriptaccess=\"always\" allowfullscreen=\"true\"></embed>';\n            $str.='</object>';\n            $data['item_context'] = htmlspecialchars($str);\n            $data['item_properties'] = json_encode($json);\n        }elseif($item_type=='iframe'){\n            //iframe\n            $json['iframeUrl'] = Request::post('iframeUrl'); //iframeURL\n            $json['iframeHeight'] = Request::post('iframeHeight'); //iframe้ซ˜ๅบฆ\n            $json['iframeWidth'] = Request::post('iframeWidth'); //iframeๅฎฝๅบฆ\n            $json['iframeId'] = Request::post('iframeId'); //iframeID\n            $json['iframeClass'] = Request::post('iframeClass'); //iframeClass\n            $str = '<iframe ';\n            if(!empty($json['iframeId'])){\n                $str.=' id=\"'.$json['iframeId'].'\"';\n            }elseif(!empty($json['iframeClass'])){\n                $str.=' class=\"'.$json['iframeClass'].'\"';\n            }\n            $str.=' src=\"'.$json['iframeUrl'].'\" width=\"'.$json['iframeWidth'].'\" height=\"'.$json['iframeHeight'].'\"></iframe>';\n            $data['item_context'] = htmlspecialchars($str);\n            $data['item_properties'] = json_encode($json);\n        }elseif($item_type=='HTML'){\n            //HTML\n            $json['htmlContext'] = Request::post('htmlContext'); //htmlContext\n            $data['item_context'] = $json['htmlContext'];\n            $data['item_properties'] = json_encode($json);\n        }\n        $data['create_date'] = date('Y-m-d H:i:s ',time());\n        $model = M('joymeItemModel');\n        if(!empty($update_id)){\n            $result = $model->updateItemById($data,$update_id);\n        }else{\n            $result = $model->insertItem($data);\n        }\n        if($result){\n            echo 'ๆ“ไฝœๆˆๅŠŸ! <a href=\"?c=lable&a=index\">่ฟ”ๅ›žๅˆ—่กจ</a>';\n        }else{\n            echo 'ๆ“ไฝœๅคฑ่ดฅ! 
<a href=\"?c=lable&a=showAddLablePage\">่ฟ”ๅ›žๆทปๅŠ </a>';\n }\n exit;\n }\n\n //ๆ˜พ็คบ็ผ–่พ‘้กต\n function showEditPage(){\n\n global $GLOBALS;\n\n $itemId = Request::get('item_id'); //htmlContext\n if(empty($itemId)){\n return ;\n }\n $model = M('joymeItemModel');\n $wikimodel = M('joymeWikiModel');\n $channemode = M('joymeChannelModel');\n $data['wiki_name'] = $wikimodel->allWikiName();\n $data['channe_name'] = $channemode->allChannelName();\n $data['wiki_name'] = $wikimodel->allWikiName();\n $data['static_url'] = $GLOBALS['static_url'];\n $dataInfo = $model->selectInfoByItemId($itemId);\n $data['item'] = $dataInfo;\n render($data,'web','digital/edit_lable');\n }\n}" }, { "alpha_fraction": 0.5619596838951111, "alphanum_fraction": 0.5682997107505798, "avg_line_length": 35.16666793823242, "blob_id": "0b23526de455ac90a64749ac1ac20296e725a85d", "content_id": "15cfb3efebaff7c9b6fd4971e1d0494d56463d0c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "PHP", "length_bytes": 1773, "license_type": "no_license", "max_line_length": 112, "num_lines": 48, "path": "/controller/feedback.class.php", "repo_name": "liu67224657/joyme-channel", "src_encoding": "UTF-8", "text": "<?php\nif( !defined('IN') ) die('bad request');\ninclude_once( AROOT . 'controller'.DS.'app.class.php' );\nuse Joyme\\core\\Request;\n\nclass feedbackController extends appController{\n\n function index(){\n\n global $GLOBALS;\n $dopinionmodel = M('wikiDopinionModel');\n $wikimodel = M('joymeWikiModel');\n $pb_show_num = 50; //รฟาณ๏ฟฝ๏ฟฝสพ๏ฟฝ๏ฟฝ๏ฟฝ๏ฟฝ\n $pb_page = Request::get('pb_page',1); //๏ฟฝ๏ฟฝศก๏ฟฝ๏ฟฝวฐาณ๏ฟฝ๏ฟฝ\n $conditions['wiki_name'] = Request::getParam('wiki_name');\n $conditions['remove_state'] = Request::getParam('remove_state');\n $total = $dopinionmodel->allOpinionList($conditions,true);\n $data['item'] = $dopinionmodel->allOpinionList($conditions,false,$pb_page,$pb_show_num);\n $page = M('pageModel');\n $page->mainPage(array('total' => $total,'perpage'=>$pb_show_num,'nowindex'=>$pb_page,'pagebarnum'=>10));\n $data['page_str'] = $page->show(2,$conditions);\n $data['static_url'] = $GLOBALS['static_url'];\n $data['wiki_name'] = $wikimodel->allWikiName();\n $data['param'] = $conditions;\n render($data,'web','feedback/feedbacklist');\n }\n\n //๏ฟฝ๏ฟฝ๏ฟฝ๏ฟฝ\n function removeState(){\n\n $remove_state = Request::getParam('remove_state');\n $opinion_id = Request::getParam('opinion_id');\n if(!in_array($remove_state,array(0,1)) || empty($opinion_id)){\n return ;\n }\n\n if(intval($remove_state) == 0){\n $remove_state = 1;\n }else{\n $remove_state = 0;\n }\n\n $dopinionmodel = M('wikiDopinionModel');\n $dopinionmodel->updateState($remove_state,$opinion_id);\n $url = 'http://'.$_SERVER['HTTP_HOST'].'?c=feedback&a=index';\n header(\"Location:$url\");\n }\n}" }, { "alpha_fraction": 0.4225113093852997, "alphanum_fraction": 0.4383484125137329, "avg_line_length": 19.32183837890625, "blob_id": "fe1eecaee8b12457471a814d21ce01e900955818", "content_id": "3996b048aabaede4b9bb983aab4cc24cc156754d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "PHP", "length_bytes": 1856, "license_type": "no_license", "max_line_length": 67, "num_lines": 87, "path": "/model/douyuHezuoModel.class.php", "repo_name": "liu67224657/joyme-channel", "src_encoding": "UTF-8", "text": "<?php if (!defined('IN')) {\n die('bad request');\n}\n\n/**\n * Description of BaiduHezuoModel\n * @author ISLANDER\n * @datetime 2016-9-19 18:58:03\n */\n\n/**\n * Description of BaiduHezuoModel\n *\n * @author 
wenleihou\n */\nuse Joyme\\db\\JoymeModel;\n\nclass douyuHezuoModel extends JoymeModel\n{\n\n public $tableName = 'zhibo_douyu';\n\n public function __construct()\n {\n $this->db_config = array(\n 'hostname' => $GLOBALS['config']['rds']['db_host'],\n 'username' => $GLOBALS['config']['rds']['db_user'],\n 'password' => $GLOBALS['config']['rds']['db_password'],\n 'database' => $GLOBALS['config']['rds']['db_name']\n );\n parent::__construct();\n }\n\n public function getTotal($where)\n {\n return $this->count($where);\n }\n\n public function insertData($data = array())\n {\n return $this->insert($data);\n }\n\n public function getDataById($id = 0)\n {\n return $this->selectRow(\"*\", array(\n 'id'=>$id\n ));\n }\n\n public function updateData($data = array())\n {\n if (empty($data['id'])) {\n return false;\n }\n return $this->update($data, array(\n 'id' => $data['id']\n ));\n }\n\n public function getMainCatData()\n {\n return array(\n '1'=>'่ต„ๆ–™',\n '2'=>'่ต„่ฎฏ',\n '3'=>'ๆ”ป็•ฅ',\n '4'=>'่ง†้ข‘',\n '5'=>'็คผๅŒ…',\n );\n }\n\n public function getSonCatData()\n {\n return array(\n '1' => 'ๆœ€ๆ–ฐ่ต„่ฎฏ',\n '2' => 'ๆœ€ๆ–ฐๆ”ป็•ฅ',\n '3' => '่‹ฑ้›„ๆ”ป็•ฅ',\n '4' => 'ๆ”ป็•ฅๆŠ€ๅทง',\n '5' => 'ๅ‡บ่ฃ…ๆ”ป็•ฅ',\n '6' => 'ๆœ€ๆ–ฐ่ง†้ข‘',\n '7' => '่‹ฑ้›„่ง†้ข‘',\n '8' => '่งฃ่ฏด่ง†้ข‘',\n '9' => 'ๅ…ถไป–',\n );\n }\n\n}\n" }, { "alpha_fraction": 0.6211180090904236, "alphanum_fraction": 0.6521739363670349, "avg_line_length": 15.149999618530273, "blob_id": "3fa41aa8d960a68e427265847f101e71ece48a88", "content_id": "a6b2c0a66ba959a459ff3961fb471bedbb773e9b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "PHP", "length_bytes": 326, "license_type": "no_license", "max_line_length": 59, "num_lines": 20, "path": "/controller/test.class.php", "repo_name": "liu67224657/joyme-channel", "src_encoding": "UTF-8", "text": "<?php\n\nif (!defined('IN')) die('bad request');\n/**\n * Created by PhpStorm.\n * User: pengzhang\n * Date: 2017/4/12\n * Time: ไธ‹ๅˆ4:05\n */\ninclude_once(CROOT . 'controller' . DS . 'core.class.php');\n\nuse Joyme\\core\\Request;\nuse Joyme\\core\\Log;\n\nclass testController extends coreController\n{\n public function index(){\n\n }\n}" }, { "alpha_fraction": 0.39900559186935425, "alphanum_fraction": 0.4083281457424164, "avg_line_length": 24.409835815429688, "blob_id": "e58609e5520a94e0b9445b4d2b6183b61f70d0dd", "content_id": "372c9cdb03eef3359264824db2928912d77626a7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "PHP", "length_bytes": 1653, "license_type": "no_license", "max_line_length": 72, "num_lines": 61, "path": "/controller/baiduoriginalsource.class.php", "repo_name": "liu67224657/joyme-channel", "src_encoding": "UTF-8", "text": "<?php\r\n\r\n/**\r\n * Description:็™พๅบฆๅŽŸๅˆ›ๆŽฅๅฃๆไบค\r\n * Author: gradydong\r\n * Date: 2017/6/30\r\n * Time: 16:46\r\n * Copyright: Joyme.com\r\n */\r\n\r\nif (!defined('IN')) die('bad request');\r\ninclude_once(CROOT . 'controller' . DS . 'core.class.php');\r\n\r\nuse Joyme\\core\\Request;\r\nuse Joyme\\core\\Log;\r\n\r\nclass baiduoriginalsourceController extends coreController\r\n{\r\n\r\n public function savedata()\r\n {\r\n $source = Request::getParam('source', 1);\r\n $title = Request::getParam('title');\r\n $url = Request::getParam('url');\r\n $result = empty($_REQUEST['result']) ? 
'' : $_REQUEST['result'];\r\n $addtime = Request::getParam('addtime');\r\n if(empty($title)||\r\n empty($url)||\r\n empty($result)||\r\n empty($addtime)\r\n ){\r\n $data = array(\r\n 'rs' => '0',\r\n 'msg'=>'ๅ‚ๆ•ฐไธ่ƒฝไธบ็ฉบ'\r\n );\r\n }else{\r\n $data = array(\r\n 'source' => $source,\r\n 'title' => $title,\r\n 'url' => $url,\r\n 'result' => $result,\r\n 'addtime' => $addtime\r\n );\r\n $baiduoriginalmodel = new baiduOriginalModel();\r\n $ret = $baiduoriginalmodel->insert($data);\r\n if($ret){\r\n $data = array(\r\n 'rs' => '1',\r\n 'msg'=>'ๆไบคๆˆๅŠŸ'\r\n );\r\n }else{\r\n $data = array(\r\n 'rs' => '0',\r\n 'msg'=>'ๆไบคๅคฑ่ดฅ'\r\n );\r\n }\r\n }\r\n echo json_encode($data);\r\n exit;\r\n }\r\n}" }, { "alpha_fraction": 0.5148205757141113, "alphanum_fraction": 0.5304211974143982, "avg_line_length": 18.677419662475586, "blob_id": "9b4d2663b2eb87b2a846cd56bb2a67905e151957", "content_id": "72c5a83955caba0174619cb6c93897f3f12feadb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "PHP", "length_bytes": 671, "license_type": "no_license", "max_line_length": 64, "num_lines": 31, "path": "/controller/free.class.php", "repo_name": "liu67224657/joyme-channel", "src_encoding": "UTF-8", "text": "<?php\r\n\r\nif (!defined('IN'))\r\n die('bad request');\r\ninclude_once( CROOT . 'controller' . DS . 'core.class.php' );\r\n\r\nuse Joyme\\core\\Log;\r\n\r\n/**\r\n * Created by JetBrains PhpStorm.\r\n * User: xinshi\r\n * Date: 15-4-15\r\n * Time: ไธ‹ๅˆ2:51\r\n * To change this template use File | Settings | File Templates.\r\n */\r\nclass freeController extends coreController {\r\n\r\n function __construct() {\r\n // ่ฝฝๅ…ฅ้ป˜่ฎค็š„\r\n parent::__construct();\r\n// Log::config(Log::ALL);\r\n }\r\n\r\n //ๅŠ ๅฏ†้ชŒ่ฏ\r\n function Protection($str) {\r\n //็บฆๅฎš็ง˜้’ฅ\r\n $key = \"zm^!-tb\";\r\n $token = md5(md5($str . 
$key));\r\n return $token;\r\n }\r\n}\r\n" }, { "alpha_fraction": 0.4282420873641968, "alphanum_fraction": 0.4363112449645996, "avg_line_length": 23.544116973876953, "blob_id": "303446efb29370fa13998df1543252f6d1959e58", "content_id": "c1de5f752a6ab0ca4777448bae1efa715f1b8bd1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "PHP", "length_bytes": 1735, "license_type": "no_license", "max_line_length": 69, "num_lines": 68, "path": "/model/jChannelGameModel.class.php", "repo_name": "liu67224657/joyme-channel", "src_encoding": "UTF-8", "text": "<?php\r\n\r\n/**\r\n * Description:\r\n * Author: gradydong\r\n * Date: 2017/4/19\r\n * Time: 14:20\r\n * Copyright: Joyme.com\r\n */\r\nif (!defined('IN')) {\r\n die('bad request');\r\n}\r\nuse Joyme\\db\\JoymeModel;\r\n\r\nclass jChannelGameModel extends JoymeModel\r\n{\r\n public $tableName = 'channelgame';\r\n\r\n public function __construct()\r\n {\r\n $this->db_config = array(\r\n 'hostname' => $GLOBALS['config']['db']['db_host'],\r\n 'username' => $GLOBALS['config']['db']['db_user'],\r\n 'password' => $GLOBALS['config']['db']['db_password'],\r\n 'database' => $GLOBALS['config']['db']['channel_db_name']\r\n );\r\n parent::__construct();\r\n }\r\n\r\n public function getData($where, $limit = 10, $skip = 0)\r\n {\r\n return $this->select('*', $where, 'id desc', $limit, $skip);\r\n }\r\n\r\n public function getRowData($where)\r\n {\r\n return $this->selectRow('*', $where);\r\n }\r\n\r\n public function getCount($where){\r\n return $this->count($where);\r\n }\r\n\r\n public function addData($data=array())\r\n {\r\n if($data){\r\n $count = $this->count(array(\r\n 'cid' => $data['cid'],\r\n 'gid' => $data['gid'],\r\n 'datatype' => $data['datatype']\r\n ));\r\n if($count){\r\n return $this->update(array(\r\n 'gamename' => $data['gamename']\r\n ),array(\r\n 'cid' => $data['cid'],\r\n 'gid' => $data['gid'],\r\n 'datatype' => $data['datatype']\r\n ));\r\n }else{\r\n return $this->insert($data);\r\n }\r\n }else{\r\n return false;\r\n }\r\n }\r\n\r\n}" }, { "alpha_fraction": 0.42643284797668457, "alphanum_fraction": 0.43042486906051636, "avg_line_length": 28.37229347229004, "blob_id": "e03438f6ea789832fd9a112d869c4c6adf9900f3", "content_id": "fca6f65cac15bf69790ff46d530277f3b38d3c78", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "PHP", "length_bytes": 7380, "license_type": "no_license", "max_line_length": 116, "num_lines": 231, "path": "/controller/jchannel.class.php", "repo_name": "liu67224657/joyme-channel", "src_encoding": "UTF-8", "text": "<?php\r\n\r\n/**\r\n * Description:ๆธ ้“็ฎก็†ๆธ ้“็›ธๅ…ณ\r\n * Author: gradydong\r\n * Date: 2017/4/19\r\n * Time: 13:52\r\n * Copyright: Joyme.com\r\n */\r\nif (!defined('IN')) die('bad request');\r\ninclude_once(AROOT . 'controller' . DS . 'app.class.php');\r\nuse Joyme\\core\\Request;\r\nuse Joyme\\net\\RedisHelper;\r\n\r\nclass jchannelController extends appController\r\n{\r\n public function __construct()\r\n {\r\n parent::__construct();\r\n }\r\n\r\n public function index()\r\n {\r\n $conditions = $where = array();\r\n //ๆธ ้“ๆ ‡็คบ\r\n $ckey = Request::getParam('ckey', '');\r\n if ($ckey) {\r\n $where['ckey'] = array('like', '%' . $ckey . '%');\r\n $conditions['ckey'] = $ckey;\r\n }\r\n //ๆธ ้“ๅ็งฐ\r\n $channel_name = Request::getParam('channel_name', '');\r\n if ($channel_name) {\r\n $where['channel_name'] = array('like', '%' . $channel_name . 
'%');\r\n $conditions['channel_name'] = $channel_name;\r\n }\r\n // channel template\r\n $template = Request::getParam('template', '');\r\n if ($template) {\r\n $where['template'] = array('like', '%' . $template . '%');\r\n $conditions['template'] = $template;\r\n }\r\n // channel display status\r\n $isshow = Request::getParam('isshow', 'all');\r\n if (is_numeric($isshow)) {\r\n $where['isshow'] = (int)$isshow;\r\n $conditions['isshow'] = $isshow;\r\n }\r\n\r\n $jchannelmodel = new jChannelModel();\r\n $total = $jchannelmodel->count($where);\r\n $psize = 20; // items per page\r\n $pno = Request::get('pb_page', 1);\r\n $skip = 0;\r\n if ($pno) {\r\n $skip = (intval($pno) - 1) * $psize;\r\n }\r\n $lists = $jchannelmodel->select(\"cid,ckey,channel_name,template,isshow\", $where, 'cid DESC', $psize, $skip);\r\n $page = new pageModel();\r\n $page->mainPage(array('total' => $total, 'perpage' => $psize, 'nowindex' => $pno, 'pagebarnum' => 10));\r\n $phtml = $page->show(2, $conditions);\r\n $data = array(\r\n 'ckey' => $ckey,\r\n 'channel_name' => $channel_name,\r\n 'template' => $template,\r\n 'isshow' => $isshow,\r\n 'total' => $total,\r\n 'list' => $lists,\r\n 'phtml' => $phtml,\r\n 'pno' => $pno\r\n );\r\n render($data, 'web', 'jchannel/list');\r\n }\r\n\r\n\r\n public function add()\r\n {\r\n render(array(), 'web', 'jchannel/add');\r\n }\r\n\r\n public function addPro()\r\n {\r\n $jchannelmodel = new jChannelModel();\r\n $data = $this->getPostData();\r\n $ret = $jchannelmodel->insert($data);\r\n if ($ret) {\r\n echo 'Operation succeeded <a href=\"?c=jchannel&a=index\">Back to list</a>';\r\n } else {\r\n echo 'Operation failed <a href=\"?c=jchannel&a=index\">Back to list</a>';\r\n }\r\n }\r\n\r\n public function edit()\r\n {\r\n $cid = Request::get('cid', 0);\r\n $jchannelmodel = new jChannelModel();\r\n $item = $jchannelmodel->selectRow('cid,ckey,channel_name,template', array(\r\n 'cid' => $cid\r\n ));\r\n $data = array(\r\n 'item' => $item\r\n );\r\n render($data, 'web', 'jchannel/edit');\r\n }\r\n\r\n public function editPro()\r\n {\r\n $cid = Request::post('cid');\r\n if (is_numeric($cid)) {\r\n $jchannelmodel = new jChannelModel();\r\n $data = $this->getPostData();\r\n $errno = $jchannelmodel->update($data, array(\r\n 'cid' => $cid\r\n ));\r\n if ($errno) {\r\n echo 'Operation succeeded <a href=\"?c=jchannel&a=index\">Back to list</a>';\r\n } else {\r\n echo 'Operation failed <a href=\"?c=jchannel&a=index\">Back to list</a>';\r\n }\r\n } else {\r\n echo 'Invalid parameters <a href=\"?c=jchannel&a=index\">Back to list</a>';\r\n }\r\n }\r\n\r\n public function del()\r\n {\r\n $cid = (int)Request::post('cid');\r\n if ($cid) {\r\n $jchannelmodel = new jChannelModel();\r\n $errno = $jchannelmodel->delete(array(\r\n 'cid' => $cid\r\n ));\r\n if ($errno) {\r\n addlog('joymewiki', 'delete', 'deleted the channel record with id ' . $cid);\r\n $res = array('rs' => 1, 'msg' => 'Deleted successfully');\r\n } else {\r\n $res = array('rs' => 2, 'msg' => 'Delete failed');\r\n }\r\n } else {\r\n $res = array('rs' => 2, 'msg' => 'Invalid parameters');\r\n }\r\n jsonEncode($res);\r\n }\r\n\r\n private function getPostData()\r\n {\r\n $data = array();\r\n $id = Request::post('id');\r\n $ckey = Request::post('ckey', '');\r\n if (empty($ckey)) {\r\n if ($id) {\r\n echo 'Channel key cannot be empty <a href=\"?c=jchannel&a=edit&id=' . $id . '\">Back</a>';\r\n } else {\r\n echo 'Channel key cannot be empty <a href=\"?c=jchannel&a=add\">Back</a>';\r\n }\r\n } else {\r\n $data['ckey'] = $ckey;\r\n }\r\n $channel_name = Request::post('channel_name', '');\r\n if (empty($channel_name)) {\r\n if ($id) {\r\n echo 'Channel name cannot be empty <a href=\"?c=jchannel&a=edit&id=' . $id . '\">Back</a>';\r\n } else {\r\n echo 'Channel name cannot be empty <a href=\"?c=jchannel&a=add\">Back</a>';\r\n }\r\n } else {\r\n $data['channel_name'] = $channel_name;\r\n }\r\n $template = Request::post('template', '');\r\n if (empty($template)) {\r\n if ($id) {\r\n echo 'Channel template cannot be empty <a href=\"?c=jchannel&a=edit&id=' . $id . '\">Back</a>';\r\n } else {\r\n echo 'Channel template cannot be empty <a href=\"?c=jchannel&a=add\">Back</a>';\r\n }\r\n } else {\r\n $data['template'] = $template;\r\n }\r\n return $data;\r\n }\r\n\r\n // update the channel display status\r\n public function editisshow()\r\n {\r\n $cid = (int)Request::getParam('cid');\r\n $isshow = (int)Request::getParam('isshow');\r\n if ($cid) {\r\n $jchannelmodel = new jChannelModel();\r\n $errno = $jchannelmodel->update(array(\r\n 'isshow' => $isshow\r\n ),array(\r\n 'cid' => $cid,\r\n ));\r\n if ($errno) {\r\n $res = array('rs' => 1, 'msg' => 'Updated successfully');\r\n } else {\r\n $res = array('rs' => 2, 'msg' => 'Update failed');\r\n }\r\n } else {\r\n $res = array('rs' => 2, 'msg' => 'Invalid parameters');\r\n }\r\n jsonEncode($res);\r\n }\r\n\r\n\r\n // get the channel ID from the channel key\r\n public function getCidByKey($key)\r\n {\r\n $redisconfig = $GLOBALS['config']['redis'];\r\n\r\n $mem = new RedisHelper($redisconfig['host'], $redisconfig['port'],0,$redisconfig['password']);\r\n\r\n $channelkey = memckey('channellist', 'key', $key);\r\n\r\n $cid = $mem->get($channelkey);\r\n\r\n if (empty($cid)) {\r\n $jchannelModel = new jChannelModel();\r\n\r\n $cid = $jchannelModel->getIDByKey($key);\r\n\r\n if (empty($cid)) {\r\n return 0;\r\n } else {\r\n $mem->set($channelkey, $cid);\r\n }\r\n\r\n }\r\n return $cid;\r\n }\r\n}" }, { "alpha_fraction": 0.4816136956214905, "alphanum_fraction": 0.49553731083869934, "avg_line_length": 30.133333206176758, "blob_id": "d6a9418c48adf3df3bc7se;", "content_id": "62fa3f9398d186e94457319b95b509d0776c356a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "PHP", "length_bytes": 2879, "license_type": "no_license", "max_line_length": 116, "num_lines": 90, "path": "/controller/sourceSougoupc.class.php", "repo_name": "liu67224657/joyme-channel", "src_encoding": "UTF-8", "text": "<?php\n\nif (!defined('IN')) die('bad request');\n/**\n * Created by PhpStorm.\n * User: pengzhang\n * Date: 2017/4/12\n * Time: 4:05 PM\n */\ninclude_once(AROOT . 'controller' . DS . 'sourceAbstract.class.php');\n\nuse Joyme\\core\\Request;\nuse Joyme\\net\\RedisHelper;\n\nclass sourceSougoupcController extends sourceAbstractController\n{\n    // channel fields\n    public $fields = array('indexData', 'title', 'image', 'url', 'pubtime', 'category');\n\n    /**\n     * API query\n     */\n    public function query($cid)\n    {\n        global $GLOBALS;\n        // query all the games\n        $jChannelGameModel = new jChannelGameModel();\n        $where = array('cid' => $cid, 'gamestatus' => 1);\n        $channelgamelist = $jChannelGameModel->getData($where, 1000);\n\n        $gids = '0';\n        foreach ($channelgamelist as $val) {\n            $gids .= ',' . 
$val['gid'];\n}\n\n\n        // query extra game data\n        $jGameModel = new jGameModel();\n        $where2 = array( 'gid' => array('in', $gids));\n        $gamelist = $jGameModel->getData($where2, 1000);\n\n        foreach ($channelgamelist as $o => $p) {\n            foreach ($gamelist as $k => $v) {\n                if ($p['gid'] == $v['gid']) {\n                    $temparr = json_decode($v['extra'],true);\n                    $temparr = empty($temparr)?array():$temparr;\n                    if(empty($temparr['wikiUrl'])){\n                        $temparr['wikiUrl'] = \"http://www.joyme.\" . $GLOBALS['domain'] . \"/collection/\" . $v['gid'];\n                    }\n                    $channelgamelist[$o] = array_merge($p, $v, $temparr);\n                }\n            }\n        }\n        // rebuild the array\n        $channelgameidlist = array_column($channelgamelist, 'gid');\n        $channelgamelist = array_combine($channelgameidlist, $channelgamelist);\n\n\n        // query channel data\n        $jChannelDataModel = new jChannelDataModel();\n        $where3 = array('cid' => $cid, 'isblock' => 0);\n        $data = $jChannelDataModel->getData($where3, 10000);\n\n        $aids = '0';\n        foreach ($data as $val) {\n            $aids .= ',' . $val['aid'];\n        }\n        $jSourceDataModel = new jSourceDataModel();\n        $where4 = array('source' => 1, 'aid' => array('in', $aids));\n        $sourcedata = $jSourceDataModel->getData($where4, 10000);\n\n        // merge the data\n        foreach ($data as $k => $val) {\n            foreach ($sourcedata as $row) {\n                if ($val['aid'] == $row['aid']) {\n                    $temparr = json_decode($row['data'], true);\n                    $temparr = empty($temparr)?array():$temparr;\n                    $data[$k] = array_merge($temparr,$val);\n                }\n            }\n            if (in_array($val['gid'], $channelgameidlist)) {\n                $channelgamelist[$val['gid']]['articlelist'][] = $data[$k];\n            }\n\n        }\n        return $channelgamelist;\n    }\n\n\n}" }, { "alpha_fraction": 0.4181685447692871, "alphanum_fraction": 0.4271433651447296, "avg_line_length": 33.43718719482422, "blob_id": "bc58c38f153ffb530c8452d4320a683d52e159ee", "content_id": "6b6aaae2e5f73dbd087545ed2d521b2cd987de3e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "PHP", "length_bytes": 13975, "license_type": "no_license", "max_line_length": 128, "num_lines": 398, "path": "/controller/source.class.php", "repo_name": "liu67224657/joyme-channel", "src_encoding": "UTF-8", "text": "<?php\n\nif (!defined('IN')) die('bad request');\n/**\n * Created by PhpStorm.\n * User: pengzhang\n * Date: 2017/4/12\n * Time: 4:05 PM\n */\ninclude_once(CROOT . 'controller' . DS . 'core.class.php');\n\nuse Joyme\\core\\Request;\nuse Joyme\\core\\Log;\n\nclass sourceController extends coreController\n{\n\n    //channel list\n    public function channellist()\n    {\n        $jchannelmodel = new jChannelModel();\n        $lists = $jchannelmodel->select(\"cid,ckey,channel_name,template\", array(), 'cid DESC');\n\n        echo json_encode($lists);\n        exit;\n\n\n    }\n\n    public function test()\n    {\n\n    }\n\n    // report CMS and gift-pack data\n    public function savedata()\n    {\n        $aid = Request::getParam('aid', 0);\n        $cid = Request::getParam('cid', '');\n        $gid = Request::getParam('gid', 0);\n        $atype = Request::getParam('atype', 0);\n        $source = Request::getParam('source', 1);\n        $extra = empty($_POST['extra']) ? '' : $_POST['extra'];\n        $pubdate = Request::getParam('pubdate', '');\n        $aurl = Request::getParam('url', '');\n        $extraArr = json_decode($extra, true);\n\n        if ($source == 2) {\n            $cid = 1;\n            $pubdate = $pubdate / 1000;\n        }\n\n        if (empty($aid) || empty($cid) || empty($source) || empty($extraArr) || empty($pubdate)) {\n            self::sourceReturn(-10001, 'Missing parameters');\n        }\n        $litpic = empty($extraArr['litpic']) ? '' : $extraArr['litpic'];\n\n        $sourcedata = array(\n            'aid' => $aid,\n            'data' => $extra,\n            'source' => $source\n        );\n\n\n        $sourceDataModel = new jSourceDataModel();\n        $sourceDataRs = $sourceDataModel->getRowData(array('aid' => $aid, 'source' => $source));\n        if ($sourceDataRs) {\n            $rs = $sourceDataModel->update(array('data' => $extra), array('aid' => $aid, 'source' => $source));\n        } else {\n            $rs = $sourceDataModel->addData($sourcedata);\n        }\n\n        // check whether it should be blocked\n        if ($source == 2 && $extraArr['actStatus']['code'] == 'n') {\n            $isblock = 1;\n        } else {\n            $isblock = 0;\n        }\n\n        $cidArr = explode(',', $cid);\n\n        $jchannelmodel = new jChannelModel();\n        $tplArr = $jchannelmodel->select(\"cid,template,size\", array());\n\n        $tplCidArr = array_column($tplArr, 'cid');\n        $tplArr = array_combine($tplCidArr, $tplArr);\n\n        $typeid = empty($extraArr['typeid']) ? '' : $extraArr['typeid'];\n        $typeid2 = empty($extraArr['typeid2']) ? '' : $extraArr['typeid2'];\n        $typeid .= ',' . $typeid2;\n        $typeidArr = explode(',', $typeid);\n\n        $channelDataModel = new jChannelDataModel();\n\n        // query the list of categories it belongs to\n        $channelGameModel = new jChannelGameModel();\n        $channelgameRs = $channelGameModel->select(\"cid,gid\", array('gid' => array('in', $typeidArr), 'datatype' => 2));\n\n\n        $channelgameArr = array();\n        foreach($channelgameRs as $v){\n            $channelgameArr[$v['cid']][] = $v['gid'];\n        }\n\n        if ($rs !== false && empty($sourceDataRs)) {\n            $url = '';\n            $channeldata = array(\n                'cid' => '',\n                'gid' => $gid,\n                'aid' => $aid,\n                'atype' => $atype,\n                'litpic' => $litpic,\n                'source' => $source,\n                'url' => $url,\n                'pubdate' => $pubdate,\n                'isblock' => $isblock\n            );\n\n            foreach ($cidArr as $v) {\n                if ($tplArr[$v]['template'] == 'pc') {\n                    $urlpre = 'http://www.joyme.' . $GLOBALS['domain'];\n                } else if ($tplArr[$v]['template'] == 'wap') {\n                    $urlpre = 'http://m.joyme.' . $GLOBALS['domain'];\n                } else {\n                    $urlpre = 'http://www.joyme.' . $GLOBALS['domain'] . '/' . $tplArr[$v]['template'];\n                }\n\n                if ($atype == 5) {\n                    $channeldata['url'] = $aurl;\n                } else {\n                    $channeldata['url'] = $urlpre . str_replace('article/pc/', '', $aurl);\n                }\n                $size = json_decode($tplArr[$v]['size'], true);\n                if ($size && !empty($size[$atype])) {\n                    $channeldata['litpic'] = $litpic . '?imageView2/1/w/' . $size[$atype]['w'] . '/h/' . $size[$atype]['h'];\n                }\n\n                $channeldata['cid'] = $v;\n\n                // check whether the channel has the game or category enabled\n                if ($gid) {\n                    $channeldata['datatype'] = 1;\n                    $channeldata['gid'] = $gid;\n                    $channelDataModel->addData($channeldata);\n                }\n                $typeid = empty($extraArr['typeid']) ? 0 : $extraArr['typeid'];\n\n                if ($channelgameArr[$v] && in_array($typeid, $channelgameArr[$v])) {\n                    $channeldata['datatype'] = 2;\n\n                    $channeldata['gid'] = $typeid;\n                    $channelDataModel->addData($channeldata);\n\n                    $typeid2 = empty($extraArr['typeid2']) ? 
'' : $extraArr['typeid2'];\n                    $typeid2Arr = explode(',', $typeid2);\n                    if (!empty($typeid2Arr)) {\n                        foreach ($typeid2Arr as $v2) {\n                            if (in_array($v2, $channelgameArr[$v])) {\n                                $channeldata['gid'] = $v2;\n                                $channelDataModel->addData($channeldata);\n                            }\n                        }\n\n                    }\n\n                }\n            }\n            self::sourceReturn(1, 'ok');\n        } else if ($rs !== false) {\n            $channelDataModel = new jChannelDataModel();\n            $where1 = $where2 = $where = array('aid' => $aid, 'source' => $source);\n\n            $where1['datatype'] = 1;\n            $channelDatars1 = $channelDataModel->getData($where1, 50);\n            $cids1 = array_column($channelDatars1,'cid');\n            $where2['datatype'] = 2;\n            $channelDatars2 = $channelDataModel->getData($where2, 50);\n            $cids2 = array_column($channelDatars2,'cid');\n\n            $cidArr = explode(',', $cid);\n\n            // add the entries that do not exist yet\n            if (!empty($cidArr)) {\n                $url = '';\n                $channeldata = array(\n                    'cid' => '',\n                    'gid' => '',\n                    'aid' => $aid,\n                    'atype' => $atype,\n                    'litpic' => $litpic,\n                    'source' => $source,\n                    'url' => $url,\n                    'pubdate' => $pubdate,\n                    'isblock' => $isblock\n                );\n\n\n                foreach ($cidArr as $v) {\n                    if ($tplArr[$v]['template'] == 'pc') {\n                        $urlpre = 'http://www.joyme.' . $GLOBALS['domain'];\n                    } else if ($tplArr[$v]['template'] == 'wap') {\n                        $urlpre = 'http://m.joyme.' . $GLOBALS['domain'];\n                    } else {\n                        $urlpre = 'http://www.joyme.' . $GLOBALS['domain'] . '/' . $tplArr[$v]['template'];\n                    }\n\n                    if ($atype == 5) {\n                        $channeldata['url'] = $aurl;\n                    } else {\n                        $channeldata['url'] = $urlpre . str_replace('article/pc/', '', $aurl);\n                    }\n\n                    $size = json_decode($tplArr[$v]['size'], true);\n                    if ($size && !empty($size[$atype]) && $source == 1) {\n                        $channeldata['litpic'] = $litpic . '?imageView2/1/w/' . $size[$atype]['w'] . '/h/' . $size[$atype]['h'];\n                    }\n\n                    $channeldata['cid'] = $v;\n                    // check whether the channel has the game or category enabled\n                    if ($gid && !in_array($v,$cids1)) {\n                        $channeldata['datatype'] = 1;\n                        $channeldata['gid'] = $gid;\n                        $channelDataModel->addData($channeldata);\n                    }\n                    $typeid = empty($extraArr['typeid']) ? 0 : $extraArr['typeid'];\n\n                    if ($channelgameArr[$v] && in_array($typeid, $channelgameArr[$v]) && !in_array($v,$cids2)) {\n                        $channeldata['datatype'] = 2;\n\n                        $channeldata['gid'] = $typeid;\n\n                        $channelDataModel->addData($channeldata);\n\n                        $typeid2 = empty($extraArr['typeid2']) ? '' : $extraArr['typeid2'];\n                        $typeid2Arr = explode(',', $typeid2);\n                        if (!empty($typeid2Arr)) {\n                            foreach ($typeid2Arr as $v2) {\n                                if (in_array($v2, $channelgameArr[$v])) {\n                                    $channeldata['gid'] = $v2;\n                                    $channelDataModel->addData($channeldata);\n                                }\n                            }\n\n                        }\n\n                    }\n                }\n            }\n\n            $channeldata = array(\n                'atype' => $atype,\n                'pubdate' => $pubdate,\n                'isblock' => $isblock,\n            );\n            $rs = $channelDataModel->update($channeldata, $where);\n            if ($rs !== false) {\n                self::sourceReturn(1, 'ok');\n            } else {\n                $message = 'Failed to update the channeldata record';\n                Log::error($message);\n                self::sourceReturn(-10002, $message);\n            }\n        } else {\n            $message = 'Failed to update the sourcedata record';\n            Log::error($message);\n            self::sourceReturn(-10002, $message);\n        }\n    }\n\n    // report to the game library\n    public function savegame()\n    {\n        $gid = Request::getParam('gid', 0);\n        $extra = empty($_POST['extra']) ? '' : $_POST['extra'];\n        $extraArr = json_decode($extra, true);\n        if (empty($gid) || empty($extraArr)) {\n            self::sourceReturn(-10001, 'Missing parameters');\n        }\n        $litpic = empty($extraArr['litpic']) ? '' : $extraArr['litpic'];\n        $gameurl = 'http://www.joyme.com/collection/' . $gid;\n\n        $gamedata = array(\n            'gid' => $gid,\n            'listnewsurl' => $gameurl . '/news',\n            'liststrategyurl' => $gameurl . '/guides',\n            'listvideourl' => $gameurl . '/videos',\n            'extra' => $extra\n        );\n\n        $gameModel = new jGameModel();\n        $row = $gameModel->getRowData(array('gid' => $gid));\n        if ($row) {\n            $exsitextra = json_decode($row['extra'], true);\n            if ($exsitextra['sougoupcgametype']) {\n                $extra = json_decode($extra, true);\n                $extra['sougoupcgametype'] = $exsitextra['sougoupcgametype'];\n                $extra = json_encode($extra);\n            }\n            $rs = $gameModel->update(array('extra' => $extra), array('gid' => $gid));\n            if ($rs !== false) {\n                self::sourceReturn(1, 'ok');\n            } else {\n                $message = 'Failed to update the database';\n                Log::error($message);\n                self::sourceReturn(-10002, $message);\n            }\n        } else {\n            $rs = $gameModel->addData($gamedata);\n            if ($rs !== false) {\n                self::sourceReturn(1, 'ok');\n            } else {\n                $message = 'Failed to insert into the database';\n                Log::error($message);\n                self::sourceReturn(-10002, $message);\n            }\n        }\n\n    }\n\n    public function query()\n    {\n        $GLOBALS['config']['checklogin'] = false;\n        $ckey = Request::getParam('ckey', '');\n        $gameid = Request::getParam('gameid', 0);\n        $type = Request::getParam('type', 0); // 1 = game, 2 = data\n        $page = Request::getParam('page', 1);\n        $size = Request::getParam('page_size', 20);\n\n        if (empty($ckey)) {\n            echo 'no ckey';\n            exit;\n        }\n\n        $cache_file = AROOT . 'cache/' . $ckey . '_' . $type . '_' . $gameid . '_' . $page. '_' . $size . '.data';\n        $expiration = $GLOBALS['config']['source']['expiration']; // expiration time\n\n        $now = time();\n\n        if (file_exists($cache_file) && (filemtime($cache_file) > $now - $expiration)) {\n            $str = file_get_contents($cache_file);\n            $data = json_decode($str, true);\n        } else {\n            $jchannel = NC('jchannel');\n            $cid = $jchannel->getCidByKey($ckey);\n\n            if (empty($cid)) {\n                echo 'no this channel';\n                exit;\n            }\n\n            $source = sourceController::createSource($ckey);\n            $data = $source->query($cid);\n            @file_put_contents($cache_file, json_encode($data));\n        }\n\n        $tpl = $ckey;\n        render($data, 'source', $tpl);\n    }\n\n    /**\n     * @param $name\n     * @return source object\n     * @throws Exception\n     */\n    public static function createSource($name)\n    {\n        switch ($name) {\n            case 'baiduglb':\n                return NC('sourceBaiduglb'); // Baidu Columbus\n                break;\n            case 'sougoupc':\n                return NC('sourceSougoupc'); // Sogou PC\n                break;\n            case 'sougouwap':\n                return NC('sourceSougouwap'); // Sogou mobile\n                break;\n            case 'baidugl':\n                return NC('sourceBaidugl'); // Baidu guides\n                break;\n            case 'aliyunos':\n                return NC('sourceAliyunos'); // Aliyun OS\n                break;\n            case 'sougouclient':\n                return NC('sourceSougouclient'); // Sogou browser\n                break;\n            default :\n                throw new Exception('Unknown channel');\n                break;\n        }\n    }\n\n    public function sourceReturn($code, $msg)\n    {\n        $data = array('rs' => $code, 'msg' => $msg);\n        echo json_encode($data);\n        exit;\n    }\n}" } ]
36
drmonkeysee/Tapestry
https://github.com/drmonkeysee/Tapestry
16699452b407bfc1dbac9e9cd00170d0a91f5804
32098c48773a46c73e4e21d4db2a5aef380ec6b7
538f699ea499d4089fbaaeb77c737273073738ff
refs/heads/main
2022-06-07T23:58:47.803262
2022-05-25T03:32:45
2022-05-25T03:32:45
231,491,822
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.6219512224197388, "alphanum_fraction": 0.6219512224197388, "avg_line_length": 15.399999618530273, "blob_id": "eaa09953f13fbc896b2ed89c8340d4b612b865eb", "content_id": "1ce01b2cbe11e9c432169a126811c8615fe5847e", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 82, "license_type": "permissive", "max_line_length": 32, "num_lines": 5, "path": "/src/tapestry/app.py", "repo_name": "drmonkeysee/Tapestry", "src_encoding": "UTF-8", "text": "\"\"\"Tapestry Application.\"\"\"\n\n\ndef run() -> None:\n print('Hello from Tapestry')\n" }, { "alpha_fraction": 0.6065573692321777, "alphanum_fraction": 0.6088992953300476, "avg_line_length": 18.409090042114258, "blob_id": "ed5f3a4f903eb8849ea820192ef1407c5a766d97", "content_id": "a89b3177068f2d9c4ba6b73cdcd57a62a45e93fe", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Makefile", "length_bytes": 427, "license_type": "permissive", "max_line_length": 77, "num_lines": 22, "path": "/Makefile", "repo_name": "drmonkeysee/Tapestry", "src_encoding": "UTF-8", "text": "PY := python3\nVENV := venv\nACTIVATE := source $(VENV)/bin/activate\nSRC_DIR := src\n\n.PHONY: clean int purge run\n\nrun: $(VENV)\n\t$(ACTIVATE) && PYTHONPATH=$(SRC_DIR) $(PY) -m tapestry\n\nint:\n\t$(ACTIVATE) && PYTHONPATH=$(SRC_DIR) $(PY)\n\nclean:\n\tfind . -type d -path ./$(VENV) -prune -o -name __pycache__ -exec rm -rf {} +\n\npurge: clean\n\trm -rf $(VENV)\n\n$(VENV):\n\t$(PY) -m venv $@\n\t$(ACTIVATE) && pip install -U pip setuptools wheel\n" }, { "alpha_fraction": 0.5178571343421936, "alphanum_fraction": 0.5714285969734192, "avg_line_length": 27, "blob_id": "28ffef6bde3df1d9d716873af604636b330700fe", "content_id": "99cb2d01a51a004750b8ce9a0f69e049bdd99e6b", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 56, "license_type": "permissive", "max_line_length": 28, "num_lines": 2, "path": "/src/tapestry/__init__.py", "repo_name": "drmonkeysee/Tapestry", "src_encoding": "UTF-8", "text": "\"\"\"Main Tapestry Package.\"\"\"\n__version__: str = '0.0.1'\n" }, { "alpha_fraction": 0.7407407164573669, "alphanum_fraction": 0.7407407164573669, "avg_line_length": 26, "blob_id": "908f3ba6930f2900dc02e75a99a3dc08ae9a6f47", "content_id": "ba3a55e3a10fc524e15bb72ade185d93743a88f7", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 81, "license_type": "permissive", "max_line_length": 68, "num_lines": 3, "path": "/README.md", "repo_name": "drmonkeysee/Tapestry", "src_encoding": "UTF-8", "text": "# Tapestry\n\nAn economic simulation, written in [Python](https://www.python.org).\n" }, { "alpha_fraction": 0.75, "alphanum_fraction": 0.75, "avg_line_length": 20, "blob_id": "ae85f1cf3c30b5973b75c79242ab90e131927af6", "content_id": "80a91b2eb2439c10a196f8d9b412d6cb1268e6bd", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 84, "license_type": "permissive", "max_line_length": 43, "num_lines": 4, "path": "/src/tapestry/__main__.py", "repo_name": "drmonkeysee/Tapestry", "src_encoding": "UTF-8", "text": "\"\"\"Entry point for Tapestry application.\"\"\"\nimport tapestry.app\n\ntapestry.app.run()\n" } ]
5
charlisewilliams/Adobe-SIP-2016
https://github.com/charlisewilliams/Adobe-SIP-2016
842958e9c418921a29f7cfbc196330336615527e
8f93267da6ac314464eea9222d5d44666ab69e5e
15d2db4d56d8d3cc73334c9d949733d3615d6050
refs/heads/master
2020-12-25T13:34:14.339030
2016-07-29T22:24:22
2016-07-29T22:24:22
63,885,249
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6212409734725952, "alphanum_fraction": 0.6783403158187866, "avg_line_length": 24.259614944458008, "blob_id": "c054812a4ce8ce829ec64ac106c721beb6730de7", "content_id": "ef5a48285e3cf845022c57708b1e6b98975e72d6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 2627, "license_type": "no_license", "max_line_length": 75, "num_lines": 104, "path": "/piezo_starter/piezo_starter.ino", "repo_name": "charlisewilliams/Adobe-SIP-2016", "src_encoding": "UTF-8", "text": "int PIEZOPIN = 5;\n\n// One octave of notes between A4 and A5\nint note_A3 = 220;\nint note_A4 = 440;\nint note_As4 = 466; int note_Bb4 = note_As4;\nint note_B3 = 246.9;\nint note_B4 = 494;\nint note_C5 = 523.3;\nint note_Cs5 = 554; int note_Db5 = note_Cs5;\nint note_D5 = 587;\nint note_Ds5 = 622; int note_Eb5 = note_Ds5;\nint note_E5 = 659;\nint note_F5 = 698;\nint note_Fs5 = 740; int note_Gb5 = note_Fs5;\nint note_G5 = 784;\nint note_Gs5 = 830; int note_Ab5 = note_Gs5;\nint note_C4=261.6;\nint note_F4=349.2;\nint note_E4=329.6;\nint note_D4=293.7;\n\n\nint note_G4=392;\nint note_A5 = note_A4 * 2;\nint note_As5 = note_As4 * 2; int note_Bb5 = note_As5;\nint note_B5 = note_B4 * 2;\n\nint note_rest = -1;\n\n// note lengths defined here\nlong whole_note = 2000; // change tempo by changing duration of one measure\nlong half_note = whole_note / 2;\nlong quarter_note = whole_note / 4;\nlong eighth_note = whole_note / 8;\nlong sixteenth_note = whole_note / 16;\n\n// WRITE YOUR SONG HERE\n\n \n// if you want there to be silence between notes,\n// staccato should be true\nbool staccato = true;\n\nint piezo_pin=5;\nvoid setup() {\n pinMode(PIEZOPIN, OUTPUT);\n \n}\n\nvoid loop() {\n tone(piezo_pin,note_C4,half_note );\n delay(half_note);\n tone(piezo_pin,note_C5 ,half_note);\n delay(half_note);\n tone(piezo_pin,note_B4 ,quarter_note);\n delay(quarter_note);\n tone(piezo_pin,note_G4 ,eighth_note );\n delay(eighth_note);\n tone(piezo_pin, note_A4, eighth_note);\n delay(eighth_note);\n tone(piezo_pin,note_B4,quarter_note );\n delay(quarter_note);\n tone(piezo_pin,note_C5,quarter_note );\n delay(quarter_note);\n delay(150);\n tone(piezo_pin,note_C4,half_note);\n delay(half_note);\n tone(piezo_pin,note_A4 ,half_note);\n delay(half_note); \n tone(piezo_pin,note_G4 ,whole_note);\n delay(whole_note);\n delay(100);\n tone(piezo_pin,note_A3,half_note);\n delay(half_note);\n tone(piezo_pin,note_F4,half_note);\n delay(half_note);\n tone(piezo_pin,note_E4,quarter_note);\n delay(quarter_note);\n tone(piezo_pin,note_C4,eighth_note);\n delay(eighth_note);\n tone(piezo_pin,note_D4,eighth_note);\n delay(eighth_note);\n tone(piezo_pin,note_E4,quarter_note);\n delay(quarter_note);\n tone(piezo_pin,note_F4,quarter_note);\n delay(quarter_note);\n tone(piezo_pin,note_D4,quarter_note);\n delay(quarter_note);\n tone(piezo_pin,note_B3,eighth_note);\n delay(eighth_note);\n tone(piezo_pin,note_C4,eighth_note);\n delay(eighth_note);\n tone(piezo_pin,note_D4,quarter_note);\n delay(quarter_note);\n tone(piezo_pin,note_E4,quarter_note);\n delay(quarter_note);\n tone(piezo_pin,note_C4,half_note);\n delay(half_note);\n delay(2000);\n \n \n //PLAY YOUR SONG HERE\n}\n" }, { "alpha_fraction": 0.74210524559021, "alphanum_fraction": 0.7473683953285217, "avg_line_length": 26.14285659790039, "blob_id": "d74b42b8a7fed4eef1aa6df0ab6a1a5070bf280b", "content_id": "498d897f2e05cccc22c180f543061840a5d195aa", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 
190, "license_type": "no_license", "max_line_length": 50, "num_lines": 7, "path": "/Week_2/random generator 3.py", "repo_name": "charlisewilliams/Adobe-SIP-2016", "src_encoding": "UTF-8", "text": "from random import\nfive_syllable[\"Trying to hold on\"]\nseven_syllable[\"Planning, plotting, scheming but\"]\nfive_syllable [\"Nothing stays the same\"]\n\nfor i in range(3):\n print(five_syllable\n" }, { "alpha_fraction": 0.6409395933151245, "alphanum_fraction": 0.6409395933151245, "avg_line_length": 38.733333587646484, "blob_id": "6a63a051109ccbb8ef7ce867bcaaa1b194fcc6e1", "content_id": "2c8bea99744c54c14baf2979c7cf75dd5082f2b6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 596, "license_type": "no_license", "max_line_length": 84, "num_lines": 15, "path": "/Week_1/flowchart2.py", "repo_name": "charlisewilliams/Adobe-SIP-2016", "src_encoding": "UTF-8", "text": "print(\"Type 'left' to go left or 'Right' to go right\")\nuser_input = input()\nwhile (user_input !=\"left\"or \"Right\"):\n print (\"choose agian\")\n user_input = input()\nif user_input == \"right\":\n print (\" you choosen right and ... order a sandwhich\")\nelif user_input == \"left\":\n print (\"you choose left and... order a burger\")\n user_input == input()\nif user_input == \"right\":\n print(\"You decide to go right and... choose if you want ground beef or Chicken\")\n user_input == input()\nelif user_input == \"left\":\n print(\"you decide to go left and... choose if you want turkey ot ham\")\n" }, { "alpha_fraction": 0.7052441239356995, "alphanum_fraction": 0.7052441239356995, "avg_line_length": 24.090909957885742, "blob_id": "970370304ba5e04a921e68068c62aab35b174ce7", "content_id": "32c2124567ae943d2065bdffd2df8f2b5525c8fe", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 553, "license_type": "no_license", "max_line_length": 81, "num_lines": 22, "path": "/Week_3/House.py", "repo_name": "charlisewilliams/Adobe-SIP-2016", "src_encoding": "UTF-8", "text": "Attr:big small,hair color, skin,tone,eye color,tall,short,male,female\nmethods: walk,run,sit,sleep,eat,talk\n\nclass person():\n\tdef __init__(self,haircolor,skintone,eyecolor,tall,short,female,male,small,big):\n\t\tself.haircolor=haircolor\n\t\tself.eyecolor=eyecolor\n\t\tself.skintone=skintone\n\t\tself.tall=tall\n\t\tself.short=short\n\t\tself.female=female\n\t\tself.male=male\n\t\tself.small=small\n\t\tself.big=big\n\t\tself.name=name\n\n\t\t#methods\n\n\tdef person(what kind of person):\n\t\tprint(self.name + \" small \" + \" brown \" + \" brown \" + \" black \" + \" tall \" +)\n\n\t\tdef methods()\n\n" }, { "alpha_fraction": 0.5975103974342346, "alphanum_fraction": 0.6473029255867004, "avg_line_length": 39.16666793823242, "blob_id": "866dd42075044c6725fdacb88667f617d22687a8", "content_id": "d0aec247f6be30dd28b2d5cd2a4e8676a540dd1d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 241, "license_type": "no_license", "max_line_length": 125, "num_lines": 6, "path": "/Week_2/random generator.py", "repo_name": "charlisewilliams/Adobe-SIP-2016", "src_encoding": "UTF-8", "text": "import random\nlist1=[\"the\",\"an\",\"a\"]\nlist2=[\"cool\",\"funky\",\"blue\",\"red\"]\nlist3=[\"house\",\"car\",\"book\",\"butterfly\"]\n\nprint (list1[random.randint (0,len(list1))]+\" \" +list2[random.randint(0,len(list2))]+\" \"+list3[random.randint(0,len(list3))])\n" }, { "alpha_fraction": 0.47953736782073975, 
"alphanum_fraction": 0.5329181551933289, "avg_line_length": 18.64912223815918, "blob_id": "bb5f035b4bc8245728b4e676bb1c091b149a1d59", "content_id": "1eb2ad9547f5444bc082035534f3985bc31e35d7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1124, "license_type": "no_license", "max_line_length": 37, "num_lines": 57, "path": "/Week-4/Day_2/Day_2.ino", "repo_name": "charlisewilliams/Adobe-SIP-2016", "src_encoding": "UTF-8", "text": " int LEDPIN=7;\n int LEDPIN2=8;\n int BUTTON=10;\n \nvoid setup() {\n pinMode(LEDPIN,OUTPUT);\n pinMode(LEDPIN2,OUTPUT);\n pinMode(BUTTON,INPUT_PULLUP);\n\n\n}\n\nvoid loop() {\n if(digitalRead(BUTTON)==LOW){\n while(digitalRead(BUTTON)==HIGH){\n \n \n for(int i = 0; i < 6; i++){\n \n \n digitalWrite(LEDPIN,HIGH);\n delay(300);\n digitalWrite(LEDPIN,LOW);\n delay(100);\n digitalWrite(LEDPIN,HIGH);\n delay(300);\n digitalWrite(LEDPIN,LOW);\n delay(100);\n digitalWrite(LEDPIN2,HIGH);\n delay(300);\n digitalWrite(LEDPIN2,LOW);\n delay(150);\n digitalWrite(LEDPIN2,HIGH);\n delay(200);\n digitalWrite(LEDPIN2,LOW);\n delay(150);\n digitalWrite(LEDPIN,HIGH);\n delay(300);\n digitalWrite(LEDPIN,LOW);\n delay(100);\n digitalWrite(LEDPIN,HIGH);\n delay(300);\n digitalWrite(LEDPIN,LOW);\n delay(150);\n digitalWrite(LEDPIN2,HIGH);\n delay(300);\n digitalWrite(LEDPIN2,LOW);\n delay(500);\n }\n delay(3000);\n }\n \n\n }\n\n \n}\n \n" }, { "alpha_fraction": 0.610228419303894, "alphanum_fraction": 0.6285998225212097, "avg_line_length": 29.96923065185547, "blob_id": "7c3d57a53ef6b93007aa85e44109a4d0e0032b89", "content_id": "3d4af442799fddaa35f03f14b1d349890f5f48a8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "HTML", "length_bytes": 2014, "license_type": "no_license", "max_line_length": 463, "num_lines": 65, "path": "/Week_5/My website/website.html", "repo_name": "charlisewilliams/Adobe-SIP-2016", "src_encoding": "UTF-8", "text": " <!DOCTYPE html>\n<html>\n<head>\n <title>Charlise My Fashion</title>\n <link rel=\"stylesheet\" href=\"styles.css\">\n <script src=\"https://ajax.googleapis.com/ajax/libs/jquery/1.12.4/jquery.min.js\"></script>\n <script src=\"my_scripts.js\"></script>\n <link rel=\"stylesheet\" href=\"signuptoday.css\">\n</head>\n<body>\n \n <center><img src = \"http://www.fashionmagazine.com/wp-content/themes/sjm-bones-fashion/library/images/fashion-gfx_brand-black.svg\"/></center>\n\n\n <ul>\n <li><a class=\"active\" id=\"Signup\" href=\"#signuptoday.html\">Sign Up Today</a></li>\n\n </div>\n </li>\n</ul>\n\n \n <ul>\n <li><a class=\"active\" href=\"#home\">Home</a></li>\n\n \n <li class=\"dropdown\">\n <a href=\"#\" class=\"dropbtn\">Hair</a>\n <div class=\"dropdown-content\">\n <a href=\"#\">Curly</a>\n <a href=\"#\">Kinky</a>\n <a href=\"#\">Straight</a>\n \n \\\n <li class=\"dropdown\">\n <a href=\"#\" class=\"dropbtn\">Makeup</a>\n <div class=\"dropdown-content\">\n <a href=\"#\">EyeBrows</a>\n <a href=\"#\">Lips</a>\n <a href=\"#\">Face</a>\n \n <li class=\"dropdown\">\n <a href=\"#\" class=\"dropbtn\">Clothes</a>\n <div class=\"dropdown-content\">\n <a href=\"summer.html\">Summer</a>\n <a href=\"winter.html\">Winter</a>\n <a href=\"#\">Fall</a>\n\n <li><a class=\"active\" id=\"Signup\" href=\"signuptoday.html\">Sign Up Today</a></li>\n\n </div>\n </li>\n</ul>\n\n <center><img src = \"http://67.media.tumblr.com/bddd5ad0852273350ae3921afb7d6718/tumblr_mjtn7w7Uio1s6xalso1_500.png\"/></center>\n\n\n <center><h2> About Me</h2>\n <p><font size=\"4\">My name is Charlise.I made 
this website for other people<br>like me to get new and creative ideas about how to wear,<br>Makeup,Clothes,and Hairstyles.I think this website would<br>be good for women who want new ideas on diffrent products<br> that work for other women.This website is to share you ideas to<br> letting other people know what other makeup to try and other<br> clothes ideas and recipes to try for their hair.</font></p></center>\n\n\n\n\n\n</body>\n" }, { "alpha_fraction": 0.5700934529304504, "alphanum_fraction": 0.6124610304832458, "avg_line_length": 24.73796844482422, "blob_id": "972c69d3892710386213ca76409342c79c8d221c", "content_id": "175b4ac4ab27694f8f19b7f8cd064d9f357e8e04", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4815, "license_type": "no_license", "max_line_length": 101, "num_lines": 187, "path": "/Week_3/City.py.py", "repo_name": "charlisewilliams/Adobe-SIP-2016", "src_encoding": "UTF-8", "text": "\n\nimport pygame\nimport random\n# Define some colors\nBLACK = (0, 0, 0)\nWHITE = (255, 255, 255)\nGREEN = (0, 255, 0)\nRED = (255, 0, 0)\nBLUE = (0, 0, 255)\nGREY = (129, 129, 129)\ncolors = [BLACK, GREEN, BLUE, RED]\n\ndef random_color():\n return random.choice(BLUE,RED,GREEN,ORANGE)\n\n# initialize the pygame class\npygame.init()\n\n# Set the width and height of the screen [width, height]\nSCREEN_WIDTH = 800\nSCREEN_HEIGHT = 600\nscreen = pygame.display.set_mode((SCREEN_WIDTH, SCREEN_HEIGHT))\n\n\n# Set the title of the window\npygame.display.set_caption(\"CityScroller\")\n\n# Loop until the user clicks the close button.\ndone = False\n\n# Used to manage how fast the screen updates\nclock = pygame.time.Clock()\n\n\n\nclass Building():\n def __init__(self,x_point,y_point,width,height,color):\n self.x_point=x_point\n self.y_point=y_point\n self.width=width\n self.height=height\n self.color=color\n\n a_building=Building(100,random.randint(100,200),0,RED,6)\n building2=Building(0,300,random.randint(100,250),0,BLUE)\n\n def draw(self):\n \n pygame.draw.rect(screen,RED,(self.x_point,self.y_point,self.width,self.height))\n \n\n def move(self, speed):\n self.x_point += speed\n \n \n\n\n\nclass Scroller(object):\n \n \n def __init__(self, width, height, base, colors, speed):\n self.width=width\n self.height=height\n self.base=base\n self.colors=colors\n self.speed=speed\n self.list1=[]\n\n self.combined_width=0\n\n def create_buildings(self):\n combined_width = 0 \n while combined_width <= self.width:\n self.add_building(combined_width)\n combined_width+= self.list1[-1].width\n \n\n \n\n def create_building(self, x_location):\n width=random.randint(10,60)\n building1=Building(x_location,random.randint(0,300),random.randint(-80,-200),WHITE)\n\n self.list_.append(building1)\n \n\n \n def draw_buildings(self):\n for current_building in self.building_list:\n current_building.draw()\n\n \n \n def draw(self):\n screen\n pygame.draw.rect(screen,RED,(0,random.randint(100,400),150,200))\n\n\n\n\n def move_buildings(self):\n for current_building in self.building_list:\n current_building.move(2)\n\n if self.building_list[-1].x_point> SCREEN_WIDTH:\n self.building_list.remove(sellf.building_list[-1])\n if self.building_list[0].x_point > 0:\n width=random.randint(10,60)\n x_location=self.building_list[0].x_point=width\n building=building(x_location,300,width.random.randint(-200,-800),WHITE)\n self.building_list.insert(0,building)\n\n # if last_building.x_point > 800:\n # self.building_list.remove(last_building)\n # if first_building.x_location < 0:\n # 
self.create_building\n\n\n \n\n # self.building1.move(speed)\n # a_building=Building(100,random.randint(100,200),0,RED,6)\n # building2=Building(0,300,random.randint(100,250),0,BLUE)\n \n \n\n \n \n \n\n\n\n FRONT_SCROLLER_COLORS = (0,0,30)\n MIDDLE_SCROLLER_COLORS= (30,30,100)\n BACK_SCROLLER_COLORS = (50,50,150)\n BACKGROUND_COLORS = (17, 9, 89)\n\n front_scroller = Scroller(SCREEN_WIDTH, 500, SCREEN_HEIGHT, FRONT_SCROLLER_COLOR, 3)\n middle_scroller = Scroller(SCREEN_WIDTH, 200, (SCREEN_HEIGHT - 50), MIDDLE_SCROLLER_COLOR, 2)\n back_scroller = Scroller(SCREEN_WIDTH, 20, (SCREEN_HEIGHT - 100), BACK_SCROLLER_COLOR, 1)\n\n\n\n# -------- Main Program Loop -----------\nwhile not done:\n # --- Main event loop\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n done = True\n\n # --- Game logic should go here\n\n # --- Screen-clearing code goes here\n\n # Here, we clear the screen to white. Don't put other drawing commands\n # above this, or they will be erased with this command.\n\n # If you want a background image, replace this clear with blit'ing the\n # background image.\n screen.fill(BLUE)\n\n # --- Drawing code should go here\n # build.draw()\n # build.move(5)\n # build.draw()\n # build.move(5)\n\n\n pygame.draw.rect(screen,RED,(0,random.randint(100,400),150,200))\n \n # back_scroller.create.draw_buildings()\n # back_scroller.create.move_buildings()\n # middle_scroller.create.draw_buildings()\n # # middle_scroller.move_buildings()\n # # front_scroller.draw_buildings()\n # # front_scroller.move_buildings()\n\n\n # --- Go ahead and update the screen with what we've drawn.\n pygame.display.flip()\n\n # --- Limit to 60 frames per second\n clock.tick(60)\n\n# Close the window and quit.\npygame.quit()\nexit() # Needed when using IDLE\n" }, { "alpha_fraction": 0.5270588397979736, "alphanum_fraction": 0.5317646861076355, "avg_line_length": 20.25, "blob_id": "71baef8ffc2baf7c0271e3e9150facb546553dcc", "content_id": "221439b55aaafb875d06e2ba58e67c8a7c611308", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 425, "license_type": "no_license", "max_line_length": 49, "num_lines": 20, "path": "/Week_3/dog.py", "repo_name": "charlisewilliams/Adobe-SIP-2016", "src_encoding": "UTF-8", "text": "class Dog():\n\n\t #Constuctor function\n\t def __init__(self, furColor,weight,eyecolor):\n\t \t self.furcolor = furColor\n\t \t self.weight = weight\n\t \t self.eyecolor = eyecolor\n\t \t self.name = name\n\n\t #function\n\t def bark(self):\n\t print(\"woof\")\n\t \t \n\t \tdef wag(self):\n\t \t\tprint(\"wagging tail\")\n\n def run(self\ntoto = dog(\"Brown\",25,\"Blue\",\"toto\") \n\nprint (toto.bark())\n" } ]
9
gui1080/IPI_Trabalho3_Landing_Zones
https://github.com/gui1080/IPI_Trabalho3_Landing_Zones
582d8b72aceca03d950f209dad77b1fa415e238f
e8250bc869857fee931fb4fbdfb6fc55da1739fa
e2da82781567827b222c39f1ce56157d0c6b78d5
refs/heads/master
2021-10-02T17:41:25.304504
2018-11-29T21:29:05
2018-11-29T21:29:05
153,336,221
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5578431487083435, "alphanum_fraction": 0.6019607782363892, "avg_line_length": 37.230770111083984, "blob_id": "316943eee3a0b4a8baaf67732aa5ce8e3f442142", "content_id": "6bf640e5a0309dc3d3454682be01cf748b66727f", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1020, "license_type": "permissive", "max_line_length": 208, "num_lines": 26, "path": "/code/euclidian_distance.py", "repo_name": "gui1080/IPI_Trabalho3_Landing_Zones", "src_encoding": "UTF-8", "text": "# Aluno: Guilherme Braga Pinto\r\n# 17/0162290\r\n\r\nimport numpy as np\r\n\r\ndef euclidian_distance(coordenada_asfalto, coordenada_perigo, coordenada_grama, coordenada_atual, i):\r\n\r\n\tdist_asf = ( ( ((abs(coordenada_atual[i][0] - coordenada_asfalto[0]))**2) + ((abs(coordenada_atual[i][1] - coordenada_asfalto[1]))**2) + ((abs(coordenada_atual[i][2] - coordenada_asfalto[2]))**2) ) **(1/2))\r\n\r\n\tdist_per = ( ( ((abs(coordenada_atual[i][0] - coordenada_perigo[0]))**2) + ((abs(coordenada_atual[i][1] - coordenada_perigo[1]))**2) + ((abs(coordenada_atual[i][2] - coordenada_perigo[2]))**2) ) **(1/2))\r\n\r\n\tdist_gra = ( ( ((abs(coordenada_atual[i][0] - coordenada_grama[0]))**2) + ((abs(coordenada_atual[i][1] - coordenada_grama[1]))**2) + ((abs(coordenada_atual[i][2] - coordenada_grama[2]))**2) ) **(1/2))\r\n\r\n\tif(dist_asf < dist_per): \r\n\t\tif(dist_asf < dist_gra):\r\n\t\t\treturn 1\r\n\tif(dist_gra < dist_per): \r\n\t\tif(dist_gra < dist_asf):\r\n\t\t\treturn 2\r\n\tif(dist_per < dist_gra): \r\n\t\tif(dist_per < dist_asf):\r\n\t\t\treturn 3\r\n\r\n\r\n\r\n#def vizinhos():\r\n" }, { "alpha_fraction": 0.6353166699409485, "alphanum_fraction": 0.6513115763664246, "avg_line_length": 29.239999771118164, "blob_id": "e3c12543768b3fda7f06835e3e8d51c68e53bf90", "content_id": "772280a992de29e2621ef6a1eab7a77af272c5d4", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1581, "license_type": "permissive", "max_line_length": 122, "num_lines": 50, "path": "/code/GLCM_module.py", "repo_name": "gui1080/IPI_Trabalho3_Landing_Zones", "src_encoding": "UTF-8", "text": "# Aluno: Guilherme Braga Pinto\r\n# 17/0162290\r\n\r\nimport numpy as np\r\nimport cv2\r\n\r\n# !!!Gray-Level Co-Occurrence Matrix!!!\r\n\r\ndef GLCM (imagem, altura, largura):\r\n\t\r\n\tlistHistogram = []\r\n\r\n\t# no caso, Histogram_imagem e GLCM funcionam como um dicionario para fazer a operaรงรฃo nos ifยดs dentro dos for funcionarem\r\n\t# nรฃo necessรกriamente atrelaremos um nome a cada posiรงรฃo, apenas faremos um tipo de index\r\n\tHistogram_imagem = {}\r\n\tGLCM = {}\r\n\tpixel_atual = (0, 1)\r\n\tfor x in range(altura):\r\n\t\tfor y in range(largura - 1):\r\n\t\t\t# fazendo atรฉ a largura normal nรฃo foi possรญvel, pois em algum ponto imagem[x, y +1] nรฃo existirรก \r\n\r\n\t\t\t# x_atual = [x, y]\r\n\t\t\t# y_atual = [x, y + 1]\r\n\t\t\t\r\n\t\t\tpixel_atual = (imagem[x, y], imagem[x, y + 1]) \r\n\t\t\t\r\n\t\t\t# se a posiรงรฃo que queremos nรฃo รฉ zero, entรฃo adicionamos 1, se รฉ zero, faremos com que seja 1 para iniciar\r\n\t\t\tif pixel_atual in GLCM: \r\n\t\t\t\tGLCM[pixel_atual] += 1\r\n\t\t\telse:\r\n\t\t\t\tGLCM[pixel_atual] = 1\r\n\r\n\tfor i in range(altura):\r\n\t\tfor j in range(largura):\r\n\t\t\tif imagem[i, j] in Histogram_imagem:\r\n\t\t\t\tHistogram_imagem[imagem[i, j]] += 1\r\n\t\t\telse:\r\n\t\t\t\tHistogram_imagem[imagem[i, j]] = 1\r\n\t\t\t\tlistHistogram.insert(0, imagem[i, 
j])\r\n\r\n\tlistHistogram.sort()\r\n\ttam_histogram = len(Histogram_imagem)\r\n\t# effectively build an empty matrix for the GLCM\r\n\tmatrizGLCM = np.zeros((tam_histogram, tam_histogram), dtype=np.float32)\r\n\t\r\n\tfor n, i in zip(listHistogram, range(tam_histogram)):\r\n\t\tfor m, j in zip(listHistogram, range(tam_histogram)):\r\n\t\t\tif (n, m) in GLCM:\r\n\t\t\t\tmatrizGLCM[i, j] = GLCM[(n, m)]\r\n\treturn matrizGLCM\t\r\n" }, { "alpha_fraction": 0.6007130146026611, "alphanum_fraction": 0.609463632106781, "avg_line_length": 38.880794525146484, "blob_id": "9f37590e15e1913e4faa04833897f640bf97f315", "content_id": "a19b94520ae15adae9425d4a5bada0f17ceb4a76", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6198, "license_type": "permissive", "max_line_length": 248, "num_lines": 151, "path": "/code/parte1.py", "repo_name": "gui1080/IPI_Trabalho3_Landing_Zones", "src_encoding": "UTF-8", "text": "# Student: Guilherme Braga Pinto\r\n# 17/0162290\r\n\r\n# During testing, a full run of the program took about 15-20 minutes on average.\r\n\r\nimport numpy as np\r\nimport cv2\r\nimport glob\r\nimport copy\r\nimport os\r\n\r\nfrom rgb_ycbcr import rgb_para_ycbcr\r\nfrom features import contrast\r\nfrom features import correlation\r\nfrom features import energy\r\nfrom features import homogeneity\r\nfrom GLCM_module import GLCM\r\n\r\n#-----------------------------------------------------------------------------------------------------------------------------\r\n\r\n# This is the path to my image directory from when I was working on the code on my PC;\r\n# it must be updated to run on another PC, with the path where the images are located\r\npath_asfalto=\"C:\\\\Users\\\\Guilherme Braga\\\\Desktop\\\\trab3\\\\Images\\\\asfalto\\\\*.png\"\r\npath_grama=\"C:\\\\Users\\\\Guilherme Braga\\\\Desktop\\\\trab3\\\\Images\\\\grama\\\\*.png\" \r\npath_perigo=\"C:\\\\Users\\\\Guilherme Braga\\\\Desktop\\\\trab3\\\\Images\\\\perigo\\\\*.png\"\r\n\r\n# read all the asphalt images\r\narray_imagens_asfalto = [cv2.imread(file) for file in glob.glob(path_asfalto)]\r\nnumero_imagens_asfalto = len(array_imagens_asfalto)\r\naltura_asfalto, largura_asfalto, channels_asfalto = array_imagens_asfalto[0].shape\r\n\r\n# just to check how many images were read\r\nprint(\"Asphalt images read: \")\r\nprint(numero_imagens_asfalto)\r\n\r\n# read all the grass images\r\narray_imagens_grama = [cv2.imread(file) for file in glob.glob(path_grama)]\r\nnumero_imagens_grama = len(array_imagens_grama)\r\naltura_grama, largura_grama, channels_grama = array_imagens_grama[0].shape\r\n\r\n# just to check how many images were read\r\nprint(\"Grass images read: \")\r\nprint(numero_imagens_grama)\r\n\r\n# read all the danger images\r\narray_imagens_perigo = [cv2.imread(file) for file in glob.glob(path_perigo)]\r\nnumero_imagens_perigo = len(array_imagens_perigo)\r\naltura_perigo, largura_perigo, channels_perigo = array_imagens_perigo[0].shape\r\n\r\n# just to check how many images were read\r\nprint(\"Danger images read: \")\r\nprint(numero_imagens_perigo)\r\n\r\n# in total, we read the 150 images provided\r\n\r\n#-----------------------------------------------------------------------------------------------------------------------------\r\n\r\n# to keep things simple, the KNN is applied in another module; this module saves the image features to an external file\r\n# >contrast\r\n# >correlation \r\n# >energy\r\n# >homogeneity \r\n# (all extracted from the GLCM)\r\n\r\n# build a matrix of the 4 features: position 0 = contrast, position 1 = correlation, position 2 = energy, position 3 = homogeneity \r\ncolunas, linhas = 4, 50\r\nMatriz_caracteristicas_asfalto = [[0 for x in range(colunas)] for y in range(linhas)] \r\nMatriz_caracteristicas_grama = [[0 for x in range(colunas)] for y in range(linhas)] \r\nMatriz_caracteristicas_perigo = [[0 for x in range(colunas)] for y in range(linhas)] \r\n\r\n#-----------------------------------------------------------------------------------------------------------------------------\r\n\r\n# ASPHALT\r\n\r\n# the first 25 images are training, the rest are test\r\nprint(\"Processing asphalt images...\")\r\nfor i in range(numero_imagens_asfalto):\r\n    imagem = rgb_para_ycbcr(array_imagens_asfalto[i]) \r\n\r\n    matrizGLCM = GLCM(imagem, altura_asfalto, largura_asfalto) \r\n    matrizGLCM /= np.sum(matrizGLCM)\r\n\r\n    Matriz_caracteristicas_asfalto[i][0] = contrast(matrizGLCM)\r\n    #print(\"Contrast processed!\")\r\n    Matriz_caracteristicas_asfalto[i][1] = correlation(matrizGLCM)\r\n    #print(\"Correlation processed!\")\r\n    Matriz_caracteristicas_asfalto[i][2] = energy(matrizGLCM)\r\n    #print(\"Energy processed!\")\r\n    Matriz_caracteristicas_asfalto[i][3] = homogeneity(matrizGLCM)\r\n    #print(\"Homogeneity processed!\")\r\n\r\n    print(\"The following image in the asphalt image array has just been processed!\")\r\n    print(i + 1)\r\n\r\nnp.savetxt('asfalto.txt', Matriz_caracteristicas_asfalto)\r\n\r\n#-----------------------------------------------------------------------------------------------------------------------------\r\n\r\n# GRASS\r\nprint(\"Processing grass images...\")\r\nfor i in range(numero_imagens_grama):\r\n    imagem = rgb_para_ycbcr(array_imagens_grama[i]) \r\n\r\n    matrizGLCM = GLCM(imagem, altura_grama, largura_grama) \r\n    matrizGLCM /= np.sum(matrizGLCM)\r\n\r\n    Matriz_caracteristicas_grama[i][0] = contrast(matrizGLCM)\r\n    #print(\"Contrast processed!\")\r\n    Matriz_caracteristicas_grama[i][1] = correlation(matrizGLCM)\r\n    #print(\"Correlation processed!\")\r\n    Matriz_caracteristicas_grama[i][2] = energy(matrizGLCM)\r\n    #print(\"Energy processed!\")\r\n    Matriz_caracteristicas_grama[i][3] = homogeneity(matrizGLCM)\r\n    #print(\"Homogeneity processed!\")\r\n\r\n    print(\"The following image in the grass image array has just been processed!\")\r\n    print(i + 1)\r\n\r\n\r\n\r\nnp.savetxt('grama.txt', Matriz_caracteristicas_grama)\r\n\r\n#-----------------------------------------------------------------------------------------------------------------------------\r\n\r\n# DANGER\r\n\r\nprint(\"Processing danger images...\")\r\nfor i in range(numero_imagens_perigo):\r\n    imagem = rgb_para_ycbcr(array_imagens_perigo[i]) \r\n\r\n    matrizGLCM = GLCM(imagem, altura_perigo, largura_perigo) \r\n    matrizGLCM /= np.sum(matrizGLCM)\r\n\r\n    Matriz_caracteristicas_perigo[i][0] = contrast(matrizGLCM)\r\n    #print(\"Contrast processed!\")\r\n    Matriz_caracteristicas_perigo[i][1] = correlation(matrizGLCM)\r\n    #print(\"Correlation processed!\")\r\n    Matriz_caracteristicas_perigo[i][2] = energy(matrizGLCM)\r\n    #print(\"Energy processed!\")\r\n    Matriz_caracteristicas_perigo[i][3] = homogeneity(matrizGLCM)\r\n    #print(\"Homogeneity processed!\")\r\n\r\n    print(\"The following image in the danger image array has just been processed!\")\r\n    print(i + 1)\r\n\r\n\r\nnp.savetxt('perigo.txt', Matriz_caracteristicas_perigo)\r\n\r\n#-----------------------------------------------------------------------------------------------------------------------------\r\n\r\nprint(\"Features processed successfully!\")" }, { "alpha_fraction": 0.5688468217849731, "alphanum_fraction": 0.6098321676254272, "avg_line_length": 26.69135856628418, "blob_id": "10db88a8d48c5e4c455b81dd0b7cec0320776804", "content_id": "43f4a6396e853c4dbe34a12109c2591b06802056", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9301, "license_type": "permissive", "max_line_length": 169, "num_lines": 324, "path": "/code/parte2.py", "repo_name": "gui1080/IPI_Trabalho3_Landing_Zones", "src_encoding": "UTF-8", "text": "# Student: Guilherme Braga Pinto\r\n# 17/0162290\r\n\r\nimport numpy as np\r\nimport cv2\r\nfrom euclidian_distance import euclidian_distance \r\n#from euclidian_distance import vizinhos\r\n\r\n# KNN - K Nearest Neighbours\r\n\r\n#----------------------------------------------------------READING THE FILES-------------------------------------------------------------------\r\n\r\n# read the files and split them\r\nasfalto = np.genfromtxt(\"asfalto.txt\")\r\ngrama = np.genfromtxt(\"grama.txt\")\r\nperigo = np.genfromtxt(\"perigo.txt\")\r\nprint(\"Text files read!\")\r\nprint(\"...\")\r\n\r\n# create empty test and training matrices\r\ncolunas, linhas = 4, 25\r\nMatriz_asfalto_treino = [[0 for x in range(colunas)] for y in range(linhas)] \r\nMatriz_asfalto_teste = [[0 for x in range(colunas)] for y in range(linhas)] \r\n\r\nMatriz_grama_treino = [[0 for x in range(colunas)] for y in range(linhas)] \r\nMatriz_grama_teste = [[0 for x in range(colunas)] for y in range(linhas)] \r\n\r\nMatriz_perigo_treino = [[0 for x in range(colunas)] for y in range(linhas)] \r\nMatriz_perigo_teste = [[0 for x in range(colunas)] for y in range(linhas)] \r\n\r\n#---------------------------------------------------------------FEATURE SELECTION-------------------------------------------------------------------------------\r\nfeature_asfalto_0 = 0\r\nfeature_asfalto_1 = 0\r\nfeature_asfalto_2 = 0\r\nfeature_asfalto_3 = 0\r\n\r\nfeature_perigo_0 = 0\r\nfeature_perigo_1 = 0\r\nfeature_perigo_2 = 0\r\nfeature_perigo_3 = 0\r\n\r\nfeature_grama_0 = 0\r\nfeature_grama_1 = 0\r\nfeature_grama_2 = 0\r\nfeature_grama_3 = 0\r\n\r\nfor i in range(50):\r\n\tfeature_asfalto_0 += asfalto[i][0]\r\n\tfeature_asfalto_1 += asfalto[i][1]\r\n\tfeature_asfalto_2 += asfalto[i][2]\r\n\tfeature_asfalto_3 += asfalto[i][3]\r\n\r\nfeature_asfalto_0 = (feature_asfalto_0/50)\r\nfeature_asfalto_1 = (feature_asfalto_1/50)\r\nfeature_asfalto_2 = (feature_asfalto_2/50)\r\nfeature_asfalto_3 = (feature_asfalto_3/50)\r\n\r\n#print(\"Asphalt features:\")\r\n#print(feature_asfalto_0)\r\n#print(feature_asfalto_1)\r\n#print(feature_asfalto_2)\r\n#print(feature_asfalto_3)\r\n\r\n\r\nfor i in range(50):\r\n\tfeature_grama_0 += grama[i][0]\r\n\tfeature_grama_1 += grama[i][1]\r\n\tfeature_grama_2 += grama[i][2]\r\n\tfeature_grama_3 += grama[i][3]\r\n\r\nfeature_grama_0 = (feature_grama_0/50)\r\nfeature_grama_1 = (feature_grama_1/50)\r\nfeature_grama_2 = (feature_grama_2/50)\r\nfeature_grama_3 = (feature_grama_3/50)\r\n\r\n#print(\"Grass features:\")\r\n#print(feature_grama_0)\r\n#print(feature_grama_1)\r\n#print(feature_grama_2)\r\n#print(feature_grama_3)\r\n\r\n\r\nfor i in range(50):\r\n\tfeature_perigo_0 += perigo[i][0]\r\n\tfeature_perigo_1 += 
perigo[i][1]\r\n\tfeature_perigo_2 += perigo[i][2]\r\n\tfeature_perigo_3 += perigo[i][3]\r\n\r\nfeature_perigo_0 = (feature_perigo_0/50)\r\nfeature_perigo_1 = (feature_perigo_1/50)\r\nfeature_perigo_2 = (feature_perigo_2/50)\r\nfeature_perigo_3 = (feature_perigo_3/50)\r\n\r\n#print(\"Danger features:\")\r\n#print(feature_perigo_0)\r\n#print(feature_perigo_1)\r\n#print(feature_perigo_2)\r\n#print(feature_perigo_3)\r\n\r\nfeatures_0 = [feature_perigo_0, feature_grama_0, feature_asfalto_0]\r\nfeatures_1 = [feature_perigo_1, feature_grama_1, feature_asfalto_1]\r\nfeatures_2 = [feature_perigo_2, feature_grama_2, feature_asfalto_2]\r\nfeatures_3 = [feature_perigo_3, feature_grama_3, feature_asfalto_3]\r\n\r\n\r\nprint(\"Contrast variance:\")\r\nprint(np.var(features_0))\r\nprint(\"Correlation variance:\")\r\nprint(np.var(features_1))\r\nprint(\"Energy variance:\")\r\nprint(np.var(features_2))\r\nprint(\"Homogeneity variance:\")\r\nprint(np.var(features_3))\r\n\r\nprint(\"We drop the feature that varies the least! We want distinct characteristics for each group!\")\r\nprint(\"...\")\r\nprint(\" \")\r\n\r\n# the variance of the third feature seems to be the smallest of all, representing \"Energy\".\r\n# We will drop it. \r\n\r\n\r\n#---------------------------------------------------SPLIT BETWEEN TEST AND TRAINING IMAGES--------------------------------------------------------------------------\r\n\r\n# split of the asphalt array\r\nfor i in range(50):\r\n\tif (i<25):\r\n\t\tMatriz_asfalto_treino[i][0] = asfalto[i][0]\r\n\t\tMatriz_asfalto_treino[i][1] = asfalto[i][1]\r\n\t\tMatriz_asfalto_treino[i][2] = asfalto[i][3]\r\n\t\t#Matriz_asfalto_treino[i][3] = asfalto[i][3]\r\n\telse:\r\n\t\tMatriz_asfalto_teste[(i - 25)][0] = asfalto[i][0]\r\n\t\tMatriz_asfalto_teste[(i - 25)][1] = asfalto[i][1]\r\n\t\tMatriz_asfalto_teste[(i - 25)][2] = asfalto[i][3]\r\n\t\t#Matriz_asfalto_teste[(i - 25)][3] = asfalto[i][3]\r\n\r\n# split of the grass array\r\nfor i in range(50):\r\n\tif (i<25):\r\n\t\tMatriz_grama_treino[i][0] = grama[i][0]\r\n\t\tMatriz_grama_treino[i][1] = grama[i][1]\r\n\t\tMatriz_grama_treino[i][2] = grama[i][3]\r\n\t\t#Matriz_grama_treino[i][3] = grama[i][3]\r\n\telse:\r\n\t\tMatriz_grama_teste[(i - 25)][0] = grama[i][0]\r\n\t\tMatriz_grama_teste[(i - 25)][1] = grama[i][1]\r\n\t\tMatriz_grama_teste[(i - 25)][2] = grama[i][3]\r\n\t\t#Matriz_grama_teste[(i - 25)][3] = grama[i][3]\r\n\r\n# split of the danger array\r\nfor i in range(50):\r\n\tif (i<25):\r\n\t\tMatriz_perigo_treino[i][0] = perigo[i][0]\r\n\t\tMatriz_perigo_treino[i][1] = perigo[i][1]\r\n\t\tMatriz_perigo_treino[i][2] = perigo[i][3]\r\n\t\t#Matriz_perigo_treino[i][3] = perigo[i][3]\r\n\telse:\r\n\t\tMatriz_perigo_teste[(i - 25)][0] = perigo[i][0]\r\n\t\tMatriz_perigo_teste[(i - 25)][1] = perigo[i][1]\r\n\t\tMatriz_perigo_teste[(i - 25)][2] = perigo[i][3]\r\n\t\t#Matriz_perigo_teste[(i - 25)][3] = perigo[i][3]\r\n\r\n#-----------------------------------------------------MEAN POINT OF EACH IMAGE TYPE (TRAINING)------------------------------------------------------------------------\r\n\r\nmedia_asfalto = []\r\nmedia_0 = 0\r\nmedia_1 = 0\r\nmedia_2 = 0\r\n\r\n\r\n\r\nfor i in range(25):\r\n\tmedia_0 += Matriz_asfalto_treino[i][0] \r\n\tmedia_1 += Matriz_asfalto_treino[i][1]\r\n\tmedia_2 += Matriz_asfalto_treino[i][2]\r\n\t#media_3 += Matriz_asfalto_treino[i][3]\r\n\r\nmedia_asfalto.insert(0, (media_0/25))\r\nmedia_asfalto.insert(1, (media_1/25))\r\nmedia_asfalto.insert(2, (media_2/25))\r\n#media_asfalto.insert(3, (media_3/25))\r\n\r\n\r\nmedia_grama = []\r\nmedia_0 = 0\r\nmedia_1 = 0\r\nmedia_2 = 0\r\n\r\n\r\n\r\nfor i in range(25):\r\n\tmedia_0 += Matriz_grama_treino[i][0] \r\n\tmedia_1 += Matriz_grama_treino[i][1]\r\n\tmedia_2 += Matriz_grama_treino[i][2]\r\n\t#media_3 += Matriz_grama_treino[i][3]\r\n\r\nmedia_grama.insert(0, (media_0/25))\r\nmedia_grama.insert(1, (media_1/25))\r\nmedia_grama.insert(2, (media_2/25))\r\n#media_grama.insert(3, (media_3/25))\r\n\r\n\r\nmedia_perigo = []\r\nmedia_0 = 0\r\nmedia_1 = 0\r\nmedia_2 = 0\r\n\r\n\r\n\r\nfor i in range(25):\r\n\tmedia_0 += Matriz_perigo_treino[i][0] \r\n\tmedia_1 += Matriz_perigo_treino[i][1]\r\n\tmedia_2 += Matriz_perigo_treino[i][2]\r\n\t#media_3 += Matriz_perigo_treino[i][3]\r\n\r\nmedia_perigo.insert(0, (media_0/25))\r\nmedia_perigo.insert(1, (media_1/25))\r\nmedia_perigo.insert(2, (media_2/25))\r\n\r\nprint(\"Training-set means computed!\")\r\nprint(\"...\")\r\nprint(\" \")\r\n\r\n#--------------------------------------------------TESTS---------------------------------------------------------------------------\r\n\r\nprint(\"Running tests on the asphalt images!\")\r\nprint(\"...\")\r\n\r\nnao_pousar = 0\r\npousar = 0\r\n\r\nasfalto_afirmativo = 0\r\nasfalto_negativo = 0\r\n\r\nfor i in range(25):\r\n\tteste = euclidian_distance(media_asfalto, media_perigo, media_grama, Matriz_asfalto_teste, i)\r\n\r\n\tif(teste == 1):\r\n\t\tasfalto_afirmativo += 1\r\n\telse:\r\n\t\tasfalto_negativo += 1\r\n\r\n\tif(teste == 3):\r\n\t\tnao_pousar += 1\r\n\telse:\r\n\t\tpousar += 1\r\n\r\nacerto_geral_asfalto = ((asfalto_afirmativo * 100) /25)\r\n\r\nprint(\"Number of asphalt images recognized as asphalt:\")\r\nprint(asfalto_afirmativo)\r\n\r\nprint(\"Accuracy percentage for the asphalt images:\")\r\nprint(acerto_geral_asfalto)\r\n\r\nprint(\"Running tests on the grass images!\")\r\nprint(\"...\")\r\n\r\ngrama_afirmativo = 0\r\ngrama_negativo = 0\r\n\r\nfor i in range(25):\r\n\tteste = euclidian_distance(media_asfalto, media_perigo, media_grama, Matriz_grama_teste, i)\r\n\r\n\tif(teste == 2):\r\n\t\tgrama_afirmativo += 1\r\n\telse:\r\n\t\tgrama_negativo += 1\r\n\r\n\tif(teste == 3):\r\n\t\tnao_pousar += 1\r\n\telse:\r\n\t\tpousar += 1\r\n\r\nacerto_geral_grama = ((grama_afirmativo * 100) /25)\r\n\r\nprint(\"Number of grass images recognized as grass:\")\r\nprint(grama_afirmativo)\r\n\r\nprint(\"Accuracy percentage for the grass images\")\r\nprint(acerto_geral_grama)\r\n\r\nprint(\"Running tests on the danger images!\")\r\nprint(\"...\")\r\n\r\nperigo_afirmativo = 0\r\nperigo_negativo = 0\r\n\r\nfor i in range(25):\r\n\tteste = euclidian_distance(media_asfalto, media_perigo, media_grama, Matriz_perigo_teste, i)\r\n\r\n\tif(teste == 3):\r\n\t\tperigo_afirmativo += 1\r\n\telse:\r\n\t\tperigo_negativo += 1\r\n\r\n\tif(teste == 3):\r\n\t\tnao_pousar += 1\r\n\telse:\r\n\t\tpousar += 1\r\n\r\nacerto_geral_perigo = ((perigo_afirmativo * 100) /25)\r\n\r\nacerto_medio = ((acerto_geral_perigo + acerto_geral_grama + acerto_geral_asfalto) / 3)\r\n\r\nprint(\"Number of danger images recognized as danger:\")\r\nprint(perigo_afirmativo)\r\n\r\nprint(\"Accuracy percentage for the danger images\")\r\nprint(acerto_geral_perigo)\r\n\r\nprint(\" \")\r\n\r\nprint(\"Out of 75 test images, this was the number of images considered dangerous for landing:\")\r\nprint(nao_pousar)\r\n\r\nprint(\"Out of 75 test images, this 
foi a quantidade de imagens consideradas como seguras para pouso:\")\r\nprint(pousar)\r\n\r\nprint(\"Porcentagem media dos acertos:\")\r\nprint(acerto_medio)\r\n\r\n#-----------------------------------------------------------------------------------------------------------------------------\r\n" }, { "alpha_fraction": 0.5370871424674988, "alphanum_fraction": 0.5571196675300598, "avg_line_length": 23.24657440185547, "blob_id": "36bb5a92078e6087bd0d6f871a018121c9c437a0", "content_id": "9c48becaef96a77e09c164b13ba5560c90729660", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1849, "license_type": "permissive", "max_line_length": 104, "num_lines": 73, "path": "/code/features.py", "repo_name": "gui1080/IPI_Trabalho3_Landing_Zones", "src_encoding": "UTF-8", "text": "import numpy as np\r\n\r\n# esse site tem as equaรงรตes bem explicadas para cada feature que se deseja extrair \r\n# http://matlab.izmiran.ru/help/toolbox/images/graycoprops.html\r\n\r\ndef contrast(matriz):\r\n\taltura, largura = matriz.shape\r\n\tsoma = 0\r\n\tfor x in range(altura):\r\n\t\tfor y in range(largura):\r\n\t\t\tsoma += (((abs(x - y))**2) * matriz[x, y])\r\n\t\t\t# abs = absolute value\r\n\r\n\treturn soma\r\n\r\ndef correlation(matriz):\r\n height, width = matriz.shape\r\n listSumI = []\r\n listSumJ = []\r\n\r\n for i, j in zip(range(height), range(width)):\r\n sumI = np.sum(matriz[i, :])\r\n sumJ = np.sum(matriz[:, j])\r\n listSumI.append(sumI)\r\n listSumJ.append(sumJ)\r\n sumI = 0\r\n sumJ = 0\r\n\r\n mediaI = 0\r\n mediaJ = 0\r\n\r\n for i in range(1, height+1):\r\n mediaI += i * (listSumI[i-1])\r\n for j in range(1, width+1):\r\n mediaJ += j * (listSumJ[j-1])\r\n\r\n variancia_aux_I = 0\r\n for i in range(1, height+1):\r\n variancia_aux_I += ((i - mediaI)**2) * listSumI[i - 1]\r\n desvio_padraoI = variancia_aux_I**(1/2)\r\n \r\n variancia_aux_J = 0\r\n for j in range(1, width+1):\r\n variancia_aux_J += ((j - mediaJ)**2) * listSumJ[j - 1]\r\n desvio_padraoJ = variancia_aux_J**(1/2)\r\n\r\n soma = 0\r\n for i in range(1, height+1):\r\n for j in range(1, width+1):\r\n soma += (((i - mediaI) * (j - mediaJ) * matriz[i-1, j-1])/(desvio_padraoI * desvio_padraoJ))\r\n return soma\r\n\r\n\r\ndef energy(matriz):\r\n\taltura, largura = matriz.shape\r\n\tsoma = 0\r\n\t\r\n\tfor x in range(altura):\t\r\n\t\tfor y in range(largura):\r\n\t\t\tsoma += ((matriz[x, y])**2)\r\n\treturn soma\r\n\r\n\r\ndef homogeneity(matriz):\r\n\taltura, largura = matriz.shape\r\n\tsoma = 0\r\n\t\r\n\tfor x in range(altura):\t\r\n\t\tfor y in range(largura):\r\n\t\t\tsoma += ((matriz[x, y])/(1 + (abs(x - y))))\r\n\t\t\t# abs = absolute value\r\n\t\t\t\r\n\treturn soma\r\n\r\n\r\n" }, { "alpha_fraction": 0.753333330154419, "alphanum_fraction": 0.7919999957084656, "avg_line_length": 27.846153259277344, "blob_id": "a24d1efc211d408def79241e8c62537f67676381", "content_id": "484cae124ccc121c406a5cb37537276a70301873", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 760, "license_type": "permissive", "max_line_length": 143, "num_lines": 26, "path": "/README.md", "repo_name": "gui1080/IPI_Trabalho3_Landing_Zones", "src_encoding": "UTF-8", "text": "# IPI- Trabalho3\n\nIntroduรงรฃo ao Processamento de Imagens, Prof. 
Zaghetto, (2/2018)\n\nUniversidade de Brasรญlia\n\nFeito por Guilherme Braga Pinto\n\n17/0162290\n\nData de Entrega: 05 de Dezembro de 2018\n\n# Primeira parte da tarefa\n\nProcessamos e extraimos informaรงรตes das imagens, e exportamos para que outro mรณdulo aplique o algoritmo de KNN (deve-se executar o parte1.py). \n\nLink das Imagens utilizadas: https://github.com/zaghetto/ImageProcessing/tree/master/Assignments/Assignment%203/Images\n\nSite auxiliar com equaรงรตes das features: http://matlab.izmiran.ru/help/toolbox/images/graycoprops.html\n\n\n# Segunda parte da tarefa\n\nFeatures selection e aplicaรงรฃo do KNN (deve-se executar o parte2.py).\n\nTutorial de ajuda para entendimento do KNN: https://youtu.be/N8Fabn1om2k\n" }, { "alpha_fraction": 0.5126835703849792, "alphanum_fraction": 0.584779679775238, "avg_line_length": 29.29166603088379, "blob_id": "e13cb5bb7e94dd84c33fff2f192beefdbb4e0e6b", "content_id": "1dc51cd4f385f6c0ed7669fb4eca8f9dddba1dda", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 749, "license_type": "permissive", "max_line_length": 97, "num_lines": 24, "path": "/code/rgb_ycbcr.py", "repo_name": "gui1080/IPI_Trabalho3_Landing_Zones", "src_encoding": "UTF-8", "text": "# Aluno: Guilherme Braga Pinto\r\n# 17/0162290\r\n\r\nimport numpy as np\r\n\r\ndef rgb_para_ycbcr(imagem):\r\n\r\n\r\n# img[i, j, 0] #pixel da matriz azul\r\n# img[i, j, 1] #pixel da matriz verde\r\n# img[i, j, 2]) #pixel da matriz vermelha\r\n\r\n height, width, channels = imagem.shape\r\n\r\n# crio matrizes vazias\r\n imagemY = np.zeros((height, width), dtype=np.int8)\r\n #imagemCr = np.zeros((height, width), dtype=np.int8)\r\n #imagemCb = np.zeros((height, width), dtype=np.int8)\r\n\r\n# preencho usando o que foi estabelecido\r\n imagemY[:, :] = (0.114 * imagem[:, :, 0] + 0.587 * imagem[:, :, 1] + 0.299 * imagem[:, :, 2])\r\n #imagemCr = (0.713 * imagem[:, :, 2] - 0.713 * imagemY + 128)\r\n #imagemCb = (0.564 * imagem[:, :, 0] - 0.564 * imagemY + 128)\r\n return imagemY" } ]
7
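The parte2.py excerpt above classifies each test image by comparing its 3-element feature vector against the three class means (media_asfalto, media_grama, media_perigo). As a minimal hedged sketch of that nearest-mean rule — this is not the repo's actual `euclidian_distance` helper, which is defined earlier in parte2.py outside this excerpt; the label convention (1 = asphalt, 2 = grass, 3 = danger) is inferred from the `teste == 1/2/3` checks in the test loops:

```python
import math

def nearest_mean_label(sample, mean_asphalt, mean_grass, mean_danger):
    """Return 1 (asphalt), 2 (grass) or 3 (danger) for the closest class mean."""
    def dist(a, b):
        # plain Euclidean distance over the three kept GLCM features
        return math.sqrt(sum((x - y) ** 2 for x, y in zip(a, b)))

    distances = {
        1: dist(sample, mean_asphalt),
        2: dist(sample, mean_grass),
        3: dist(sample, mean_danger),
    }
    # pick the class whose training mean is closest
    return min(distances, key=distances.get)

# Hypothetical usage mirroring the test loops above:
# teste = nearest_mean_label(Matriz_asfalto_teste[i], media_asfalto, media_grama, media_perigo)
```

Returning the label of the closest training mean is exactly the quantity the accuracy loops above count per class.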
StrayDragon/dockernel
https://github.com/StrayDragon/dockernel
adc24bcd39a2fcc29ee183785e63723ada976257
a16416391c6a9bdae68445f1e36f5308f76cc153
22a8f5c101a531ca347e38f8c16b7de95d216015
refs/heads/master
2023-05-12T08:18:09.952219
2023-04-28T08:04:04
2023-04-28T08:11:05
448,505,058
0
0
MIT
2022-01-16T09:06:51
2022-01-14T04:00:58
2021-04-05T23:30:30
null
[ { "alpha_fraction": 0.6231329441070557, "alphanum_fraction": 0.6242258548736572, "avg_line_length": 25.521739959716797, "blob_id": "833b9c00eba559ff82dd87320645e9da69235c34", "content_id": "dff74db1de70edad337dbf1d45b0e75f9ed5fd32", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5490, "license_type": "permissive", "max_line_length": 155, "num_lines": 207, "path": "/dockernel/cli/install.py", "repo_name": "StrayDragon/dockernel", "src_encoding": "UTF-8", "text": "import platform\nimport sys\nfrom argparse import Namespace\nfrom pathlib import Path\nfrom typing import TypeVar, Union\n\nimport docker\nimport rich\n\nfrom dockernel.cli.main import set_subcommand_func, subparsers\nfrom dockernel.kernelspec import (\n InterruptMode,\n Kernelspec,\n ensure_kernelspec_store_exists,\n install_kernelspec,\n user_kernelspec_store,\n)\n\narguments = subparsers.add_parser(\n __name__.split(\".\")[-1],\n help=\"Install dockerized kernel image into Jupyter.\",\n)\narguments.add_argument(\n \"--image-name\",\n help=\"Name of the docker image to use.\",\n default=\"\",\n)\narguments.add_argument(\n \"--list\",\n help=\"show installed kernelspecs\",\n action=\"store_true\",\n)\narguments.add_argument(\n \"--name\",\n help=\"Display name for the kernelspec. \" \"By default, container hostname is used.\",\n default=\"\",\n)\narguments.add_argument(\n \"--language\",\n \"-l\",\n help=\"Language used by the kernel. \"\n \"Makes notebooks written in a given language \"\n \"run on different kernels, that use the same language, \"\n \"if this one is not found. \"\n \"By default, empty value is used.\",\n default=\"\",\n)\n\nDEFAULT_KERNELS_PATH = f\"{sys.prefix}/share/jupyter/kernels\"\narguments.add_argument(\n \"--kernels-path\",\n help=f\"kernels path to install, now env is ' {DEFAULT_KERNELS_PATH} ', see https://jupyter-client.readthedocs.io/en/stable/kernels.html\", # noqa: E501\n default=DEFAULT_KERNELS_PATH,\n)\narguments.add_argument(\n \"--docker-volumes\",\n help=\"same like docker run -v, e.g. 
'/home/xxx:/home/xxx,/home/a/b:/opt/a/b'\",\n default=\"\",\n)\narguments.add_argument(\n \"--force\",\n help=\"force install\",\n action=\"store_true\",\n)\n\n\nJUPYTER_CONNECTION_FILE_TEMPLATE = \"{connection_file}\"\n\n\ndef python_argv(system_type: str) -> list[str]:\n \"\"\"Return proper command-line vector for python interpreter\"\"\"\n if system_type in {\"Linux\", \"Darwin\"}:\n argv = [\"/usr/bin/env\", \"python\", \"-m\"]\n elif system_type == \"Windows\":\n argv = [\"python\", \"-m\"]\n else:\n raise ValueError(f\"unknown system type: {system_type}\")\n return argv\n\n\ndef _flatten(elems: list[Union[list[str], str]]) -> list[str]:\n res = []\n for elem in elems:\n if isinstance(elem, list):\n for e in elem:\n res.append(e)\n else:\n res.append(elem)\n return res\n\n\ndef generate_kernelspec_argv(\n image_name: str,\n system_type: str,\n docker_volumes: str = \"\",\n) -> list[str]:\n opt_docker_volumes = []\n if docker_volumes:\n opt_docker_volumes = [\n \"-v\",\n docker_volumes,\n ]\n\n dockernel_argv = _flatten(\n [\n \"dockernel\",\n \"start\",\n opt_docker_volumes,\n image_name,\n JUPYTER_CONNECTION_FILE_TEMPLATE,\n ]\n )\n return python_argv(system_type) + dockernel_argv\n\n\ndef image_digest(docker_client: docker.client.DockerClient, image_name: str) -> str:\n image = docker_client.images.get(image_name)\n return image.attrs[\"ContainerConfig\"][\"Hostname\"]\n\n\ndef _show_installed_kernelspecs_by_rich(kernels_path: Path) -> None:\n from rich.table import Table\n\n if kernels_path.exists() and kernels_path.is_dir():\n table = Table(title=\"kernelspec\")\n\n table.add_column(\"Name\", justify=\"left\", style=\"magenta\", no_wrap=True)\n table.add_column(\"Path\", justify=\"left\", style=\"green\")\n\n for k in kernels_path.glob(\"*\"):\n if not k.is_dir():\n continue\n table.add_row(k.name, str(k))\n rich.print(table)\n else:\n rich.print(f\"[red]WARNING[/red]: kernelspec dir not exist? 
check ' {str(kernels_path)} '!\")\n\n\nT = TypeVar(\"T\")\n\n\ndef _nvl(v: T, default_v: T) -> T:\n if not v:\n return default_v\n if isinstance(v, str):\n if not v.strip():\n return default_v\n return v\n\n\ndef install(args: Namespace) -> int:\n kernels_path_str = args.kernels_path\n if not kernels_path_str:\n raise ValueError(\"--kernels-path must not empty\")\n kernels_path = Path(kernels_path_str)\n\n if bool(args.list):\n _show_installed_kernelspecs_by_rich(kernels_path)\n return 0\n\n system_type = platform.system()\n store_path = user_kernelspec_store(system_type)\n ensure_kernelspec_store_exists(store_path)\n\n docker_volumes: str = args.docker_volumes\n if not docker_volumes:\n docker_volumes = \"\"\n\n image_name: str = _nvl(args.image_name, \"\")\n name: str = _nvl(args.name, \"\")\n if not name and not image_name:\n raise ValueError(\"--image-name or --name must not empty\")\n elif name and not image_name:\n image_name = name\n elif image_name and not name:\n name = image_name\n\n argv = generate_kernelspec_argv(\n image_name,\n system_type,\n docker_volumes=docker_volumes,\n )\n\n language = args.language\n if not language:\n raise ValueError(\"--language must not empty\")\n\n kernelspec = Kernelspec(\n argv,\n name,\n language,\n interrupt_mode=InterruptMode.message,\n )\n\n force = bool(args.force)\n\n # docker_client = docker.from_env()\n # kernel_id = image_digest(docker_client, args.image_name)\n # location = kernelspec_dir(store_path, kernel_id)\n\n location = kernels_path / name\n install_kernelspec(location, kernelspec, force=force)\n # TODO: bare numbered exit statusses seem bad\n return 0\n\n\nset_subcommand_func(parser=arguments, func=install)\n" }, { "alpha_fraction": 0.6643051505088806, "alphanum_fraction": 0.6670299768447876, "avg_line_length": 30.101694107055664, "blob_id": "7a807b8133379397cf7ec8ea82fe5d0002ba208b", "content_id": "993672ac920b9cc87106a372bf6d444d62628cd1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1835, "license_type": "permissive", "max_line_length": 155, "num_lines": 59, "path": "/dockernel/cli/uninstall.py", "repo_name": "StrayDragon/dockernel", "src_encoding": "UTF-8", "text": "import shutil\nimport sys\nfrom argparse import Namespace\nfrom pathlib import Path\n\nimport rich\n\nfrom dockernel.cli.main import set_subcommand_func, subparsers\n\narguments = subparsers.add_parser(\n __name__.split(\".\")[-1],\n help=\"Uninstall dockerized kernelspec\",\n)\narguments.add_argument(\n \"--name\",\n help=\"the installed image_name or --name value from install cmd\",\n)\n\nDEFAULT_KERNELS_PATH = f\"{sys.prefix}/share/jupyter/kernels\"\narguments.add_argument(\n \"--kernels-path\",\n help=f\"kernels path to install, now env is ' {DEFAULT_KERNELS_PATH} ', see https://jupyter-client.readthedocs.io/en/stable/kernels.html\", # noqa: E501\n default=DEFAULT_KERNELS_PATH,\n)\narguments.add_argument(\n \"--dry-run\",\n help=\"not do actual operation, help for debug\",\n action=\"store_true\",\n)\n\n\ndef uninstall(args: Namespace) -> int:\n name: str = args.name or \"\"\n if not name:\n raise ValueError(\"--name\")\n\n kernels_path_str = args.kernels_path\n if not kernels_path_str:\n raise ValueError(\"--kernels-path must not empty\")\n kernels_path = Path(kernels_path_str)\n if not all({kernels_path.exists(), kernels_path.is_dir()}):\n raise ValueError(\"--kernels-path not exist\")\n\n is_dry_run = bool(args.dry_run)\n\n target_kernel_dir: Path = kernels_path / name\n if not 
all({target_kernel_dir.exists(), target_kernel_dir.is_dir()}):\n rich.print(\"[yellow]WARNING[/yellow]: not found target kernelspec, do nothing!\")\n else:\n if is_dry_run:\n rich.print(f\"[green]OK[/green]: ' {str(target_kernel_dir)} ' will be uninstalled (removed)\")\n else:\n shutil.rmtree(target_kernel_dir)\n rich.print(f\"[green]OK[/green]: ' {str(target_kernel_dir)} ' already uninstalled (removed)\")\n\n return 0\n\n\nset_subcommand_func(parser=arguments, func=uninstall)\n" }, { "alpha_fraction": 0.7386091351509094, "alphanum_fraction": 0.7386091351509094, "avg_line_length": 28.785715103149414, "blob_id": "0fe98a7e085e0d78076b86968920cc63847107f3", "content_id": "ca4f527b17d1286151389e97c4f753197bf89263", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 417, "license_type": "permissive", "max_line_length": 55, "num_lines": 14, "path": "/dockernel/cli/__init__.py", "repo_name": "StrayDragon/dockernel", "src_encoding": "UTF-8", "text": "from .install import arguments as install_arguments\nfrom .main import arguments as main_arguments\nfrom .main import run_subcommand, set_subcommand_func\nfrom .start import arguments as start_arguments\nfrom .uninstall import arguments as uninstall_arguments\n\n__all__ = (\n \"main_arguments\",\n \"install_arguments\",\n \"uninstall_arguments\",\n \"start_arguments\",\n \"set_subcommand_func\",\n \"run_subcommand\",\n)\n" }, { "alpha_fraction": 0.5955010056495667, "alphanum_fraction": 0.6118609309196472, "avg_line_length": 20.637168884277344, "blob_id": "83dff86ad187461ad4ec40e7e9224e19d90da390", "content_id": "d421c9dadc54b5b7c8ea9639c1f5e6e0229806b1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "TOML", "length_bytes": 2445, "license_type": "permissive", "max_line_length": 66, "num_lines": 113, "path": "/pyproject.toml", "repo_name": "StrayDragon/dockernel", "src_encoding": "UTF-8", "text": "[tool.pdm]\n[tool.pdm.dev-dependencies]\ndev = [\"mypy>=0.971\", \"pytest>=7.0.1\"]\n\n[project]\nname = \"dockernel\"\nversion = \"1.0.2\"\ndescription = \"Utility for creating dockerized Jupyter kernels\"\nauthors = [{ name = \"Blazej Michalik\", email = \"im.mr.mino@gmail.com\" }]\nmaintainers = [{ name = \"L8ng\", email = \"straydragonv@gmail.com\" }]\ndependencies = [\"docker\", \"rich\"]\nrequires-python = \">=3.6\"\nreadme = \"README.md\"\nlicense = { text = \"MIT\" }\nclassifiers = [\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: MIT License\",\n \"Natural Language :: English\",\n \"Operating System :: MacOS :: MacOS X\",\n \"Operating System :: Microsoft :: Windows\",\n \"Operating System :: POSIX\",\n \"Operating System :: POSIX :: BSD\",\n \"Operating System :: POSIX :: Linux\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3 :: Only\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: Implementation :: CPython\",\n]\n\n[project.urls]\nSource = \"https://github.com/MrMino/dockernel\"\n\n[project.scripts]\ndockernel = \"dockernel.__main__:main\"\n\n[build-system]\nrequires = [\"pdm-backend\"]\nbuild-backend = \"pdm.backend\"\n\n[tool.black]\nline-length = 120\ntarget-version = ['py39']\n\n[tool.mypy]\npython_version = \"3.9\"\n\n#follow_imports = \"silent\"\nwarn_redundant_casts = 
true\nwarn_unused_ignores = true\n#disallow_any_generics = true\ncheck_untyped_defs = true\nno_implicit_reexport = true\ndisallow_untyped_defs = true\n\n[[tool.mypy.overrides]]\nmodule = [\"docker\"]\nignore_missing_imports = true\n\n[[tool.mypy.overrides]]\nmodule = [\"rich\", \"rich.table\"]\nignore_missing_imports = true\n\n[tool.ruff]\nline-length = 120\n\n# Enable Pyflakes `E` and `F` codes by default.\nselect = [\n # pyflakes\n \"F\",\n # pycodestyle\n \"E\",\n \"W\",\n # isort\n \"I001\",\n # pyupgrade subset\n \"UP\",\n]\n# src = []\nignore = []\nfix = true\n\n# Exclude a variety of commonly ignored directories.\nexclude = [\n \".bzr\",\n \".direnv\",\n \".eggs\",\n \".git\",\n \".hg\",\n \".mypy_cache\",\n \".nox\",\n \".pants.d\",\n \".ruff_cache\",\n \".svn\",\n \".tox\",\n \".venv\",\n \"__pypackages__\",\n \"_build\",\n \"buck-out\",\n \"build\",\n \"dist\",\n \"node_modules\",\n \"venv\",\n]\nper-file-ignores = {}\n\ntarget-version = \"py39\"\n\n[tool.ruff.mccabe]\nmax-complexity = 10\n" }, { "alpha_fraction": 0.7160493731498718, "alphanum_fraction": 0.7201645970344543, "avg_line_length": 19.25, "blob_id": "fd2c22053672911d0421d586b97034fc152df22f", "content_id": "0d2e543afcd641bd31270c05408cb7827b237ded", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 243, "license_type": "permissive", "max_line_length": 53, "num_lines": 12, "path": "/dockernel/app.py", "repo_name": "StrayDragon/dockernel", "src_encoding": "UTF-8", "text": "from rich import traceback\n\nfrom .cli import main_arguments, run_subcommand\n\ntraceback.install(\n show_locals=True,\n)\n\n\ndef run(argv: list) -> int:\n parsed_args = main_arguments.parse_args(argv[1:])\n return run_subcommand(parsed_args)\n" } ]
5
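For context on what dockernel's `install()` above ultimately produces: a Jupyter kernelspec is a directory holding a kernel.json whose argv comes from `generate_kernelspec_argv`, with `{connection_file}` left for Jupyter to substitute at kernel start-up. Below is a minimal hand-rolled sketch of writing such a spec — the directory name, image name, and display name are made-up placeholders, not values from the repo, and the file layout follows the Jupyter kernelspec docs linked in the help strings:

```python
import json
from pathlib import Path

spec_dir = Path("example-kernel")  # hypothetical kernelspec directory
spec_dir.mkdir(parents=True, exist_ok=True)

kernel_json = {
    # Jupyter substitutes {connection_file} when it launches the kernel
    "argv": ["/usr/bin/env", "python", "-m", "dockernel", "start",
             "my-image", "{connection_file}"],
    "display_name": "my-image",  # hypothetical display name
    "language": "python",
}

with open(spec_dir / "kernel.json", "w") as f:
    json.dump(kernel_json, f, indent=2)
```

Writing the dict with `json.dump` mirrors what `install_kernelspec(location, kernelspec, force=force)` does for the `location = kernels_path / name` directory above.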
dylanbuchi/Python_Practice
https://github.com/dylanbuchi/Python_Practice
02e81e8a00da3d036ae824eeda58cbf89b0adbb3
04825a453d47732c6c86f5c9a11e999a24d1ce71
74c741a4b97ba6163c2510bcb644a40693ffa019
refs/heads/master
2023-06-22T00:20:26.982717
2021-07-22T09:39:22
2021-07-22T09:39:22
274,246,835
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7636363506317139, "alphanum_fraction": 0.7666666507720947, "avg_line_length": 21, "blob_id": "243b08db91e8565eabcdd6db6c58fd8bc95ed3a6", "content_id": "181d0c802b535c383d8b3e5cd4a534e9f3ecc56f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 330, "license_type": "no_license", "max_line_length": 93, "num_lines": 15, "path": "/README.md", "repo_name": "dylanbuchi/Python_Practice", "src_encoding": "UTF-8", "text": "# Python Practice\n\n- decorators\n- error handling\n- ds & algorithms\n- files manipulations\n- functional programming\n- generators\n- i/o streams\n- matrix\n- network\n- object oriented programming (oop)\n- regular expressions (regex)\n- strings (challenges questions are from https://www.w3resource.com/python-exercises/string/)\n- testing\n" }, { "alpha_fraction": 0.684021532535553, "alphanum_fraction": 0.684021532535553, "avg_line_length": 26.19512176513672, "blob_id": "5bd5bac1651e483af8f003bee1bc6a0ad52df08f", "content_id": "be7d3e5a62a167f7f52a278b35bc97f12b60871c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1114, "license_type": "no_license", "max_line_length": 69, "num_lines": 41, "path": "/oop/filehandler/main.py", "repo_name": "dylanbuchi/Python_Practice", "src_encoding": "UTF-8", "text": "import os\nimport sys\nsys.path.append(\"./oop\")\n\nfrom filehandler.names_file import NamesFile\n\n\ndef print_every_names_file_methods(names_file: NamesFile):\n print(\"\\nLongest name:\", names_file.get_longest_name())\n print(\"\\nSum of every names length:\",\n names_file.get_total_sum_of_names_length())\n print(\"\\nAverage of every names length:\",\n names_file.get_average_names_length())\n\n print(\"\\nShortest names: \")\n names_file.print_shortest_names()\n\n\ndef create_every_file_from_names_file_methods(names_file: NamesFile):\n\n names_file.create_names_lengths_file(\n \"oop/filehandler/output/names_lengths.txt\")\n names_file.create_sorted_names_file(\n \"oop/filehandler/output/sorted_names.txt\")\n names_file.create_sorted_names_by_length_file(\n \"oop/filehandler/output/sorted_names_by_length.txt\")\n\n\ndef main():\n\n print(os.getcwd())\n names_file = NamesFile(\"oop/filehandler/data/names.txt\")\n\n print_every_names_file_methods(names_file)\n create_every_file_from_names_file_methods(names_file)\n\n\nif __name__ == \"__main__\":\n\n print(os.getcwd())\n main()" }, { "alpha_fraction": 0.5511550903320312, "alphanum_fraction": 0.5610560774803162, "avg_line_length": 26.545454025268555, "blob_id": "9ef60c264e2d8087335bf1a04bbaa84971b3a85e", "content_id": "1eca3769d052d218bbda193f7844f23621ab59b3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 303, "license_type": "no_license", "max_line_length": 54, "num_lines": 11, "path": "/strings/pig_latin.py", "repo_name": "dylanbuchi/Python_Practice", "src_encoding": "UTF-8", "text": "def make_pig_latin(string: str):\n result = []\n for word in string.split():\n if word[0] in 'aeiou':\n result.append(f'{word}way')\n else:\n result.append(f'{word[1:]}{word[0]}ay')\n return ' '.join(result)\n\n\nprint(make_pig_latin('This sentence is in pig latin'))\n" }, { "alpha_fraction": 0.564056932926178, "alphanum_fraction": 0.5765124559402466, "avg_line_length": 20.615385055541992, "blob_id": "82c7ff2ebfad201c2cc1f87c7c6f3b78939e8b12", "content_id": "e30d1d01ee79a2739382b83fb9606c42b047fadd", "detected_licenses": [], "is_generated": 
false, "is_vendor": false, "language": "Python", "length_bytes": 562, "license_type": "no_license", "max_line_length": 52, "num_lines": 26, "path": "/oop/classes/cat.py", "repo_name": "dylanbuchi/Python_Practice", "src_encoding": "UTF-8", "text": "class Cat():\n\n species = 'mammal'\n\n def __init__(self, name, age):\n self.name = name\n self.age = age\n\n\n# 1 Create a function that finds the oldest cat\ndef get_oldest_cat_age(cats):\n return max(cats)\n\n\nif __name__ == \"__main__\":\n\n # 2 Instantiate the Cat object with 3 cats\n tom = Cat('Tom', 7)\n jerry = Cat('Jerry', 5)\n tony = Cat('Tony', 2)\n\n cats_age = [tom.age, jerry.age, tony.age]\n\n # 3 Print out: \"The oldest cat is x years old.\".\n x = get_oldest_cat_age(cats_age)\n print(f\"The oldest cat is {x} years old.\")\n" }, { "alpha_fraction": 0.42105263471603394, "alphanum_fraction": 0.4411027431488037, "avg_line_length": 14.34615421295166, "blob_id": "97956fc6b6a02dc49e5ec60c40b7e9dac320eae9", "content_id": "cb853f1aaf133f14a7f455be6391337cc1a10220", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 399, "license_type": "no_license", "max_line_length": 46, "num_lines": 26, "path": "/generators/main.py", "repo_name": "dylanbuchi/Python_Practice", "src_encoding": "UTF-8", "text": "def my_generator(n):\n for i in range(1, n + 1):\n yield i**2\n\n\ndef generator_fib(n):\n a = 0\n b = 1\n lst = [a]\n\n for _ in range(n):\n temp = a\n a = b\n b = temp + b\n lst.append(a)\n return lst\n\n\nif __name__ == \"__main__\":\n\n g = my_generator(5)\n\n for i in range(5):\n print(next(g))\n n = 8\n print(f\"\\nfib of {n}: {generator_fib(n)}\")\n" }, { "alpha_fraction": 0.6071428656578064, "alphanum_fraction": 0.6076759099960327, "avg_line_length": 37.28571319580078, "blob_id": "c7ec338e3bb1663eeff12e0cf75cfeffd6244a6b", "content_id": "1863efb38110535d50c40e348821ce7450036e0f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1876, "license_type": "no_license", "max_line_length": 77, "num_lines": 49, "path": "/oop/filehandler/names_file.py", "repo_name": "dylanbuchi/Python_Practice", "src_encoding": "UTF-8", "text": "from .file_handler import FileHandler\n\n\nclass NamesFile(FileHandler):\n \"\"\"Class to perform operations on a file that contains names\"\"\"\n def __init__(self, filename: str) -> None:\n super().__init__(filename)\n self.names = self.get_names_from_file(filename)\n\n def get_names_from_file(self, filename: str) -> list[str]:\n with open(filename) as f:\n return f.read().splitlines()\n\n def get_names_lengths(self) -> list[int]:\n return list(map(lambda x: len(x), self.names))\n\n def get_longest_name(self) -> str:\n return max(self.names, key=len)\n\n def get_total_sum_of_names_length(self) -> int:\n return sum(self.get_names_lengths())\n\n def get_average_names_length(self) -> float:\n total_sum = self.get_total_sum_of_names_length()\n length = len(self.names)\n return f\"{total_sum / length:.2f}\"\n\n def get_shortest_name(self) -> str:\n names_lengths = self.get_names_lengths()\n min_length = min(names_lengths)\n shortest_names = list(\n filter(lambda x: len(x) == min_length, self.names))\n return shortest_names\n\n def print_shortest_names(self) -> None:\n print('\\n'.join([name for name in self.get_shortest_name()]))\n\n def create_names_lengths_file(self, filename: str) -> None:\n self.write_a_list_to_a_file(filename,\n list(map(str, self.get_names_lengths())))\n\n def create_sorted_names_file(self, 
filename: str) -> None:\n sorted_names = sorted(self.names)\n self.write_a_list_to_a_file(filename, list(map(str, sorted_names)))\n\n def create_sorted_names_by_length_file(self, filename: str) -> None:\n sorted_names_by_length = sorted(self.names, key=len)\n self.write_a_list_to_a_file(filename,\n list(map(str, sorted_names_by_length)))\n" }, { "alpha_fraction": 0.5121951103210449, "alphanum_fraction": 0.5365853905677795, "avg_line_length": 15.399999618530273, "blob_id": "0d196c997b1c8ce2629393f5e34703ab8380749e", "content_id": "fc6bb2f9330f2dd1187e528504a2e948d6a962a6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 246, "license_type": "no_license", "max_line_length": 48, "num_lines": 15, "path": "/functional_programming/map/map.py", "repo_name": "dylanbuchi/Python_Practice", "src_encoding": "UTF-8", "text": "def multiply_by_2(number):\n return number * 2\n\n\ndef main():\n\n my_list = [1, 2, 3]\n map_list = list(map(multiply_by_2, my_list))\n\n print(f\"map list: {map_list}\")\n print(f\"my list: {my_list}\")\n\n\nif __name__ == \"__main__\":\n main()\n" }, { "alpha_fraction": 0.6574802994728088, "alphanum_fraction": 0.6574802994728088, "avg_line_length": 24.399999618530273, "blob_id": "e19446c0d1fdeffa976141d87869412bb5b9cfc7", "content_id": "27cbd3320b0f7ac5aca03e96718304cefeefab09", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 254, "license_type": "no_license", "max_line_length": 56, "num_lines": 10, "path": "/images/images.py", "repo_name": "dylanbuchi/Python_Practice", "src_encoding": "UTF-8", "text": "from PIL import Image, ImageFilter\n\nif __name__ == \"__main__\":\n image = Image.open(r'images\\pokemons\\bulbasaur.jpg')\n\n grey_filter = image.convert('L')\n blur_filter = image.filter(ImageFilter.BLUR)\n\n blur_filter.show()\n grey_filter.show()\n" }, { "alpha_fraction": 0.5165562629699707, "alphanum_fraction": 0.5198675394058228, "avg_line_length": 16.823530197143555, "blob_id": "fcb2f3a35add9f45678789a8d06257a120eeacef", "content_id": "57a4b5cedaebcb64474735f8c52c18aa70dfbe25", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 302, "license_type": "no_license", "max_line_length": 34, "num_lines": 17, "path": "/oop/classes/dog.py", "repo_name": "dylanbuchi/Python_Practice", "src_encoding": "UTF-8", "text": "class Dog:\n def __init__(self, name, age):\n self.name = name\n self.age = age\n\n def get_name(self):\n return self.name\n\n def get_age(self):\n return self.age\n\n\nif __name__ == \"__main__\":\n\n doggy = Dog(\"Dog\", 2)\n print(doggy.get_name())\n print(doggy.get_age())" }, { "alpha_fraction": 0.5600000023841858, "alphanum_fraction": 0.5699999928474426, "avg_line_length": 19, "blob_id": "06b43f5ca4e8a4950ad20c407ede38dd21a2ba24", "content_id": "a2ed8dad6ece51bc07b697db7f494663759d535e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 100, "license_type": "no_license", "max_line_length": 31, "num_lines": 5, "path": "/oop/main.py", "repo_name": "dylanbuchi/Python_Practice", "src_encoding": "UTF-8", "text": "from dice_game.game import Game\n\nif __name__ == \"__main__\":\n game = Game(4)\n game.play_game()\n" }, { "alpha_fraction": 0.3919753134250641, "alphanum_fraction": 0.4166666567325592, "avg_line_length": 15.25, "blob_id": "369b701738fd60135a05e80ca18ed92613670d03", "content_id": "502643792d80be87884f5559dfae6d8fe03a1765", 
"detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 324, "license_type": "no_license", "max_line_length": 34, "num_lines": 20, "path": "/decorators/main.py", "repo_name": "dylanbuchi/Python_Practice", "src_encoding": "UTF-8", "text": "def my_decorator(func):\n def box():\n print('-' * 10)\n print('|' + ' ' * 8 + '|')\n func()\n print('|' + ' ' * 8 + '|')\n print('-' * 10)\n\n return box\n\n\n@my_decorator\ndef hello():\n print(\"hello\")\n\n\nif __name__ == \"__main__\":\n hello()\n hello_2 = my_decorator(hello)\n hello_2()" }, { "alpha_fraction": 0.5290215611457825, "alphanum_fraction": 0.5373134613037109, "avg_line_length": 21.33333396911621, "blob_id": "25cf14e680b8ab008cf8c89f963dc750ae04785f", "content_id": "c2a0e093b0c667309632b93d40405da098829794", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 603, "license_type": "no_license", "max_line_length": 57, "num_lines": 27, "path": "/files_manipulations/parse_csv_file.py", "repo_name": "dylanbuchi/Python_Practice", "src_encoding": "UTF-8", "text": "def parse_csv_file(file_name: str) -> list:\n # parse a csv file into a table\n table = []\n\n with open(file_name, \"r\") as csv_file:\n for line in csv_file:\n words = line.strip().split(\",\")\n table.append(words)\n return table\n\n\ndef print_table(table: list):\n\n for row in table:\n print(f\"{row[0]:<20}\", end=\"|\")\n\n for col in row[1:]:\n print(f\"{col:>3}\", end=\"|\")\n print()\n\n\nif __name__ == \"__main__\":\n\n print()\n file_name = r\"files_manipulations\\files\\hightemp.csv\"\n table = parse_csv_file(file_name)\n print_table(table)\n" }, { "alpha_fraction": 0.537286639213562, "alphanum_fraction": 0.5543575882911682, "avg_line_length": 19.236364364624023, "blob_id": "b62869fca60d05dda377c4e6d8eb84b8be781092", "content_id": "143ea8f1700724a8c59e0a0c75c0dcd3214b610b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1113, "license_type": "no_license", "max_line_length": 78, "num_lines": 55, "path": "/decorators/timer.py", "repo_name": "dylanbuchi/Python_Practice", "src_encoding": "UTF-8", "text": "from time import perf_counter\nfrom functools import wraps\n\n\ndef timer(func):\n \"\"\"timer decorator to time a function\"\"\"\n @wraps(func)\n def inner(*args, **kwargs):\n start = perf_counter()\n result = func(*args, **kwargs)\n end = perf_counter()\n\n time_elapsed = end - start\n\n args_list = [str(i)\n for i in args] + [f\"{k}={v}\" for k, v in kwargs.items()]\n args_string = ', '.join(args_list)\n\n print(\n f\"{func.__name__}({args_string}) took {time_elapsed:.6f}s to run\")\n return result\n\n return inner\n\n\ndef calculate_recursive_fib(n):\n if n <= 2:\n return 1\n return calculate_recursive_fib(n - 1) + calculate_recursive_fib(n - 2)\n\n\n@timer\ndef fib_iterative(n):\n fib_list = [1, 1, 2]\n if n == 0:\n return 1\n\n for i in range(3, n + 1):\n fib_list.append(fib_list[-2] + fib_list[-1])\n\n return fib_list[n - 1]\n\n\n@timer\ndef fib_recursive(n):\n return calculate_recursive_fib(n)\n\n\ndef main():\n print(fib_recursive(35))\n print(fib_iterative(35))\n\n\nif __name__ == \"__main__\":\n main()\n" }, { "alpha_fraction": 0.6131578683853149, "alphanum_fraction": 0.6131578683853149, "avg_line_length": 20.11111068725586, "blob_id": "f16277b928901df3ca44973401977ff9be4788e2", "content_id": "e3b163a73b3ea7c095acd3133a1bbe6c736e9b4c", "detected_licenses": [], "is_generated": false, "is_vendor": false, 
"language": "Python", "length_bytes": 380, "license_type": "no_license", "max_line_length": 84, "num_lines": 18, "path": "/scripts/password_checker.py", "repo_name": "dylanbuchi/Python_Practice", "src_encoding": "UTF-8", "text": "def password_checker(username, password):\n pass_len = len(password)\n converted_password = '*' * pass_len\n\n print(\n f\"{username}, your password {converted_password} is {pass_len} letters long\"\n )\n\n\ndef main():\n username = input(\"Username: \")\n password = input(\"Password: \")\n\n password_checker(username, password)\n\n\nif __name__ == \"__main__\":\n main()\n" }, { "alpha_fraction": 0.5822784900665283, "alphanum_fraction": 0.5991561412811279, "avg_line_length": 17.230770111083984, "blob_id": "e17b7e1980360a8a2bc54cf2c182e7e3da09283b", "content_id": "2959f5c0a1593c9ca1501fef0ec10f23c5a06fdc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 237, "license_type": "no_license", "max_line_length": 39, "num_lines": 13, "path": "/functional_programming/reduce/reduce.py", "repo_name": "dylanbuchi/Python_Practice", "src_encoding": "UTF-8", "text": "from functools import reduce\n\n\ndef addition(my_list, index):\n return my_list + index\n\n\nif __name__ == \"__main__\":\n my_list = [1, 2, 3, 4]\n reduced = reduce(addition, my_list)\n\n print(my_list)\n print(f\"reduced: {reduced}\")\n" }, { "alpha_fraction": 0.5477792620658875, "alphanum_fraction": 0.5612382292747498, "avg_line_length": 19.108108520507812, "blob_id": "d2f638f2f88d1bcc509ca4c6b0c38bab237b1178", "content_id": "f45b64f094b4bd58615844590ff8b611ecb4099b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 743, "license_type": "no_license", "max_line_length": 68, "num_lines": 37, "path": "/data_structures/dictionary/fantasy_game_inventory.py", "repo_name": "dylanbuchi/Python_Practice", "src_encoding": "UTF-8", "text": "def addToInventory(inventory: dict, addedItems: list) -> dict:\n\n for item in addedItems:\n inventory.setdefault(item, 0)\n inventory[item] += 1\n return inventory\n\n\ndef displayInventory(inventory: dict):\n\n total = 0\n print(\"Inventory:\")\n for k, v in inventory.items():\n total += v\n print(f\"{v} {k}\")\n print(f\"Total number of items: {total}\\n\")\n\n\ndef main():\n\n inventory = ({\n 'rope': 1,\n 'torch': 6,\n 'gold coin': 42,\n 'dagger': 1,\n 'arrow': 12\n })\n\n loot = ['gold coin', 'dagger', 'gold coin', 'gold coin', 'ruby']\n\n displayInventory(inventory)\n addToInventory(inventory, loot)\n displayInventory(inventory)\n\n\nif __name__ == \"__main__\":\n main()" }, { "alpha_fraction": 0.5793731212615967, "alphanum_fraction": 0.5793731212615967, "avg_line_length": 23.725000381469727, "blob_id": "9f82c3f2071c2c9266b650c903dc212a2a52c112", "content_id": "82c19c11e8cc39b36787835cee288aa4a5a1c9dd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 989, "license_type": "no_license", "max_line_length": 89, "num_lines": 40, "path": "/scripts/letter_count.py", "repo_name": "dylanbuchi/Python_Practice", "src_encoding": "UTF-8", "text": "import sys\n\n\ndef greet(name):\n # greet\n print(f\"Hello, {name}\")\n\n\ndef get_letter_count_from(message, letter):\n # count occurences of a letter in a text\n return message.count(letter)\n\n\ndef app():\n\n name = input(\"What's your name ? \\n\").title().strip()\n greet(name)\n\n response = input(\n \"I will count how many times a specific letter occurs in a messsage. 
Ready ?! \\n\"\n ).lower().strip()\n\n while response == \"\":\n response = input(\n \"Please enter something or enter 'no' to quit:\").lower().strip()\n if response == \"no\":\n print(f\"Bye, {name}...\")\n sys.exit()\n\n message = input(\"Please enter a message: \\n\").lower().strip()\n letter = input(\n \"Wich letter would you like to count the occurrences of ? \\n\").lower(\n ).strip()\n\n letter_count = get_letter_count_from(message, letter)\n print(f\"{name}, your message has {letter_count} '{letter}' in it.\")\n\n\nif __name__ == \"__main__\":\n app()\n" }, { "alpha_fraction": 0.4673366844654083, "alphanum_fraction": 0.5276381969451904, "avg_line_length": 17.090909957885742, "blob_id": "549b67ec54f8b1165c60a3861a2dd385b90b430d", "content_id": "3fcb90ecaa6a81eb82b103174d0d07fe7670a85f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 199, "license_type": "no_license", "max_line_length": 35, "num_lines": 11, "path": "/modules/random_main.py", "repo_name": "dylanbuchi/Python_Practice", "src_encoding": "UTF-8", "text": "import random\n\nif __name__ == \"__main__\":\n print(__name__)\n print(random.randint(1, 100))\n print(random.choice([1, 2, 3]))\n\n lst = [1, 2, 3, 4, 5]\n\n random.shuffle(lst)\n print(lst)\n" }, { "alpha_fraction": 0.4950781762599945, "alphanum_fraction": 0.5263462662696838, "avg_line_length": 22.98611068725586, "blob_id": "775d4fd6b6b3d916eae629a23372ac509d571929", "content_id": "93fa69c42512b5c8be7e5d903c0b4b7a627c2256", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1727, "license_type": "no_license", "max_line_length": 76, "num_lines": 72, "path": "/functional_programming/list-dict-set comprehension/main..py", "repo_name": "dylanbuchi/Python_Practice", "src_encoding": "UTF-8", "text": "from collections import Counter\nfrom pprint import pprint\n\n\ndef main():\n # Lists comprehensions\n string = 'hello'\n\n ch_list = [ch for ch in string]\n print(f\"{ch_list=}\")\n\n numbers = [n for n in range(0, 10)]\n print(f\"{numbers=}\")\n\n square_numbers = [n**2 for n in range(0, 10)]\n print(f\"{square_numbers=}\")\n\n odd_numbers = [n for n in square_numbers if not n % 2 == 0]\n print(f\"{odd_numbers=}\")\n\n # Sets comprehensions\n my_set = {i for i in range(10)}\n my_set2 = {i for i in my_set if i > 5}\n\n print(f\"{my_set=}\")\n print(f\"{my_set2=}\")\n\n # Dictionaries comprehensions\n dict_test = dict(a=2, b=3, c=4)\n\n my_dict = {k: v**2 for k, v in dict_test.items()}\n dict_nums = {n: n * 2 for n in [1, 2, 3]}\n\n print(f\"{my_dict=}\")\n print(f\"{dict_nums=}\")\n\n # convert duplicates into a dict comprehension\n some_list = ['a', 'b', 'c', 'b', 'd', 'm', 'n', 'n']\n\n only_duplicates_letters = {\n k: v\n for k, v in Counter(some_list).items() if v >= 2\n }\n\n print(f\"{only_duplicates_letters=}\")\n\n # nested loops with lists comprehensions\n multiplication_table = [[i * j for j in range(1, 10 + 1)]\n for i in range(1, 10 + 1)]\n\n print(\"\\ntable: \", end=\"\")\n pprint(multiplication_table)\n\n l1 = ['a', 'b', 'c', 'x']\n l2 = ['a', 'e', 'f', 'c']\n\n l3 = [''.join([i, j]) for i in l1 for j in l2 if i != j]\n\n print(\"\\nletters: \")\n pprint(l3)\n\n to_zip1 = [1, 2, 3, 4, 5]\n to_zip2 = [10, 9, 8, 7, 6, 5, 4]\n\n zip_lists = [(i, j) for index_i, i in enumerate(to_zip1)\n for index_j, j in enumerate(to_zip2) if index_i == index_j]\n\n print(zip_lists)\n\n\nif __name__ == \"__main__\":\n main()\n" }, { "alpha_fraction": 0.6061538457870483, 
"alphanum_fraction": 0.607692301273346, "avg_line_length": 22.214284896850586, "blob_id": "6ccb98ef734c55f670c1cedebbcac871df1f39bf", "content_id": "7f0b4bf6ca3e0034d7284811594f1fbc8117d00b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 650, "license_type": "no_license", "max_line_length": 65, "num_lines": 28, "path": "/scripts/translator.py", "repo_name": "dylanbuchi/Python_Practice", "src_encoding": "UTF-8", "text": "import googletrans\n\n\ndef translate_in(lang, filename):\n with open(filename, 'r') as f:\n gt = googletrans.Translator()\n text = f.read()\n return gt.translate(text, lang).text\n\n\ndef append(filename, text):\n with open(filename, 'a', encoding=\"utf-8\") as f:\n f.write(\"\\n\" + text)\n\n\nif __name__ == \"__main__\":\n\n fname = r\"files_manipulations\\files\\translate.txt\"\n file_translated = r\"files_manipulations\\files\\translated.txt\"\n\n fr = translate_in('fr', fname)\n ko = translate_in('ko', fname)\n pt = translate_in('pt', fname)\n\n lst = [fr, ko, pt]\n\n for item in lst:\n append(file_translated, item)\n" }, { "alpha_fraction": 0.5901639461517334, "alphanum_fraction": 0.5983606576919556, "avg_line_length": 21.592592239379883, "blob_id": "4bfc7bb0ce70f922c62c18c3add31f419e8fc5df", "content_id": "8802c41706ceb4cfcdfe0c8011e88e5760a8b55e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 610, "license_type": "no_license", "max_line_length": 54, "num_lines": 27, "path": "/decorators/authenticated.py", "repo_name": "dylanbuchi/Python_Practice", "src_encoding": "UTF-8", "text": "# Create an @authenticated decorator\n# that only allows the function to run\n# if user1 has 'valid' set to True:\n\n\ndef authenticated(func):\n def temp(*args, **kwargs):\n if args[0]['valid']:\n print(f\"Hello {args[0]['name']}\")\n return func(*args, **kwargs)\n\n return temp\n\n\n@authenticated\ndef message_friends(user):\n print('Your message has been sent with success')\n\n\nif __name__ == \"__main__\":\n user1 = {\n 'name': 'Homer',\n 'valid': True\n # changing this will either\n # run or not run the message_friends function.\n }\n message_friends(user1)\n" }, { "alpha_fraction": 0.6201058030128479, "alphanum_fraction": 0.62328040599823, "avg_line_length": 26.823530197143555, "blob_id": "f719de1291301a1ff3d6b897ec3df31d1b60b7d7", "content_id": "2128d37284d83b65a2b2f1f75fb67e41b5950b23", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 945, "license_type": "no_license", "max_line_length": 78, "num_lines": 34, "path": "/data_structures/dictionary/count_letters.py", "repo_name": "dylanbuchi/Python_Practice", "src_encoding": "UTF-8", "text": "def count_letters(word_list: list) -> (str, int):\n\n ALPHABET = \"abcdefghijklmnopqrstuvwxyz\"\n\n letter_count = {}\n\n for letter in ALPHABET:\n letter_count[letter] = 0\n\n for word in word_list:\n for letter in word:\n if letter in letter_count:\n letter_count[letter] += 1\n count = 0\n result_letter = \"\"\n\n for key, value in letter_count.items():\n if value > count:\n result_letter = key\n count = value\n return result_letter, count\n\n\nif __name__ == \"__main__\":\n\n monty_quote = \"\"\"listen strange women lying in ponds\n distributing swords is no basis for a system of government\n supreme executive power derives from a mandate from the masses \n not from some farcical aquatic ceremony\"\"\"\n\n monty_words = monty_quote.split()\n letter, count = 
count_letters(monty_words)\n\n print(f\"The letter '{letter}' appears the most. ({count}) times in total\")" }, { "alpha_fraction": 0.5359355807304382, "alphanum_fraction": 0.5501858592033386, "avg_line_length": 25.899999618530273, "blob_id": "019f50f8ed9c15ad5c435b23fee55518d6dfc22a", "content_id": "983cb8d44d1a05eeaeb9f018ef03b423a1205d29", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1614, "license_type": "no_license", "max_line_length": 79, "num_lines": 60, "path": "/oop/classes/fighter.py", "repo_name": "dylanbuchi/Python_Practice", "src_encoding": "UTF-8", "text": "class Fighter(object):\n def __init__(self, name, health, damage_per_attack):\n self.name = name\n self.health = health\n self.damage_per_attack = damage_per_attack\n self.dead = False\n\n def __str__(self):\n return \"Fighter({}, {}, {})\".format(self.name, self.health,\n self.damage_per_attack)\n\n def attack(self, other):\n other.health -= self.damage_per_attack\n\n def is_dead(self):\n if self.health <= 0:\n self.dead = True\n return True\n return False\n\n def battle_round(self, other):\n self.attack(other)\n self.is_dead()\n winner = self.get_name_of_the_winner(other)\n return winner\n\n def check_fight_is_over(self, round_1, round_2):\n if round_1 or round_2:\n return round_1 or round_2\n\n def fight(self, other):\n while True:\n round_1 = self.battle_round(other)\n round_2 = other.battle_round(self)\n winner = self.check_fight_is_over(round_1, round_2)\n if winner:\n return winner\n\n def get_name_of_the_winner(self, other):\n if self.dead:\n return other.name\n if other.dead:\n return self.name\n\n __repr__ = __str__\n\n\ndef declare_winner(fighter1, fighter2, first_attacker):\n if first_attacker == fighter1.name:\n return fighter1.fight(fighter2)\n else:\n return fighter2.fight(fighter1)\n\n\ndef main():\n print(declare_winner(Fighter(\"Lew\", 10, 2), Fighter(\"Harry\", 5, 4), \"Lew\"))\n\n\nif __name__ == \"__main__\":\n main()\n" }, { "alpha_fraction": 0.48750001192092896, "alphanum_fraction": 0.5, "avg_line_length": 12.333333015441895, "blob_id": "9db99938f06eb5991f8b3f47da107c375bb71357", "content_id": "1020084cd07dc010aeeafdf09082029915fd4b84", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 80, "license_type": "no_license", "max_line_length": 26, "num_lines": 6, "path": "/modules/sys_main.py", "repo_name": "dylanbuchi/Python_Practice", "src_encoding": "UTF-8", "text": "import sys\n\nif __name__ == \"__main__\":\n\n args = sys.argv\n print(args[1:])\n" }, { "alpha_fraction": 0.6077551245689392, "alphanum_fraction": 0.6081632375717163, "avg_line_length": 25.354839324951172, "blob_id": "56023626b8eff67fc19a627df81639942a5f8401", "content_id": "7a46c695b5eaaae78b799fc2a3264e82600ff8ab", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2450, "license_type": "no_license", "max_line_length": 117, "num_lines": 93, "path": "/scripts/synonym.py", "repo_name": "dylanbuchi/Python_Practice", "src_encoding": "UTF-8", "text": "import random\nimport sys\n\n\ndef print_menu(menu, word_to_synonyms):\n # print the menu user interface\n print(menu)\n print_words(word_to_synonyms)\n print()\n\n\ndef user_interface(word_to_synonyms):\n # the user interface that takes a list of words to choose for getting synonyms\n menu = f\"\"\"\nChoose a word from this list and I will give you a synonym!\nHere are the words:\n \"\"\"\n print_menu(menu, 
word_to_synonyms)\n\n    text = \"What word would you like a synonym for: \"\n    user_word = \"\"\n\n    while True:\n        user_word = get_user_word(text)\n\n        if user_word in word_to_synonyms:\n            print(\n                f\"a synonym for '{user_word.title()}' is '{get_random_synonym(word_to_synonyms, user_word).title()}'\"\n            )\n            break\n        print(\"\\nPlease enter a word from the list!\")\n        print_menu(menu, word_to_synonyms)\n\n    print(\n        f\"Would you like to see the other synonyms for the word '{user_word.title()}' ? (y/n)\"\n    )\n    response = input().lower().strip()\n\n    if response == 'y':\n        print()\n        print_all_synonyms_from(user_word, word_to_synonyms)\n        sys.exit()\n\n\ndef get_user_word(text):\n    # return word from user input\n    return input(text).lower().strip()\n\n\ndef print_words(word_to_synonyms: dict):\n    # print the keys\n    for word in word_to_synonyms:\n        print(f\"\\t- {word.title()}\")\n\n\ndef get_random_synonym(word_to_synonyms: dict, word: str):\n    # return a random synonym from the given key\n    synonyms = word_to_synonyms[word]\n    random.shuffle(synonyms)\n    return synonyms[0].title()\n\n\ndef print_all_synonyms_from(word, word_to_synonyms):\n    # print every synonym for the given word\n    synonyms = word_to_synonyms[word]\n    print(f\"'{word.title()}' synonyms are:\")\n\n    for synonym in synonyms:\n        print(f\"\\t- {synonym.title()}\")\n\n\ndef main():\n\n    # list of synonyms\n    hot_synonyms = ['balmy', 'summery', 'tropical', 'boiling', 'scorching']\n    cold_synonyms = ['cool', 'chilly', 'freezing', 'frigid', 'polar']\n    happy_synonyms = ['content', 'cheery', 'merry', 'jovial', 'jocular']\n    sad_synonyms = ['unhappy', 'downcast', 'miserable', 'glum', 'melancholy']\n\n    # dict key -> value (each word maps to its own synonym list)\n    word_to_synonyms = {\n        'hot': hot_synonyms,\n        'happy': happy_synonyms,\n        'cold': cold_synonyms,\n        'sad': sad_synonyms,\n    }\n\n    # main app\n    user_interface(word_to_synonyms)\n\n\nif __name__ == '__main__':\n    main()" }, { "alpha_fraction": 0.6090225577354431, "alphanum_fraction": 0.6223893165588379, "avg_line_length": 23.490196228027344, "blob_id": "a56120650d2ed5add064e4be6632f498a76e1b69", "content_id": "1c2ccaf3459618cddeb79d25954ccf57c8c9c980", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1197, "license_type": "no_license", "max_line_length": 69, "num_lines": 51, "path": "/scripts/grade.py", "repo_name": "dylanbuchi/Python_Practice", "src_encoding": "UTF-8", "text": "def get_grades(quantity):\n    # get user grades and return as a list\n    grades = []\n    for i in range(1, quantity + 1):\n\n        # pick the right ordinal suffix (1st, 2nd, 3rd, 4th, ...)\n        suffix = {1: \"st\", 2: \"nd\", 3: \"rd\"}.get(i, \"th\")\n        grade = int(input(f\"What's your {i}{suffix} grade (0-100):\\n\"))\n        grades.append(grade)\n    return grades\n\n\ndef descending_order(grades):\n    # sort grades inplace by descending order\n    grades.sort(reverse=True)\n    print(f\"Your grades from highest to lowest are: {grades}\")\n\n\ndef ascending_order(grades):\n    # sort grades inplace by ascending order\n    grades.sort()\n\n\ndef remove_two_lowest_grades(grades):\n    # drop 2 lowest grades\n    grades.sort(reverse=True)\n    for _ in range(2):\n        print(f\"removing grade {grades.pop(-1)}\")\n\n\ndef get_highscore(grades):\n    # get highest grade\n    grades.sort(reverse=True)\n    return grades[0]\n\n\ndef app():\n    # main app\n    grades = get_grades(4)\n    print(f\"Your grades are: {grades}\")\n\n    descending_order(grades)\n    remove_two_lowest_grades(grades)\n\n    print(f\"Your remaining grades now are: {grades}\")\n    print(f\"Your highest grade is a {get_highscore(grades)}!\")\n\n\nif __name__ == 
\"__main__\":\n app()" }, { "alpha_fraction": 0.5727272629737854, "alphanum_fraction": 0.5848484635353088, "avg_line_length": 25.399999618530273, "blob_id": "a37902453e741e8aee2721daf4cb65367facbb0e", "content_id": "3fb9bc149238a9b9d22f6febb835362e534159e7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1320, "license_type": "no_license", "max_line_length": 76, "num_lines": 50, "path": "/oop/classes/rectangle.py", "repo_name": "dylanbuchi/Python_Practice", "src_encoding": "UTF-8", "text": "class Rectangle:\n def __init__(self, width, height):\n self._width = width\n self._height = height\n self._check_width_and_height_values_are_valid()\n\n def _check_width_and_height_values_are_valid(self):\n if self.width <= 0 or self.height <= 0:\n raise ValueError(\n \"The width and height values must be greater than 0\")\n\n @property\n def width(self):\n return self._width\n\n @width.setter\n def width(self, width):\n self._check_width_and_height_values_are_valid()\n self._width = width\n\n @property\n def height(self):\n return self._height\n\n @height.setter\n def height(self, height):\n self._check_width_and_height_values_are_valid()\n self._height = height\n\n def __str__(self):\n return f\"Rectangle: width: {self.width} height: {self.height}\"\n\n def __repr__(self):\n return f\"Rectangle({self.width}, {self.height})\"\n\n def __eq__(self, other):\n if self._is_rectangle(other):\n return self.height == other.height and self.width == other.width\n return False\n\n def _is_rectangle(self, object):\n return isinstance(object, Rectangle)\n\n\nif __name__ == \"__main__\":\n r1 = Rectangle(34, 12)\n r2 = Rectangle(34, 12)\n\n print(repr(r1))\n print(r1 == r2)\n" }, { "alpha_fraction": 0.5567928552627563, "alphanum_fraction": 0.5668151378631592, "avg_line_length": 18.521739959716797, "blob_id": "65d136aca9f2ab81ff4c8fa6acaf2fd242c15713", "content_id": "0d2b0e79641b0d1626e36627660fc2981fffcd8e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 898, "license_type": "no_license", "max_line_length": 72, "num_lines": 46, "path": "/scripts/miles_per_hour_conversion.py", "repo_name": "dylanbuchi/Python_Practice", "src_encoding": "UTF-8", "text": "def convert_MPH_to_MPS(speed):\n # convert miles per hour (MPH) -> meter per second (MPS)\n mps = speed / 2.237\n return mps\n\n\ndef get_MPS(data):\n # print result\n return f\"Your speed in meter per second is {data:.2f}\"\n\n\ndef get_user_MPH():\n # get user miles per hour speed for conversion\n while True:\n try:\n mph = float(input(\"What's your speed in miles per hour?\\n\"))\n if isinstance(mph, float):\n break\n except ValueError:\n print(\"Please enter a float value (ex: 2.00)\")\n\n return mph\n\n\ndef loop_app(number):\n # how many times to run the app\n for _ in range(number):\n app()\n\n\ndef app():\n # the main app\n mph = get_user_MPH()\n mps = convert_MPH_to_MPS(mph)\n\n result = get_MPS(mps)\n print(result)\n\n\ndef start():\n # start the app\n loop_app(3)\n\n\nif __name__ == \"__main__\":\n start()\n" }, { "alpha_fraction": 0.494577020406723, "alphanum_fraction": 0.5184381604194641, "avg_line_length": 16.11111068725586, "blob_id": "545aa500d8163345c46d80ab7bb01208676504a7", "content_id": "0f0f5590a9b7ec3fcbf7e79098f6c420036b31b7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 461, "license_type": "no_license", "max_line_length": 61, "num_lines": 27, "path": 
"/decorators/performace.py", "repo_name": "dylanbuchi/Python_Practice", "src_encoding": "UTF-8", "text": "import time\n\n\ndef performance(func):\n def temp(*args, **kwargs):\n start = time.time()\n result = func(*args, **kwargs)\n\n end = time.time()\n seconds = round((end - start), 2)\n\n print(f\"This function took {seconds} seconds to run\")\n return result\n\n return temp\n\n\n@performance\ndef test():\n for i in range(3434):\n i * 2\n for i in range(3430):\n i += 1\n\n\nif __name__ == \"__main__\":\n test()" }, { "alpha_fraction": 0.5292258858680725, "alphanum_fraction": 0.5576619505882263, "avg_line_length": 13.744186401367188, "blob_id": "84413db4ebc1f7131552e1237649a62b70bffdd3", "content_id": "0ace71a1b2d7b3644cbfc8a7c15ed9815c3c2236", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 633, "license_type": "no_license", "max_line_length": 29, "num_lines": 43, "path": "/closures/closures.py", "repo_name": "dylanbuchi/Python_Practice", "src_encoding": "UTF-8", "text": "def counter():\n count = 0\n\n def increment_count():\n nonlocal count\n count += 1\n return count\n\n return increment_count\n\n\ndef add_amount(amount):\n def inner(n):\n return amount + n\n return inner\n\n\ndef print_counter():\n counter_1 = counter()\n counter_2 = counter()\n\n print(counter_1())\n print(counter_1())\n\n print(counter_2())\n print(counter_2())\n\n\ndef print_add_amount():\n add_5_to = add_amount(10)\n add_1_to = add_amount(1)\n\n print(add_5_to(2))\n print(add_1_to(10))\n\n\ndef main():\n print_counter()\n print_add_amount()\n\n\nif __name__ == \"__main__\":\n main()" }, { "alpha_fraction": 0.6708229184150696, "alphanum_fraction": 0.6882793307304382, "avg_line_length": 20.157894134521484, "blob_id": "36c1a9172d9867c4e9d53a3aab421bcfe136d3c1", "content_id": "4a98887a257ea39180bbf3640f249c066c913b8e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 401, "license_type": "no_license", "max_line_length": 58, "num_lines": 19, "path": "/network/json_app.py", "repo_name": "dylanbuchi/Python_Practice", "src_encoding": "UTF-8", "text": "import json\nimport urllib.request\n\nurl = \"http://py4e-data.dr-chuck.net/comments_697177.json\"\nprint('Retrieved', url)\n\nurl_handle = urllib.request.urlopen(url)\nurl_data = url_handle.read()\nprint('Retrieved', len(url_data))\n\ndata = json.loads(url_data)\n\nresult = []\nfor key in data['comments']:\n num = int(key['count'])\n result.append(num)\n\nprint('Count:', len(result))\nprint('Sum:', sum(result))" }, { "alpha_fraction": 0.5809859037399292, "alphanum_fraction": 0.5845070481300354, "avg_line_length": 19.285715103149414, "blob_id": "fb2a08f0745a53e28761989fd2200abf22142d6d", "content_id": "dfd095799df489c13799b086cb0529c2d4bfd789", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 284, "license_type": "no_license", "max_line_length": 53, "num_lines": 14, "path": "/files_manipulations/main.py", "repo_name": "dylanbuchi/Python_Practice", "src_encoding": "UTF-8", "text": "from pathlib import Path\n\n\ndef append_text(filename, text):\n with open(filename, 'a') as f:\n f.write(text + \"\\n\")\n\n\nif __name__ == \"__main__\":\n\n file_name = r\"files_manipulations\\files\\test.txt\"\n\n for i in range(5):\n append_text(file_name, \"number: \" + str(i))\n" }, { "alpha_fraction": 0.6113536953926086, "alphanum_fraction": 0.624454140663147, "avg_line_length": 16.615385055541992, "blob_id": 
"2e44b4a75dc0b1a1b772c15f81ee9f95873c2657", "content_id": "50d5f3ac55352bcccc0bfc557b67875dd65db7cc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 229, "license_type": "no_license", "max_line_length": 57, "num_lines": 13, "path": "/regular_expressions/scripts/find_total.py", "repo_name": "dylanbuchi/Python_Practice", "src_encoding": "UTF-8", "text": "import re\n\nnumbers = list()\ntotal = 0\n\nwith open(\"regular_expressions/scripts/sum.txt\") as file:\n for line in file:\n numbers += re.findall(r\"([0-9]+)\", line.strip())\n\n\nfor i in numbers:\n total += int(i)\nprint(total)\n" }, { "alpha_fraction": 0.7166666388511658, "alphanum_fraction": 0.7166666388511658, "avg_line_length": 21, "blob_id": "81bceea3e125d8de26a2aa57169a9714a7594bff", "content_id": "03faad775149fc17b332059b04582bee3a997803", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 660, "license_type": "no_license", "max_line_length": 58, "num_lines": 30, "path": "/files_manipulations/os_file.py", "repo_name": "dylanbuchi/Python_Practice", "src_encoding": "UTF-8", "text": "import os\nimport datetime\n\nfile_name = \"files/file.txt\"\n\n\ndef main():\n # main method\n print(f\"file exists: {file_exists}\")\n print(f\"file size: {file_size}\")\n print(f\"file absolute path: {abs_path}\")\n print(f\"file last modification: {file_datetime}\")\n\n\n# check if file exists\nfile_exists = os.path.exists(file_name)\n\n# return size of the file\nfile_size = os.path.getsize(file_name)\n\n# get the last modification time of the file\ntimestamp = os.path.getmtime(file_name)\n\n# print time converted for readability\nfile_datetime = datetime.datetime.fromtimestamp(timestamp)\n\n# get the absolute path of file\nabs_path = os.path.abspath(file_name)\n\nmain()\n" }, { "alpha_fraction": 0.6360294222831726, "alphanum_fraction": 0.6764705777168274, "avg_line_length": 22.314285278320312, "blob_id": "3b8a522e3f09de58d56970b41bbca43ca18efd97", "content_id": "d95d03789d19341330f800ba69fb85d95928f2ca", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 816, "license_type": "no_license", "max_line_length": 118, "num_lines": 35, "path": "/functional_programming/main.py", "repo_name": "dylanbuchi/Python_Practice", "src_encoding": "UTF-8", "text": "from functools import reduce\n\n#1 Capitalize all of the pet names and print the list\nmy_pets = ['sisi', 'bibi', 'titi', 'carla']\n\n\ndef uppercase(string):\n return string.upper()\n\n\nprint(list(map(uppercase, my_pets)))\n\n#2 Zip the 2 lists into a list of tuples, but sort the numbers from lowest to highest.\nmy_strings = ['a', 'b', 'c', 'd', 'e']\nmy_numbers = [5, 4, 3, 2, 1]\n\nprint(list(zip(my_strings, sorted(my_numbers))))\n\n#3 Filter the scores that pass over 50%\nscores = [73, 20, 65, 19, 76, 100, 88]\n\n\ndef over_50_percent(number):\n return number > 50\n\n\nprint(list(filter(over_50_percent, scores)))\n\n\n#4 Combine all of the numbers that are in a list on this file using reduce (my_numbers and scores). 
What is the total?\ndef add(index, nums):\n    return nums + index\n\n\nprint(reduce(add, (my_numbers + scores)))\n" }, { "alpha_fraction": 0.4615384638309479, "alphanum_fraction": 0.4841628968715668, "avg_line_length": 23.55555534362793, "blob_id": "8b138b266a344895c0e11734332ab324881fb2aa", "content_id": "891747c0259b821c298deb8cf8e819e8336ce8cf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 663, "license_type": "no_license", "max_line_length": 67, "num_lines": 27, "path": "/data_structures/algorithms/sorting/bubble_sort.py", "repo_name": "dylanbuchi/Python_Practice", "src_encoding": "UTF-8", "text": "def bubble_sort(array):\n    # bubble sort algorithm\n    is_sorted = False\n    length = len(array) - 1\n\n    while not is_sorted:\n        is_sorted = True\n        for i in range(length):\n            if array[i] > array[i + 1]:\n                swap(i, (i + 1), array)\n                is_sorted = False\n\n\ndef swap(i, j, array):\n    # helper method to swap values\n    array[i], array[j] = array[j], array[i]\n\n\nif __name__ == \"__main__\":\n\n    array = [234, 9, 6, -12, 3, 0, 45, 1]\n\n    print(f\"\\nList before bubble sort: {array}\")\n    print(\"------------------------------------------------------\")\n\n    bubble_sort(array)\n    print(f\"List after bubble sort: {array}\")\n" }, { "alpha_fraction": 0.6190571188926697, "alphanum_fraction": 0.6280463337898254, "avg_line_length": 28.11046600341797, "blob_id": "dba3c3f012e8184652c531cc21f7feeea8eaaa29", "content_id": "0603dc8df13730fe1900bf1c424e48aca31748d8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5006, "license_type": "no_license", "max_line_length": 257, "num_lines": 172, "path": "/strings/challenges.py", "repo_name": "dylanbuchi/Python_Practice", "src_encoding": "UTF-8", "text": "# Challenges are from www.w3resource.com\n\n\n# helper methods\ndef print_questions_and_results(dic):\n    # print questions with responses\n    for k, v in dic.items():\n        print(k, v)\n\n\ndef add_question(dic, question, result):\n    # add question to the questions dict\n    dic[question] = result\n\n\n#--------------------------------------------\n# results of questions in order from 1 to n\n\n\ndef get_len(string):\n    #1\n    # get the length of a string\n    return f\"string length is {len(string)}\\n\"\n\n\ndef get_character_to_number_occurrence(string):\n    #2\n    # count the number of characters (character frequency) in a string\n\n    temp = {}\n    for ch in string:\n        temp.setdefault(ch, 0)\n        temp[ch] += 1\n    return temp\n\n\ndef replace_with_dollar(string):\n    #3\n    # Write a Python program to get a string from a given string\n    # where all occurrences of its first char have been changed to '$',\n    # except the first char itself\n\n    first_letter = string[0]\n    result = string[1:]\n    result = result.replace(first_letter, '$')\n\n    letters = list(result)\n    letters.insert(0, first_letter)\n\n    return \"\".join(letters)\n\n\ndef swap(string_1, string_2):\n    #4\n    # Write a Python program to get a single\n    # string from two given strings, separated by a space,\n    # and swap the first two characters of each string\n\n    # exchange the first two characters between the two strings\n    swapped_1 = string_2[:2] + string_1[2:]\n    swapped_2 = string_1[:2] + string_2[2:]\n    return f\"{swapped_1} {swapped_2}\"\n\n\ndef add_ing_or_ly(string):\n    #5\n    # Write a Python program to add 'ing' at the end of a given string (length should be at least 3).\n    # If the given string already ends with 'ing' then add 'ly' instead.\n    # If the string length of the given string is less than 3, leave it unchanged\n\n    result = string\n    str_ing = 'ing'\n    str_ly = 'ly'\n\n    if len(string) > 2:\n        if 
string.endswith('ing'):\n            result = string[:-3]\n            result += str_ly\n        else:\n            result += str_ing\n    return result\n\n\ndef longest_len(string):\n    # 6\n    # Write a Python function that takes a list of words and returns the length of the longest one\n    words = string.split()\n    longest = ''\n    for word in words:\n        if len(word) > len(longest):\n            longest = word\n    return f\"longest word is : '{longest}'\"\n\n\ndef remove_odd_index(string):\n    # 7\n    # Write a Python program to remove\n    # the characters which have odd index values of a given string\n    result = ''\n\n    for i, ch in enumerate(string):\n        if i % 2 == 0:\n            result += ch\n    return result\n\n\ndef add_tags(tag: str, string: str):\n    # 8\n    # Write a Python function to create the HTML string with tags around the word(s):\n    open_tag = f\"<{tag}>\"\n    close_tag = f\"</{tag}>\"\n\n    result = ''.join([open_tag, string, close_tag])\n\n    return result\n\n\ndef main():\n    #key -> value\n    question_to_response = {}\n\n    #1\n    add_question(\n        question_to_response,\n        \"\\n#1 Write a Python program to calculate the length of a string:\\n\",\n        get_len(DEFAULT_STRING))\n    #2\n    add_question(\n        question_to_response,\n        \"#2 Write a Python program to count the number of characters (character frequency) in a string:\\n\",\n        get_character_to_number_occurrence(DEFAULT_STRING))\n    #3\n    add_question(\n        question_to_response,\n        \"\\n#3 Write a Python program to get a string from a given string where all occurrences of its first char have been changed to '$', except the first char itself:\\n\",\n        replace_with_dollar(DEFAULT_STRING))\n    #4\n    add_question(\n        question_to_response,\n        \"\\n#4 Write a Python program to get a single string from two given strings, separated by a space and swap the first two characters of each string:\\n\",\n        swap(DEFAULT_STRING, DEFAULT_STRING_2))\n    #5\n    add_question(\n        question_to_response,\n        \"\\n#5 Write a Python program to add 'ing' at the end of a given string (length should be at least 3). If the given string already ends with 'ing' then add 'ly' instead. 
If the string length of the given string is less than 3, leave it unchanged:\\n\",\n        add_ing_or_ly(DEFAULT_STRING))\n    #6\n    add_question(\n        question_to_response,\n        \"\\n #6 Write a Python function that takes a list of words and returns the length of the longest one:\\n\",\n        longest_len(DEFAULT_STRING))\n    #7\n    add_question(\n        question_to_response,\n        \"\\n #7 Write a Python program to remove the characters which have odd index values of a given string:\\n\",\n        remove_odd_index(DEFAULT_STRING))\n    #8\n    add_question(\n        question_to_response,\n        \"\\n #8 Write a Python function to create the HTML string with tags around the word(s):\\n\",\n        add_tags('p', DEFAULT_STRING))\n\n    print_questions_and_results(question_to_response)\n\n\nif __name__ == \"__main__\":\n\n    # default strings used for challenges\n    DEFAULT_STRING = \"welcome to String world!\"\n    DEFAULT_STRING_2 = \"Good morning!\"\n\n    print(f\"the string to be used will be : '{DEFAULT_STRING}':\")\n\n    main()" }, { "alpha_fraction": 0.5355191230773926, "alphanum_fraction": 0.5811518430709839, "avg_line_length": 21.22222137451172, "blob_id": "805c1d137033d197ba4e2f0bfbf94c59b6c47893", "content_id": "a42dcb5e00d28488f7b2d7f4ac12e50f16a61d03", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 382, "license_type": "no_license", "max_line_length": 46, "num_lines": 18, "path": "/functional_programming/zip/zip.py", "repo_name": "dylanbuchi/Python_Practice", "src_encoding": "UTF-8", "text": "import random\n\n\ndef create_random_list():\n    nums = []\n    for i in range(4):\n        nums.append(random.randint(1, 10))\n    return nums\n\n\nif __name__ == \"__main__\":\n    list_1 = create_random_list()\n    list_2 = create_random_list()\n    list_3 = create_random_list()\n\n    print(list_1, list_2, list_3)\n    zipped = list(zip(list_1, list_2, list_3))\n    print(f\"zipped: {zipped}\")\n" }, { "alpha_fraction": 0.6382306218147278, "alphanum_fraction": 0.6382306218147278, "avg_line_length": 17.828571319580078, "blob_id": "47cd4993030643683a5e706fdd4dd370929e436e", "content_id": "ab27ae464f4326354f9b75eb0dbaf5a20590ea93", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 633, "license_type": "no_license", "max_line_length": 43, "num_lines": 35, "path": "/files_manipulations/directories.py", "repo_name": "dylanbuchi/Python_Practice", "src_encoding": "UTF-8", "text": "import os\n\n# create folder\nos.mkdir(\"hehe\")\n\n# remove folder\nos.rmdir(\"hehe\")\n\n# rename folder or file\nos.rename(\"a\", \"b\")\nos.rename(\"b\", \"a\")\n\ndef get_current_path():\n    # get current working path\n    current_directory = os.getcwd()\n\n    print(current_directory)\n    return current_directory\n\nget_current_path()\n\n# list items in selected directory\nprint(os.listdir(\"a\"))\n\ndir = \"a\"\n\n# check whether each item is a file or a folder\nfor name in os.listdir(dir):\n    \n    fullname = os.path.join(dir, name)\n    \n    if os.path.isdir(fullname):\n        print(f\"{fullname} is a directory\")\n    else:\n        print(f\"{fullname} is a file\") \n    \n\n" }, { "alpha_fraction": 0.5, "alphanum_fraction": 0.5353982448577881, "avg_line_length": 19.636363983154297, "blob_id": "ec4464fc6efba50347d1f0eacb20bb7", "content_id": "d4f8a6adccb8ccc5c87332a5358d4ddc24464cc4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 226, "license_type": "no_license", "max_line_length": 44, "num_lines": 11, "path": "/functional_programming/filter/filter.py", "repo_name": 
"dylanbuchi/Python_Practice", "src_encoding": "UTF-8", "text": "def only_even(number):\n return number % 2 == 0\n\n\nif __name__ == \"__main__\":\n\n my_list = [1, 2, 3, 4, 5, 6]\n evens = list(filter(only_even, my_list))\n\n print(f\"filter list: {evens}\")\n print(f\"my list: {my_list}\")" }, { "alpha_fraction": 0.5375128388404846, "alphanum_fraction": 0.5734840631484985, "avg_line_length": 22.7560977935791, "blob_id": "57575be401a44e408dffa20c4cdbe227e6e6d91f", "content_id": "b6365cdea4e4733b967112d717fe66eeeda79cef", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 973, "license_type": "no_license", "max_line_length": 58, "num_lines": 41, "path": "/data_structures/lists/main.py", "repo_name": "dylanbuchi/Python_Practice", "src_encoding": "UTF-8", "text": "def string_from(array: list) -> str:\n # transform a List into a String\n convert = [str(i) for i in array]\n convert.insert(-1, \"and\")\n string_before_last = \", \".join(convert[:-1])\n last_part = convert[-1]\n result = \" \".join([string_before_last, last_part])\n return result\n\n\n# list of integers and returns the sum of those items \\\n# in the list that are not divisible by\n\n\ndef strange_sum(numbers: list) -> int:\n total = 0\n for i in numbers:\n if i % 3 == 0: continue\n total += i\n return total\n\n\ndef main():\n\n print(strange_sum([1, 2, 3, 4, 5, 1, 2, 3, 4, 5]))\n print(strange_sum(list(range(123)) + list(range(77))))\n\n array = ['apples', 'bananas', 'tofu', 'cats']\n array_2 = [3, 3, 3, 10, 15]\n array_3 = [1, 2, 'apples', 'bananas']\n\n print(f\"strange sum: {strange_sum(array_2)}\")\n\n examples = array, array_2, array_3\n\n for i in examples:\n print(string_from(i))\n\n\nif __name__ == \"__main__\":\n main()" }, { "alpha_fraction": 0.6191176176071167, "alphanum_fraction": 0.6338235139846802, "avg_line_length": 18.428571701049805, "blob_id": "9c23d7791cfc3539b0aef5e8ed2389d8ac146bef", "content_id": "1480b073dd3a858421395b44ac7d6d561ddc8760", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 680, "license_type": "no_license", "max_line_length": 80, "num_lines": 35, "path": "/files_manipulations/csv_files.py", "repo_name": "dylanbuchi/Python_Practice", "src_encoding": "UTF-8", "text": "import csv\n\nfilename = \"files/FL_insurance_sample.csv\"\n\nfile = open(filename)\n\ncsv_file = csv.reader(file)\n\n# get 3 first objects in a line\nfor line in csv_file:\n policyID, statecode, country = line[0:3]\n\n if not line[0].isdigit():\n continue\n\n print(\n f\"Policy ID: {policyID}\\nState code: {statecode}\\nCountry: {country}\\n\")\n break\n\nfile.close()\n\n\n# write to a csv file\npeople = [[\"Bob\", \"20\"], [\"John\", \"45\"], [\"Doug\", \"69\"]]\n\nfilename = \"files/people.csv\"\nwith open(filename, \"w\") as people_csv:\n writer = csv.writer(people_csv)\n writer.writerows(people)\n\n\n# read it\nwith open(filename) as file:\n for line in file:\n print(line.strip())\n" }, { "alpha_fraction": 0.5557404160499573, "alphanum_fraction": 0.6156405806541443, "avg_line_length": 21.296297073364258, "blob_id": "be5e6abd29138c987fa8457c7e2607e0a68c4ed6", "content_id": "85315a94e4afe8a83428b68375ce5b97ea6a6d34", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 601, "license_type": "no_license", "max_line_length": 67, "num_lines": 27, "path": "/strings/main.py", "repo_name": "dylanbuchi/Python_Practice", "src_encoding": "UTF-8", "text": "def 
count_vowels(word: str) -> int:\n    total = 0\n    vowels = ['a', 'e', 'i', 'o', 'u']\n\n    for i in word:\n        if (i in vowels):\n            total += 1\n    return total\n\n\ndef demystify(l1_string: str) -> str:\n    result = l1_string.replace(\"l\", \"a\")\n    result = result.replace(\"1\", \"b\")\n    return result\n\n\ndef main():\n\n    print(demystify(\"lll111l1l1l1111lll\"))\n    print(demystify(\"111l1l11l11lll1lll1lll11111ll11l1ll1l111\"))\n\n    print(count_vowels(\"aaassseefffgggiiijjjoOOkkkuuuu\"))\n    print(count_vowels(\"aovvouOucvicIIOveeOIclOeuvvauouuvciOIsle\"))\n\n\nif __name__ == \"__main__\":\n    main()" }, { "alpha_fraction": 0.4951139986515045, "alphanum_fraction": 0.5407165884971619, "avg_line_length": 21.928571701049805, "blob_id": "3ac78dc5fe31bad97e0752e345fdf878c6258951", "content_id": "019060ce091542aad7fedeff6487efad9eabacbc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 307, "license_type": "no_license", "max_line_length": 58, "num_lines": 14, "path": "/functional_programming/lambda/main.py", "repo_name": "dylanbuchi/Python_Practice", "src_encoding": "UTF-8", "text": "if __name__ == \"__main__\":\n\n    # square\n    my_list = [3, 4, 5]\n\n    square_list = list(map(lambda item: item**2, my_list))\n\n    print(square_list)\n\n    # sorting list by second tuple item\n    list_tuples = [(0, 2), (4, 3), (9, 9), (10, -1)]\n\n    list_tuples.sort(key=lambda t: t[1])\n    print(list_tuples)\n" }, { "alpha_fraction": 0.5476358532905579, "alphanum_fraction": 0.573369562625885, "avg_line_length": 19.47222137451172, "blob_id": "04034e19d74c21f0215c7d1daaa54a209b9df4c3", "content_id": "4f2e259399a86cf73f2f2ddedd2c9acca11327b3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 736, "license_type": "no_license", "max_line_length": 52, "num_lines": 36, "path": "/matrix/matrix.py", "repo_name": "dylanbuchi/Python_Practice", "src_encoding": "UTF-8", "text": "import numpy\n\n\ndef trace(matrix: list) -> int:\n    # calculate the trace (sum of the main diagonal)\n    total = 0\n\n    for i in range(len(matrix)):\n        total += matrix[i][i]\n    return total\n\n\ndef construct_matrix(rows: int, columns: int) -> list:\n    # construct a matrix\n    matrix = []\n\n    for row in range(rows):\n        new_row = []\n        for col in range(columns):\n            new_row.append(row * col)\n        matrix.append(new_row)\n    return matrix\n\n\ndef display_matrix(matrix: list):\n    # display the matrix\n    print(\"--\" * len(matrix) * 2)\n    print(numpy.matrix(matrix))\n    print(\"--\" * len(matrix) * 2)\n\n\nif __name__ == \"__main__\":\n\n    matrix = construct_matrix(5, 9)\n    print(f\"trace is: {trace(matrix)} \\n\")\n    display_matrix(matrix)" }, { "alpha_fraction": 0.4969325065612793, "alphanum_fraction": 0.5128834247589111, "avg_line_length": 21.027027130126953, "blob_id": "a994fce2b99c739d3457d1d610303fbc114df9d2", "content_id": "30091c6d1939544f9765fa086f7f0067e8e5d1a2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 815, "license_type": "no_license", "max_line_length": 59, "num_lines": 37, "path": "/data_structures/algorithms/sorting/selection_sort.py", "repo_name": "dylanbuchi/Python_Practice", "src_encoding": "UTF-8", "text": "def selection_sort(array: list):\n\n    length = range(len(array) - 1)\n\n    for i in length:\n        i_min = i\n        for j in range(i + 1, 
len(array)):\n            if array[j] < array[i_min]:\n                i_min = j\n        if i_min != i:\n            array[i_min], array[i] = array[i], array[i_min]\n\n\ndef find_smallest(array):\n    small = array[0]\n    small_index = 0\n    for i in range(1, len(array)):\n        if array[i] < small:\n            small = array[i]\n            small_index = i\n\n    return small_index\n\n\ndef selection_sort_2(array):\n    a = []\n    for _ in range(len(array)):\n        small = find_smallest(array)\n        a.append(array.pop(small))\n    return a\n\n\nif __name__ == \"__main__\":\n    array = [9, 8, 7, 0, 1]\n    print(array)\n    print(\"selection sort 2: \")\n    print(selection_sort_2(array))\n" }, { "alpha_fraction": 0.5987654328346252, "alphanum_fraction": 0.6037036776542664, "avg_line_length": 17.409090042114258, "blob_id": "5fb14bcb4663d5f29201f1dfd27fbe4c29977233", "content_id": "e19512074b933c1c869d165dcac3c78369611831", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 810, "license_type": "no_license", "max_line_length": 43, "num_lines": 44, "path": "/files_manipulations/file.py", "repo_name": "dylanbuchi/Python_Practice", "src_encoding": "UTF-8", "text": "file_name = \"files/file.txt\"\n\n\ndef write_file(file_name, line):\n    # method to write into a file\n    with open(file_name, \"w\") as file:\n        for i in range(10):\n            file.write(line)\n\n\ndef read_file(file_name):\n    # method to read the file\n    with open(file_name) as file:\n        for line in file:\n            print(line.strip())\n\n\ndef append_file(file_name, line):\n    # method to append to file\n    with open(file_name, \"a\") as file:\n        file.write(line)\n\n\ndef start():\n    # main\n    write_file(file_name, \"From abc@email.com\\n\")\n    append_file(file_name, \"Hello\")\n    read_file(file_name)\n    print(count_file(file_name))\n\n\ndef count_file(file_name):\n    # count every line in the file, using a fresh handle\n    count = 0\n    with open(file_name) as file:\n        for line in file:\n            count += 1\n    return count\n\n\nstart()\n" }, { "alpha_fraction": 0.5343915224075317, "alphanum_fraction": 0.5573770403862, "avg_line_length": 15.25, "blob_id": "c39408d370c98047e78718fe999568878fc16155", "content_id": "171102ee545f93269f5e4ac8ed16f250cc3940a2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 183, "license_type": "no_license", "max_line_length": 43, "num_lines": 12, "path": "/data_structures/tuples/namedtuples.py", "repo_name": "dylanbuchi/Python_Practice", "src_encoding": "UTF-8", "text": "from collections import namedtuple\n\n\ndef main():\n    Point = namedtuple('Point', ['x', 'y'])\n    coords = Point(x=20, y=20)\n\n    print(coords)\n\n\nif __name__ == \"__main__\":\n    main()\n" }, { "alpha_fraction": 0.6288848519325256, "alphanum_fraction": 0.6288848519325256, "avg_line_length": 23.863636016845703, "blob_id": "cceb37f715a90dfa07f955cc5d62d8dda72b1729", "content_id": "27a994d9344ef1bb5d156f0fc3fa59c60726f870", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 547, "license_type": "no_license", "max_line_length": 58, "num_lines": 22, "path": "/testing/rearrange/rearrange_test.py", "repo_name": "dylanbuchi/Python_Practice", "src_encoding": "UTF-8", "text": "from testing.rearrange.rearrange import rearrange_name\nimport unittest\n\nclass TestRearrange(unittest.TestCase):\n\n    def test_basic(self):\n        testcase = \"Dylan Buchi\"\n        result = \"Buchi Dylan\"\n        self.assertEqual(rearrange_name(testcase), result)\n\n    def test_empty(self):\n        testcase = \"\"\n        result = \"\"\n        self.assertEqual(rearrange_name(testcase), result)\n\n    def test_one_name(self):\n        testcase = \"Dylan\"\n        result = \"Dylan\"\n        self.assertEqual(rearrange_name(testcase), result)\n    \n\nif __name__ == \"__main__\":\n    unittest.main()\n" }, { "alpha_fraction": 0.4972502291202545, "alphanum_fraction": 
0.5077910423278809, "avg_line_length": 19.203702926635742, "blob_id": "0f5c6ba7a73359958747b4471f658caec0076ea8", "content_id": "89e564ef097cf749c14b5b8b95a27d7440db86ee", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2182, "license_type": "no_license", "max_line_length": 61, "num_lines": 108, "path": "/oop/classes/car.py", "repo_name": "dylanbuchi/Python_Practice", "src_encoding": "UTF-8", "text": "class Car:\n    def __init__(self,\n                 make='unknown',\n                 model='unknown',\n                 color='black',\n                 year=2000,\n                 price=10000.0):\n        # constructor\n        self._make = make\n        self._model = model\n        self._color = color\n        self._year = year\n        self._price = price\n\n# ------- getters and setters -------\n\n    @property\n    def make(self):\n\n        return self._make\n\n    @make.setter\n    def make(self, make):\n        self._make = make\n\n    @property\n    def model(self):\n        return self._model\n\n    @model.setter\n    def model(self, model):\n        self._model = model\n\n    @property\n    def color(self):\n        return self._color\n\n    @color.setter\n    def color(self, color):\n        self._color = color\n\n    @property\n    def year(self):\n        return self._year\n\n    @year.setter\n    def year(self, year):\n        self._year = year\n\n    @property\n    def price(self):\n        return self._price\n\n    @price.setter\n    def price(self, price):\n        if price >= 0:\n            if price == 0:\n                print(\"This car is free!\")\n            self._price = price\n        else:\n            print(\"Error, Price is less than 0\")\n\n\n# ------- to string method -------\n\n    def __str__(self):\n        line = '---------------------'\n        return f\"\"\"\n{line}\nMake:\\t {self.make.title()}\nModel:\\t {self.model.title()}\nColor:\\t {self.color.title()}\nYear:\\t {self.year}\nPrice:\\t {self.price}\n{line}\n\"\"\"\n\n\ndef main():\n    # main method\n    car = Car('buick', 'lesabre', 'red', 2000, 231)\n    print(car)\n\n    car.model = 'Batmobile'\n    car.price = 0\n    car.color = 'Black'\n\n    print(car)\n\n    # get cars from csv file and print them\n    filename = r\"files_manipulations\\files\\cars.csv\"\n\n    cars = []\n    with open(filename) as csv_file:\n        reader = csv_file.readlines()\n        for data in reader[1:]:\n            # strip the trailing newline before splitting the row\n            make, model, color, year, price = data.strip().split(',')\n            cars.append(Car(make, model, color, year, price))\n\n    print(f\"There are {len(cars)} cars:\")\n\n    for i, car in enumerate(cars):\n\n        print(f\"Car #{i+1}: {car}\")\n\n\nif __name__ == \"__main__\":\n    main()\n" }, { "alpha_fraction": 0.5915641188621521, "alphanum_fraction": 0.5954305529594421, "avg_line_length": 22.58730125427246, "blob_id": "7a7efd65845ad24cc6bf7fe8754267d8d23f69a5", "content_id": "49175086a15e3e3cc8364ab9d9aca050a7785865", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2845, "license_type": "no_license", "max_line_length": 78, "num_lines": 126, "path": "/scripts/admin_login.py", "repo_name": "dylanbuchi/Python_Practice", "src_encoding": "UTF-8", "text": "import sys\nimport getpass\n\n\ndef user_interface():\n    # user interface interaction\n    username = get_username()\n    user_to_password.setdefault(username, 0)\n\n    while True:\n        password = get_password()\n        if user_to_password[username] == 0:\n            break\n        if check_user_password(\n                username,\n                password) == False and user_to_password[username] != 0:\n            print(\"wrong password\")\n            continue\n        break\n\n    if is_new_user(username):\n        print(f\"Welcome {username}, you are now logged in!\")\n\n    else:\n        print(f\"Welcome back {username}\")\n\n    add_user(username, password)\n\n    # admin\n    if is_admin(username):\n        print(\"The current user database: \")\n        for user, password 
in user_to_password.items():\n            print(f\"Username: {user}, Password: {password}\")\n    # normal users\n    else:\n        response = input(\"press '1' to change your password or '0' to exit: \")\n\n        if response == '0':\n            sys.exit()\n        elif response == '1':\n            change_password()\n\n\ndef change_password():\n    # change user's password\n    password = ''\n    confirm_password = ''\n\n    while len(password) < 8:\n        password = getpass.getpass(\"Enter your new password (at least 8 characters): \")\n\n    while (password != confirm_password):\n        confirm_password = getpass.getpass(\"Confirm your new password: \")\n\n    print(\"Password changed successfully!\")\n\n\ndef check_user_password(user, password):\n    # check if the user is entering their correct password\n    user_password = user_to_password[user]\n\n    return user_password == password\n\n\ndef is_admin(user):\n    # return true if user is an admin\n    return user == 'admin'\n\n\ndef is_new_user(user):\n    # return true if user is a new one\n    return user_to_password[user] == 0\n\n\ndef add_user(user, password):\n    # add user to the dict of user to password\n    if user_to_password[user] == 0:\n        user_to_password[user] = password\n\n\ndef prompt_for_password():\n    # ask user to enter a valid password\n    password_invalid = True\n    while password_invalid:\n\n        password = get_password()\n        if is_valid_password(password):\n            password_invalid = False\n    return password\n\n\ndef get_password():\n    # get user's password\n    return getpass.getpass(\"Enter your password: \")\n\n\ndef get_username():\n    # get user's username\n    return input(\"Enter your username: \")\n\n\ndef is_valid_password(password):\n    # return true if the password is valid (at least 8 characters)\n    return len(password) >= 8\n\n\ndef app():\n    # main app\n\n    user_interface()\n\n\nif __name__ == '__main__':\n\n    user_to_password = {\n        'admin': '12345678',\n        'johny4': 'johnn',\n        'bobby34': 'hahaha',\n        'helmet23': 'password',\n        'doudou': 'hello21',\n    }\n\n    app()" }, { "alpha_fraction": 0.5914480686187744, "alphanum_fraction": 0.6148041486740112, "avg_line_length": 28.606382369995117, "blob_id": "117b154d491e93b65d549eee42d9f3f04f25242b", "content_id": "8e45621f3e8a28d529b27e5ac2d7e42f85748eea", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2783, "license_type": "no_license", "max_line_length": 82, "num_lines": 94, "path": "/regular_expressions/regex.py", "repo_name": "dylanbuchi/Python_Practice", "src_encoding": "UTF-8", "text": "import re\n\n\ndef search_pattern(regex, string, specialcase=re.UNICODE):\n    # search pattern regex in the string, optional arg unicode by default\n    result = re.search(regex, string, specialcase)\n    return result\n\n\ndef regex_examples():\n\n    # the r is for a raw string\n    # the . 
matches any single character\n    print(search_pattern(r\"l.l\", \"lol\"))\n\n    # use [] to specify a range\n    print(search_pattern(r\"[0-9]\", \"abcd2\"))\n\n    # [a character class with lower and upper case letters]\n    print(search_pattern(r\"[Jj]ava\", \"java\"))\n    print(search_pattern(r\"[pP]ython\", \"Python\"))\n\n    # ignore upper/lower case with re.IGNORECASE\n    print(search_pattern(r\".eans\", \"Jeans\", re.IGNORECASE))\n\n    # (the | means or) to match either alternative\n    print(search_pattern(r\"cat|dog\", \"there is a cat\"))\n\n    # (* qualifier) matches the preceding pattern zero or more times\n    print(search_pattern(r\"py[a-z]*n\", \"pytttttthhhhhhoooooooon\"))\n    print(search_pattern(r\"numbers:[0-9]..*\", \"numbers:01234567..\"))\n\n    # (+ qualifier) matches the preceding pattern one or more times\n    print(search_pattern(r\"a+h!\", \"aaaaaaah!\"))\n    print(search_pattern(r\"l+o+l+!\", \"lllooooooolllll!\"))\n\n    # (the ^ is used to match on the start of the line!)\n    print(search_pattern(r\"^start\", \"start, end!\"))\n\n    # (the $ is used to match on the end of the line!)\n    print(search_pattern(r\"end$\", \"start, end\"))\n\n    # (the ? is used to match the character before it 0 or 1 time)\n    print(search_pattern(r\"j?ava\", \"lava\"))\n    print(search_pattern(r\"j?ava\", \"java\"))\n\n    # the \\ is to escape a character\n    print(search_pattern(r\"\\.com\", \"google.com\"))\n\n    # \\w is used to match alphanumeric and underscore characters only\n    print(search_pattern(r\"\\w*\", \"abc089__hehe\"))\n\n    # \\d is used to match any digits characters only\n    print(search_pattern(r\"\\d*\", \"123\"))\n\n    # \\s is used to match whitespace characters only\n    print(search_pattern(r\"\\shey*\", \" hey\"))\n\n\ndef practice():\n\n    pattern = r\"^[a-zA-Z\\s]*[0-9]*$\"\n\n    print(search_pattern(pattern, \"this is valid\"))\n    print(search_pattern(pattern, \"this is valid 34\"))\n    print(search_pattern(pattern, \"ThisIsValid\"))\n    print(search_pattern(pattern, \" ThisIsValid \"))\n\n    time = r\"(1[012]|[0-9]):([0-5][0-9]) ?([AaPp][Mm])\"\n    print(search_pattern(time, \"12:00 AM\"))\n\n    pattern = r\"^[\\w \\. 
\\- \\+]+\\.[a-zA-Z]+$\"\n print(search_pattern(pattern, \"gmail.com\"))\n\n pattern = r\"\\([a-z A-Z]+\\)|\\([\\w]+\\)\"\n result = re.search(pattern, \"hello (IG)\")\n print(result)\n\n pattern = r\"^[\\w \\s]+[\\s]{1,}[0-9]{5}|[0-9]{5}-[0-9]{4}\"\n result = re.search(\n pattern, \"Their address is: 123 Main Street, Anytown, AZ 85258-0001.\")\n print(result)\n\n\ndef main():\n\n print(\"Practice: \")\n practice()\n\n print(\"Examples: \")\n regex_examples()\n\n\nmain()\n" }, { "alpha_fraction": 0.5872675180435181, "alphanum_fraction": 0.5994277596473694, "avg_line_length": 23.120689392089844, "blob_id": "617cf6ca2302e59e7a46da5e3217ce4a4880f6fb", "content_id": "6016ca1c6f0eb3f71e9d8ac6fad29af14f5077bd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1398, "license_type": "no_license", "max_line_length": 72, "num_lines": 58, "path": "/functional_programming/lambda/lambda.py", "repo_name": "dylanbuchi/Python_Practice", "src_encoding": "UTF-8", "text": "import random\n\n\ndef get_line():\n # to print a line '-----' for visual purpose only\n return \"\\n\" + \"--\" * 26\n\n\ndef to_string(array):\n # format a list into a string\n return \" | \".join(map(str, array))\n\n\ndef get_result(array):\n # get the formated result\n return to_string(array) + get_line()\n\n\ndef print_result(dic):\n for k, v in dic.items():\n print(k, v)\n\n\ndef main():\n # list of nums (1-10) then randomly suffle the numbers\n nums = list(range(1, 11))\n random.shuffle(nums)\n\n # lambda for square nums\n square_nums = list(map(lambda num: num**2, nums))\n\n # filter only even numbers\n only_even_nums = list(filter(lambda num: num % 2 == 0, square_nums))\n\n # list of tuples from even list and square nums list\n list_tuples = list(\n map(lambda num1, num2: (num1, num2), only_even_nums, nums))\n\n # list of max nums from tuple list\n max_nums_list_tuples = list(\n map(lambda tup: max(tup[0], tup[1]), list_tuples))\n\n # dict with list name key and print result value\n listname_result = {\n \"nums:\": get_result(nums),\n \"squares:\": get_result(square_nums),\n \"evens:\": get_result(only_even_nums),\n \"tuples:\": get_result(list_tuples),\n \"max num from tuples:\": get_result(max_nums_list_tuples),\n }\n\n print_result(listname_result)\n\n\nif __name__ == \"__main__\":\n print()\n main()\n print()" }, { "alpha_fraction": 0.5476358532905579, "alphanum_fraction": 0.55539870262146, "avg_line_length": 27.93877601623535, "blob_id": "3bde766ef8f1f3a0dbe51491734a67fda5770d68", "content_id": "88ef246d0e2695060b79784b9b4ad34a7c703151", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1417, "license_type": "no_license", "max_line_length": 77, "num_lines": 49, "path": "/oop/dice_game/game.py", "repo_name": "dylanbuchi/Python_Practice", "src_encoding": "UTF-8", "text": "from player.player import Player\nfrom random import randint\nfrom typing import List\n\n\nclass Game:\n MAX_SCORE = 100\n\n def __init__(self, player_amount: int) -> None:\n self.player_amount = player_amount\n\n def round(self, player: Player):\n roll_number = randint(1, 6)\n player.score += roll_number\n\n print(f\"Player_{player.number} (rolled a {roll_number})\")\n\n def check_winner(self, player: Player):\n if player.score >= self.MAX_SCORE:\n player.winner = True\n\n def reset_game(self, players: List[Player]):\n for player in players:\n player.reset()\n\n def print_score(self, players: List[Player]):\n sorted_players = sorted(players, 
key=lambda x: x.score, reverse=True)\n        for player in sorted_players:\n            player.print_score()\n\n    def play_game(self):\n        players = [Player(i) for i in range(1, self.player_amount + 1)]\n        game_on = True\n        round = 1\n\n        while game_on:\n            print(f\"Round: {round}\\n\")\n            for player in players:\n                self.round(player)\n                self.check_winner(player)\n                if player.winner:\n                    print(f\"\\n{player} wins the game!\\n\")\n                    game_on = False\n                    break\n            print(\"-\" * 20)\n            round += 1\n\n        print(\"LeaderBoard\\n\")\n        self.print_score(players)\n        self.reset_game(players)" }, { "alpha_fraction": 0.5324324369430542, "alphanum_fraction": 0.5378378629684448, "avg_line_length": 23.733333587646484, "blob_id": "a8fa8dd68fa44d2f00083acbf977458c2b41f310", "content_id": "9fe0dd02c88fe07b579795cf6a5961421bcf9dbf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 370, "license_type": "no_license", "max_line_length": 44, "num_lines": 15, "path": "/oop/player/player.py", "repo_name": "dylanbuchi/Python_Practice", "src_encoding": "UTF-8", "text": "class Player:\n    def __init__(self, number: int) -> None:\n        self.number = number\n        self.score = 0\n        self.winner = False\n\n    def reset(self) -> None:\n        self.score = 0\n        self.winner = False\n\n    def print_score(self) -> None:\n        print(self, f\"Score: {self.score}\")\n\n    def __repr__(self) -> str:\n        return f\"Player_{self.number}\"" }, { "alpha_fraction": 0.5932721495628357, "alphanum_fraction": 0.5932721495628357, "avg_line_length": 20.866666793823242, "blob_id": "6f65c98b2beb020666724173819b562d76470b1f", "content_id": "3bf8dd1872860b6e5ee2ba8d5355af055be27e21", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 327, "license_type": "no_license", "max_line_length": 49, "num_lines": 15, "path": "/strings/ubbi_dubbi.py", "repo_name": "dylanbuchi/Python_Practice", "src_encoding": "UTF-8", "text": "def make_ubbi_dubbi(string: str):\n    words = insert_ub_to(string)\n    return ''.join(words)\n\n\ndef insert_ub_to(string: str):\n    words = []\n    for _, ch in enumerate(string):\n        if ch in 'aeiou':\n            words.append('ub')\n        words.append(ch)\n    return words\n\n\nprint(make_ubbi_dubbi('I love to drink coffee!'))" }, { "alpha_fraction": 0.6167597770690918, "alphanum_fraction": 0.6201117038726807, "avg_line_length": 22.94871711730957, "blob_id": "4e4626ab007016b2920b5ad2b0e71ec8fd4517c8", "content_id": "783d95497487a971fa1153334d2f7682e61d1178", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 895, "license_type": "no_license", "max_line_length": 66, "num_lines": 39, "path": "/modules/json_main.py", "repo_name": "dylanbuchi/Python_Practice", "src_encoding": "UTF-8", "text": "import json\n\n\ndef read_list_json(filename):\n    data = json.load(open(filename))\n    for dic in data['names']:\n        print(f\"name: {dic['name']} age: {dic['age']}\")\n    return data\n\n\ndef read_simple_json(filename):\n    data = json.load(open(filename))\n    return data\n\n\ndef create_json_file(data, filename):\n    json.dump(data, open(filename, 'w'), indent=4, sort_keys=True)\n\n\ndef append_json_list(data, filename):\n    dic = read_list_json(filename)\n    temp = dic['names']\n    temp.append(data)\n    create_json_file(dic, filename)\n\n\nif __name__ == \"__main__\":\n\n    path = r'./modules/files/'\n    numbers_file = 'numbers.json'\n\n    names_file = 'names.json'\n\n    data = {'name': 'Yuri', 'age': 23}\n    create_json_file(data, rf'{path}{numbers_file}')\n    append_json_list(data, rf'{path}{names_file}')\n\n    
read_simple_json(rf'{path}{numbers_file}')\n read_list_json(rf'{path}{names_file}')\n" }, { "alpha_fraction": 0.47770699858665466, "alphanum_fraction": 0.5010615587234497, "avg_line_length": 17.84000015258789, "blob_id": "29482db5dfd91fa58a5680e5e1c70d770b8dbbb8", "content_id": "86d86efcc73b463b98600e5477de279e0a285a4f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 471, "license_type": "no_license", "max_line_length": 52, "num_lines": 25, "path": "/data_structures/algorithms/searching/binary_search.py", "repo_name": "dylanbuchi/Python_Practice", "src_encoding": "UTF-8", "text": "def binary_search(numbers, n):\n low = 0\n high = len(numbers) - 1\n\n while low <= high:\n i = (low + high) // 2\n mid = numbers[i]\n\n if mid == n:\n return i\n elif mid > n:\n high = i - 1\n else:\n low = i + 1\n\n\nnumbers = [i+1 for i in range(100)]\n\nnum = 23\nindex = binary_search(numbers, num)\n\nif index is None:\n print(\"Number not found!\")\nelse:\n print(f\"Number {num} found at position {index}\")\n" }, { "alpha_fraction": 0.6383647918701172, "alphanum_fraction": 0.650943398475647, "avg_line_length": 18.875, "blob_id": "2c39096828122723f01d2d1137897858623f608b", "content_id": "ebd3eb890715bd5fbbbaa8830cb76173f21f97d2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 318, "license_type": "no_license", "max_line_length": 40, "num_lines": 16, "path": "/testing/test.py", "repo_name": "dylanbuchi/Python_Practice", "src_encoding": "UTF-8", "text": "import sys\nimport os\nimport unittest\nsys.path.append(os.getcwd())\nfrom testing.add_five import add_five_to\n\n\nclass TestAddFive(unittest.TestCase):\n def test_add_five_to(self):\n test = 10\n result = add_five_to(test)\n self.assertEqual(result, 15)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n" }, { "alpha_fraction": 0.5343915224075317, "alphanum_fraction": 0.5509259104728699, "avg_line_length": 25.086206436157227, "blob_id": "bbd08e53053abc9aa175620c084e91e1c2ebf4df", "content_id": "7a62508ce4102cc5229c8a502f4f79e844d7a243", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1512, "license_type": "no_license", "max_line_length": 85, "num_lines": 58, "path": "/scripts/guessing_game.py", "repo_name": "dylanbuchi/Python_Practice", "src_encoding": "UTF-8", "text": "from random import randint\nfrom sys import argv\n\n\ndef generate_random_numbers(start=1, end=100):\n return randint(start, end)\n\n\ndef get_user_number(n1=1, n2=100):\n number = 0\n while True:\n try:\n number = int(input(f\"\\nGuess a number from {n1} to {n2}: \\n\"))\n if number < n1 or number > n2:\n print(\n f\"\\nOnly numbers between {n1} and {n2} (inclusive) are allowed\\n\"\n )\n continue\n except ValueError as error:\n print(f\"Please enter a number only\\nerror: {error}\")\n else:\n break\n return number\n\n\ndef is_user_number_correct(user_num, correct_number):\n return user_num == correct_number\n\n\ndef game():\n n1 = int(argv[1])\n n2 = int(argv[2])\n random_number = generate_random_numbers(n1, n2)\n\n while True:\n user_number = get_user_number(n1, n2)\n user_is_correct = is_user_number_correct(user_number, random_number)\n\n if user_is_correct:\n print(\n f\"Congratulations, you guessed right! 
The number was {random_number}\"\n )\n break\n else:\n print(\"\\nOops, wrong answer, please try again\")\n if user_number > random_number:\n print(f\"\\nthe correct number is less than {user_number}\")\n else:\n print(f\"\\nthe correct number is greater than {user_number}\")\n continue\n\n\ndef main():\n game()\n\n\nif __name__ == \"__main__\":\n main()" }, { "alpha_fraction": 0.5315126180648804, "alphanum_fraction": 0.5315126180648804, "avg_line_length": 28.8125, "blob_id": "f8d55c378fa27708e488eb09c3a78b311ed8bfe6", "content_id": "bb0e3de84a99583d0ec4e45860de84dd501cd3e9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 476, "license_type": "no_license", "max_line_length": 105, "num_lines": 16, "path": "/oop/filehandler/file_handler.py", "repo_name": "dylanbuchi/Python_Practice", "src_encoding": "UTF-8", "text": "class FileHandler():\n \"\"\"Class to perform operations on files\"\"\"\n def __init__(self, filename: str) -> None:\n self.filename = filename\n\n def write_a_list_to_a_file(\n self,\n filename: str,\n a_list: list,\n sep='\\n',\n ) -> None:\n \"\"\"Takes a list and write it on a file, sep is the separator for the join method default is \"\\\\n\"\n \n \"\"\"\n with open(filename, 'w') as f:\n f.write(sep.join(a_list))" }, { "alpha_fraction": 0.5, "alphanum_fraction": 0.5125448107719421, "avg_line_length": 17.600000381469727, "blob_id": "d58e1ffb59473d7c0ed18a77fb85254e90df566d", "content_id": "57bfe4477a20b4e277f7b85b3fccbd372712f99f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 558, "license_type": "no_license", "max_line_length": 53, "num_lines": 30, "path": "/error_handling/main.py", "repo_name": "dylanbuchi/Python_Practice", "src_encoding": "UTF-8", "text": "def get_age():\n age = 0\n while True:\n try:\n age = int(input(\"What's your age: \"))\n except ValueError as error:\n print(f\"Please enter a number\\n{error}\")\n else:\n break\n return age\n\n\ndef add(n, n2):\n result = 0\n try:\n result = n + n2\n except TypeError as error:\n print(f\"Type Error\\n{error}\")\n return result\n\n\ndef main():\n my_age = get_age()\n add(2, 23)\n\n\nif __name__ == \"__main__\":\n main()\n flag = False\n if not flag: raise Exception(\"The Flag is False\")\n" } ]
62
kaixili/HttpServer
https://github.com/kaixili/HttpServer
b2bbb5a02f314326813baacec04106742b838454
b4c7d240505ce6d2722f1674ba9eb5e37e0e2220
2e097d436d20c82bb6cacfd76120d69ec6e03c4f
refs/heads/master
2020-12-30T10:50:26.477576
2015-10-17T12:37:51
2015-10-17T12:37:51
27,538,021
6
2
null
null
null
null
null
[ { "alpha_fraction": 0.5301376581192017, "alphanum_fraction": 0.5552918910980225, "avg_line_length": 33.557376861572266, "blob_id": "6c987a1466f86f9ec01fafbdf9cbbba9a9bbb690", "content_id": "96a54dcc494e9d9d0087e6b82b3e46bac8503285", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2141, "license_type": "no_license", "max_line_length": 117, "num_lines": 61, "path": "/old&reference/send_https.py", "repo_name": "kaixili/HttpServer", "src_encoding": "UTF-8", "text": "import optparse\nimport os\nimport socket\nimport time\nimport ssl\nfrom threading import Thread\nfrom io import StringIO\n\nlocation = os.getcwd() + '/html'\nfrom data2 import content_type, responses_stat\n\n\n\ndef https_server(_ssl, client, address):\n location = os.getcwd() + '/html'\n client = _ssl.wrap_socket(client, server_side=True)\n\n def send_bytes(responses, data): #ๆ–‡ไปถๅ‘้€\n client.send(bytes('HTTP/1.1 {0} {1}\\r\\n'.format(responses, responses_stat[responses][0]), 'UTF-8'))\n client.send(bytes(\"Content-Type: {0}\\r\\n\\r\\n\".format(content_type[target[-3:]]), 'UTF-8'))\n client.send(bytes(data, 'UTF-8'))\n\n data = client.recv(1024)\n print('{0} From {1}'.format(data.decode('utf-8'), address)) \n if data:\n data_request_method = data.decode('utf-8').split()[0]\n if data_request_method == 'POST':\n print('\\t POST_DATA:' + data.decode('utf-8').split()[-1])\n data_request_address = data.decode('utf-8').split()[1]\n \n if data_request_address[-1] == '/':\n data_request_address += 'index.html' \n elif data_request_address[0] != '/':\n send_bytes(411, '<html><head><title>411 - badrequest</title></head><body>411 - badrequest</body></html>')\n \n target = location\n for i in data_request_address.split('/')[1:]:\n target = target + '/' + i\n \n try:\n try:\n with open(target) as data_send:\n data_send = data_send.read()\n send_bytes(200, data_send)\n \n except UnicodeDecodeError:\n data_send = '''HTTP/1.1 200 OK\nContent_Type:{0}\n\n'''.format(content_type[target[-3:]]) #ๆ‰“ไธๅผ€็š„ๆ–‡ไปถ(ๅ›พ็‰‡)ไบŒ่ฟ›ๅˆถ sendallๅ‘้€\n f = open(target, 'rb').read()\n data_send = bytes(data_send, 'utf-8') + f\n client.sendall(data_send)\n\n except FileNotFoundError:\n target = location + '/404.html'\n with open(target) as data_send:\n data_send = data_send.read()\n send_bytes(404, data_send)\n \n client.close()" }, { "alpha_fraction": 0.6295503377914429, "alphanum_fraction": 0.6745182275772095, "avg_line_length": 20.136363983154297, "blob_id": "85dc2dde782302baa76db813a855ef30f115acda", "content_id": "d5527ca827a800e71832bb6ee02eac6ac0ed59d2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 791, "license_type": "no_license", "max_line_length": 88, "num_lines": 22, "path": "/README.md", "repo_name": "kaixili/HttpServer", "src_encoding": "UTF-8", "text": "# ็ฌฌๅ››ๆฌกๆ–ฐไบบไปปๅŠก\n \n **ๅฎž็Žฐไธ€ไธชhttpๆœๅŠกๅ™จ**\n \n## ๅฎŒๆˆ่ฟ›ๅบฆ\n \n - 1.httpๅ’ŒhttpsๅŒๆ—ถๅœจ80ๅ’Œ443็ซฏๅฃ่ฟ›่กŒ\n - 2.ๅฐ†htmlๆ–‡ไปถไธญ็š„้™ๆ€็ฝ‘้กต่ฟ”ๅ›ž็ป™ๅฎขๆˆท็ซฏ ็†่ฎบไธŠๅฏไปฅๆ”ฏๆŒๆ‰€ๆœ‰็š„้™ๆ€็ฝ‘้กตๆ–‡ไปถ ๆ”ฏๆŒgetๅ’Œpostไธค็งๆ–นๅผ\n - 3.ๅญ็บฟ็จ‹ๅค„็†ไผ˜ๅŒ–,ๅผ‚ๆญฅไปฅๅŽๅฎŒๆˆ\n \n## ๆ–‡ไปถไฟกๆฏ\n \n * data2.py ๅค„็†ๅŽ็š„ ไธๅŒๆ ผๅผๆ–‡ไปถ้€š่ฟ‡httpๅ‘้€็š„ๆŠฅๅคดไฟกๆฏ ๅ’Œ ไฟกๆฏๅ้ฆˆไปฃ็  ็š„ๆ•ฐๆฎๆ–‡ไปถ ๅŒ…content_type, responsesไธคไธชๅญ—ๅ…ธ\n * origin_data.py ๅค„็†ๅ‰็š„ ๆ•ฐๆฎ\n * httpserver.py ๆœๅŠกๅ™จ็จ‹ๅบ\n * html ๆ”พ็ฝฎhtmlๆ–‡ไปถ็š„ๆ–‡ไปถๅคน\n * demoCA cacert.pem privkey.pem ็”จไบŽsslๅŠ 
ๅฏ†่ฏไนฆ็ญ‰ๆ–‡ไปถๅคนๅ’Œๆ–‡ไปถ\n * old_vesion&reference ๆ—ง็‰ˆๆœฌๅ’Œๅ‚่€ƒๆ–‡ไปถ\n \n## ๆœ€ๅŽไฟฎๆ”น\n \n 2014-12-25 19:00\n \n" }, { "alpha_fraction": 0.40345656871795654, "alphanum_fraction": 0.4180704355239868, "avg_line_length": 67.26363372802734, "blob_id": "2115798d2c548093fca19e08257d1f0eacb20bb7", "content_id": "7fce6e89b82b4b3cdc1894e7a7956aa02fad227c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "HTML", "length_bytes": 22865, "license_type": "no_license", "max_line_length": 464, "num_lines": 330, "path": "/html/index.html", "repo_name": "kaixili/HttpServer", "src_encoding": "UTF-8", "text": "<!DOCTYPE html>\r\n<html lang=\"en\">\r\n<head>\r\n <title></title>\r\n <meta charset=\"utf-8\">\r\n <link rel=\"stylesheet\" href=\"css/reset.css\" type=\"text/css\" media=\"all\">\r\n <link rel=\"stylesheet\" href=\"css/style.css\" type=\"text/css\" media=\"all\">\r\n <script type=\"text/javascript\" src=\"js/jquery-1.6.js\" ></script>\r\n <script type=\"text/javascript\" src=\"js/cufon-yui.js\"></script>\r\n <script type=\"text/javascript\" src=\"js/cufon-replace.js\"></script> \r\n <script type=\"text/javascript\" src=\"js/Kozuka_Gothic_Pro_OpenType_400.font.js\"></script>\r\n <script type=\"text/javascript\" src=\"js/script.js\"></script>\r\n <script type=\"text/javascript\" src=\"js/content_switch.js\"></script>\r\n <script type=\"text/javascript\" src=\"js/jquery.easing.1.3.js\"></script>\r\n <script type=\"text/javascript\" src=\"js/superfish.js\"></script>\r\n <script type=\"text/javascript\" src=\"js/forms.js\"></script>\r\n <script type=\"text/javascript\" src=\"js/jquery.color.js\"></script>\r\n <script src=\"js/googleMap.js\" type=\"text/javascript\"></script>\r\n <!--[if lt IE 9]>\r\n \t<script type=\"text/javascript\" src=\"js/html5.js\"></script>\r\n <link rel=\"stylesheet\" href=\"css/ie.css\" type=\"text/css\" media=\"all\">\r\n <![endif]-->\r\n\t<!--[if lt IE 8]>\r\n\t\t<div style=' clear: both; text-align:center; position: relative;'>\r\n\t\t\t<a href=\"http://windows.microsoft.com/en-US/internet-explorer/products/ie/home?ocid=ie6_countdown_bannercode\"><img src=\"http://storage.ie6countdown.com/assets/100/images/banners/warning_bar_0000_us.jpg\" border=\"0\" height=\"42\" width=\"820\" alt=\"You are using an outdated browser. For a faster, safer browsing experience, upgrade for free today.\" /></a>\r\n\t\t</div>\r\n\t<![endif]-->\r\n</head>\r\n<body>\r\n\r\n\t<div class=\"page_spinner\">\r\n\t\t<div></div>\r\n\t</div>\r\n\t\t<div class=\"main\">\r\n\t\t\t<!--content -->\r\n\t\t\t<article id=\"content\"><div class=\"ic\">More Website Templates @ <a href=\"http://www.cssmoban.com/\" >็ฝ‘้กตๆจกๆฟ</a>. March 26, 2012!</div>\r\n\t\t\t\t<ul>\r\n\t\t\t\t\t<li id=\"page_Home\">\r\n\t\t\t\t\t\t<div class=\"box\">\r\n \t<div class=\"wrapper\">\r\n \t <div class=\"col1\">\r\n \t<h2>Welcome to our website!</h2>\r\n <p class=\"pad_bot1\">Heart to Heart is one of <a href=\"http://blog.templatemonster.com/free-website-templates/\" target=\"_blank\" class=\"link1\">free website templates</a> created team. This website template is optimized for 1280X1024 screen resolution. It is also XHTML &amp; CSS valid.</p>\r\n <p>This <a href=\"http://blog.templatemonster.com/2012/03/26/free-full-javascript-animated-template-charity-project/\" class=\"link1\">Heart to Heart Template</a> goes with two packages โ€“ with PSD source files and without them. PSD source files are available for free for the registered members of TemplateMonster.com. 
The basic package (without PSD source) is available for anyone without registration.</p>\r\n <img src=\"images/sign.gif\" alt=\"\" class=\"sign\">\r\n <h3>Our promos:</h3>\r\n <ul class=\"promos\">\r\n <li><a href=\"#\"><span></span><img src=\"images/page1_img1.jpg\" alt=\"\"></a></li>\r\n <li><a href=\"#\"><span></span><img src=\"images/page1_img2.jpg\" alt=\"\"></a></li>\r\n <li class=\"last\"><a href=\"#\"><span></span><img src=\"images/page1_img3.jpg\" alt=\"\"></a></li>\r\n </ul>\r\n </div>\r\n <div class=\"col2 pad_left1\">\r\n \t<h2>News</h2>\r\n <ul class=\"news\">\r\n \t <li>\r\n \t<a href=\"#\" class=\"link1\"><strong>15.12.2012</strong> - Lorem ipsum dolor</a><br>\r\n \t \tLorem ipsum dolor, consectetuer consectetur adipisicing elit, sed do adipiscing elitsed diam. \r\n </li>\r\n \t <li>\r\n \t<a href=\"#\" class=\"link1\"><strong>15.12.2012</strong> - Lorem ipsum dolor</a><br>\r\n \t \tLorem ipsum dolor, consectetuer consectetur adipisicing elit, sed do adipiscing elitsed diam. \r\n </li>\r\n \t <li class=\"last\">\r\n \t<a href=\"#\" class=\"link1\"><strong>15.12.2012</strong> - Lorem ipsum dolor</a><br>\r\n \t \tLorem ipsum dolor, consectetuer consectetur adipisicing elit, sed do adipiscing elitsed diam. \r\n </li>\r\n </ul>\r\n <a href=\"#!/page_More\" class=\"button1\">News Archive</a>\r\n </div>\r\n </div>\r\n </div>\r\n\t\t\t\t\t</li>\r\n\t\t\t\t\t<li id=\"page_Mission\">\r\n\t\t\t\t\t\t<div class=\"box\">\r\n \t<h2>Mission statement</h2>\r\n <div class=\"wrapper\">\r\n \t<figure class=\"left marg_right1\"><img src=\"images/page2_img1.jpg\" alt=\"\"></figure>\r\n <p class=\"pad_bot1\"><a href=\"#\" class=\"link2\">Duis autem vel eum iriure dolor in hendrerit in vulputate velit esse molestie consequat, vel illum dolore eu feugiat nulla facilisis at vero </a></p>\r\n <p class=\"pad_bot1\">Eros et accumsan et iusto odio dignissim qui blandit praesent luptatum zzril delenit orem ipsum dolor sit amet, consectetuer adipiscing elited diam nonummy nibh euismod tincidunt ut laoreet dolore magna aliquam erat volutpat. </p>\r\n <p class=\"pad_bot1\">At vero eos et accusam et justo duo dolores et ea rebum. Stet clita kasd gubergren, no sea takimata sanctus est Lorem ipsum dolor sit amet. Lorem ipsum dolor sit amet, consetetur sadipscing elitr sed diam.</p>\r\n </div>\r\n \t<h3>Mission list:</h3>\r\n \t<div class=\"wrapper\">\r\n <div class=\"col1\">\r\n \t<ul class=\"list1\">\r\n \t<li><a href=\"#\">Accumsan et iusto odio dignissim blandit praesent</a></li>\r\n \t<li><a href=\"#\">Luptatum zzril delenit orem ipsum dolor sit amet</a></li>\r\n \t<li><a href=\"#\">Consectetuer adipiscing elited diam nommy nibh euismod</a></li>\r\n \t<li><a href=\"#\">Tincidunt ut laoreet dolore magna aliqam erat volutpat</a></li>\r\n </ul>\r\n </div>\r\n <div class=\"col1 pad_left1\">\r\n \t<ul class=\"list1\">\r\n \t<li><a href=\"#\">At vero eos et accusam et justo duo dolores et</a></li>\r\n \t<li><a href=\"#\">Stet clita kasd gubergren, no sea takimata sanctus est </a></li>\r\n \t<li><a href=\"#\">Lorem ipsum dolor sit amet. 
Lorem ipsum dolor consetetur</a></li>\r\n \t<li><a href=\"#\">Sadipscing elitr sed diam nonumy eirmod tempor</a></li>\r\n </ul>\r\n </div>\r\n </div>\r\n </div>\r\n\t\t\t\t\t</li>\r\n\t\t\t\t\t<li id=\"page_Donations\">\r\n\t\t\t\t\t\t<div class=\"box\">\r\n \t<div class=\"wrapper\">\r\n \t \t<div class=\"col1\">\r\n \t\t<h2>How to sponsor us</h2>\r\n \t<div class=\"table\">\r\n <table>\r\n <tr>\r\n <td class=\"col1\"></td>\r\n <td class=\"col2\"><strong>Sponsorship tariff</strong></td>\r\n <td class=\"col3\"><strong>Sum</strong></td>\r\n </tr>\r\n <tr>\r\n <td class=\"col1\">1</td>\r\n <td class=\"col2\">Sponsor a child for a year</td>\r\n <td class=\"col3\">$2000</td>\r\n </tr>\r\n <tr>\r\n <td class=\"col1\">2</td>\r\n <td class=\"col2\">Sponsor a child up to 4th class</td>\r\n <td class=\"col3\">$1500</td>\r\n </tr>\r\n <tr>\r\n <td class=\"col1\">3</td>\r\n <td class=\"col2\">Sponsor a child up to 10th class</td>\r\n <td class=\"col3\">$3250</td>\r\n </tr>\r\n <tr>\r\n <td class=\"col1\">4</td>\r\n <td class=\"col2\">Sponsor food for a day (160 residents)</td>\r\n <td class=\"col3\">$2300</td>\r\n </tr>\r\n <tr>\r\n <td class=\"col1\">5</td>\r\n <td class=\"col2\">Sponsor special veg. food with payasan (160 residents)</td>\r\n <td class=\"col3\">$1000</td>\r\n </tr>\r\n <tr>\r\n <td class=\"col1\">6</td>\r\n <td class=\"col2\">Sponsor special veg. food with icecream (160 residents)</td>\r\n <td class=\"col3\">$3600</td>\r\n </tr>\r\n <tr>\r\n <td class=\"col1\">7</td>\r\n <td class=\"col2\">Sponsor a child for a year</td>\r\n <td class=\"col3\">$2100</td>\r\n </tr>\r\n <tr>\r\n <td class=\"col1\">8</td>\r\n <td class=\"col2\">Sponsor a child up to 4th class</td>\r\n <td class=\"col3\">$2000</td>\r\n </tr>\r\n <tr>\r\n <td class=\"col1\">9</td>\r\n <td class=\"col2\">Sponsor a child up to 10th class</td>\r\n <td class=\"col3\">$1500</td>\r\n </tr>\r\n <tr>\r\n <td class=\"col1\">10</td>\r\n <td class=\"col2\">Sponsor food for a day (160 residents)</td>\r\n <td class=\"col3\">$3250</td>\r\n </tr>\r\n <tr>\r\n <td class=\"col1\">11</td>\r\n <td class=\"col2\">Sponsor special veg. food with payasan (160 residents)</td>\r\n <td class=\"col3\">$2300</td>\r\n </tr>\r\n <tr>\r\n <td class=\"col1\">12</td>\r\n <td class=\"col2\">Sponsor special veg. food with icecream (160 residents)</td>\r\n <td class=\"col3\">$1000</td>\r\n </tr>\r\n </table> \r\n </div>\r\n </div>\r\n \t<div class=\"col2 pad_left1\">\r\n \t\t<h2 class=\"letter_spacing\">Donate now!</h2>\r\n \t<figure class=\"pad_bot1\"><img src=\"images/page3_img1.jpg\" alt=\"\"></figure>\r\n <p>Nam liber tempor cum soluta nobis eleifend option congue nihil imperdiet doming id quod mazim placerat facer possim assum. Lorem ipsum dolor sit amet, consectetuer adipiscing elit, sed diam nonummy nibh euismod tincidunt ut laoreet dolore magna aliquam erat volutpat. 
Ut wisi enim ad minim veniam, quis nostrud.</p>\r\n \t<a href=\"#!/page_More\" class=\"button1\">Read More</a>\r\n </div>\r\n </div>\r\n </div>\r\n\t\t\t\t\t</li>\r\n\t\t\t\t\t<li id=\"page_Volunteers\">\r\n\t\t\t\t\t\t<div class=\"box\">\r\n \t<div class=\"wrapper\">\r\n \t \t<div class=\"col1\">\r\n \t\t<h2>Flexible volunteer opportunity</h2>\r\n \t \t<div class=\"wrapper\">\r\n \t\t<figure class=\"left marg_right1\"><img src=\"images/page4_img1.jpg\" alt=\"\"></figure>\r\n <p class=\"pad_bot1\"><a href=\"#\" class=\"link2\">Duis autem vel eum iriure dolor in hendre nulla facilisis at vero </a></p>\r\n <p class=\"pad_bot1\">Eros et accumsan et iusto odio dignissim qui blandit praesent luptatum zzril delenit orem ipsum dolor sit amet, consectetuer adipiscing elited diam nonummy.</p>\r\n </div>\t\r\n <p class=\"pad_bot1\">Nam liber tempor cum soluta nobis eleifend option congue nihil imperdiet doming id quod mazim placerat facer possim assum. Lorem ipsum dolor sit amet, consectetuer adipiscing elit, sed diam nonummy nibh euismod tincidunt ut laoreet dolore magna aliquam erat volutpat. Ut wisi enim ad minim veniam, quis nostrud exerci tation ullamcorper suscipit lobortis nisl ut aliquip ex ea commodo consequat.</p>\r\n Ut wisi enim ad minim veniam, quis nostrud exerci tation ullamcorper suscipit lobortis nisl ut aliquip ex ea commodo consequat. Duis autem vel eum iriure dolor in hendrerit in vulputate velit esse molestie consequat, vel illum dolore eu feugiat nulla facilisis at vero eros et accumsan et iusto odio.\r\n \t</div>\r\n \t<div class=\"col2 pad_left1\">\r\n \t\t<h2>Focus</h2>\r\n \t<ul class=\"list1\">\r\n \t<li><a href=\"#\">Ut wisi enim ad minim veniam</a></li>\r\n \t<li><a href=\"#\">Quis nostrud exerci tation</a></li>\r\n \t<li><a href=\"#\">Ullamcorper suscipit lobortis nisl</a></li>\r\n \t<li><a href=\"#\">Aliquip ex ea commodo</a></li>\r\n \t<li><a href=\"#\">Duis autem vel eum iriure dolor</a></li>\r\n \t<li><a href=\"#\">Hendrerit in vulputate velit</a></li>\r\n \t<li><a href=\"#\">Esse molestie consequat</a></li>\r\n \t<li><a href=\"#\">Vel illum dolore eu feugiat nulla</a></li>\r\n \t<li><a href=\"#\">Facilisis at vero eros</a></li>\r\n \t<li><a href=\"#\">Accumsan et iusto odio dignissim </a></li>\r\n \t<li><a href=\"#\">Qui blandit praesent luptatum</a></li>\r\n \t<li><a href=\"#\">Xzril delenit augue duis dolore te </a></li>\r\n \t<li><a href=\"#\">Aliquip ex ea commodo</a></li>\r\n \t<li><a href=\"#\">Duis autem vel eum iriure dolor</a></li>\r\n </ul>\r\n </div>\r\n </div>\r\n </div>\r\n\t\t\t\t\t</li>\r\n\t\t\t\t\t<li id=\"page_Contacts\">\r\n\t\t\t\t\t\t<div class=\"box\">\r\n \t<div class=\"wrapper\">\r\n \t \t<div class=\"col1\">\r\n \t\t<h2>Contact form</h2>\r\n \t \t<form action=\"#\" id=\"ContactForm\">\r\n\t\t\t\t\t\t\t\t\t\t\t\t<div class=\"success\"> Contact form submitted!<br>\r\n\t\t\t\t\t\t\t\t\t\t\t\t<strong>We will be in touch soon.</strong> </div>\r\n\t\t\t\t\t\t\t\t\t\t\t\t<fieldset>\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t<div class=\"wrapper\">\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t<span class=\"col1\">Name:</span>\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t<label class=\"name\">\r\n <span class=\"bg\"><input type=\"text\" class=\"input\"></span>\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t<span class=\"error\">*This is not a valid name.</span> <span class=\"empty\">*This field is required.</span> \r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t</label>\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t</div>\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t<div class=\"wrapper\">\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t<span 
class=\"col1\">Email:</span>\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t<label class=\"email\">\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t<span class=\"bg\"><input type=\"text\" class=\"input\"></span>\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t<span class=\"error\">*This is not a valid email address.</span> <span class=\"empty\">*This field is required.</span> \r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t</label>\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t</div>\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t<div class=\"wrapper\">\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t<span class=\"col1\">Message:</span>\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t<label class=\"message\">\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t<span class=\"bg\"><textarea rows=\"1\" cols=\"1\"></textarea></span>\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t<span class=\"error\">*The message is too short.</span> <span class=\"empty\">*This field is required.</span> \r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t</label>\r\n\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t</div>\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t<div class=\"btns\"><a href=\"#\" class=\"button1\" data-type=\"submit\">Send</a><a href=\"#\" class=\"button1\" data-type=\"reset\">Clear</a></div>\r\n\t\t\t\t\t\t\t\t\t\t\t\t</fieldset>\r\n\t\t\t\t\t\t\t\t\t\t\t</form>\r\n \t</div>\r\n \t <div class=\"col2 pad_left1\">\r\n \t\t<h2>Our contacts</h2>\r\n \t<figure class=\"google_map\"></figure> \r\n <p>8901 Marmora Road,<br>\r\n \t Glasgow, D04 89GR.<br>\r\n \t <span class=\"col3\">Freephone:</span> +1 800 559 6580<br>\r\n \t <span class=\"col3\">Telephone:</span> +1 800 603 6035<br>\r\n \t <span class=\"col3\">Fax:</span> +1 800 889 9898 <br>\r\n \t <span class=\"col3\">E-mail:</span><a href=\"mailto:\" class=\"link1\">mail@demolink.org</a></p>\r\n </div>\r\n </div>\r\n </div>\r\n\t\t\t\t\t</li>\r\n\t\t\t\t\t<li id=\"page_More\">\r\n\t\t\t\t\t\t<div class=\"box\">\r\n <h2>Duis autem vel</h2>\r\n <p class=\"pad_bot1\"><strong>Eros et accumsan et iusto odio</strong> dignissim qui blandit praesent luptatum zzril delenit orem ipsum dolor sit amet, consectetuer adipiscing elited diam nonummy.</p>\r\n <p class=\"pad_bot1\"><strong>Nam liber tempor cum</strong> soluta nobis eleifend option congue nihil imperdiet doming id quod mazim placerat facer possim assum. Lorem ipsum dolor sit amet, consectetuer adipiscing elit, sed diam nonummy nibh euismod tincidunt ut laoreet dolore magna aliquam erat volutpat. Ut wisi enim ad minim veniam, quis nostrud exerci tation ullamcorper suscipit lobortis nisl ut aliquip ex ea commodo consequat.</p>\r\n <p>Ut wisi enim ad minim veniam, quis nostrud exerci tation ullamcorper suscipit lobortis nisl ut aliquip ex ea commodo consequat. 
Duis autem vel eum iriure dolor in hendrerit in vulputate velit esse molestie consequat, vel illum dolore eu feugiat nulla facilisis at vero eros et accumsan et iusto odio.</p>\r\n </div>\r\n\t\t\t\t\t</li>\r\n\t\t\t\t</ul>\r\n\t\t\t</article>\r\n\t\t\t<!--content end-->\r\n\t\t\t<!--header -->\r\n\t\t\t<header>\r\n\t\t\t\t<h1><a href=\"#close\" id=\"logo\">Heart to Heart</a></h1>\r\n\t\t\t\t<nav class=\"menu\">\r\n\t\t\t\t\t<ul id=\"menu\">\r\n\t\t\t\t\t\t<li><a href=\"#!/page_Home\">Home</a></li>\r\n\t\t\t\t\t\t<li><a href=\"#!/page_Mission\">Our Mission</a></li>\r\n\t\t\t\t\t\t<li><a href=\"#!/page_Donations\">Donations</a>\r\n \t<div class=\"submenu_1\">\r\n \t<ul>\r\n \t<li><a href=\"#!/page_More\">Lorem ipsum </a></li>\r\n \t<li><a href=\"#!/page_More\">Dolor consectet </a></li>\r\n \t<li><a href=\"#!/page_More\">Consec adipisicing </a>\r\n \t<div class=\"submenu_2\">\r\n <ul>\r\n <li><a href=\"#!/page_More\">Lorem ipsum </a></li>\r\n <li><a href=\"#!/page_More\">Dolor consectet </a></li>\r\n <li><a href=\"#!/page_More\">Consec adipisicing </a></li>\r\n <li><a href=\"#!/page_More\">Elit sed do </a></li>\r\n </ul>\r\n </div>\r\n </li>\r\n \t<li><a href=\"#!/page_More\">Elit sed do </a></li>\r\n </ul>\r\n </div>\r\n </li>\r\n\t\t\t\t\t\t<li><a href=\"#!/page_Volunteers\">Volunteers</a></li>\r\n\t\t\t\t\t\t<li><a href=\"#!/page_Contacts\">Contacts</a></li>\r\n\t\t\t\t\t</ul>\r\n\t\t\t\t</nav>\r\n\t\t\t</header>\r\n\t\t\t<!--header end-->\r\n\t\t\t<!--footer -->\r\n\t\t\t<footer>\r\n \t&copy; 2012 Heart to Heart <br><a rel=\"nofollow\" href=\"http://www.cssmoban.com/\" target=\"_blank\">Website Template</a> | <a href=\"http://gmail.google.com\" >kaixili@hustunique.com</a><br>\r\n\t\t\t\t<!-- {%FOOTER_LINK} -->\r\n\t\t\t</footer>\r\n\t\t\t<!--footer end-->\r\n\t\t</div>\r\n<script type=\"text/javascript\">Cufon.now();</script>\r\n<script>\r\n$(window).load(function() {\t\r\n\t$('.page_spinner').fadeOut();\r\n\t$('body').css({overflow:'visible'})\r\n})\r\n</script>\r\n<!--coded by koma-->\r\n</body>\r\n</html>" }, { "alpha_fraction": 0.4950657784938812, "alphanum_fraction": 0.5144736766815186, "avg_line_length": 22.336000442504883, "blob_id": "929042c049fda94e15a7f5e8d1cba1a8cf5f158c", "content_id": "5b2aecf37c5ab77208d910603a925fefee751307", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 3040, "license_type": "permissive", "max_line_length": 86, "num_lines": 125, "path": "/html/js/script.js", "repo_name": "kaixili/HttpServer", "src_encoding": "UTF-8", "text": "$(document).ready(function() {\r\n\t// hover\r\n\t\r\n\t$('footer a').hover(function(){\r\n\t\t$(this).stop().animate({color:'#fff'})\t\t\t\t\t\t \r\n\t}, function(){\r\n\t\t$(this).stop().animate({color:'#6ab2b1'})\t\t\t\t\t\t \r\n\t})\r\n\t\r\n\t$('.link1, .link2').hover(function(){\r\n\t\t$(this).stop().animate({color:'#000'})\t\t\t\t\t \r\n\t}, function(){\r\n\t\t$(this).stop().animate({color:'#81b003'})\t\t\t\t\t \r\n\t})\r\n\t\r\n\t$('.promos span').css({opacity:0})\r\n\t\r\n\t$('.promos a').hover(function(){\r\n\t\t$(this).find('span').stop().animate({opacity:0.5})\t\t\t\t\t\t\r\n\t}, function(){\r\n\t\t$(this).find('span').stop().animate({opacity:0})\t\t\t\t\t\t\r\n\t})\r\n\t\r\n\t$('.button1').hover(function(){\r\n\t\t$(this).stop().animate({backgroundColor:'#4d4d4d'})\t\t\t\t\t\t \r\n\t}, function(){\r\n\t\t$(this).stop().animate({backgroundColor:'#81b003'})\t\t\t\t\t\t \r\n\t})\r\n\t\r\n\t$('.list1 
a').hover(function(){\r\n\t\t$(this).stop().animate({color:'#81b003'})\t\t\t\t\t \r\n\t}, function(){\r\n\t\t$(this).stop().animate({color:'#4d4d4d'})\t\t\t\t\t \r\n\t})\r\n\t\r\n\t$('ul#menu').superfish({\r\n delay: 600,\r\n animation: {height:'show'},\r\n speed: 600,\r\n autoArrows: false,\r\n dropShadows: false\r\n });\r\n\t\r\n\t$('#content > ul > li').each(function(){\r\n\t\tvar height=$(this).height()+10;\r\n\t\t$(this).data({height:height})\r\n\t})\r\n\t\r\n\t\r\n });\r\n$(window).load(function() {\t\r\n\t\r\n\tvar min_height=300, height=300;\r\n\t\r\n\t//content switch\r\n\tvar content=$('#content'),\r\n\t\tnav=$('.menu');\r\n\tnav.navs({\r\n\t\tuseHash:true,\r\n\t\thoverIn:function(li){\r\n\t\t\tCufon.replace('#menu a', { fontFamily: 'Kozuka Gothic Pro OpenType', hover:true });\r\n\t\t},\r\n\t\thoverOut:function(li){\r\n\t\t\tCufon.replace('#menu a', { fontFamily: 'Kozuka Gothic Pro OpenType', hover:true });\r\n\t\t}\t\t\t\t\r\n\t})\t\r\n\tcontent.tabs({\r\n\t\tactFu:function(_){\r\n\t\t\tif (_.prev && _.curr) {\r\n\t\t\t\t_.prev.stop().animate({height:0}, function(){\r\n\t\t\t\t\t$(this).css({display:'none'})\r\n\t\t\t\t\theight=_.curr.data('height')\r\n\t\t\t\t\t$('#content').css({height:height});\r\n\t\t\t\t\tif (height<min_height) {height=min_height}\r\n\t\t\t\t\tcentre();\r\n\t\t\t\t\t_.curr.css({display:'block'}).stop().animate({height:height})\r\n\t\t\t\t})\r\n\t\t\t} else {\r\n\t\t\t\tif (_.prev) {\r\n\t\t\t\t\t_.prev.stop().animate({height:0}, function(){\r\n\t\t\t\t\t\t$(this).css({display:'none'})\r\n\t\t\t\t\t\theight=min_height;\r\n\t\t\t\t\t\tcentre();\r\n\t\t\t\t\t\t$('#content').css({height:min_height});\r\n\t\t\t\t\t})\t\r\n\t\t\t\t} \r\n\t\t\t\tif (_.curr) {\r\n\t\t\t\t\theight=_.curr.data('height')\r\n\t\t\t\t\t$('#content').css({height:height});\r\n\t\t\t\t\tif (height<min_height) {height=min_height}\r\n\t\t\t\t\tcentre();\r\n\t\t\t\t\t_.curr.css({display:'block'}).stop().animate({height:height})\r\n\t\t\t\t}\r\n\t\t\t}\r\n\t\t},\r\n\t\tpreFu:function(_){\t\t\t\t\t\t\r\n\t\t\t_.li.css({display:'none', position:'absolute', height:0});\r\n\t\t\t$('#content').css({height:min_height});\r\n\t\t}\r\n\t})\r\n\tnav.navs(function(n, _){\r\n\t\tif (n=='close' || n=='#!/') {\r\n\t\t\tcontent.tabs(n);\r\n\t\t} else {\r\n\t\t\tcontent.tabs(n);\r\n\t\t}\r\n\t})\r\n\t\r\n\t\r\n\tvar m_top=30;\r\n\tfunction centre() {\r\n\t\tvar h=$(window).height();\r\n\t\th_cont=height+212;\r\n\t\tif (h>h_cont) {\r\n\t\t\tm_top=(h-h_cont);\r\n\t\t} else {\r\n\t\t\tm_top=30\r\n\t\t}\r\n\t\t$('#content').css({paddingTop:m_top})\r\n\t\t\r\n\t}\r\n\tcentre();\r\n\t$(window).resize(centre);\r\n\t\r\n})" }, { "alpha_fraction": 0.5191757082939148, "alphanum_fraction": 0.522896409034729, "avg_line_length": 33.60396194458008, "blob_id": "84fafd9ca1ce58b1cc88319d060bd4d8338112a7", "content_id": "1e7ae469298100e1268f64f5db2877aae640526b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3494, "license_type": "no_license", "max_line_length": 67, "num_lines": 101, "path": "/old&reference/example.py", "repo_name": "kaixili/HttpServer", "src_encoding": "UTF-8", "text": "import socket\nimport select\n \n \nclass Client(object):\n def __init__(self, addr, sock):\n self._socket = sock\n self._backlog = b''\n self.done = False\n \n def read(self, data):\n \"\"\"\n This function is meant to be overloaded by classes\n inheriting from Client. 
It's executed whenever something\n        is received from the client.\n        \"\"\"\n        ss = data.decode('utf-8', 'replace')\n        for s in ss.strip().split('\\n'):\n            if s == \"hello\":\n                self.write(\"Hello, World!\\n\")\n \n    def write(self, data):\n        self._backlog += bytearray(data, 'utf-8')\n \n    def _read_ready(self):\n        \"\"\"\n        Since sockets only allow for reading a specified amount\n        of bytes we can set the socket to not block on empty\n        recv and continue receiving until the call fails,\n        thereby receiving more than the specified amount.\n        \"\"\"\n        data = b''\n        self._socket.setblocking(False)\n        while True:\n            try:\n                r = self._socket.recv(100)\n                data += r\n            except (socket.timeout, socket.error):\n                break\n        self._socket.setblocking(True)\n        if not r:\n            self.done = True\n            return\n        self.read(data)\n \n    def _write_ready(self):\n        \"\"\"\n        We only write things to the socket when it signals that\n        it's ready to receive something. Not doing this and\n        relying on implicit buffers works most of the time but\n        this really isn't a difficult thing to implement.\n        \"\"\"\n        if self._backlog:\n            count = self._socket.send(self._backlog)\n            self._backlog = self._backlog[count:]\n \n \ndef main_loop():\n    sock = socket.socket()\n    sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n    sock.bind((\"\", 8002))\n    sock.listen(1) # listen(1) means that we only allow new\n                   # connections when the previous ones have\n                   # been properly processed.\n    clients = {} # Keep a dictionary mapping sockets to clients\n    try:\n        while True:\n            # We add `sock` to the `rlist` since it will signal\n            # that it's ready to be read when a new connection is\n            # waiting to be accepted.\n            rlist = [sock] + list(clients.keys())\n            wlist = [s for (s, c) in clients.items() if c._backlog]\n            (rs, ws, _) = select.select(rlist, wlist, [])\n            try:\n                for r in rs:\n                    if r == sock:\n                        (s, addr) = sock.accept()\n                        clients[s] = Client(addr, s)\n                    elif r in clients:\n                        clients[r]._read_ready()\n                for w in ws:\n                    if w in clients:\n                        clients[w]._write_ready()\n            except Exception:\n                # Including this catch as an example. 
It's often\n                # nice to handle this and then break from the loop.\n                raise\n        # Iterate over all the clients to find out which ones\n        # the client has disconnected from so we can remove\n        # their references and let them be collected.\n        for s in list(clients.keys())[:]:\n            if clients[s].done:\n                del clients[s]\n                sock.listen(1)\n    except KeyboardInterrupt:\n        print(\"Got CTRL-C, quitting\")\n    finally:\n        sock.close()\n \n \nmain_loop()" }, { "alpha_fraction": 0.5494253039360046, "alphanum_fraction": 0.5802298784255981, "avg_line_length": 29.64788818359375, "blob_id": "d06f6548c90bc7278f0ffc48fc28b0140b057f57", "content_id": "fdab0d222703abc80c733694520fc6bc29337f32", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2319, "license_type": "no_license", "max_line_length": 120, "num_lines": 71, "path": "/old&reference/version2.py", "repo_name": "kaixili/HttpServer", "src_encoding": "UTF-8", "text": "import socket\nfrom data2 import content_type, responses_stat # two data dictionaries\n\ndef send_bytes(responses, data):\n    client.send(bytes('HTTP/1.1 {0} {1}\\r\\n'.format(responses, responses_stat[responses][0]), 'UTF-8'))\n    client.send(bytes(\"Content-Type: {0}\\r\\n\\r\\n\".format(content_type[target[-3:]]), 'UTF-8'))\n    client.send(bytes(data, 'UTF-8'))\n\nhost = ''\nport = 80\nbacklog = 1023\nsize_max = 1024\n\nlocation = '/home/lkx810/b'\n\ns = socket.socket(socket.AF_INET, socket.SOCK_STREAM) #create the socket object, using IPv4 (AF_INET, IP version 4) and TCP (SOCK_STREAM).\ns.bind((host,port)) #bind the port\ns.listen(backlog) \n\nwhile 1:\n    try:\n        client, address = s.accept()\n    except KeyboardInterrupt:\n        s.close()\n\n    data = client.recv(size_max) \n    print('{0} From {1}'.format(data.decode('utf-8'), address)) \n    \n    \n    if not data:\n        continue\n    data_request_address = data.decode('utf-8').split()[1]\n    if data_request_address[-1] == '/':\n        data_request_address += 'index.html' \n    elif data_request_address[0] != '/':\n        send_bytes(411, '') #no address field: return 411, empty fields are not supported\n    \n    data_request_method = data.decode('utf-8').split()[0]\n    #POST requests are left for later handling\n    #two cases: application/x-www-form-urlencoded and multipart/form-data\n    #revisit RFC 2616 later\n    if data_request_method == 'POST':\n        print('\\t POST_DATA:' + data.decode('utf-8').split()[-1])\n    \n    target = location\n    for i in data_request_address.split('/')[1:]:\n        target = target + '/' + i\n    \n    try:\n        try:\n            with open(target) as data_send:\n                data_send = data_send.read()\n            send_bytes(200, data_send)\n            \n        except UnicodeDecodeError:\n            data_send = '''HTTP/1.1 200 OK\nContent-Type: {0}\n\n'''.format(content_type[target[-3:]]) #files that fail text decoding (images) are sent as binary via sendall\n            f = open(target, 'rb').read()\n            data_send = bytes(data_send, 'utf-8') + f\n            client.sendall(data_send)\n            \n    except FileNotFoundError:\n        target = location + '/404.html'\n        with open(target) as data_send:\n            data_send = data_send.read()\n        send_bytes(404, data_send)\n        \n    client.close()\n    print()" }, { "alpha_fraction": 0.6421052813529968, "alphanum_fraction": 0.6631578803062439, "avg_line_length": 91, "blob_id": "79fc20c65ef033dced3ce432d9669a37b82", "content_id": "12642ac139e892a9c6fbc3280e53521b50ee0ec0", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 95, "license_type": "permissive", "max_line_length": 91, "num_lines": 1, "path": "/html/js/cufon-replace.js", "repo_name": "kaixili/HttpServer", 
"src_encoding": "UTF-8", "text": "Cufon.replace('#menu a, h2, h3', { fontFamily: 'Kozuka Gothic Pro OpenType', hover:true });\r\n\r\n" }, { "alpha_fraction": 0.6466974020004272, "alphanum_fraction": 0.6789554357528687, "avg_line_length": 30.047618865966797, "blob_id": "26290b14806d38d94c51ec137d6f48fd9492b06b", "content_id": "2b53c481c667244b12cd1c4db54f2d0d12d40ad3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 651, "license_type": "no_license", "max_line_length": 70, "num_lines": 21, "path": "/old&reference/https.py", "repo_name": "kaixili/HttpServer", "src_encoding": "UTF-8", "text": "import ssl, socket, time\ncontext = ssl.SSLContext(ssl.PROTOCOL_SSLv23)\ncontext.load_cert_chain(certfile='cacert.pem', keyfile='privkey.pem') \n \nbindsocket = socket.socket()\nbindsocket.bind(('127.0.0.1', 443))\nbindsocket.listen(1023)\n\nnewsocket, address = bindsocket.accept()\nconnstream = context.wrap_socket(newsocket, server_side=True)\n\ntry:\n    data = connstream.recv(1024)\n    print(data.decode('utf-8') + 'from' + str(address))\n    data_send = 'time is %f\\n\\n\\n\\n'%time.time()\n    \n    connstream.send(bytes(data_send,'utf-8'))\nfinally:\n    connstream.shutdown(socket.SHUT_RDWR)\n    connstream.close()\n    bindsocket.close()" }, { "alpha_fraction": 0.5861386060714722, "alphanum_fraction": 0.6128712892532349, "avg_line_length": 25.605262756347656, "blob_id": "c4d9ea9359fb2cefd0514beaa263539c0eff3124", "content_id": "1071a87ff1ed536550a254122addef7aaa63bc87", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1010, "license_type": "no_license", "max_line_length": 96, "num_lines": 38, "path": "/old&reference/test.py", "repo_name": "kaixili/HttpServer", "src_encoding": "UTF-8", "text": "import optparse\nimport os\nimport socket\nimport time\nimport ssl\nfrom threading import Thread\nfrom io import StringIO\nfrom send_https import https_server\n\nlocation = os.getcwd() + '/html'\nfrom data2 import content_type, responses_stat\n\n\ndef main_https():\n    _ssl = ssl.SSLContext(ssl.PROTOCOL_SSLv23)\n    try:\n        _ssl.load_cert_chain(certfile='cacert.pem', keyfile='privkey.pem')\n    except:\n        print('''ssl error!\n    try: openssl req -new -x509 -days 365 -nodes -out cacert.pem -keyout privkey.pem''')\n        return -1\n    ss = socket.socket()\n    ss.bind(('', 443))\n    ss.listen(1023)\n    print(\"HTTP server at {0}:{1}\".format('0.0.0.0', 443))\n    \n    while True:\n        try:\n            client, address = ss.accept()\n        except KeyboardInterrupt:\n            ss.close()\n            return -1\n        https_server(_ssl, client, address)\n        #thread = Thread(target = https_server, args = [_ssl, client, address])\n        #thread.setDaemon()\n        #thread.start()\n\nmain_https()" }, { "alpha_fraction": 0.5161734223365784, "alphanum_fraction": 0.5372791886329651, "avg_line_length": 29.454545974731445, "blob_id": "c55cff373c23446100668cbc1b1182dac96bd734", "content_id": "66f463d66e93c1d7aa3e197a37533bfeea954af7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4413, "license_type": "no_license", "max_line_length": 117, "num_lines": 143, "path": "/httpserver.py", "repo_name": "kaixili/HttpServer", "src_encoding": "UTF-8", "text": "#import optparse\nimport os\nimport socket\nimport time\nimport ssl\n#import multiprocessing\n\nfrom threading import Thread\nfrom io import StringIO\n\nfrom data2 import content_type, responses_stat #two data dictionaries\n\nhost, port, ssl_port = '', 80, 443\nlocation = os.getcwd() + '/html'\n\n\n\ndef 
http_server(client, address, _ssl = 0):\n    if _ssl:\n        client = _ssl.wrap_socket(client, server_side=True)\n    \n    def send_bytes(responses, data): \n        client.send(bytes('HTTP/1.1 {0} {1}\\r\\n'.format(responses, responses_stat[responses][0]), 'UTF-8'))\n        client.send(bytes(\"Content-Type: {0}\\r\\n\\r\\n\".format(content_type[target[-3:]]), 'UTF-8'))\n        client.send(bytes(data, 'UTF-8'))\n    \n    data = client.recv(1024)\n    print('From{1}:\\n {0}'.format(data.decode('utf-8'), address), end='')\n    \n    if data:\n        data_request_method = data.decode('utf-8').split()[0]\n        if data_request_method == 'POST':\n            http_GUI(data.decode('utf-8').split()[-1], address)\n        \n        data_get = data.decode('utf-8').split()[1].split('?')\n        data_request_address = data_get[0]\n        #if len(data_get) != 1:\n        #    data_post = '1'\n        #    for i in data_get:\n        #        try:\n        #            data_post += data_get[i]\n        #        except:\n        #            pass\n        #    http_GUI(data_post, address)\n        \n        \n        \n        if data_request_address[-1] == '/':\n            data_request_address += 'index.html' \n        elif data_request_address[0] != '/':\n            send_bytes(411, '<html><head><title>411 - badrequest</title></head><body>411 - badrequest</body></html>')\n        \n        target = location\n        for i in data_request_address.split('/')[1:]:\n            target = target + '/' + i\n        \n        try:\n            try:\n                with open(target) as data_send:\n                    data_send = data_send.read()\n                send_bytes(200, data_send)\n                \n            except UnicodeDecodeError:\n                data_send = '''HTTP/1.1 200 OK\nContent-Type: {0}\n\n'''.format(content_type[target[-3:]]) #files that fail text decoding (images) are sent as binary via sendall\n                f = open(target, 'rb').read()\n                data_send = bytes(data_send, 'utf-8') + f\n                client.sendall(data_send)\n\n        except FileNotFoundError:\n            target = location + '/404.html'\n            with open(target) as data_send:\n                data_send = data_send.read()\n            send_bytes(404, data_send)\n        \n    client.close()\n    \n\ndef http_GUI(data_post, address):\n    print('\\n!----RECEIVE SOMETHING FROM{0}:\\n |{1}'.format(address, data_post))\n    c = open('post_data', 'w+')\n    c.write(data_post)\n    c.write('FROM {0}\\n'.format(address))\n    c.close()\n    \n\n\ndef main():\n    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) #IPv4 (AF_INET, IP version 4) and TCP (SOCK_STREAM)\n    s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) #allow the address to be reused (SO_REUSEADDR)\n    s.bind((host,port))\n    s.listen(1024)\n    print(\"HTTP server at {0}:{1}\".format(host, port))\n    \n    thread_https = Thread(target = main_https, args = [])\n    thread_https.setDaemon(True)\n    thread_https.start()\n    \n    \n    while True:\n        try:\n            client, address = s.accept()\n        except KeyboardInterrupt:\n            print('GOT CTRL^C, QUITing')\n            s.close()\n            return -1\n        thread = Thread(target = http_server, args = [client, address])\n        thread.setDaemon(False)\n        thread.start()\n\n\ndef main_https():\n    _ssl = ssl.SSLContext(ssl.PROTOCOL_SSLv23)\n    try:\n        _ssl.load_cert_chain(certfile='cacert.pem', keyfile='privkey.pem')\n    except:\n        print('''ssl error!\n    try: openssl req -new -x509 -days 365 -nodes -out cacert.pem -keyout privkey.pem''')\n        return -1\n    ss = socket.socket()\n    ss.bind(('', ssl_port))\n    ss.listen(1023)\n    print(\"HTTPS server at {0}:{1}\".format(host, ssl_port))\n    \n    while True:\n        try:\n            client, address = ss.accept()\n        except:\n            ss.close()\n            return -1\n        thread = Thread(target = http_server, args = [client, address, _ssl])\n        thread.setDaemon(False)\n        thread.start()\n    \n\n\nif __name__ == '__main__':\n    if os.path.exists(location + '/index.html'):\n        main()\n    else:\n        print('Please put HTTP files in ../html')\n    " } ]
10
Sharma387/hello-world
https://github.com/Sharma387/hello-world
d39b80ad3b7e2be600e13d393988e25c084dfde7
2c26116f484f1ef13af29d9e570170e9f3d4df89
6275e63fcac8fe80cbf64759a5966e8473b80cd9
refs/heads/master
2021-08-24T05:36:57.730922
2017-12-08T07:43:31
2017-12-08T07:43:31
112,998,664
0
0
null
2017-12-04T05:27:49
2017-12-04T05:27:49
2017-12-04T05:49:45
null
[ { "alpha_fraction": 0.6989436745643616, "alphanum_fraction": 0.6989436745643616, "avg_line_length": 19.321428298950195, "blob_id": "7c1065fc730a36aca1d7698e825e6697bd70842f", "content_id": "4200a5eb06aa16e58cc56ed3ae61919a117139c8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 568, "license_type": "no_license", "max_line_length": 45, "num_lines": 28, "path": "/FileHandling.py", "repo_name": "Sharma387/hello-world", "src_encoding": "UTF-8", "text": "#File Handlings\nimport csv\n\n'''\nimport tensorflow as tf\n\nhello = tf.constant('Hello Tensorflow')\nsess = tf.session()\nprint(sess.run(hello))\n\n\n'''\nfileName = 'NewFileCreation.txt'\n\nfile = open(fileName, mode = 'w')\nfile.write('this is my first line \\n')\nfile.write('this is my Second line \\n')\nfile.write('this is my third line \\n')\nfile.write('this is my Fourth line \\n')\nfile.close()\n\n#now ope the same file and print\n\nwith open(fileName,mode ='r') as Diffcsvfile:\n allRowList = csv.reader(Diffcsvfile)\n\n for currentRow in allRowList:\n print(currentRow)" }, { "alpha_fraction": 0.8008658289909363, "alphanum_fraction": 0.8008658289909363, "avg_line_length": 76, "blob_id": "4c7baff741366bf951f612f490e7c3519a68a614", "content_id": "4e201da6afce2addd4905fd60687daffcdc1daf8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 231, "license_type": "no_license", "max_line_length": 158, "num_lines": 3, "path": "/README.md", "repo_name": "Sharma387/hello-world", "src_encoding": "UTF-8", "text": "# hello-world\nThis is my first hello world program using git repository\nI have just created a new branch called readme-edits and doing a change to the Readme.md file and I will be comitting this changes to the readme-edits branch.\n" }, { "alpha_fraction": 0.6716294288635254, "alphanum_fraction": 0.7121587991714478, "avg_line_length": 28.5, "blob_id": "c718eb9ac34c50cb58d5c018193e1bd2d175c508", "content_id": "038c7fdf951f2f967d678f8c938b6ca1b10b30e2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2418, "license_type": "no_license", "max_line_length": 116, "num_lines": 82, "path": "/DataTypes.py", "repo_name": "Sharma387/hello-world", "src_encoding": "UTF-8", "text": "#Datatypes\n#Integer - int 3, float 3.7 and complex 5j\n\n#String datatype\nstring1 = 'sharma'\nstring2 = 'shyam'\nprint('Fulname =', string2+string1) #concatinate strings example\nprint('repeate the string 10 times',string1*10) #repeate string\nprint('slicing the string it is similar to trim',string1[2:4]) #trim feature\nprint('indexing the string its nothing by picking char from a string',string1[-2]+string1[1]) #pick char from string\n\n#String type specific methods\nstring3 =\"this is very interesting and going good so far and in fact easier than java\"\n#find()\nprint(\"find the and word in the string3 \",string3.find('and'))\n\n#replace()\nprint(\"replace very word in the string3 \",string3.replace('very','very good'))\n\n#split() - this will split the string value and stores it in an array or otherwise called tuples\nprint(\"split the string by space and store it in tuplese\",string3.split(' '))\n\n#Count()\nprint('count a character in the string',string3.count('a'))\n\n#capitalize(0\nprint(string3.capitalize())\nprint(string3.upper())\nprint(string3.center(3))\nprint(string3.encode())\nprint(string3.isalpha())\nprint(string3.strip(\"java\"))\n\n# tuples - value cannot be changed kind of 
constant array\ntuples1 = (1,2,3,4,6)\nprint(tuples1.count(3))\n\n\n#Mutable strings\n\n# list - similar to array values can be changed\n\nlist1 = [1,2,4,5]\n#apped a value in the list1\nlist1.append(4)\nprint(list1)\n\n#Dictionary - it is like storing value in Key:value pair\n\n#empty dictionary - you can add value to it later\nmy_dict1 ={}\n\n#dictionary with integer key\nmy_dict2 = {1:'sharma',2:'shyam', 3:'rajasekar', 4:'minty'}\n\n\n#dictionary with mixed keys\nmy_dict3 = {1:'sharma',2:['a','b','c']}\n\n#from sequence having each item as pair\nmy_dict4 = dict([(1,'apple'),(2,'pine')])\n\n#lets what are functions we have it dictionary\nprint('keys from my_dict2',my_dict2.keys())\nprint('values from my_dict2',my_dict2.values())\nprint('values from key 2 in my_dict2',my_dict2.get(2))\n\n\n#set - its a collection of elements but cannot have duplicate values init\n\nmy_set ={1,2,3,4,5,6,8,9,8,10}\nprint(my_set)\n\n#operation you can perform are\n#Union, intersection, differences.. lets see the examples\n\nmy_set1 = {1,2,3,'s','r','y','u'}\nmy_set2 = {'a','c',6,3,5,9,'d','s','y','r','x','u'}\n\nprint('Union', my_set1 | my_set2) #this is A Union B\nprint('Intersection', my_set1 & my_set2) #this is A intersection B\nprint('Difference', my_set1 - my_set2) #this is A difference B" }, { "alpha_fraction": 0.7179487347602844, "alphanum_fraction": 0.7179487347602844, "avg_line_length": 24.83333396911621, "blob_id": "11269fbcc915c527672fcf17d9f48334cd4cd449", "content_id": "cceda2ebbdc017d8b0e9250ab0a5f0bd86de07c3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 156, "license_type": "no_license", "max_line_length": 92, "num_lines": 6, "path": "/DateHandling.py", "repo_name": "Sharma387/hello-world", "src_encoding": "UTF-8", "text": "#Date Handling\nimport datetime\n\nprint(datetime.date.today())\n\nprint(datetime.datetime.today().strftime('%d %b %y')) #%b is for month and %m is for minutes\n\n" }, { "alpha_fraction": 0.6377396583557129, "alphanum_fraction": 0.7083753943443298, "avg_line_length": 19.60416603088379, "blob_id": "80ed40e852f9401cfccae5b73190eefb742fdd9f", "content_id": "1f6c4edae5332cf52a5ef63101157316bb4fbe66", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 991, "license_type": "no_license", "max_line_length": 74, "num_lines": 48, "path": "/OperatorExample.py", "repo_name": "Sharma387/hello-world", "src_encoding": "UTF-8", "text": "'''\n\nThis section is commenting a para or section\nNothing in this section will be executed\n\n'''\n\n\n#Arithmetic Operators\nnum1 = 10\nnum2 = 20\nprint(\"Num1 + Num2\",num1+num2)\nprint(\"Num1 - Num2\",num1-num2)\nprint(\"Num1 / Num2\",num1/num2)\nprint(\"Num1 * Num2\",num1*num2)\nprint(\"Num1 ** Num2\",num1**num2)\nprint(\"Num1 % Num2\",num1%num2)\nprint(\"Num1 // Num2\",num1//num2)\n\n#Assignment Operators\nnum2+=num1 #num2=num2+num1\nprint(num2)\n\n#Comparison Operator\n#>,<,=!,==\n\n#Logical Operator\n#and or not\n\n#bitwise operators\n#it converts the numbers into bits then perform the 'and or xor' operators\n\nnum3 = 6 #bit value is 110\nnum4 = 2 #bit value is 010\n\nprint(\"Bitwise operaton num3 and num3\",num3 & num4)\nprint(\"Bitwise operaton num3 or num3\",num3 | num4)\nprint(\"Bitwise operaton num3 xor num3\",num3 ^ num4)\n\n#shift operations in bit\nprint(\"rightshit num4 by 2 bit\", num4 >> 2)\nprint(\"leftshit num4 by 2 bit\", num4 << 2)\n\n#identity operator\n#is\n\n#membership operators - mainly used in list\n#in , not 
in\n\n\n" }, { "alpha_fraction": 0.6696832776069641, "alphanum_fraction": 0.7149321436882019, "avg_line_length": 14.785714149475098, "blob_id": "d4e4b1aabcaefff07dc1f130ca0b4eb76e25a", "content_id": "15eace689f433fff7016e9db93cc3a3c818caddd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 221, "license_type": "no_license", "max_line_length": 43, "num_lines": 14, "path": "/Turtle.py", "repo_name": "Sharma387/hello-world", "src_encoding": "UTF-8", "text": "import turtle\n\n#the above library is used for drawing\n\nturtle.color('green')\n\n#put this code in loop\nfor square in range(0,4,1):\n\n    turtle.forward(100)\n    turtle.right(90) # 90 degree turn right\n\n\nturtle.exitonclick()\n" }, { "alpha_fraction": 0.75, "alphanum_fraction": 0.7521008253097534, "avg_line_length": 24.052631378173828, "blob_id": "700c933e8dbfd4601044f6a3354c2d24fd34aafa", "content_id": "22fb6bd07db72a359672bcade06e68cf941b69c8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 476, "license_type": "no_license", "max_line_length": 69, "num_lines": 19, "path": "/testSelenium.py", "repo_name": "Sharma387/hello-world", "src_encoding": "UTF-8", "text": "import os\nimport time\n\nfrom selenium import webdriver\n\nfrom selenium.webdriver.common.keys import Keys\n\nPROJECT_ROOT = os.path.abspath(os.path.dirname(__file__))\nDRIVER_BIN = os.path.join(PROJECT_ROOT, \"chromedriver\")\n\nprint DRIVER_BIN\n\n\n\ndriver = webdriver.Chrome(executable_path = DRIVER_BIN)\ndriver.get('http://www.google.com')\ndriver.find_element_by_name(\"q\").send_keys(\"how to search in google\")\ndriver.find_element_by_name(\"btnK\").send_keys(Keys.ENTER)\ntime.sleep(4)\ndriver.close()\n" }, { "alpha_fraction": 0.6220472455024719, "alphanum_fraction": 0.6325459480285645, "avg_line_length": 19.052631378173828, "blob_id": "0931d86b5c1befecd9e8e185cc10218e90274fad", "content_id": "752a8139db5b7b40bbb3bc7fc7d057db002a8599", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 381, "license_type": "no_license", "max_line_length": 83, "num_lines": 19, "path": "/Functions.py", "repo_name": "Sharma387/hello-world", "src_encoding": "UTF-8", "text": "#Functions - set of instructions. 
All the functions will start with the keyword def\n\ndef myAddFunction(a,b):\n    result = a+b\n    return result\n#lets call this function\nprint(myAddFunction(1,2))\n\n\ndef fibo(n):\n    a=0\n    b=1\n    for x in range(n):\n        a, b = b, a+b\n        print(a)\n    return b\nnum=int(input(\"enter a fibo no:\")) #typecast is needed here\nprint(fibo(num))\n" }, { "alpha_fraction": 0.6743732690811157, "alphanum_fraction": 0.7142060995101929, "avg_line_length": 38.900001525878906, "blob_id": "8be431a5a7b16e852a853481093c93af44c2bf93", "content_id": "c4bec9ef3e824884a61291231b07b7160dab4796", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3590, "license_type": "no_license", "max_line_length": 183, "num_lines": 90, "path": "/newWebScrapping.py", "repo_name": "Sharma387/hello-world", "src_encoding": "UTF-8", "text": "import os\nfrom selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\n\nfrom lxml import html\nimport requests\n\nPROJECT_ROOT = os.path.abspath(os.path.dirname(__file__))\nDRIVER_BIN = os.path.join(PROJECT_ROOT, \"chromedriver\")\n\nprint DRIVER_BIN\n\ndriver = webdriver.Chrome(executable_path = DRIVER_BIN)\n\n#Load the main page.\n#driver.get('http://s15.a2zinc.net/clients/MJBiz/Fall17MJBizCon/Public/Exhibitors.aspx?ID=261&sortMenu=104001')\n\n#Loop the entire page to get the details printed for all the rows\n\nfor i in range(1,99,1):\n    # Load the main page.\n    driver.get('http://s15.a2zinc.net/clients/MJBiz/Fall17MJBizCon/Public/Exhibitors.aspx?ID=261&sortMenu=104001')\n\n    #select the second row from the table\n    xPathLink = '//*[@id=\"ctl00_dvContent\"]/div[11]/div[1]/div/div/div[3]/div/table/tbody/tr[' + str(i) +']/td[2]/a'\n    print xPathLink\n    driver.find_element_by_xpath(xPathLink).click()\n\n    #driver.get('http://s15.a2zinc.net/clients/MJBiz/Fall17MJBizCon/Public/eBooth.aspx?IndexInList=1&FromPage=Exhibitors.aspx&ParentBoothID=&ListByBooth=true&BoothID=105225')\n    #page = requests.get('http://s15.a2zinc.net/clients/MJBiz/Fall17MJBizCon/Public/eBooth.aspx?IndexInList=1&FromPage=Exhibitors.aspx&ParentBoothID=&ListByBooth=true&BoothID=105225')\n\n    page = requests.get(driver.current_url)\n    #//*[@id=\"ctl00_dvContent\"]/div[11]/div[1]/div/div/div[3]/div/table/tbody/tr[2]/td[2]/a\n\n\n    #driver.get('http://s15.a2zinc.net/clients/MJBiz/Fall17MJBizCon/Public/eBooth.aspx?Task=&IndexInList=3&FromPage=Exhibitors.aspx&BoothID=104735&Upgraded=&SortMenu=')\n    #page = requests.get('http://s15.a2zinc.net/clients/MJBiz/Fall17MJBizCon/Public/eBooth.aspx?Task=&IndexInList=3&FromPage=Exhibitors.aspx&BoothID=104735&Upgraded=&SortMenu=')\n    #tree = html.fromstring(page.content)\n    tree = html.fromstring(page.content)\n    #tree = html.fromstring(page.text)\n    #print tree1\n\n    #This will create a list of buyers:\n    #buyers = tree.xpath('//div[@title=\"\"]/text()')\n    exhibitor_name = tree.xpath('//*[@id=\"eboothContainer\"]/div[2]/div/h1/text()')\n\n    #Address\n    # City\n    exhibitor_city = tree.xpath('//*[@id=\"eboothContainer\"]/div[2]/div/div[1]/span[1]/text()')\n\n    # State\n    exhibitor_state = tree.xpath('//*[@id=\"eboothContainer\"]/div[2]/div/div[1]/span[2]/text()')\n    # Country\n    exhibitor_country = tree.xpath('//*[@id=\"eboothContainer\"]/div[2]/div/div[1]/span[3]/text()')\n\n    #Company URL\n    exhibitor_url = tree.xpath('//*[@id=\"BoothContactUrl\"]/text()')\n\n\n\n    exhibitor_facebook = tree.xpath('//*[@id=\"ctl00_ContentPlaceHolder1_ctrlCustomField_Logos_dlCustomFieldList_ctl00_lnkCustomField\"]/@href')\n\n    exhibitor_tweeter = 
tree.xpath('//*[@id=\"ctl00_ContentPlaceHolder1_ctrlCustomField_Logos_dlCustomFieldList_ctl01_lnkCustomField\"]/@href')\n exhibitor_linkedin = tree.xpath('//*[@id=\"ctl00_ContentPlaceHolder1_ctrlCustomField_Logos_dlCustomFieldList_ctl02_lnkCustomField\"]/@href')\n\n print exhibitor_name\n print exhibitor_city\n print exhibitor_state\n print exhibitor_country\n print exhibitor_facebook\n print exhibitor_linkedin\n print exhibitor_tweeter\n print exhibitor_url\n\n\n\n#CLick on the back button or reload the page\n # driver.find_element_by_xpath('//*[@id=\"ctl00_ContentPlaceHolder1_lnkBackToSearch\"]').click()\n\n\n #driver.find_element_by_class_name()\n\n #increment i by 1 for next element\n\n#time.wait(4)\n#driver.close()\n\n\n#//*[@id=\"ctl00_dvContent\"]/div[11]/div[1]/div/div/div[3]/div/table/tbody/tr[2]/td[2]/a\"\n#//*[@id=\"ctl00_dvContent\"]/div[10]/div[1]/div/div/div[3]/div/table/tbody/tr[2]/td[2]/a" }, { "alpha_fraction": 0.5629138946533203, "alphanum_fraction": 0.5860927104949951, "avg_line_length": 29.299999237060547, "blob_id": "a9df1090a8497ec0f8067bc221a1b3b4c0d138d1", "content_id": "d99e6d56b70bf8a2e3857671a92acd725cc46cb8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 302, "license_type": "no_license", "max_line_length": 90, "num_lines": 10, "path": "/FlowControl.py", "repo_name": "Sharma387/hello-world", "src_encoding": "UTF-8", "text": "#for loop\n\nfor quant in range(100,0,-1):\n if(quant>0):\n print(quant, \" bottle of bear on the wall \", quant, \" bottles of bear\")\n print(\"Take one down and pass it around\", quant-1, \" bottle of bears on the wall\")\n else:\n print('no more bears on the wall')\n\n print('------')" } ]
10
SergeNanou/batch8_mednum
https://github.com/SergeNanou/batch8_mednum
8f445eee49b7779b48f855d007783deec27f1839
f7f11b4afb02fc1f6d6a345ad089f964d8c3188d
3f10bf0bb1c38c01972d8d390e6583f9ad6b5a3d
refs/heads/master
2023-02-03T08:13:19.761152
2020-12-15T23:16:13
2020-12-15T23:16:13
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6606335043907166, "alphanum_fraction": 0.6711915731430054, "avg_line_length": 22.963855743408203, "blob_id": "d7263bbea52200b24adf5e4428df9b9ccd62548a", "content_id": "069e6e1d8d5b23bc241e24097ecdbeff7ca3f74d", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1990, "license_type": "permissive", "max_line_length": 124, "num_lines": 83, "path": "/dockerize-apps/panel/mednumapp.py", "repo_name": "SergeNanou/batch8_mednum", "src_encoding": "UTF-8", "text": "import panel as pn\nimport numpy as np\nimport holoviews as hv\nimport numpy as np\nimport panel as pn\nfrom panel.widgets.select import AutocompleteInput\nimport param\nfrom mednum.config import *\nfrom mednum.loaders import read_merged_data\n\n# from mednum.controlers.autocomplete import AppAuto\nfrom mednum.indicators.panels import TopIndicators, Indicators\nfrom pathlib import Path\nimport mednum\n\ncss_mednum = [str(Path(__file__).parent / \"statics\" / \"css\" / \"mednum.css\")]\n\ncss = [\n \"https://cdn.datatables.net/1.10.19/css/jquery.dataTables.min.css\",\n css_mednum[0],\n]\njs = {\n \"$\": \"https://code.jquery.com/jquery-3.4.1.slim.min.js\",\n \"DataTable\": \"https://cdn.datatables.net/1.10.19/js/jquery.dataTables.min.js\",\n}\n\npn.extension(css_files=css_mednum)\n\n\ntemplate = \"\"\"\n{% extends base %}\n\n<!-- goes in body -->\n{% block postamble %}\n<link rel=\"stylesheet\" href=\"https://stackpath.bootstrapcdn.com/bootstrap/4.3.1/css/bootstrap.min.css\">\n{% endblock %}\n\n<!-- goes in body -->\n{% block contents %}\n{{ app_title }}\n<p>This is a Panel app with a custom template allowing us to compose multiple Panel objects into a single HTML document.</p>\n<br>\n<div class=\"container-fluid\">\n<div class=\"row\">\n <div class=\"col-sm-2\">\n {{ embed(roots.sidebar)}}\n </div>\n <div class=\"col-sm-8 ml-auto\">\n <div class=\"row\">\n {{ embed(roots.top)}}\n </div>\n <div class=\"row\">\n {{ embed(roots.main)}}\n </div>\n </div>\n </div>\n</div>\n\n\n{% endblock %}\n\"\"\"\n\ntmpl = pn.Template(template)\ntmpl.add_variable(\"app_title\", \"<h1>Custom Template App</h1>\")\n\n# Sidebar\nclass AutoComplete(param.Parameterized):\n nom_commune = param.String()\n\n\nmednumapp = mednum.MedNumApp(name=\"Sรฉlection\")\n\n# Top indicator\ntmpl.add_panel(\"sidebar\", mednumapp.lat_widgets)\ntmpl.add_panel(\n \"top\", pn.Row(mednumapp.top_panel, sizing_mode=\"stretch_width\")\n),\ntmpl.add_panel(\n \"main\", mednumapp.tabs_view,\n)\n# tmpl.add_panel(\"main\", mednumapp.table_view)\n\ntmpl.servable()\n" } ]
1
ChangePlusPlusVandy/change-coding-challenge-Kwameoab
https://github.com/ChangePlusPlusVandy/change-coding-challenge-Kwameoab
74c4a5039c3c87ac9577a86217c6bc303c15e2f0
fa230bf610f59a7f56c489b8f25a95d6e5dfcfe4
ba8d7958ea1ac29da8b2330676c973939dbf4b82
refs/heads/master
2022-12-19T13:13:43.286483
2020-09-26T03:36:47
2020-09-26T03:36:47
295,882,836
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5913913249969482, "alphanum_fraction": 0.611397385597229, "avg_line_length": 31.02912712097168, "blob_id": "83389f2fc4918091a41c53316469b911f016879b", "content_id": "b8bc505747d9545d32cc3ba2cc6f04b31c805c57", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3299, "license_type": "no_license", "max_line_length": 84, "num_lines": 103, "path": "/change2020.py", "repo_name": "ChangePlusPlusVandy/change-coding-challenge-Kwameoab", "src_encoding": "UTF-8", "text": "\"\"\"\n\n@author: Kwame Ampadu-Boateng\n\n\"\"\"\nimport requests\nimport random\nimport time\n\n\n# This class is used in order to make a request using my Bearer Token\nclass BearerAuth(requests.auth.AuthBase):\n def __init__(self, token):\n self.token = token\n\n def __call__(self, r):\n r.headers[\"authorization\"] = \"Bearer \" + self.token\n return r\n\n\n# This method return the json object of the last 3200 tweets of a user\ndef getJson(user, count=3200):\n url = 'https://api.twitter.com/1.1/statuses/user_timeline.json?screen_name='\n url += user\n url += '&count=' + str(count)\n response = requests.get(url, auth=BearerAuth(\n 'AAAAAAAAAAAAAAAAAAAAAKyyHwEAAAAABAnig801%2FLZrGQ2Ta5hT9Sog4pg%3DJpWSr'\n + '583RYptt4TMttnz2I8IFb23EPrnqptRDNwIstIsIC2ktx'))\n return response.json()\n\n\n# This methods checks each tweet in the json object to make sure that it does \n# not contain a link or an @, if it does not then the tweet is added into an \n# array of acceptable tweets\ndef filterJson(json):\n realResponse = []\n for i in range(len(json)):\n if \"@\" not in json[i]['text'] and \"https\" not in json[i]['text']:\n realResponse.append(json[i]['text'])\n return realResponse\n\n\n# This method gets a random tweet from the array of acceptable tweets\ndef getTweet(tweetList):\n index = random.randrange(len(tweetList))\n tweet = tweetList[index]\n # We pop because we do not want to give to tweet back to the user\n tweetList.pop(index)\n return tweet\n\n\nif __name__ == \"__main__\":\n character1 = 'elonmusk'\n character2 = 'kanyewest'\n print(\"You will be playing a guessing game. You will get a random tweet \" +\n \"from 2 people and you have to choose which one the tweets \" +\n f\"belongs to. The people for this game are {character1} and \" +\n f\"{character2}.\")\n print()\n \n char1List = getJson('elonmusk')\n char2List = getJson('kanyewest')\n \n char1List = filterJson(char1List)\n char2List = filterJson(char2List)\n correct, total = 0, 0\n \n print()\n print(\"Get ready to play!!!\")\n print()\n time.sleep(5)\n \n # Continue with the game until one of the arrays is empty\n while len(char1List) > 0 and len(char2List) > 0:\n # choose from one of the characters to display the tweet\n chooseChar = random.randrange(1, 3) \n if chooseChar == 1:\n displayTweet = getTweet(char1List)\n else:\n displayTweet = getTweet(char2List)\n print(displayTweet)\n \n userChoice = input(\n f\"Whose tweet is this? The choices are {character1} or {character2}.\\t\")\n if ((userChoice.lower() == character1.lower() and chooseChar == 1)\n or (userChoice.lower() == character2.lower() and chooseChar == 2)):\n print(\"\\nCorrect!!!\")\n correct += 1\n total += 1\n else:\n print(\"Sorry incorrect. 
\\t :(\")\n total += 1\n \n doQuit = input(\"If you want to quit now type Q or Quit\\t\")\n print()\n print(\"=\"*30)\n print()\n if doQuit.lower() == \"q\" or doQuit.lower() == \"quit\":\n break\n \n print(\"Thank you for playing!!!\")\n print(\n f\"Your stats for this round were {(correct/total) * 100:.4}% correct\")\n" } ]
1
edgarzapeka/rate_people
https://github.com/edgarzapeka/rate_people
ffce5dfe32ca994a492c97b3b366b2e9df6f1818
fd360f464f21f857a90d6b0c604c675cd32d6254
dfb387af6a54b4c948184b0f5f42aefe2eb9fb45
refs/heads/master
2016-09-13T16:20:50.523761
2016-07-06T13:56:13
2016-07-06T13:56:13
62,724,863
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7030878663063049, "alphanum_fraction": 0.7125890851020813, "avg_line_length": 31.384614944458008, "blob_id": "b420c99056923013e1b871d95bb3a476afc5ac4b", "content_id": "1ff735323a8196b877cb03f7e4a6d335082ad4c4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 421, "license_type": "no_license", "max_line_length": 85, "num_lines": 13, "path": "/blog/models.py", "repo_name": "edgarzapeka/rate_people", "src_encoding": "UTF-8", "text": "from django.db import models\nfrom django.conf import settings\n\n# Create your models here.\nclass Photo_model(models.Model):\n name = models.CharField(max_length=150)\n date = models.DateTimeField(auto_now=True)\n img = models.ImageField()\n rate = models.IntegerField(default=0)\n author = models.ForeignKey(settings.AUTH_USER_MODEL, blank=True, default=\"Admin\")\n\n def __str__(self):\n return self.name\n" }, { "alpha_fraction": 0.5155925154685974, "alphanum_fraction": 0.5488565564155579, "avg_line_length": 29.0625, "blob_id": "32d5f62287d267108155062f0cb83632229c0d1a", "content_id": "4534a8ed39a9f37491674738ebcbb8d4f0031eaf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "HTML", "length_bytes": 481, "license_type": "no_license", "max_line_length": 101, "num_lines": 16, "path": "/blog/templates/blog/index.html", "repo_name": "edgarzapeka/rate_people", "src_encoding": "UTF-8", "text": "{% extends 'blog/bace.html' %}\n\n{% block menu %}\n<ul class=\"nav navbar-nav\">\n <li class=\"active\"><a href=\"/\">Home</a></li>\n <li><a href=\"/Top/\">Top</a></li>\n <li><a href=\"#contact\">Contact</a></li>\n</ul>\n{% endblock %}\n\n{% block content %}\n<center>\n <a href=\"/Rate_record/{{ m1.id }}/\"><img src=\"{{ m1.img.url }}\" width=\"400px\" height=\"500px\"></a>\n <a href=\"/Rate_record/{{ m2.id }}/\"><img src=\"{{ m2.img.url }}\" width=\"400px\" height=\"500px\"></a>\n</center>\n{% endblock %}\n" }, { "alpha_fraction": 0.6381579041481018, "alphanum_fraction": 0.646616518497467, "avg_line_length": 26.28205108642578, "blob_id": "de4a7039ea455c11b24d2584d9d321d717fcbccb", "content_id": "5c7e96e3077f732dbc24d203b3b57826de5c7053", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1064, "license_type": "no_license", "max_line_length": 67, "num_lines": 39, "path": "/blog/views.py", "repo_name": "edgarzapeka/rate_people", "src_encoding": "UTF-8", "text": "from django.shortcuts import render\nfrom django.http import HttpResponseRedirect\nfrom .models import Photo_model\nimport random\nfrom django.contrib.auth import authenticate, login\n\n# Create your views here.\ndef index(request):\n models = Photo_model.objects.all()\n m1 = random.choice(models)\n while True:\n m2 = random.choice(models)\n if m2 == m1:\n pass\n else:\n break\n\n return render(request, 'blog/index.html', {'m1': m1, 'm2': m2})\n\ndef rate_record(request, pk):\n model = Photo_model.objects.get(id=pk)\n model.rate += 1\n model.save()\n return HttpResponseRedirect('/')\n\ndef top(request):\n models = Photo_model.objects.all()\n\n return render(request, 'blog/top.html', {'models': models})\n\ndef user_log(request):\n username = request.POST['username']\n password = request.POST['password']\n user = authenticate(username=username, password=password)\n if user is not None:\n if user.is_active():\n login(request, user)\n \n return HttpResponseRedirect('/')\n" }, { "alpha_fraction": 0.8110235929489136, "alphanum_fraction": 0.8110235929489136, 
"avg_line_length": 30.75, "blob_id": "cc9209ee1b8286cf76a3fef6369601ff366a8b47", "content_id": "de5fc8dea258a0f5dd6dfa6635de60d186569ee2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 127, "license_type": "no_license", "max_line_length": 32, "num_lines": 4, "path": "/blog/admin.py", "repo_name": "edgarzapeka/rate_people", "src_encoding": "UTF-8", "text": "from django.contrib import admin\nfrom .models import Photo_model\n# Register your models here.\nadmin.site.register(Photo_model)\n" }, { "alpha_fraction": 0.5876288414001465, "alphanum_fraction": 0.5979381203651428, "avg_line_length": 23.25, "blob_id": "6828d7f629d920644bd95b0dad54bb3e2bda0863", "content_id": "3d33bd7648808eaf8eb5c2b7dc5b2b8b22341840", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 194, "license_type": "no_license", "max_line_length": 61, "num_lines": 8, "path": "/blog/urls.py", "repo_name": "edgarzapeka/rate_people", "src_encoding": "UTF-8", "text": "from django.conf.urls import url\nfrom . import views\n\nurlpatterns = [\n url(r'^$', views.index),\n url(r'^Rate_record/(?P<pk>[0-9]+)/$', views.rate_record),\n url(r'^Top/$', views.top),\n]\n" }, { "alpha_fraction": 0.5722070932388306, "alphanum_fraction": 0.5940054655075073, "avg_line_length": 33.40625, "blob_id": "d9ac9c75cb0cdfb7fa2acf6a5cb40716e68fc1ca", "content_id": "0d4437c6dfc558a9c030d1fca1f0a0c816edebac", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1101, "license_type": "no_license", "max_line_length": 149, "num_lines": 32, "path": "/blog/migrations/0003_auto_20160705_1152.py", "repo_name": "edgarzapeka/rate_people", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n# Generated by Django 1.9.7 on 2016-07-05 11:52\nfrom __future__ import unicode_literals\n\nfrom django.conf import settings\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ('blog', '0002_post_rate'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Photo_model',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('name', models.CharField(max_length=150)),\n ('date', models.DateTimeField(auto_now=True)),\n ('img', models.ImageField(upload_to='')),\n ('rate', models.IntegerField(default=0)),\n ('author', models.ForeignKey(blank=True, default='Admin', on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),\n ],\n ),\n migrations.DeleteModel(\n name='Post',\n ),\n ]\n" } ]
6
sambd86/Canny-Edge-Detector-in-Python
https://github.com/sambd86/Canny-Edge-Detector-in-Python
d54456144290c67fe5e91a50d97cc4bf605f3acd
7400f86b09cfba518ab9c1b1d7b43e6c58675515
f35a27bd6bce18972e0f7fbfb5a965e93b4e8d7f
refs/heads/master
2020-03-10T05:48:01.160034
2018-04-12T09:16:59
2018-04-12T09:16:59
129,225,013
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.8235294222831726, "alphanum_fraction": 0.8235294222831726, "avg_line_length": 50, "blob_id": "c3f5db28f564152ee29c4b07dc3277bd7965e160", "content_id": "bf36271e9e3e8b5723cd8243258ecaeb96a6c29d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 102, "license_type": "no_license", "max_line_length": 69, "num_lines": 2, "path": "/README.md", "repo_name": "sambd86/Canny-Edge-Detector-in-Python", "src_encoding": "UTF-8", "text": "# Canny-Edge-Detector-in-Python\nImplemented canny edge detector in python as a course work assignment\n" }, { "alpha_fraction": 0.4372294247150421, "alphanum_fraction": 0.48602911829948425, "avg_line_length": 21.971698760986328, "blob_id": "0a587b2b9f8fd0ab74cab36cbdf0e19876671729", "content_id": "84de9c4fb19ca24d532d88cc94c10083df777c7a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5082, "license_type": "no_license", "max_line_length": 108, "num_lines": 212, "path": "/cannySamT.py", "repo_name": "sambd86/Canny-Edge-Detector-in-Python", "src_encoding": "UTF-8", "text": "\r\nimport cv2\r\nimport numpy as np\r\nimport PIL\r\nimport scipy.misc\r\nimport matplotlib.pyplot as plt\r\nfrom PIL import ImageFilter, Image\r\n\r\n\r\ndef histeris(retgrad):\r\n thresHigh=50\r\n thresLow=30\r\n init_point = stop(retgrad, thresHigh)\r\n # Hysteresis tracking. Since we know that significant edges are\r\n # continuous contours, we will exploit the same.\r\n # thresHigh is used to track the starting point of edges and\r\n # thresLow is used to track the whole edge till end of the edge.\r\n\r\n while (init_point != -1):\r\n # Image.fromarray(retgrad).show()\r\n # print 'next segment at',init_point\r\n retgrad[init_point[0], init_point[1]] = -1\r\n p2 = init_point\r\n p1 = init_point\r\n p0 = init_point\r\n p0 = nextNbd(retgrad, p0, p1, p2, thresLow)\r\n\r\n while (p0 != -1):\r\n # print p0\r\n p2 = p1\r\n p1 = p0\r\n retgrad[p0[0], p0[1]] = -1\r\n p0 = nextNbd(retgrad, p0, p1, p2, thresLow)\r\n\r\n init_point = stop(retgrad, thresHigh)\r\n\r\n # Finally, convert the image into a binary image\r\n x, y = np.where(retgrad == -1)\r\n retgrad[:, :] = 0\r\n retgrad[x, y] = 1.0\r\n return retgrad\r\ndef stop(im, thres):\r\n '''\r\n This method finds the starting point of an edge.\r\n '''\r\n X, Y = np.where(im> thres)\r\n try:\r\n y = Y.min()\r\n except:\r\n return -1\r\n X = X.tolist()\r\n Y = Y.tolist()\r\n index = Y.index(y)\r\n x = X[index]\r\n return [x, y]\r\n\r\ndef nextNbd(im, p0, p1, p2, thres):\r\n '''\r\n This method is used to return the next point on the edge.\r\n '''\r\n kit = [-1, 0, 1]\r\n X, Y = im.shape\r\n for i in kit:\r\n for j in kit:\r\n if (i + j) == 0:\r\n continue\r\n x = p0[0] + i\r\n y = p0[1] + j\r\n\r\n if (x < 0) or (y < 0) or (x >= X) or (y >= Y):\r\n continue\r\n if ([x, y] == p1) or ([x, y] == p2):\r\n continue\r\n if (im[x, y] > thres): # and (im[i,j] < 256):\r\n return [x, y]\r\n return -1\r\n\r\ndef maximum(det, phase):\r\n\r\n gmax = np.zeros(det.shape)\r\n\r\n for i in np.arange(gmax.shape[0]):\r\n\r\n for j in np.arange(gmax.shape[1]):\r\n\r\n if phase[i][j] < 0:\r\n\r\n phase[i][j] += 360\r\n\r\n\r\n\r\n if ((j+1) < gmax.shape[1]) and ((j-1) >= 0) and ((i+1) < gmax.shape[0]) and ((i-1) >= 0):\r\n\r\n # 0 degrees\r\n\r\n if (phase[i][j] >= 337.5 or phase[i][j] < 22.5) or (phase[i][j] >= 157.5 and phase[i][j] < 202.5):\r\n\r\n if det[i][j] >= det[i][j + 1] and det[i][j] >= det[i][j - 1]:\r\n\r\n gmax[i][j] = 
det[i][j]\r\n\r\n                # 45 degrees\r\n\r\n                if (phase[i][j] >= 22.5 and phase[i][j] < 67.5) or (phase[i][j] >= 202.5 and phase[i][j] < 247.5):\r\n\r\n                    if det[i][j] >= det[i - 1][j + 1] and det[i][j] >= det[i + 1][j - 1]:\r\n\r\n                        gmax[i][j] = det[i][j]\r\n\r\n                # 90 degrees\r\n\r\n                if (phase[i][j] >= 67.5 and phase[i][j] < 112.5) or (phase[i][j] >= 247.5 and phase[i][j] < 292.5):\r\n\r\n                    if det[i][j] >= det[i - 1][j] and det[i][j] >= det[i + 1][j]:\r\n\r\n                        gmax[i][j] = det[i][j]\r\n\r\n                # 135 degrees\r\n\r\n                if (phase[i][j] >= 112.5 and phase[i][j] < 157.5) or (phase[i][j] >= 292.5 and phase[i][j] < 337.5):\r\n\r\n                    if det[i][j] >= det[i - 1][j - 1] and det[i][j] >= det[i + 1][j + 1]:\r\n\r\n                        gmax[i][j] = det[i][j]\r\n\r\n    return gmax\r\n\r\n'non max ends here'\r\ndef gray_gradient(image):\r\n    Gx, Gy = np.gradient(image)\r\n    Gm = np.sqrt(Gx ** 2 + Gy ** 2)\r\n    Gd = np.arctan2(Gy, Gx)\r\n    Gd[Gd > 0.5*np.pi] -= np.pi\r\n    Gd[Gd < -0.5*np.pi] += np.pi\r\n    return Gm, Gd\r\n'''gradient ends here'''\r\ndef gaussianSam(im):\r\n    # write the smoothed result into a copy so the kernel always reads unmodified pixels\r\n    im_out=im.copy()\r\n    height=im_out.shape[0]\r\n    width=im_out.shape[1]\r\n    gauss=(1.0/57)*np.array(\r\n        [[0,1,2,1,0],\r\n        [1,3,5,3,1],\r\n        [2,5,9,5,2],\r\n        [1,3,5,3,1],\r\n        [0,1,2,1,0]])\r\n    #sum(sum(gauss))\r\n    for i in np.arange(2,height-2):\r\n        for j in np.arange(2,width-2):\r\n            sum=0\r\n            for k in np.arange(-2,3):\r\n                for l in np.arange(-2,3):\r\n                    a=im.item(i+k,j+l)\r\n                    p=gauss[2+k,2+l]\r\n                    sum=sum+(p*a)\r\n            b=sum\r\n            im_out.itemset((i,j),b)\r\n\r\n    return im_out\r\n\r\n\r\n'''main start here'''\r\n# im1=cv2.imread('c:/cb.png',1)\r\n# imgRGB = cv2.cvtColor(im1, cv2.COLOR_BGR2RGB)\r\n# image00=im1\r\n#plt.imshow(im)\r\n\r\n\r\n\r\nim=cv2.imread('c:/cb.jpg',0)\r\nimage0=im\r\ng=gaussianSam(im)\r\n#print(im)\r\nimage1 = g\r\n\r\n\r\n\r\n\r\n#gi, theta=gray_gradient(g)\r\n#print(gi,theta)\r\n# cv2 has no sobelx/sobely functions; use cv2.Sobel with the x/y derivative orders\r\ngix=cv2.Sobel(g,cv2.CV_64F,1,0,ksize=5)\r\ngiy=cv2.Sobel(g,cv2.CV_64F,0,1,ksize=5)\r\ngi=np.sqrt(gix**2+giy**2)\r\n# maximum() compares the phase against degree thresholds, so convert from radians\r\ntheta=np.degrees(np.arctan2(giy,gix))\r\nimage2 = gi\r\n#plt.imshow(image)\r\n\r\n\r\nnms=maximum(gi, theta)\r\n\r\n#plt.imshow(image)\r\n\r\n\r\n\r\nimage3 = nms\r\nbinImage=histeris(nms)\r\nimage4 = binImage\r\n#plt.imshow(image)\r\n\r\n\r\n\r\n#binImage=histeris(nms[0])\r\n\r\n\r\n\r\nfig,ax=plt.subplots(2,3)\r\nax[0,0].imshow(image0)\r\n#ax[0,1].imshow(image0)\r\nax[0,1].imshow(image1)\r\nax[0,2].imshow(image2)\r\nax[1,0].imshow(image4)\r\nax[1,1].imshow(image3)\r\nplt.show()" } ]
2
JesseLivezey/mypytools
https://github.com/JesseLivezey/mypytools
bce7399703a5f48f0ce1aac34d56f7e5ae9ede76
f73821a030f85a2c0a0e9f977ef13b021ceb9662
57b000383641f8422f2be078c4770b5fc30a51b0
refs/heads/master
2021-01-24T01:58:47.286005
2013-06-30T00:54:22
2013-06-30T00:54:22
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6782323122024536, "alphanum_fraction": 0.6840759515762329, "avg_line_length": 30.837209701538086, "blob_id": "6ae41993e0bfe33dfc0ce550cab58eb8eb4d3511", "content_id": "69f148a7828395d45e227df3bf712f4fa2bf93a1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5476, "license_type": "no_license", "max_line_length": 154, "num_lines": 172, "path": "/dbwrap.py", "repo_name": "JesseLivezey/mypytools", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python2.7\nimport sqlite3\nimport os\n\ndb_path = None\ntable_name = None\n\n\"\"\"\nModule dbwrap:\n\tMerger of DBAddData and DBReadData\n\nA light wrapper for adding data to a sqlite database using python dicts.\nVariable db_path: sets the path to the desired database.\nVariable table_name: sets the name of the table to be modified in the database.\n\nCall PrintDBCols() to get a list of the columns names in the table & indexes, sorted by whether or not they are required fields.\nGetCols() is available to avoid typographical errors.\nAddToDB(**kwargs) is the function to add data to the database.\n\"\"\"\n\ndef CheckTable():\n\tif db_path == None or table_name == None:\n\t\tprint \"You must set BOTH the table_name and the db_path namespace variables to run Read()!\"\n\t\traise ValueError\n\ndef GetCols():\n\t\"\"\"\n\tReads the database table (table_name) from db_path and returns a list:\n\t\t0) Column names (unicode)\n\t\t1) Column types (unicode)\n\t\t2) Column defaults (unicode)\n\tNOTE: table_name string formatting NOT secure...\n\tNote that for default values, python will appropriately read the NULL character as None, independent of any .sqliterc settings.\n\t\"\"\"\n\tCheckTable()\n\n\t#arg = table_name,\n\tconn = sqlite3.connect(db_path)\n\twith conn:\n\t\tc = conn.cursor()\n\t\tx = c.execute('PRAGMA table_info('+table_name+')')\n\t\tquery = c.fetchall()\n\n\tcols = []\n\ttypes = []\n\treq = []\n\tfor line in query:\n\t\tcols.append(line[1])\n\t\ttypes.append(line[2])\n\t\treq.append(line[4])\n\n\treturn [cols, types, req]\n\ndef PrintCols():\n\t\"\"\"\n\tPrints the database columns in a user friendly way, and specifies which entries are required vs optional.\n\tAlso presents the user with the indexes for better using GetCols().\n\t\"\"\"\n\tCheckTable()\n\n\tprint \"PrintDBCols():\"\n\tprint \"NOTE: Call GetCols to get the raw data.\"\n\tprint \"The first number on each line is the index number for that column.\"\n\tprint \n\tcols = GetCols()\n\tprint \".........Required Arguments:\"\n\tfor ind,col in enumerate(cols[0]):\n\t\tif (cols[2][ind] == None):\n\t\t\tprint \"%d \" % ind + col + \" \" + cols[1][ind] + \"; DEFAULT = \" + str(cols[2][ind])\n\n\tprint\n\tprint \".........Optional Arguments:\"\n\tfor ind,col in enumerate(cols[0]):\n\t\tif (cols[2][ind] != None):\n\t\t\tprint \"%d \" % ind + col + \" \" + cols[1][ind] + \"; DEFAULT = \" + str(cols[2][ind])\n\t\ndef AddData(**kwargs):\n\t\"\"\"\n\tSetup local variables db_path and table_name first.\n\tPass required kwargs & corresponding data, as specified by PrintDBCols(), as key::value pairs.\n\tBecause sqlite is vulnerable to certaint strings, this function generates the sqlite command in a mostly secure way:\n\t1) It checks that all keys passed to it are present in the database before trying to put them in, and raises a ValueError if there is a mismatch\n\t2) It puts data into the arguments through the c.execute() function [http://docs.python.org/2/library/sqlite3.html]\n\t3) table_name not checked for 
security\n\n\tIt can raise any exceptions that sqlite3 can raise.\n\tIt returns the command to SQL and the argument list passed to c.execute().\n\t\"\"\"\n\tCheckTable()\n\n\tall_cols = GetCols()[0];\n\tdb_args = []\n\targ_list = []\n\tfor key, val in kwargs.iteritems():\n\t\tif any(col == key for col in all_cols):\n\t\t\tdb_args.append(key)\n\t\t\targ_list.append(val)\n\t\telse:\n\t\t\tprint 'AddData(**kwargs): ERROR: Key \"'+key+'\" was present in **kwargs. \"'+key+'\" is not a valid column name'\n\t\t\tprint \n\t\t\traise ValueError\n\tstr_db_args = \", \".join(db_args)\n\tstr_qmarks = \",\".join( ['?'] * len(db_args) )\n\n\tconn = sqlite3.connect(db_path)\n\twith conn:\n\t\tc = conn.cursor()\n\t\tcommand = (\"INSERT INTO \" + table_name + \"(\" + str_db_args + \") \"\n\t\t\t 'VALUES ('+str_qmarks+');' )\n\t#print command\n\t#print arg_list\n\t\tc.execute(command, arg_list)\n\t#print c.fetchone()\n\treturn [command, arg_list]\n\n\ndef ReadData(*args, **kwargs):\n\t\"\"\"\n\tCalls, schematically speaking, 'SELECT *args FROM table_name WHERE **kwargs'\n\tPass *args for the desired columns.\n\tPass **kwargs to specify WHERE options, in the simple format 'key == value'. Value can be a tuple, in which case an \"or\" is placed between tuple options.\n\n\t1) Checks that all **kwarg keys are in the database as columns.\n\t2) Formats **kwarg arguments into database properly.\n\t3) Checks that all *args values are in the database as columns.\n\n\tNote that table_name is still insecure.\n\t\"\"\"\n\tCheckTable()\n\n\tall_cols = GetCols()[0];\n\t#all_cols = ['Col1', 'shoebox']\n\tfor arg_col in args:\n\t\tif not any(col == arg_col for col in all_cols):\n\t\t\tprint \"Column \"+arg_col+\" supplied by *args not valid\"\n\t\t\traise ValueError\n\tcolumn_str = ', '.join(args)\n\t\n\tconstraints_and = []\n\tconstraints_val_list = []\n\tfor key, val_list in kwargs.iteritems():\n\t\tconstraints_or = []\n\t\tif any(col == key for col in all_cols):\n\t\t\tif type(val_list) != tuple and type(val_list) != list:\n\t\t\t\t# If we were not passed a tuple or list, make a tuple so that our object is properly iterable.\n\t\t\t\tval_list = (val_list,)\n\t\t\tfor val in val_list:\n\t\t\t\tconstraints_or.append(key + \"==?\")\n\t\t\t\tconstraints_val_list.append(val)\n\t\t\tconstraints_and.append('(' + \" OR \".join(constraints_or) + ')')\n\n\t\telse:\n\t\t\tprint \"Column \"+key+\" supplied by **kwargs not valid\"\n\t\t\traise ValueError\n\tconstraints = ' AND '.join(constraints_and)\n\t\n\tcommand = \"SELECT \" + column_str + \" FROM \" + table_name + \" WHERE \" + constraints\n\tprint command\n\tprint constraints_val_list\n\n\tconn = sqlite3.connect(db_path)\n\twith conn:\n\t\tc = conn.cursor()\n\t\tx = c.execute(command, constraints_val_list)\n\t\tquery = c.fetchall()\n\treturn query\n\n\n\ndef test():\n\t## test requires a test database...\n\treturn True\n" } ]
1
johnhoskinsjr/FunWithPython
https://github.com/johnhoskinsjr/FunWithPython
8edc2425f17a5f10e50060df4a38f3b35f22aad7
1ae4de0e74c417eb46f466a88733f15379c49dd7
3eec47ec63ca584af8fdb1c877e9dff84bacbcd8
refs/heads/master
2021-01-10T09:19:32.077747
2015-11-19T16:38:25
2015-11-19T16:38:25
46,503,748
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5759905576705933, "alphanum_fraction": 0.5890005826950073, "avg_line_length": 32.156864166259766, "blob_id": "74e08a82464afc8bfac9c5f53f56e28715d8bb04", "content_id": "3e113e51d363eadb454ead71f58862d6d5a29ae5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1691, "license_type": "no_license", "max_line_length": 69, "num_lines": 51, "path": "/SearchPhoneAndEmail.py", "repo_name": "johnhoskinsjr/FunWithPython", "src_encoding": "UTF-8", "text": "#This script is used to find phone numbers and\n#email address in a clipboard paste\n\nimport re, pyperclip\n\n#Regex for phone number\nphoneNumRegex = re.compile(r'''(\n (\\d{3}|\\(d{3}\\))? #Area Code(optional)\n (\\s|-|\\.)? #Seperator(optional)\n (\\d{3}) #First 3 Digits\n (\\s|-|\\.) #Seperator\n (\\d{4}) #Last 4 Digits\n (\\s*(ext|x|ext.)\\s*(\\d{2,5}))? #Extension(optional)\n )''', re.VERBOSE)\n\n#Regex for email address\nemailRegex = re.compile(r'''(\n [a-zA-Z0-9._%+-]+ #Username\n @ \n [a-zA-Z0-9.-]+ #Domain Name\n (\\.[a-zA-Z]{2,4}) #Extension\n )''', re.VERBOSE)\n\n#Assign text in clipboard to a variable for Regex search\ntext = str(pyperclip.paste())\n#Declare list for holding search results\nmatches = []\n\n#For loop that adds all phone numbers to the matches list\nfor groups in phoneNumRegex.findall(text):\n #join the 3 groups of numbers together and seperate with a hyphen\n phoneNum = '-'.join([groups[1], groups[3], groups[5]])\n \n #If there is a phone extention line\n if groups[8] != '':\n phoneNum += ' x' + groups[8]\n \n #append the results to our matches list\n matches.append(phoneNum)\n\n#For loop that appends all found email addresses to the matches list\nfor groups in emailRegex.findall(text):\n matches.append(groups[0])\n\n#Print results of Regex search and copys result back to clipboard\nif len(matches) > 0:\n pyperclip.copy('\\n'.join(matches))\n print('Copied to clipboard: ')\n print('\\n'.join(matches))\nelse:\n print('No phone numbers or email addresses found.')\n" } ]
1
VyachAp/Echo-service
https://github.com/VyachAp/Echo-service
0e8ca2dece0e300fde54c93565957446e98bc82f
09176abc993ea141666308654ce4f2a03f817f82
f006d593fed666ccbe0ea3c73b2824cfab9807e8
refs/heads/master
2022-02-02T09:39:15.659932
2019-06-07T10:53:44
2019-06-07T10:53:44
190,032,027
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.4677419364452362, "alphanum_fraction": 0.6774193644523621, "avg_line_length": 14.5, "blob_id": "6e418e1e58e1006e358b5c198e2972b4445ffe0b", "content_id": "dd0b355a241abccb8514a3a859b051bd3ec4c4c7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 62, "license_type": "no_license", "max_line_length": 16, "num_lines": 4, "path": "/requirements.txt", "repo_name": "VyachAp/Echo-service", "src_encoding": "UTF-8", "text": "aioftp==0.13.0\naiohttp==3.5.4\naiojobs==0.2.2\nmultidict==4.5.2\n" }, { "alpha_fraction": 0.5903075337409973, "alphanum_fraction": 0.5943946838378906, "avg_line_length": 27.70391082763672, "blob_id": "bf78dc83013ca1e50c99d285f0ec84b0f120b552", "content_id": "b6fba6ab56bb173fc3ab24aae9b239df209f837f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5138, "license_type": "no_license", "max_line_length": 136, "num_lines": 179, "path": "/Pipeline.py", "repo_name": "VyachAp/Echo-service", "src_encoding": "UTF-8", "text": "from datetime import datetime\nimport aioftp\nimport os\nfrom aiohttp import web\nfrom pathlib import Path\nimport asyncio\nimport aiojobs\nfrom aiojobs.aiohttp import setup, spawn\nfrom multidict import MultiDict\nimport logging\n\nlogging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')\n\nhost = \"localhost\"\nftp_user = \"bob\"\nftp_password = \"12345\"\nfolder = Path().absolute()\nstate = {\n 'status': 'IDLE',\n 'error': None,\n 'image_tag' : None\n }\n\nroutes = web.RouteTableDef()\n\n\npwd = os.path.dirname(os.path.abspath(__file__))\n\n\n# Loading block\n# async def upload_files():\n# files_path = '/home/vyachap/Test Files/'\n# walk = os.walk(files_path)\n# file_list = []\n# for address, dirs, files in walk:\n# file_list = files\n# for file in file_list:\n# f = open(files_path + file, \"rb\")\n# con.storbinary(\"STOR \" + file, f)\n\n\n# Downloading block\nasync def download_csv():\n global state\n error = None\n state['status'] = \"Files downloading\"\n try:\n async with aioftp.ClientSession(host=host, user=ftp_user, password=ftp_password) as client:\n for path, info in (await client.list()):\n try:\n await client.download(path, destination=f\"{pwd}/r-driveproject/src/data/\" + path.name,\n write_into=True)\n except Exception as e:\n state['status'] = 'FAILED'\n state['error'] = e\n error = 'CSV download failed'\n\n except Exception as exc:\n state['status'] = 'FAILED'\n state['error'] = exc\n error = 'Something went wrong with connection to FTP'\n\n return error\n\n\nasync def model_training():\n global state\n state['status'] = \"Model training\"\n error = None\n r_script = await asyncio.create_subprocess_shell(f'Rscript {pwd}/estimate_models.R '\n f'--booked {pwd}/md.csv '\n f'--cogs {pwd}/mdcogs.csv '\n f'--output {pwd}/r-driveproject/src/data/',\n stdout=asyncio.subprocess.PIPE,\n stderr=asyncio.subprocess.PIPE)\n stdout, stderr = await r_script.communicate()\n\n if r_script.returncode != 0:\n state['status'] = 'FAILED'\n state['error'] = stderr.decode()\n error = stderr.decode()\n\n return error\n\n\nasync def build_image():\n global state\n state['status'] = \"Image building\"\n error = None\n tag = datetime.now().strftime(\"%Y%m%d%H%M\")\n state['image_tag'] = tag\n image = await asyncio.create_subprocess_shell(f'docker build -t us.gcr.io/synapse-157713/r-demo-project:{tag} {pwd}/r-driveproject')\n stdout, stderr = await image.communicate()\n\n if image.returncode != 0:\n 
state['status'] = 'FAILED'\n        state['error'] = stderr.decode()\n        error = stderr.decode()\n\n    return error\n\n\nasync def upload_image():\n    global state\n    state['status'] = \"Pushing image to Google Cloud Registry\"\n    error = None\n    tag = state['image_tag']\n    image_push = await asyncio.create_subprocess_shell(f'gcloud docker -- push us.gcr.io/synapse-157713/r-demo-project:{tag}')\n    stdout, stderr = await image_push.communicate()\n\n    if image_push.returncode != 0:\n        state['status'] = 'FAILED'\n        state['error'] = stderr.decode()\n        error = stderr.decode()\n\n    return error\n\n\nasync def start():\n    global state\n    state['error'] = None\n    pipeline = [\n        # download_csv,\n        # model_training,\n        build_image,\n        upload_image\n    ]\n    for func in pipeline:\n        if await func() is not None:\n            return\n\n    state['status'] = 'IDLE'\n    # upload_csv()\n    # upload_model()\n\n\nasync def file_upload_handler(request):\n    data = await request.post()\n    script = data['script']\n    filename = script.filename\n    script_file = data['script'].file\n    content = script_file.read()\n    # header values must be strings, not file objects\n    return web.Response(body=content, headers=MultiDict({'CONTENT-DISPOSITION': 'attachment; filename=\"{}\"'.format(filename)}))\n\n\n@routes.get('/')\nasync def handler(request):\n    return web.Response(text='OK')\n\n\n@routes.post('/training')\nasync def training_endpoint(request):\n    # model_training() reports failures through its return value, not by raising\n    error = await model_training()\n    if error is not None:\n        return web.Response(text=\"R-model training failed. Reason: {}\".format(error), status=500)\n    return web.Response(text=\"R-model trained successfully\", status=200)\n\n\n@routes.get('/start_pipeline')\nasync def start_pipeline(request):\n    scheduler = await aiojobs.create_scheduler()\n    await scheduler.spawn(start())\n    return web.Response(text=\"Pipeline started\")\n\n\n@routes.get('/status')\nasync def status_endpoint(request):\n    global state\n    if state['error']:\n        return web.Response(text=\" Status: {}.\\n Error: {}\".format(state['status'], state['error']))\n    else:\n        return web.Response(text=\" Status: {}.\".format(state['status']))\n\n\napp = web.Application()\napp.add_routes(routes)\nsetup(app)\nweb.run_app(app)\n" }, { "alpha_fraction": 0.6743515729904175, "alphanum_fraction": 0.7031700015068054, "avg_line_length": 30.545454025268555, "blob_id": "2609d6486fe9b1467cb2b141522f2f3e76eb50f0", "content_id": "9750089f9e9590394b3e6734fa8044e404166169", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Dockerfile", "length_bytes": 347, "license_type": "no_license", "max_line_length": 55, "num_lines": 11, "path": "/Dockerfile", "repo_name": "VyachAp/Echo-service", "src_encoding": "UTF-8", "text": "FROM r-base\nCOPY . 
/app\nWORKDIR /app\n# system libraries must be installed at build time (RUN, not CMD), before the R packages that compile against them\nRUN apt-get update && apt-get install -y libssl-dev libcurl4-gnutls-dev\nRUN R -e \"install.packages('plumber')\"\nRUN R -e \"install.packages('uuid')\"\nRUN R -e \"install.packages('lme4', version = '1.1-19')\"\nRUN R -e \"install.packages('dplyr')\"\nRUN R -e \"install.packages('devtools')\"\nRUN R -e \"install.packages('scales')\"\nEXPOSE 5000\n" }, { "alpha_fraction": 0.6308080554008484, "alphanum_fraction": 0.6398990154266357, "avg_line_length": 38.599998474121094, "blob_id": "bc937b1eb721e b2878d89ef177fb7640347e8bb0", "content_id": "03125a45b6cf7ee3a7a3c750e61f1c74246d2343", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "R", "length_bytes": 1980, "license_type": "no_license", "max_line_length": 116, "num_lines": 50, "path": "/estimate_models.R", "repo_name": "VyachAp/Echo-service", "src_encoding": "UTF-8", "text": "# the models require the lme4 package, version 1.1-19\n# require(devtools)\n# install_version(\"lme4\", version = \"1.1-19\", repos = \"http://cran.us.r-project.org\")\nlibrary(lme4);\nlibrary(optparse)\n\noption_list = list(\n  make_option(c(\"--booked\"), type=\"character\", default=NULL, \n              help=\"Dataset for model probability of booking a truck (filepath)\", metavar=\"character\"),\n  make_option(c(\"--cogs\"), type=\"character\", default=NULL, \n              help=\"Dataset for model buy cost given successful booking (filepath)\", metavar=\"character\"),\n  make_option(c(\"--output\"), type=\"character\", default='./', \n              help=\"Path to directory for saving models (filepath)\", metavar=\"character\")\n); \n\nopt_parser = OptionParser(option_list=option_list);\nopt = parse_args(opt_parser);\nif (substr(opt$output, nchar(opt$output), nchar(opt$output)) != '/') {\n  opt$output = paste(opt$output, '/', sep='')\n}\n\nif (is.null(opt$booked)){\n  print_help(opt_parser)\n  stop(\"--booked argument must be supplied\", call.=FALSE)\n}\nif (is.null(opt$cogs)){\n  print_help(opt_parser)\n  stop(\"--cogs argument must be supplied\", call.=FALSE)\n}\n\ndf_booked <- read.csv(file=opt$booked, header=TRUE, sep=\",\")\ndf_cogs <- read.csv(file=opt$cogs, header=TRUE, sep=\",\")\n\n#model probability of booking a truck\nmod_booked <- glmer(Booked ~ (1|AssignmentType) + (1|RepOriginMarketId) + (1|RepDestinationMarketId) + (1|RepLaneId)\n                    , data=df_booked\n                    , family=\"binomial\"\n                    , nAGQ = 0\n                    , control=glmerControl(optimizer=\"nloptwrap\", calc.derivs = FALSE))\n\nsave(mod_booked,file=paste(opt$output, \"mod_booked.Rda\", sep=''))\n\n#model buy cost given successful booking\nmod_cogs <- \n  lmer(logCOGS ~ (1 + logDAT + logMiles|LaneId) + (1|RepLaneId)\n       , data=df_cogs\n       , control=lmerControl(optimizer=\"nloptwrap\", calc.derivs = FALSE)\n       , weights = Wt)\n\nsave(mod_cogs,file=paste(opt$output, \"mod_cogs.Rda\", sep=''))\n" } ]
4
visi1/python
https://github.com/visi1/python
63a3eec13732d4e108d5e8009e3d7ae1dc640bae
e0a04ef2a3420412058638339dca3a3de19a22ec
34b3167b2f924068f16935817985b29bb1b24ad5
refs/heads/master
2021-09-01T08:06:23.155003
2017-12-25T23:02:51
2017-12-25T23:02:51
115,367,360
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.726190447807312, "alphanum_fraction": 0.7301587462425232, "avg_line_length": 35, "blob_id": "5b2511135eab8b1db734de8a22fe57f14d879171", "content_id": "b3a270131a01a967465bb69e4d21a3c45e96ef8e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 252, "license_type": "no_license", "max_line_length": 63, "num_lines": 7, "path": "/new.py", "repo_name": "visi1/python", "src_encoding": "UTF-8", "text": "import sys\n\nprint(sys.version_info)\nprint('int is object: ', isinstance(3, object))\nprint('True is object: ', isinstance(True, object))\nprint('None is object: ', isinstance(None, object))\nprint('isinstance is object: ', isinstance(isinstance, object))\n" } ]
1